@@ -58,8 +58,9 @@
 #define THREAD_STACK_MAGIC 0xbaadf00d
 
 /* Wait channel. */
-struct wchan {
-	const char *wc_name;		/* name for this channel */
+struct wchan
+{
+	const char * wc_name;		/* name for this channel */
 	struct threadlist wc_threads;	/* list of waiting threads */
 	struct spinlock wc_lock;	/* lock for mutual exclusion */
 };
@@ -81,7 +82,7 @@ static struct semaphore *cpu_startup_sem;
  */
 static
 void
-thread_checkstack_init(struct thread *thread)
+thread_checkstack_init(struct thread * thread)
 {
 	((uint32_t *)thread->t_stack)[0] = THREAD_STACK_MAGIC;
 	((uint32_t *)thread->t_stack)[1] = THREAD_STACK_MAGIC;
@@ -99,9 +100,7 @@ thread_checkstack_init(struct thread *thread)
  * cannot be freed (which in turn is the case if the stack is the boot
  * stack, and the thread is the boot thread) this doesn't do anything.
  */
-static
-void
-thread_checkstack(struct thread *thread)
+static void thread_checkstack(struct thread * thread)
 {
 	if (thread->t_stack != NULL) {
 		KASSERT(((uint32_t*)thread->t_stack)[0] == THREAD_STACK_MAGIC);
@@ -115,21 +114,21 @@ thread_checkstack(struct thread *thread)
  * Create a thread. This is used both to create a first thread
  * for each CPU and to create subsequent forked threads.
  */
-static
-struct thread *
-thread_create(const char *name)
+static struct thread * thread_create(const char * name)
 {
-	struct thread *thread;
+	struct thread * thread;
 
 	DEBUGASSERT(name != NULL);
 
 	thread = kmalloc(sizeof(*thread));
-	if (thread == NULL) {
+	if (thread == NULL)
+	{
 		return NULL;
 	}
 
 	thread->t_name = kstrdup(name);
-	if (thread->t_name == NULL) {
+	if (thread->t_name == NULL)
+	{
 		kfree(thread);
 		return NULL;
 	}
@@ -162,15 +161,15 @@ thread_create(const char *name)
  * board config or whatnot) is tracked separately because it is not
  * necessarily anything sane or meaningful.
  */
-struct cpu *
-cpu_create(unsigned hardware_number)
+struct cpu * cpu_create(unsigned hardware_number)
 {
-	struct cpu *c;
+	struct cpu * c;
 	int result;
 	char namebuf[16];
 
 	c = kmalloc(sizeof(*c));
-	if (c == NULL) {
+	if (c == NULL)
+	{
 		panic("cpu_create: Out of memory\n");
 	}
 
@@ -190,21 +189,25 @@ cpu_create(unsigned hardware_number)
 	spinlock_init(&c->c_ipi_lock);
 
 	result = cpuarray_add(&allcpus, c, &c->c_number);
-	if (result != 0) {
+	if (result != 0)
+	{
 		panic("cpu_create: array_add: %s\n", strerror(result));
 	}
 
 	snprintf(namebuf, sizeof(namebuf), "<boot #%d>", c->c_number);
 	c->c_curthread = thread_create(namebuf);
-	if (c->c_curthread == NULL) {
+	if (c->c_curthread == NULL)
+	{
 		panic("cpu_create: thread_create failed\n");
 	}
 	result = proc_addthread(kproc, c->c_curthread);
-	if (result) {
+	if (result)
+	{
 		panic("cpu_create: proc_addthread:: %s\n", strerror(result));
 	}
 
-	if (c->c_number == 0) {
+	if (c->c_number == 0)
+	{
 		/*
 		 * Leave c->c_curthread->t_stack NULL for the boot
 		 * cpu. This means we're using the boot stack, which
@@ -213,9 +216,11 @@ cpu_create(unsigned hardware_number)
 	 */
 	/*c->c_curthread->t_stack = ... */
 	}
-	else {
+	else
+	{
 		c->c_curthread->t_stack = kmalloc(STACK_SIZE);
-		if (c->c_curthread->t_stack == NULL) {
+		if (c->c_curthread->t_stack == NULL)
+		{
 			panic("cpu_create: couldn't allocate stack");
 		}
 		thread_checkstack_init(c->c_curthread);
@@ -235,9 +240,7 @@ cpu_create(unsigned hardware_number)
  *
  * (Freeing the stack you're actually using to run is ... inadvisable.)
  */
-static
-void
-thread_destroy(struct thread *thread)
+static void thread_destroy(struct thread * thread)
 {
 	KASSERT(thread != curthread);
 	KASSERT(thread->t_state != S_RUN);
@@ -249,7 +252,8 @@ thread_destroy(struct thread *thread)
 
 	/* Thread subsystem fields */
 	KASSERT(thread->t_proc == NULL);
-	if (thread->t_stack != NULL) {
+	if (thread->t_stack != NULL)
+	{
 		kfree(thread->t_stack);
 	}
 	threadlistnode_cleanup(&thread->t_listnode);
@@ -268,13 +272,12 @@ thread_destroy(struct thread *thread)
  *
  * The list of zombies is per-cpu.
  */
-static
-void
-exorcise(void)
+static void exorcise(void)
 {
-	struct thread *z;
+	struct thread * z;
 
-	while ((z = threadlist_remhead(&curcpu->c_zombies)) != NULL) {
+	while ((z = threadlist_remhead(&curcpu->c_zombies)) != NULL)
+	{
 		KASSERT(z != curthread);
 		KASSERT(z->t_state == S_ZOMBIE);
 		thread_destroy(z);
@@ -286,8 +289,7 @@ exorcise(void)
  * possible) to make sure we don't end up letting any other threads
  * run.
  */
-void
-thread_panic(void)
+void thread_panic(void)
 {
 	/*
 	 * Kill off other CPUs.
@@ -328,8 +330,7 @@ thread_panic(void)
 /*
  * At system shutdown, ask the other CPUs to switch off.
  */
-void
-thread_shutdown(void)
+void thread_shutdown(void)
 {
 	/*
 	 * Stop the other CPUs.
@@ -343,11 +344,10 @@ thread_shutdown(void)
 /*
  * Thread system initialization.
  */
-void
-thread_bootstrap(void)
+void thread_bootstrap(void)
 {
-	struct cpu *bootcpu;
-	struct thread *bootthread;
+	struct cpu * bootcpu;
+	struct thread * bootthread;
 
 	cpuarray_init(&allcpus);
 
@@ -391,8 +391,7 @@ thread_bootstrap(void)
  * to do anything. The startup thread can just exit; we only need it
  * to be able to get into thread_switch() properly.
  */
-void
-cpu_hatch(unsigned software_number)
+void cpu_hatch(unsigned software_number)
 {
 	KASSERT(curcpu != NULL);
 	KASSERT(curthread != NULL);
@@ -409,8 +408,7 @@ cpu_hatch(unsigned software_number)
 /*
  * Start up secondary cpus. Called from boot().
  */
-void
-thread_start_cpus(void)
+void thread_start_cpus(void)
 {
 	unsigned i;
 
@@ -419,7 +417,8 @@ thread_start_cpus(void)
 	cpu_startup_sem = sem_create("cpu_hatch", 0);
 	mainbus_start_cpus();
 
-	for (i=0; i<cpuarray_num(&allcpus) - 1; i++) {
+	for (i=0; i<cpuarray_num(&allcpus) - 1; i++)
+	{
 		P(cpu_startup_sem);
 	}
 	sem_destroy(cpu_startup_sem);
@@ -429,29 +428,30 @@ thread_start_cpus(void)
 /*
  * Make a thread runnable.
  *
- * targetcpu might be curcpu; it might not be, too.
+ * targetcpu might be curcpu; it might not be, too.
  */
-static
-void
-thread_make_runnable(struct thread *target, bool already_have_lock)
+static void thread_make_runnable(struct thread * target, bool already_have_lock)
 {
-	struct cpu *targetcpu;
+	struct cpu * targetcpu;
 	bool isidle;
 
 	/* Lock the run queue of the target thread's cpu. */
 	targetcpu = target->t_cpu;
 
-	if (already_have_lock) {
+	if (already_have_lock)
+	{
 		/* The target thread's cpu should be already locked. */
 		KASSERT(spinlock_do_i_hold(&targetcpu->c_runqueue_lock));
 	}
-	else {
+	else
+	{
 		spinlock_acquire(&targetcpu->c_runqueue_lock);
 	}
 
 	isidle = targetcpu->c_isidle;
 	threadlist_addtail(&targetcpu->c_runqueue, target);
-	if (isidle) {
+	if (isidle)
+	{
 		/*
 		 * Other processor is idle; send interrupt to make
 		 * sure it unidles.
@@ -459,7 +459,8 @@ thread_make_runnable(struct thread *target, bool already_have_lock)
 		ipi_send(targetcpu, IPI_UNIDLE);
 	}
 
-	if (!already_have_lock) {
+	if (!already_have_lock)
+	{
 		spinlock_release(&targetcpu->c_runqueue_lock);
 	}
 }
@@ -787,7 +788,7 @@ thread_exit(void)
 #ifdef UW
 	/* threads for user processes should have detached from their process
 	   in sys__exit */
-  KASSERT(curproc == kproc || curproc == NULL);
+  KASSERT(curproc == kproc || curproc == NULL);
 	/* kernel threads don't go through sys__exit, so we detach them from kproc here */
 	if (curproc == kproc) {
 	  proc_remthread(cur);
|