diff --git a/.gitignore b/.gitignore index a32ce6a..64acb72 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,3 @@ kern/compile/ -build/ +build/* +**/build diff --git a/kern/include/spl.h b/kern/include/spl.h index 2a8d0e3..fe966ce 100644 --- a/kern/include/spl.h +++ b/kern/include/spl.h @@ -74,8 +74,8 @@ int splx(int); /* * Integer interrupt priority levels. */ -#define IPL_NONE 0 -#define IPL_HIGH 1 +#define IPL_NONE 0 +#define IPL_HIGH 1 /* * Lower-level functions for explicitly raising and lowering @@ -92,18 +92,9 @@ void spllower(int oldipl, int newipl); //////////////////////////////////////////////////////////// SPL_INLINE -int -spl0(void) -{ - return splx(IPL_NONE); -} +int spl0(void) { return splx(IPL_NONE); } SPL_INLINE -int -splhigh(void) -{ - return splx(IPL_HIGH); -} - +int splhigh(void) { return splx(IPL_HIGH); } #endif /* _SPL_H_ */ diff --git a/kern/include/synch.h b/kern/include/synch.h index 0431cd3..b1e8d50 100644 --- a/kern/include/synch.h +++ b/kern/include/synch.h @@ -34,7 +34,6 @@ * Header file for synchronization primitives. */ - #include /* @@ -44,10 +43,10 @@ * internally. */ struct semaphore { - char *sem_name; - struct wchan *sem_wchan; - struct spinlock sem_lock; - volatile unsigned sem_count; + char *sem_name; + struct wchan *sem_wchan; + struct spinlock sem_lock; + volatile unsigned sem_count; }; struct semaphore *sem_create(const char *name, unsigned initial_count); @@ -62,7 +61,6 @@ void sem_destroy(struct semaphore *); void P(struct semaphore *); void V(struct semaphore *); - /* * Simple lock for mutual exclusion. * @@ -73,10 +71,10 @@ void V(struct semaphore *); * (should be) made internally. */ struct lock { - char *lk_name; - HANGMAN_LOCKABLE(lk_hangman); /* Deadlock detector hook. */ - // add what you need here - // (don't forget to mark things volatile as needed) + char *lk_name; + HANGMAN_LOCKABLE(lk_hangman); /* Deadlock detector hook. */ + // add what you need here + // (don't forget to mark things volatile as needed) }; struct lock *lock_create(const char *name); @@ -97,7 +95,6 @@ void lock_acquire(struct lock *); void lock_release(struct lock *); bool lock_do_i_hold(struct lock *); - /* * Condition variable. * @@ -113,9 +110,9 @@ bool lock_do_i_hold(struct lock *); */ struct cv { - char *cv_name; - // add what you need here - // (don't forget to mark things volatile as needed) + char *cv_name; + // add what you need here + // (don't forget to mark things volatile as needed) }; struct cv *cv_create(const char *name); @@ -138,5 +135,4 @@ void cv_wait(struct cv *cv, struct lock *lock); void cv_signal(struct cv *cv, struct lock *lock); void cv_broadcast(struct cv *cv, struct lock *lock); - #endif /* _SYNCH_H_ */ diff --git a/kern/lib/kprintf.c b/kern/lib/kprintf.c index 3adc7a3..b2ae049 100644 --- a/kern/lib/kprintf.c +++ b/kern/lib/kprintf.c @@ -37,10 +37,9 @@ #include #include #include -#include // for vfs_sync() +#include // for vfs_sync() #include // for ltrace_stop() - /* Flags word for DEBUG() macro. */ uint32_t dbflags = 0; @@ -50,79 +49,66 @@ static struct lock *kprintf_lock; /* Lock for polled kprintfs */ static struct spinlock kprintf_spinlock; - /* * Warning: all this has to work from interrupt handlers and when * interrupts are disabled. */ - /* * Create the kprintf lock. Must be called before creating a second * thread or enabling a second CPU. 
*/ -void -kprintf_bootstrap(void) -{ - KASSERT(kprintf_lock == NULL); +void kprintf_bootstrap(void) { + KASSERT(kprintf_lock == NULL); - kprintf_lock = lock_create("kprintf_lock"); - if (kprintf_lock == NULL) { - panic("Could not create kprintf_lock\n"); - } - spinlock_init(&kprintf_spinlock); + kprintf_lock = lock_create("kprintf_lock"); + if (kprintf_lock == NULL) { + panic("Could not create kprintf_lock\n"); + } + spinlock_init(&kprintf_spinlock); } /* * Send characters to the console. Backend for __printf. */ -static -void -console_send(void *junk, const char *data, size_t len) -{ - size_t i; +static void console_send(void *junk, const char *data, size_t len) { + size_t i; - (void)junk; + (void)junk; - for (i=0; it_in_interrupt == false - && curthread->t_curspl == 0 - && curcpu->c_spinlocks == 0; + dolock = kprintf_lock != NULL && curthread->t_in_interrupt == false && + curthread->t_curspl == 0 && curcpu->c_spinlocks == 0; - if (dolock) { - lock_acquire(kprintf_lock); - } - else { - spinlock_acquire(&kprintf_spinlock); - } + if (dolock) { + lock_acquire(kprintf_lock); + } else { + spinlock_acquire(&kprintf_spinlock); + } - va_start(ap, fmt); - chars = __vprintf(console_send, NULL, fmt, ap); - va_end(ap); + va_start(ap, fmt); + chars = __vprintf(console_send, NULL, fmt, ap); + va_end(ap); - if (dolock) { - lock_release(kprintf_lock); - } - else { - spinlock_release(&kprintf_spinlock); - } + if (dolock) { + lock_release(kprintf_lock); + } else { + spinlock_release(&kprintf_spinlock); + } - return chars; + return chars; } /* @@ -130,87 +116,83 @@ kprintf(const char *fmt, ...) * passed and then halts the system. */ -void -panic(const char *fmt, ...) -{ - va_list ap; +void panic(const char *fmt, ...) { + va_list ap; - /* - * When we reach panic, the system is usually fairly screwed up. - * It's not entirely uncommon for anything else we try to do - * here to trigger more panics. - * - * This variable makes sure that if we try to do something here, - * and it causes another panic, *that* panic doesn't try again; - * trying again almost inevitably causes infinite recursion. - * - * This is not excessively paranoid - these things DO happen! - */ - static volatile int evil; + /* + * When we reach panic, the system is usually fairly screwed up. + * It's not entirely uncommon for anything else we try to do + * here to trigger more panics. + * + * This variable makes sure that if we try to do something here, + * and it causes another panic, *that* panic doesn't try again; + * trying again almost inevitably causes infinite recursion. + * + * This is not excessively paranoid - these things DO happen! + */ + static volatile int evil; - if (evil == 0) { - evil = 1; + if (evil == 0) { + evil = 1; - /* - * Not only do we not want to be interrupted while - * panicking, but we also want the console to be - * printing in polling mode so as not to do context - * switches. So turn interrupts off on this CPU. - */ - splhigh(); - } + /* + * Not only do we not want to be interrupted while + * panicking, but we also want the console to be + * printing in polling mode so as not to do context + * switches. So turn interrupts off on this CPU. + */ + splhigh(); + } - if (evil == 1) { - evil = 2; + if (evil == 1) { + evil = 2; - /* Kill off other threads and halt other CPUs. */ - thread_panic(); - } + /* Kill off other threads and halt other CPUs. */ + thread_panic(); + } - if (evil == 2) { - evil = 3; + if (evil == 2) { + evil = 3; - /* Print the message. 
*/ - kprintf("panic: "); - va_start(ap, fmt); - __vprintf(console_send, NULL, fmt, ap); - va_end(ap); - } + /* Print the message. */ + kprintf("panic: "); + va_start(ap, fmt); + __vprintf(console_send, NULL, fmt, ap); + va_end(ap); + } - if (evil == 3) { - evil = 4; + if (evil == 3) { + evil = 4; - /* Drop to the debugger. */ - ltrace_stop(0); - } + /* Drop to the debugger. */ + ltrace_stop(0); + } - if (evil == 4) { - evil = 5; + if (evil == 4) { + evil = 5; - /* Try to sync the disks. */ - vfs_sync(); - } + /* Try to sync the disks. */ + vfs_sync(); + } - if (evil == 5) { - evil = 6; + if (evil == 5) { + evil = 6; - /* Shut down or reboot the system. */ - mainbus_panic(); - } + /* Shut down or reboot the system. */ + mainbus_panic(); + } - /* - * Last resort, just in case. - */ + /* + * Last resort, just in case. + */ - for (;;); + for (;;) + ; } /* * Assertion failures go through this. */ -void -badassert(const char *expr, const char *file, int line, const char *func) -{ - panic("Assertion failed: %s, at %s:%d (%s)\n", - expr, file, line, func); +void badassert(const char *expr, const char *file, int line, const char *func) { + panic("Assertion failed: %s, at %s:%d (%s)\n", expr, file, line, func); } diff --git a/kern/main/main.c b/kern/main/main.c index d095480..0061ecb 100644 --- a/kern/main/main.c +++ b/kern/main/main.c @@ -49,8 +49,7 @@ #include #include #include -#include "autoconf.h" // for pseudoconfig - +#include "autoconf.h" // for pseudoconfig /* * These two pieces of data are maintained by the makefiles and build system. @@ -71,92 +70,85 @@ static const char harvard_copyright[] = "Copyright (c) 2000, 2001-2005, 2008-2011, 2013, 2014\n" " President and Fellows of Harvard College. All rights reserved.\n"; - /* * Initial boot sequence. */ -static -void -boot(void) -{ - /* - * The order of these is important! - * Don't go changing it without thinking about the consequences. - * - * Among other things, be aware that console output gets - * buffered up at first and does not actually appear until - * mainbus_bootstrap() attaches the console device. This can - * be remarkably confusing if a bug occurs at this point. So - * don't put new code before mainbus_bootstrap if you don't - * absolutely have to. - * - * Also note that the buffer for this is only 1k. If you - * overflow it, the system will crash without printing - * anything at all. You can make it larger though (it's in - * dev/generic/console.c). - */ +static void boot(void) { + /* + * The order of these is important! + * Don't go changing it without thinking about the consequences. + * + * Among other things, be aware that console output gets + * buffered up at first and does not actually appear until + * mainbus_bootstrap() attaches the console device. This can + * be remarkably confusing if a bug occurs at this point. So + * don't put new code before mainbus_bootstrap if you don't + * absolutely have to. + * + * Also note that the buffer for this is only 1k. If you + * overflow it, the system will crash without printing + * anything at all. You can make it larger though (it's in + * dev/generic/console.c). 
+ */ - kprintf("\n"); - kprintf("OS/161 base system version %s\n", BASE_VERSION); - kprintf("%s", harvard_copyright); - kprintf("\n"); + kprintf("\n"); + kprintf("OS/161 base system version %s\n", BASE_VERSION); + kprintf("%s", harvard_copyright); + kprintf("\n"); - kprintf("Put-your-group-name-here's system version %s (%s #%d)\n", - GROUP_VERSION, buildconfig, buildversion); - kprintf("\n"); + kprintf("Minh Tran's system version %s (%s #%d)\n", GROUP_VERSION, + buildconfig, buildversion); + kprintf("\n"); - /* Early initialization. */ - ram_bootstrap(); - proc_bootstrap(); - thread_bootstrap(); - hardclock_bootstrap(); - vfs_bootstrap(); - kheap_nextgeneration(); + /* Early initialization. */ + ram_bootstrap(); + proc_bootstrap(); + thread_bootstrap(); + hardclock_bootstrap(); + vfs_bootstrap(); + kheap_nextgeneration(); - /* Probe and initialize devices. Interrupts should come on. */ - kprintf("Device probe...\n"); - KASSERT(curthread->t_curspl > 0); - mainbus_bootstrap(); - KASSERT(curthread->t_curspl == 0); - /* Now do pseudo-devices. */ - pseudoconfig(); - kprintf("\n"); - kheap_nextgeneration(); + /* Probe and initialize devices. Interrupts should come on. */ + kprintf("Device probe...\n"); + KASSERT(curthread->t_curspl > 0); + mainbus_bootstrap(); + KASSERT(curthread->t_curspl == 0); + /* Now do pseudo-devices. */ + pseudoconfig(); + kprintf("\n"); + kheap_nextgeneration(); - /* Late phase of initialization. */ - vm_bootstrap(); - kprintf_bootstrap(); - thread_start_cpus(); + /* Late phase of initialization. */ + vm_bootstrap(); + kprintf_bootstrap(); + thread_start_cpus(); - /* Default bootfs - but ignore failure, in case emu0 doesn't exist */ - vfs_setbootfs("emu0"); + /* Default bootfs - but ignore failure, in case emu0 doesn't exist */ + vfs_setbootfs("emu0"); - kheap_nextgeneration(); + kheap_nextgeneration(); - /* - * Make sure various things aren't screwed up. - */ - COMPILE_ASSERT(sizeof(userptr_t) == sizeof(char *)); - COMPILE_ASSERT(sizeof(*(userptr_t)0) == sizeof(char)); + /* + * Make sure various things aren't screwed up. + */ + COMPILE_ASSERT(sizeof(userptr_t) == sizeof(char *)); + COMPILE_ASSERT(sizeof(*(userptr_t)0) == sizeof(char)); } /* * Shutdown sequence. Opposite to boot(). */ -static -void -shutdown(void) -{ +static void shutdown(void) { - kprintf("Shutting down.\n"); + kprintf("Shutting down.\n"); - vfs_clearbootfs(); - vfs_clearcurdir(); - vfs_unmountall(); + vfs_clearbootfs(); + vfs_clearcurdir(); + vfs_unmountall(); - thread_shutdown(); + thread_shutdown(); - splhigh(); + splhigh(); } /*****************************************/ @@ -168,49 +160,45 @@ shutdown(void) * not because this is where system call code should go. Other syscall * code should probably live in the "syscall" directory. 
*/ -int -sys_reboot(int code) -{ - switch (code) { - case RB_REBOOT: - case RB_HALT: - case RB_POWEROFF: - break; - default: - return EINVAL; - } +int sys_reboot(int code) { + switch (code) { + case RB_REBOOT: + case RB_HALT: + case RB_POWEROFF: + break; + default: + return EINVAL; + } - shutdown(); + shutdown(); - switch (code) { - case RB_HALT: - kprintf("The system is halted.\n"); - mainbus_halt(); - break; - case RB_REBOOT: - kprintf("Rebooting...\n"); - mainbus_reboot(); - break; - case RB_POWEROFF: - kprintf("The system is halted.\n"); - mainbus_poweroff(); - break; - } + switch (code) { + case RB_HALT: + kprintf("The system is halted.\n"); + mainbus_halt(); + break; + case RB_REBOOT: + kprintf("Rebooting...\n"); + mainbus_reboot(); + break; + case RB_POWEROFF: + kprintf("The system is halted.\n"); + mainbus_poweroff(); + break; + } - panic("reboot operation failed\n"); - return 0; + panic("reboot operation failed\n"); + return 0; } /* * Kernel main. Boot up, then fork the menu thread; wait for a reboot * request, and then shut down. */ -void -kmain(char *arguments) -{ - boot(); +void kmain(char *arguments) { + boot(); - menu(arguments); + menu(arguments); - /* Should not get here */ + /* Should not get here */ } diff --git a/kern/thread/synch.c b/kern/thread/synch.c index ad58934..d7cf202 100644 --- a/kern/thread/synch.c +++ b/kern/thread/synch.c @@ -44,226 +44,197 @@ // // Semaphore. -struct semaphore * -sem_create(const char *name, unsigned initial_count) -{ - struct semaphore *sem; +struct semaphore *sem_create(const char *name, unsigned initial_count) { + struct semaphore *sem; - sem = kmalloc(sizeof(*sem)); - if (sem == NULL) { - return NULL; - } + sem = kmalloc(sizeof(*sem)); + if (sem == NULL) { + return NULL; + } - sem->sem_name = kstrdup(name); - if (sem->sem_name == NULL) { - kfree(sem); - return NULL; - } + sem->sem_name = kstrdup(name); + if (sem->sem_name == NULL) { + kfree(sem); + return NULL; + } - sem->sem_wchan = wchan_create(sem->sem_name); - if (sem->sem_wchan == NULL) { - kfree(sem->sem_name); - kfree(sem); - return NULL; - } + sem->sem_wchan = wchan_create(sem->sem_name); + if (sem->sem_wchan == NULL) { + kfree(sem->sem_name); + kfree(sem); + return NULL; + } - spinlock_init(&sem->sem_lock); - sem->sem_count = initial_count; + spinlock_init(&sem->sem_lock); + sem->sem_count = initial_count; - return sem; + return sem; } -void -sem_destroy(struct semaphore *sem) -{ - KASSERT(sem != NULL); +void sem_destroy(struct semaphore *sem) { + KASSERT(sem != NULL); - /* wchan_cleanup will assert if anyone's waiting on it */ - spinlock_cleanup(&sem->sem_lock); - wchan_destroy(sem->sem_wchan); - kfree(sem->sem_name); - kfree(sem); + /* wchan_cleanup will assert if anyone's waiting on it */ + spinlock_cleanup(&sem->sem_lock); + wchan_destroy(sem->sem_wchan); + kfree(sem->sem_name); + kfree(sem); } -void -P(struct semaphore *sem) -{ - KASSERT(sem != NULL); +void P(struct semaphore *sem) { + KASSERT(sem != NULL); - /* - * May not block in an interrupt handler. - * - * For robustness, always check, even if we can actually - * complete the P without blocking. - */ - KASSERT(curthread->t_in_interrupt == false); + /* + * May not block in an interrupt handler. + * + * For robustness, always check, even if we can actually + * complete the P without blocking. + */ + KASSERT(curthread->t_in_interrupt == false); - /* Use the semaphore spinlock to protect the wchan as well. 
*/ - spinlock_acquire(&sem->sem_lock); - while (sem->sem_count == 0) { - /* - * - * Note that we don't maintain strict FIFO ordering of - * threads going through the semaphore; that is, we - * might "get" it on the first try even if other - * threads are waiting. Apparently according to some - * textbooks semaphores must for some reason have - * strict ordering. Too bad. :-) - * - * Exercise: how would you implement strict FIFO - * ordering? - */ - wchan_sleep(sem->sem_wchan, &sem->sem_lock); - } - KASSERT(sem->sem_count > 0); - sem->sem_count--; - spinlock_release(&sem->sem_lock); + /* Use the semaphore spinlock to protect the wchan as well. */ + spinlock_acquire(&sem->sem_lock); + while (sem->sem_count == 0) { + /* + * + * Note that we don't maintain strict FIFO ordering of + * threads going through the semaphore; that is, we + * might "get" it on the first try even if other + * threads are waiting. Apparently according to some + * textbooks semaphores must for some reason have + * strict ordering. Too bad. :-) + * + * Exercise: how would you implement strict FIFO + * ordering? + */ + wchan_sleep(sem->sem_wchan, &sem->sem_lock); + } + KASSERT(sem->sem_count > 0); + sem->sem_count--; + spinlock_release(&sem->sem_lock); } -void -V(struct semaphore *sem) -{ - KASSERT(sem != NULL); +void V(struct semaphore *sem) { + KASSERT(sem != NULL); - spinlock_acquire(&sem->sem_lock); + spinlock_acquire(&sem->sem_lock); - sem->sem_count++; - KASSERT(sem->sem_count > 0); - wchan_wakeone(sem->sem_wchan, &sem->sem_lock); + sem->sem_count++; + KASSERT(sem->sem_count > 0); + wchan_wakeone(sem->sem_wchan, &sem->sem_lock); - spinlock_release(&sem->sem_lock); + spinlock_release(&sem->sem_lock); } //////////////////////////////////////////////////////////// // // Lock. -struct lock * -lock_create(const char *name) -{ - struct lock *lock; +struct lock *lock_create(const char *name) { + struct lock *lock; - lock = kmalloc(sizeof(*lock)); - if (lock == NULL) { - return NULL; - } + lock = kmalloc(sizeof(*lock)); + if (lock == NULL) { + return NULL; + } - lock->lk_name = kstrdup(name); - if (lock->lk_name == NULL) { - kfree(lock); - return NULL; - } + lock->lk_name = kstrdup(name); + if (lock->lk_name == NULL) { + kfree(lock); + return NULL; + } - HANGMAN_LOCKABLEINIT(&lock->lk_hangman, lock->lk_name); + HANGMAN_LOCKABLEINIT(&lock->lk_hangman, lock->lk_name); - // add stuff here as needed + // add stuff here as needed - return lock; + return lock; } -void -lock_destroy(struct lock *lock) -{ - KASSERT(lock != NULL); +void lock_destroy(struct lock *lock) { + KASSERT(lock != NULL); - // add stuff here as needed + // add stuff here as needed - kfree(lock->lk_name); - kfree(lock); + kfree(lock->lk_name); + kfree(lock); } -void -lock_acquire(struct lock *lock) -{ - /* Call this (atomically) before waiting for a lock */ - //HANGMAN_WAIT(&curthread->t_hangman, &lock->lk_hangman); +void lock_acquire(struct lock *lock) { + /* Call this (atomically) before waiting for a lock */ + // HANGMAN_WAIT(&curthread->t_hangman, &lock->lk_hangman); - // Write this + // Write this - (void)lock; // suppress warning until code gets written + (void)lock; // suppress warning until code gets written - /* Call this (atomically) once the lock is acquired */ - //HANGMAN_ACQUIRE(&curthread->t_hangman, &lock->lk_hangman); + /* Call this (atomically) once the lock is acquired */ + // HANGMAN_ACQUIRE(&curthread->t_hangman, &lock->lk_hangman); } -void -lock_release(struct lock *lock) -{ - /* Call this (atomically) when the lock is released */ - 
//HANGMAN_RELEASE(&curthread->t_hangman, &lock->lk_hangman); +void lock_release(struct lock *lock) { + /* Call this (atomically) when the lock is released */ + // HANGMAN_RELEASE(&curthread->t_hangman, &lock->lk_hangman); - // Write this + // Write this - (void)lock; // suppress warning until code gets written + (void)lock; // suppress warning until code gets written } -bool -lock_do_i_hold(struct lock *lock) -{ - // Write this +bool lock_do_i_hold(struct lock *lock) { + // Write this - (void)lock; // suppress warning until code gets written + (void)lock; // suppress warning until code gets written - return true; // dummy until code gets written + return true; // dummy until code gets written } //////////////////////////////////////////////////////////// // // CV +struct cv *cv_create(const char *name) { + struct cv *cv; -struct cv * -cv_create(const char *name) -{ - struct cv *cv; + cv = kmalloc(sizeof(*cv)); + if (cv == NULL) { + return NULL; + } - cv = kmalloc(sizeof(*cv)); - if (cv == NULL) { - return NULL; - } + cv->cv_name = kstrdup(name); + if (cv->cv_name == NULL) { + kfree(cv); + return NULL; + } - cv->cv_name = kstrdup(name); - if (cv->cv_name==NULL) { - kfree(cv); - return NULL; - } + // add stuff here as needed - // add stuff here as needed - - return cv; + return cv; } -void -cv_destroy(struct cv *cv) -{ - KASSERT(cv != NULL); +void cv_destroy(struct cv *cv) { + KASSERT(cv != NULL); - // add stuff here as needed + // add stuff here as needed - kfree(cv->cv_name); - kfree(cv); + kfree(cv->cv_name); + kfree(cv); } -void -cv_wait(struct cv *cv, struct lock *lock) -{ - // Write this - (void)cv; // suppress warning until code gets written - (void)lock; // suppress warning until code gets written +void cv_wait(struct cv *cv, struct lock *lock) { + // Write this + (void)cv; // suppress warning until code gets written + (void)lock; // suppress warning until code gets written } -void -cv_signal(struct cv *cv, struct lock *lock) -{ - // Write this - (void)cv; // suppress warning until code gets written - (void)lock; // suppress warning until code gets written +void cv_signal(struct cv *cv, struct lock *lock) { + // Write this + (void)cv; // suppress warning until code gets written + (void)lock; // suppress warning until code gets written } -void -cv_broadcast(struct cv *cv, struct lock *lock) -{ - // Write this - (void)cv; // suppress warning until code gets written - (void)lock; // suppress warning until code gets written +void cv_broadcast(struct cv *cv, struct lock *lock) { + // Write this + (void)cv; // suppress warning until code gets written + (void)lock; // suppress warning until code gets written } diff --git a/kern/thread/thread.c b/kern/thread/thread.c index e6fe983..647a412 100644 --- a/kern/thread/thread.c +++ b/kern/thread/thread.c @@ -51,14 +51,13 @@ #include #include - /* Magic number used as a guard value on kernel thread stacks. */ #define THREAD_STACK_MAGIC 0xbaadf00d /* Wait channel. A wchan is protected by an associated, passed-in spinlock. */ struct wchan { - const char *wc_name; /* name for this channel */ - struct threadlist wc_threads; /* list of waiting threads */ + const char *wc_name; /* name for this channel */ + struct threadlist wc_threads; /* list of waiting threads */ }; /* Master array of CPUs. */ @@ -76,14 +75,11 @@ static struct semaphore *cpu_startup_sem; * (sometimes) catch kernel stack overflows. Use thread_checkstack() * to test this. 
*/ -static -void -thread_checkstack_init(struct thread *thread) -{ - ((uint32_t *)thread->t_stack)[0] = THREAD_STACK_MAGIC; - ((uint32_t *)thread->t_stack)[1] = THREAD_STACK_MAGIC; - ((uint32_t *)thread->t_stack)[2] = THREAD_STACK_MAGIC; - ((uint32_t *)thread->t_stack)[3] = THREAD_STACK_MAGIC; +static void thread_checkstack_init(struct thread *thread) { + ((uint32_t *)thread->t_stack)[0] = THREAD_STACK_MAGIC; + ((uint32_t *)thread->t_stack)[1] = THREAD_STACK_MAGIC; + ((uint32_t *)thread->t_stack)[2] = THREAD_STACK_MAGIC; + ((uint32_t *)thread->t_stack)[3] = THREAD_STACK_MAGIC; } /* @@ -96,60 +92,54 @@ thread_checkstack_init(struct thread *thread) * cannot be freed (which in turn is the case if the stack is the boot * stack, and the thread is the boot thread) this doesn't do anything. */ -static -void -thread_checkstack(struct thread *thread) -{ - if (thread->t_stack != NULL) { - KASSERT(((uint32_t*)thread->t_stack)[0] == THREAD_STACK_MAGIC); - KASSERT(((uint32_t*)thread->t_stack)[1] == THREAD_STACK_MAGIC); - KASSERT(((uint32_t*)thread->t_stack)[2] == THREAD_STACK_MAGIC); - KASSERT(((uint32_t*)thread->t_stack)[3] == THREAD_STACK_MAGIC); - } +static void thread_checkstack(struct thread *thread) { + if (thread->t_stack != NULL) { + KASSERT(((uint32_t *)thread->t_stack)[0] == THREAD_STACK_MAGIC); + KASSERT(((uint32_t *)thread->t_stack)[1] == THREAD_STACK_MAGIC); + KASSERT(((uint32_t *)thread->t_stack)[2] == THREAD_STACK_MAGIC); + KASSERT(((uint32_t *)thread->t_stack)[3] == THREAD_STACK_MAGIC); + } } /* * Create a thread. This is used both to create a first thread * for each CPU and to create subsequent forked threads. */ -static -struct thread * -thread_create(const char *name) -{ - struct thread *thread; +static struct thread *thread_create(const char *name) { + struct thread *thread; - DEBUGASSERT(name != NULL); + DEBUGASSERT(name != NULL); - thread = kmalloc(sizeof(*thread)); - if (thread == NULL) { - return NULL; - } + thread = kmalloc(sizeof(*thread)); + if (thread == NULL) { + return NULL; + } - thread->t_name = kstrdup(name); - if (thread->t_name == NULL) { - kfree(thread); - return NULL; - } - thread->t_wchan_name = "NEW"; - thread->t_state = S_READY; + thread->t_name = kstrdup(name); + if (thread->t_name == NULL) { + kfree(thread); + return NULL; + } + thread->t_wchan_name = "NEW"; + thread->t_state = S_READY; - /* Thread subsystem fields */ - thread_machdep_init(&thread->t_machdep); - threadlistnode_init(&thread->t_listnode, thread); - thread->t_stack = NULL; - thread->t_context = NULL; - thread->t_cpu = NULL; - thread->t_proc = NULL; - HANGMAN_ACTORINIT(&thread->t_hangman, thread->t_name); + /* Thread subsystem fields */ + thread_machdep_init(&thread->t_machdep); + threadlistnode_init(&thread->t_listnode, thread); + thread->t_stack = NULL; + thread->t_context = NULL; + thread->t_cpu = NULL; + thread->t_proc = NULL; + HANGMAN_ACTORINIT(&thread->t_hangman, thread->t_name); - /* Interrupt state fields */ - thread->t_in_interrupt = false; - thread->t_curspl = IPL_HIGH; - thread->t_iplhigh_count = 1; /* corresponding to t_curspl */ + /* Interrupt state fields */ + thread->t_in_interrupt = false; + thread->t_curspl = IPL_HIGH; + thread->t_iplhigh_count = 1; /* corresponding to t_curspl */ - /* If you add to struct thread, be sure to initialize here */ + /* If you add to struct thread, be sure to initialize here */ - return thread; + return thread; } /* @@ -160,96 +150,93 @@ thread_create(const char *name) * board config or whatnot) is tracked separately because it is not * necessarily 
anything sane or meaningful. */ -struct cpu * -cpu_create(unsigned hardware_number) -{ - struct cpu *c; - int result; - char namebuf[16]; +struct cpu *cpu_create(unsigned hardware_number) { + struct cpu *c; + int result; + char namebuf[16]; - c = kmalloc(sizeof(*c)); - if (c == NULL) { - panic("cpu_create: Out of memory\n"); - } + c = kmalloc(sizeof(*c)); + if (c == NULL) { + panic("cpu_create: Out of memory\n"); + } - c->c_self = c; - c->c_hardware_number = hardware_number; + c->c_self = c; + c->c_hardware_number = hardware_number; - c->c_curthread = NULL; - threadlist_init(&c->c_zombies); - c->c_hardclocks = 0; - c->c_spinlocks = 0; + c->c_curthread = NULL; + threadlist_init(&c->c_zombies); + c->c_hardclocks = 0; + c->c_spinlocks = 0; - c->c_isidle = false; - threadlist_init(&c->c_runqueue); - spinlock_init(&c->c_runqueue_lock); + c->c_isidle = false; + threadlist_init(&c->c_runqueue); + spinlock_init(&c->c_runqueue_lock); - c->c_ipi_pending = 0; - c->c_numshootdown = 0; - spinlock_init(&c->c_ipi_lock); + c->c_ipi_pending = 0; + c->c_numshootdown = 0; + spinlock_init(&c->c_ipi_lock); - result = cpuarray_add(&allcpus, c, &c->c_number); - if (result != 0) { - panic("cpu_create: array_add: %s\n", strerror(result)); - } + result = cpuarray_add(&allcpus, c, &c->c_number); + if (result != 0) { + panic("cpu_create: array_add: %s\n", strerror(result)); + } - snprintf(namebuf, sizeof(namebuf), "", c->c_number); - c->c_curthread = thread_create(namebuf); - if (c->c_curthread == NULL) { - panic("cpu_create: thread_create failed\n"); - } - c->c_curthread->t_cpu = c; + snprintf(namebuf, sizeof(namebuf), "", c->c_number); + c->c_curthread = thread_create(namebuf); + if (c->c_curthread == NULL) { + panic("cpu_create: thread_create failed\n"); + } + c->c_curthread->t_cpu = c; - if (c->c_number == 0) { - /* - * Leave c->c_curthread->t_stack NULL for the boot - * cpu. This means we're using the boot stack, which - * can't be freed. (Exercise: what would it take to - * make it possible to free the boot stack?) - */ - /*c->c_curthread->t_stack = ... */ - } - else { - c->c_curthread->t_stack = kmalloc(STACK_SIZE); - if (c->c_curthread->t_stack == NULL) { - panic("cpu_create: couldn't allocate stack"); - } - thread_checkstack_init(c->c_curthread); - } + if (c->c_number == 0) { + /* + * Leave c->c_curthread->t_stack NULL for the boot + * cpu. This means we're using the boot stack, which + * can't be freed. (Exercise: what would it take to + * make it possible to free the boot stack?) + */ + /*c->c_curthread->t_stack = ... */ + } else { + c->c_curthread->t_stack = kmalloc(STACK_SIZE); + if (c->c_curthread->t_stack == NULL) { + panic("cpu_create: couldn't allocate stack"); + } + thread_checkstack_init(c->c_curthread); + } - /* - * If there is no curcpu (or curthread) yet, we are creating - * the first (boot) cpu. Initialize curcpu and curthread as - * early as possible so that other code can take locks without - * exploding. - */ - if (!CURCPU_EXISTS()) { - /* - * Initializing curcpu and curthread is - * machine-dependent because either of curcpu and - * curthread might be defined in terms of the other. - */ - INIT_CURCPU(c, c->c_curthread); + /* + * If there is no curcpu (or curthread) yet, we are creating + * the first (boot) cpu. Initialize curcpu and curthread as + * early as possible so that other code can take locks without + * exploding. 
+ */ + if (!CURCPU_EXISTS()) { + /* + * Initializing curcpu and curthread is + * machine-dependent because either of curcpu and + * curthread might be defined in terms of the other. + */ + INIT_CURCPU(c, c->c_curthread); - /* - * Now make sure both t_cpu and c_curthread are - * set. This might be partially redundant with - * INIT_CURCPU depending on how things are defined. - */ - curthread->t_cpu = curcpu; - curcpu->c_curthread = curthread; - } + /* + * Now make sure both t_cpu and c_curthread are + * set. This might be partially redundant with + * INIT_CURCPU depending on how things are defined. + */ + curthread->t_cpu = curcpu; + curcpu->c_curthread = curthread; + } - HANGMAN_ACTORINIT(&c->c_hangman, "cpu"); + HANGMAN_ACTORINIT(&c->c_hangman, "cpu"); - result = proc_addthread(kproc, c->c_curthread); - if (result) { - panic("cpu_create: proc_addthread:: %s\n", strerror(result)); - } + result = proc_addthread(kproc, c->c_curthread); + if (result) { + panic("cpu_create: proc_addthread:: %s\n", strerror(result)); + } - cpu_machdep_init(c); + cpu_machdep_init(c); - return c; + return c; } /* @@ -260,31 +247,28 @@ cpu_create(unsigned hardware_number) * * (Freeing the stack you're actually using to run is ... inadvisable.) */ -static -void -thread_destroy(struct thread *thread) -{ - KASSERT(thread != curthread); - KASSERT(thread->t_state != S_RUN); +static void thread_destroy(struct thread *thread) { + KASSERT(thread != curthread); + KASSERT(thread->t_state != S_RUN); - /* - * If you add things to struct thread, be sure to clean them up - * either here or in thread_exit(). (And not both...) - */ + /* + * If you add things to struct thread, be sure to clean them up + * either here or in thread_exit(). (And not both...) + */ - /* Thread subsystem fields */ - KASSERT(thread->t_proc == NULL); - if (thread->t_stack != NULL) { - kfree(thread->t_stack); - } - threadlistnode_cleanup(&thread->t_listnode); - thread_machdep_cleanup(&thread->t_machdep); + /* Thread subsystem fields */ + KASSERT(thread->t_proc == NULL); + if (thread->t_stack != NULL) { + kfree(thread->t_stack); + } + threadlistnode_cleanup(&thread->t_listnode); + thread_machdep_cleanup(&thread->t_machdep); - /* sheer paranoia */ - thread->t_wchan_name = "DESTROYED"; + /* sheer paranoia */ + thread->t_wchan_name = "DESTROYED"; - kfree(thread->t_name); - kfree(thread); + kfree(thread->t_name); + kfree(thread); } /* @@ -293,17 +277,14 @@ thread_destroy(struct thread *thread) * * The list of zombies is per-cpu. */ -static -void -exorcise(void) -{ - struct thread *z; +static void exorcise(void) { + struct thread *z; - while ((z = threadlist_remhead(&curcpu->c_zombies)) != NULL) { - KASSERT(z != curthread); - KASSERT(z->t_state == S_ZOMBIE); - thread_destroy(z); - } + while ((z = threadlist_remhead(&curcpu->c_zombies)) != NULL) { + KASSERT(z != curthread); + KASSERT(z->t_state == S_ZOMBIE); + thread_destroy(z); + } } /* @@ -311,87 +292,81 @@ exorcise(void) * possible) to make sure we don't end up letting any other threads * run. */ -void -thread_panic(void) -{ - /* - * Kill off other CPUs. - * - * We could wait for them to stop, except that they might not. - */ - ipi_broadcast(IPI_PANIC); +void thread_panic(void) { + /* + * Kill off other CPUs. + * + * We could wait for them to stop, except that they might not. + */ + ipi_broadcast(IPI_PANIC); - /* - * Drop runnable threads on the floor. - * - * Don't try to get the run queue lock; we might not be able - * to. 
Instead, blat the list structure by hand, and take the - * risk that it might not be quite atomic. - */ - curcpu->c_runqueue.tl_count = 0; - curcpu->c_runqueue.tl_head.tln_next = &curcpu->c_runqueue.tl_tail; - curcpu->c_runqueue.tl_tail.tln_prev = &curcpu->c_runqueue.tl_head; + /* + * Drop runnable threads on the floor. + * + * Don't try to get the run queue lock; we might not be able + * to. Instead, blat the list structure by hand, and take the + * risk that it might not be quite atomic. + */ + curcpu->c_runqueue.tl_count = 0; + curcpu->c_runqueue.tl_head.tln_next = &curcpu->c_runqueue.tl_tail; + curcpu->c_runqueue.tl_tail.tln_prev = &curcpu->c_runqueue.tl_head; - /* - * Ideally, we want to make sure sleeping threads don't wake - * up and start running. However, there's no good way to track - * down all the wchans floating around the system. Another - * alternative would be to set a global flag to make the wchan - * wakeup operations do nothing; but that would mean we - * ourselves couldn't sleep to wait for an I/O completion - * interrupt, and we'd like to be able to do that if the - * system isn't that badly hosed. - * - * So, do nothing else here. - * - * This may prove inadequate in practice and further steps - * might be needed. It may also be necessary to go through and - * forcibly unlock all locks or the like... - */ + /* + * Ideally, we want to make sure sleeping threads don't wake + * up and start running. However, there's no good way to track + * down all the wchans floating around the system. Another + * alternative would be to set a global flag to make the wchan + * wakeup operations do nothing; but that would mean we + * ourselves couldn't sleep to wait for an I/O completion + * interrupt, and we'd like to be able to do that if the + * system isn't that badly hosed. + * + * So, do nothing else here. + * + * This may prove inadequate in practice and further steps + * might be needed. It may also be necessary to go through and + * forcibly unlock all locks or the like... + */ } /* * At system shutdown, ask the other CPUs to switch off. */ -void -thread_shutdown(void) -{ - /* - * Stop the other CPUs. - * - * We should probably wait for them to stop and shut them off - * on the system board. - */ - ipi_broadcast(IPI_OFFLINE); +void thread_shutdown(void) { + /* + * Stop the other CPUs. + * + * We should probably wait for them to stop and shut them off + * on the system board. + */ + ipi_broadcast(IPI_OFFLINE); } /* * Thread system initialization. */ -void -thread_bootstrap(void) -{ - cpuarray_init(&allcpus); +void thread_bootstrap(void) { + cpuarray_init(&allcpus); - /* - * Create the cpu structure for the bootup CPU, the one we're - * currently running on. Assume the hardware number is 0; that - * might be updated later by mainbus-type code. This also - * creates a thread structure for the first thread, the one - * that's already implicitly running when the kernel is - * started from the bootloader. - */ - KASSERT(CURCPU_EXISTS() == false); - (void)cpu_create(0); - KASSERT(CURCPU_EXISTS() == true); + /* + * Create the cpu structure for the bootup CPU, the one we're + * currently running on. Assume the hardware number is 0; that + * might be updated later by mainbus-type code. This also + * creates a thread structure for the first thread, the one + * that's already implicitly running when the kernel is + * started from the bootloader. 
+ */ + KASSERT(CURCPU_EXISTS() == false); + (void)cpu_create(0); + KASSERT(CURCPU_EXISTS() == true); - /* cpu_create() should also have set t_proc. */ - KASSERT(curcpu != NULL); - KASSERT(curthread != NULL); - KASSERT(curthread->t_proc != NULL); - KASSERT(curthread->t_proc == kproc); + /* cpu_create() should also have set t_proc. */ + KASSERT(curcpu != NULL); + KASSERT(curthread != NULL); + KASSERT(curthread->t_proc != NULL); + KASSERT(curthread->t_proc == kproc); - /* Done */ + /* Done */ } /* @@ -402,44 +377,40 @@ thread_bootstrap(void) * to do anything. The startup thread can just exit; we only need it * to be able to get into thread_switch() properly. */ -void -cpu_hatch(unsigned software_number) -{ - char buf[64]; +void cpu_hatch(unsigned software_number) { + char buf[64]; - KASSERT(curcpu != NULL); - KASSERT(curthread != NULL); - KASSERT(curcpu->c_number == software_number); + KASSERT(curcpu != NULL); + KASSERT(curthread != NULL); + KASSERT(curcpu->c_number == software_number); - spl0(); - cpu_identify(buf, sizeof(buf)); + spl0(); + cpu_identify(buf, sizeof(buf)); - kprintf("cpu%u: %s\n", software_number, buf); + kprintf("cpu%u: %s\n", software_number, buf); - V(cpu_startup_sem); - thread_exit(); + V(cpu_startup_sem); + thread_exit(); } /* * Start up secondary cpus. Called from boot(). */ -void -thread_start_cpus(void) -{ - char buf[64]; - unsigned i; +void thread_start_cpus(void) { + char buf[64]; + unsigned i; - cpu_identify(buf, sizeof(buf)); - kprintf("cpu0: %s\n", buf); + cpu_identify(buf, sizeof(buf)); + kprintf("cpu0: %s\n", buf); - cpu_startup_sem = sem_create("cpu_hatch", 0); - mainbus_start_cpus(); + cpu_startup_sem = sem_create("cpu_hatch", 0); + mainbus_start_cpus(); - for (i=0; it_cpu; + /* Lock the run queue of the target thread's cpu. */ + targetcpu = target->t_cpu; - if (already_have_lock) { - /* The target thread's cpu should be already locked. */ - KASSERT(spinlock_do_i_hold(&targetcpu->c_runqueue_lock)); - } - else { - spinlock_acquire(&targetcpu->c_runqueue_lock); - } + if (already_have_lock) { + /* The target thread's cpu should be already locked. */ + KASSERT(spinlock_do_i_hold(&targetcpu->c_runqueue_lock)); + } else { + spinlock_acquire(&targetcpu->c_runqueue_lock); + } - /* Target thread is now ready to run; put it on the run queue. */ - target->t_state = S_READY; - threadlist_addtail(&targetcpu->c_runqueue, target); + /* Target thread is now ready to run; put it on the run queue. */ + target->t_state = S_READY; + threadlist_addtail(&targetcpu->c_runqueue, target); - if (targetcpu->c_isidle && targetcpu != curcpu->c_self) { - /* - * Other processor is idle; send interrupt to make - * sure it unidles. - */ - ipi_send(targetcpu, IPI_UNIDLE); - } + if (targetcpu->c_isidle && targetcpu != curcpu->c_self) { + /* + * Other processor is idle; send interrupt to make + * sure it unidles. + */ + ipi_send(targetcpu, IPI_UNIDLE); + } - if (!already_have_lock) { - spinlock_release(&targetcpu->c_runqueue_lock); - } + if (!already_have_lock) { + spinlock_release(&targetcpu->c_runqueue_lock); + } } /* @@ -491,60 +459,57 @@ thread_make_runnable(struct thread *target, bool already_have_lock) * process is inherited from the caller. It will start on the same CPU * as the caller, unless the scheduler intervenes first. 
*/ -int -thread_fork(const char *name, - struct proc *proc, - void (*entrypoint)(void *data1, unsigned long data2), - void *data1, unsigned long data2) -{ - struct thread *newthread; - int result; +int thread_fork(const char *name, struct proc *proc, + void (*entrypoint)(void *data1, unsigned long data2), + void *data1, unsigned long data2) { + struct thread *newthread; + int result; - newthread = thread_create(name); - if (newthread == NULL) { - return ENOMEM; - } + newthread = thread_create(name); + if (newthread == NULL) { + return ENOMEM; + } - /* Allocate a stack */ - newthread->t_stack = kmalloc(STACK_SIZE); - if (newthread->t_stack == NULL) { - thread_destroy(newthread); - return ENOMEM; - } - thread_checkstack_init(newthread); + /* Allocate a stack */ + newthread->t_stack = kmalloc(STACK_SIZE); + if (newthread->t_stack == NULL) { + thread_destroy(newthread); + return ENOMEM; + } + thread_checkstack_init(newthread); - /* - * Now we clone various fields from the parent thread. - */ + /* + * Now we clone various fields from the parent thread. + */ - /* Thread subsystem fields */ - newthread->t_cpu = curthread->t_cpu; + /* Thread subsystem fields */ + newthread->t_cpu = curthread->t_cpu; - /* Attach the new thread to its process */ - if (proc == NULL) { - proc = curthread->t_proc; - } - result = proc_addthread(proc, newthread); - if (result) { - /* thread_destroy will clean up the stack */ - thread_destroy(newthread); - return result; - } + /* Attach the new thread to its process */ + if (proc == NULL) { + proc = curthread->t_proc; + } + result = proc_addthread(proc, newthread); + if (result) { + /* thread_destroy will clean up the stack */ + thread_destroy(newthread); + return result; + } - /* - * Because new threads come out holding the cpu runqueue lock - * (see notes at bottom of thread_switch), we need to account - * for the spllower() that will be done releasing it. - */ - newthread->t_iplhigh_count++; + /* + * Because new threads come out holding the cpu runqueue lock + * (see notes at bottom of thread_switch), we need to account + * for the spllower() that will be done releasing it. + */ + newthread->t_iplhigh_count++; - /* Set up the switchframe so entrypoint() gets called */ - switchframe_init(newthread, entrypoint, data1, data2); + /* Set up the switchframe so entrypoint() gets called */ + switchframe_init(newthread, entrypoint, data1, data2); - /* Lock the current cpu's run queue and make the new thread runnable */ - thread_make_runnable(newthread, false); + /* Lock the current cpu's run queue and make the new thread runnable */ + thread_make_runnable(newthread, false); - return 0; + return 0; } /* @@ -557,174 +522,171 @@ thread_fork(const char *name, * WC, protected by the spinlock LK. Otherwise WC and Lk should be * NULL. */ -static -void -thread_switch(threadstate_t newstate, struct wchan *wc, struct spinlock *lk) -{ - struct thread *cur, *next; - int spl; +static void thread_switch(threadstate_t newstate, struct wchan *wc, + struct spinlock *lk) { + struct thread *cur, *next; + int spl; - DEBUGASSERT(curcpu->c_curthread == curthread); - DEBUGASSERT(curthread->t_cpu == curcpu->c_self); + DEBUGASSERT(curcpu->c_curthread == curthread); + DEBUGASSERT(curthread->t_cpu == curcpu->c_self); - /* Explicitly disable interrupts on this processor */ - spl = splhigh(); + /* Explicitly disable interrupts on this processor */ + spl = splhigh(); - cur = curthread; + cur = curthread; - /* - * If we're idle, return without doing anything. 
This happens - * when the timer interrupt interrupts the idle loop. - */ - if (curcpu->c_isidle) { - splx(spl); - return; - } + /* + * If we're idle, return without doing anything. This happens + * when the timer interrupt interrupts the idle loop. + */ + if (curcpu->c_isidle) { + splx(spl); + return; + } - /* Check the stack guard band. */ - thread_checkstack(cur); + /* Check the stack guard band. */ + thread_checkstack(cur); - /* Lock the run queue. */ - spinlock_acquire(&curcpu->c_runqueue_lock); + /* Lock the run queue. */ + spinlock_acquire(&curcpu->c_runqueue_lock); - /* Micro-optimization: if nothing to do, just return */ - if (newstate == S_READY && threadlist_isempty(&curcpu->c_runqueue)) { - spinlock_release(&curcpu->c_runqueue_lock); - splx(spl); - return; - } + /* Micro-optimization: if nothing to do, just return */ + if (newstate == S_READY && threadlist_isempty(&curcpu->c_runqueue)) { + spinlock_release(&curcpu->c_runqueue_lock); + splx(spl); + return; + } - /* Put the thread in the right place. */ - switch (newstate) { - case S_RUN: - panic("Illegal S_RUN in thread_switch\n"); - case S_READY: - thread_make_runnable(cur, true /*have lock*/); - break; - case S_SLEEP: - cur->t_wchan_name = wc->wc_name; - /* - * Add the thread to the list in the wait channel, and - * unlock same. To avoid a race with someone else - * calling wchan_wake*, we must keep the wchan's - * associated spinlock locked from the point the - * caller of wchan_sleep locked it until the thread is - * on the list. - */ - threadlist_addtail(&wc->wc_threads, cur); - spinlock_release(lk); - break; - case S_ZOMBIE: - cur->t_wchan_name = "ZOMBIE"; - threadlist_addtail(&curcpu->c_zombies, cur); - break; - } - cur->t_state = newstate; + /* Put the thread in the right place. */ + switch (newstate) { + case S_RUN: + panic("Illegal S_RUN in thread_switch\n"); + case S_READY: + thread_make_runnable(cur, true /*have lock*/); + break; + case S_SLEEP: + cur->t_wchan_name = wc->wc_name; + /* + * Add the thread to the list in the wait channel, and + * unlock same. To avoid a race with someone else + * calling wchan_wake*, we must keep the wchan's + * associated spinlock locked from the point the + * caller of wchan_sleep locked it until the thread is + * on the list. + */ + threadlist_addtail(&wc->wc_threads, cur); + spinlock_release(lk); + break; + case S_ZOMBIE: + cur->t_wchan_name = "ZOMBIE"; + threadlist_addtail(&curcpu->c_zombies, cur); + break; + } + cur->t_state = newstate; - /* - * Get the next thread. While there isn't one, call cpu_idle(). - * curcpu->c_isidle must be true when cpu_idle is - * called. Unlock the runqueue while idling too, to make sure - * things can be added to it. - * - * Note that we don't need to unlock the runqueue atomically - * with idling; becoming unidle requires receiving an - * interrupt (either a hardware interrupt or an interprocessor - * interrupt from another cpu posting a wakeup) and idling - * *is* atomic with respect to re-enabling interrupts. - * - * Note that c_isidle becomes true briefly even if we don't go - * idle. However, because one is supposed to hold the runqueue - * lock to look at it, this should not be visible or matter. - */ + /* + * Get the next thread. While there isn't one, call cpu_idle(). + * curcpu->c_isidle must be true when cpu_idle is + * called. Unlock the runqueue while idling too, to make sure + * things can be added to it. 
+ * + * Note that we don't need to unlock the runqueue atomically + * with idling; becoming unidle requires receiving an + * interrupt (either a hardware interrupt or an interprocessor + * interrupt from another cpu posting a wakeup) and idling + * *is* atomic with respect to re-enabling interrupts. + * + * Note that c_isidle becomes true briefly even if we don't go + * idle. However, because one is supposed to hold the runqueue + * lock to look at it, this should not be visible or matter. + */ - /* The current cpu is now idle. */ - curcpu->c_isidle = true; - do { - next = threadlist_remhead(&curcpu->c_runqueue); - if (next == NULL) { - spinlock_release(&curcpu->c_runqueue_lock); - cpu_idle(); - spinlock_acquire(&curcpu->c_runqueue_lock); - } - } while (next == NULL); - curcpu->c_isidle = false; + /* The current cpu is now idle. */ + curcpu->c_isidle = true; + do { + next = threadlist_remhead(&curcpu->c_runqueue); + if (next == NULL) { + spinlock_release(&curcpu->c_runqueue_lock); + cpu_idle(); + spinlock_acquire(&curcpu->c_runqueue_lock); + } + } while (next == NULL); + curcpu->c_isidle = false; - /* - * Note that curcpu->c_curthread may be the same variable as - * curthread and it may not be, depending on how curthread and - * curcpu are defined by the MD code. We'll assign both and - * assume the compiler will optimize one away if they're the - * same. - */ - curcpu->c_curthread = next; - curthread = next; + /* + * Note that curcpu->c_curthread may be the same variable as + * curthread and it may not be, depending on how curthread and + * curcpu are defined by the MD code. We'll assign both and + * assume the compiler will optimize one away if they're the + * same. + */ + curcpu->c_curthread = next; + curthread = next; - /* do the switch (in assembler in switch.S) */ - switchframe_switch(&cur->t_context, &next->t_context); + /* do the switch (in assembler in switch.S) */ + switchframe_switch(&cur->t_context, &next->t_context); - /* - * When we get to this point we are either running in the next - * thread, or have come back to the same thread again, - * depending on how you look at it. That is, - * switchframe_switch returns immediately in another thread - * context, which in general will be executing here with a - * different stack and different values in the local - * variables. (Although new threads go to thread_startup - * instead.) But, later on when the processor, or some - * processor, comes back to the previous thread, it's also - * executing here with the *same* value in the local - * variables. - * - * The upshot, however, is as follows: - * - * - The thread now currently running is "cur", not "next", - * because when we return from switchrame_switch on the - * same stack, we're back to the thread that - * switchframe_switch call switched away from, which is - * "cur". - * - * - "cur" is _not_ the thread that just *called* - * switchframe_switch. - * - * - If newstate is S_ZOMB we never get back here in that - * context at all. - * - * - If the thread just chosen to run ("next") was a new - * thread, we don't get to this code again until - * *another* context switch happens, because when new - * threads return from switchframe_switch they teleport - * to thread_startup. - * - * - At this point the thread whose stack we're now on may - * have been migrated to another cpu since it last ran. - * - * The above is inherently confusing and will probably take a - * while to get used to. 
- * - * However, the important part is that code placed here, after - * the call to switchframe_switch, does not necessarily run on - * every context switch. Thus any such code must be either - * skippable on some switches or also called from - * thread_startup. - */ + /* + * When we get to this point we are either running in the next + * thread, or have come back to the same thread again, + * depending on how you look at it. That is, + * switchframe_switch returns immediately in another thread + * context, which in general will be executing here with a + * different stack and different values in the local + * variables. (Although new threads go to thread_startup + * instead.) But, later on when the processor, or some + * processor, comes back to the previous thread, it's also + * executing here with the *same* value in the local + * variables. + * + * The upshot, however, is as follows: + * + * - The thread now currently running is "cur", not "next", + * because when we return from switchrame_switch on the + * same stack, we're back to the thread that + * switchframe_switch call switched away from, which is + * "cur". + * + * - "cur" is _not_ the thread that just *called* + * switchframe_switch. + * + * - If newstate is S_ZOMB we never get back here in that + * context at all. + * + * - If the thread just chosen to run ("next") was a new + * thread, we don't get to this code again until + * *another* context switch happens, because when new + * threads return from switchframe_switch they teleport + * to thread_startup. + * + * - At this point the thread whose stack we're now on may + * have been migrated to another cpu since it last ran. + * + * The above is inherently confusing and will probably take a + * while to get used to. + * + * However, the important part is that code placed here, after + * the call to switchframe_switch, does not necessarily run on + * every context switch. Thus any such code must be either + * skippable on some switches or also called from + * thread_startup. + */ + /* Clear the wait channel and set the thread state. */ + cur->t_wchan_name = NULL; + cur->t_state = S_RUN; - /* Clear the wait channel and set the thread state. */ - cur->t_wchan_name = NULL; - cur->t_state = S_RUN; + /* Unlock the run queue. */ + spinlock_release(&curcpu->c_runqueue_lock); - /* Unlock the run queue. */ - spinlock_release(&curcpu->c_runqueue_lock); + /* Activate our address space in the MMU. */ + as_activate(); - /* Activate our address space in the MMU. */ - as_activate(); + /* Clean up dead threads. */ + exorcise(); - /* Clean up dead threads. */ - exorcise(); - - /* Turn interrupts back on. */ - splx(spl); + /* Turn interrupts back on. */ + splx(spl); } /* @@ -735,35 +697,33 @@ thread_switch(threadstate_t newstate, struct wchan *wc, struct spinlock *lk) * thread_switch, the beginning part of this function must match the * tail of thread_switch. */ -void -thread_startup(void (*entrypoint)(void *data1, unsigned long data2), - void *data1, unsigned long data2) -{ - struct thread *cur; +void thread_startup(void (*entrypoint)(void *data1, unsigned long data2), + void *data1, unsigned long data2) { + struct thread *cur; - cur = curthread; + cur = curthread; - /* Clear the wait channel and set the thread state. */ - cur->t_wchan_name = NULL; - cur->t_state = S_RUN; + /* Clear the wait channel and set the thread state. */ + cur->t_wchan_name = NULL; + cur->t_state = S_RUN; - /* Release the runqueue lock acquired in thread_switch. 
*/ - spinlock_release(&curcpu->c_runqueue_lock); + /* Release the runqueue lock acquired in thread_switch. */ + spinlock_release(&curcpu->c_runqueue_lock); - /* Activate our address space in the MMU. */ - as_activate(); + /* Activate our address space in the MMU. */ + as_activate(); - /* Clean up dead threads. */ - exorcise(); + /* Clean up dead threads. */ + exorcise(); - /* Enable interrupts. */ - spl0(); + /* Enable interrupts. */ + spl0(); - /* Call the function. */ - entrypoint(data1, data2); + /* Call the function. */ + entrypoint(data1, data2); - /* Done. */ - thread_exit(); + /* Done. */ + thread_exit(); } /* @@ -775,39 +735,33 @@ thread_startup(void (*entrypoint)(void *data1, unsigned long data2), * * Does not return. */ -void -thread_exit(void) -{ - struct thread *cur; +void thread_exit(void) { + struct thread *cur; - cur = curthread; + cur = curthread; - /* - * Detach from our process. You might need to move this action - * around, depending on how your wait/exit works. - */ - proc_remthread(cur); + /* + * Detach from our process. You might need to move this action + * around, depending on how your wait/exit works. + */ + proc_remthread(cur); - /* Make sure we *are* detached (move this only if you're sure!) */ - KASSERT(cur->t_proc == NULL); + /* Make sure we *are* detached (move this only if you're sure!) */ + KASSERT(cur->t_proc == NULL); - /* Check the stack guard band. */ - thread_checkstack(cur); + /* Check the stack guard band. */ + thread_checkstack(cur); - /* Interrupts off on this processor */ - splhigh(); - thread_switch(S_ZOMBIE, NULL, NULL); - panic("braaaaaaaiiiiiiiiiiinssssss\n"); + /* Interrupts off on this processor */ + splhigh(); + thread_switch(S_ZOMBIE, NULL, NULL); + panic("braaaaaaaiiiiiiiiiiinssssss\n"); } /* * Yield the cpu to another process, but stay runnable. */ -void -thread_yield(void) -{ - thread_switch(S_READY, NULL, NULL); -} +void thread_yield(void) { thread_switch(S_READY, NULL, NULL); } //////////////////////////////////////////////////////////// @@ -818,13 +772,11 @@ thread_yield(void) * the current CPU's run queue by job priority. */ -void -schedule(void) -{ - /* - * You can write this. If we do nothing, threads will run in - * round-robin fashion. - */ +void schedule(void) { + /* + * You can write this. If we do nothing, threads will run in + * round-robin fashion. + */ } /* @@ -844,109 +796,106 @@ schedule(void) * System/161 does not (yet) model such cache effects, we'll be very * aggressive. 
*/ -void -thread_consider_migration(void) -{ - unsigned my_count, total_count, one_share, to_send; - unsigned i, numcpus; - struct cpu *c; - struct threadlist victims; - struct thread *t; +void thread_consider_migration(void) { + unsigned my_count, total_count, one_share, to_send; + unsigned i, numcpus; + struct cpu *c; + struct threadlist victims; + struct thread *t; - my_count = total_count = 0; - numcpus = cpuarray_num(&allcpus); - for (i=0; ic_runqueue_lock); - total_count += c->c_runqueue.tl_count; - if (c == curcpu->c_self) { - my_count = c->c_runqueue.tl_count; - } - spinlock_release(&c->c_runqueue_lock); - } + my_count = total_count = 0; + numcpus = cpuarray_num(&allcpus); + for (i = 0; i < numcpus; i++) { + c = cpuarray_get(&allcpus, i); + spinlock_acquire(&c->c_runqueue_lock); + total_count += c->c_runqueue.tl_count; + if (c == curcpu->c_self) { + my_count = c->c_runqueue.tl_count; + } + spinlock_release(&c->c_runqueue_lock); + } - one_share = DIVROUNDUP(total_count, numcpus); - if (my_count < one_share) { - return; - } + one_share = DIVROUNDUP(total_count, numcpus); + if (my_count < one_share) { + return; + } - to_send = my_count - one_share; - threadlist_init(&victims); - spinlock_acquire(&curcpu->c_runqueue_lock); - for (i=0; ic_runqueue); - threadlist_addhead(&victims, t); - } - spinlock_release(&curcpu->c_runqueue_lock); + to_send = my_count - one_share; + threadlist_init(&victims); + spinlock_acquire(&curcpu->c_runqueue_lock); + for (i = 0; i < to_send; i++) { + t = threadlist_remtail(&curcpu->c_runqueue); + threadlist_addhead(&victims, t); + } + spinlock_release(&curcpu->c_runqueue_lock); - for (i=0; i < numcpus && to_send > 0; i++) { - c = cpuarray_get(&allcpus, i); - if (c == curcpu->c_self) { - continue; - } - spinlock_acquire(&c->c_runqueue_lock); - while (c->c_runqueue.tl_count < one_share && to_send > 0) { - t = threadlist_remhead(&victims); - /* - * Ordinarily, curthread will not appear on - * the run queue. However, it can under the - * following circumstances: - * - it went to sleep; - * - the processor became idle, so it - * remained curthread; - * - it was reawakened, so it was put on the - * run queue; - * - and the processor hasn't fully unidled - * yet, so all these things are still true. - * - * If the timer interrupt happens at (almost) - * exactly the proper moment, we can come here - * while things are in this state and see - * curthread. However, *migrating* curthread - * can cause bad things to happen (Exercise: - * Why? And what?) so shuffle it to the end of - * the list and decrement to_send in order to - * skip it. Then it goes back on our own run - * queue below. - */ - if (t == curthread) { - threadlist_addtail(&victims, t); - to_send--; - continue; - } + for (i = 0; i < numcpus && to_send > 0; i++) { + c = cpuarray_get(&allcpus, i); + if (c == curcpu->c_self) { + continue; + } + spinlock_acquire(&c->c_runqueue_lock); + while (c->c_runqueue.tl_count < one_share && to_send > 0) { + t = threadlist_remhead(&victims); + /* + * Ordinarily, curthread will not appear on + * the run queue. However, it can under the + * following circumstances: + * - it went to sleep; + * - the processor became idle, so it + * remained curthread; + * - it was reawakened, so it was put on the + * run queue; + * - and the processor hasn't fully unidled + * yet, so all these things are still true. + * + * If the timer interrupt happens at (almost) + * exactly the proper moment, we can come here + * while things are in this state and see + * curthread. 
However, *migrating* curthread + * can cause bad things to happen (Exercise: + * Why? And what?) so shuffle it to the end of + * the list and decrement to_send in order to + * skip it. Then it goes back on our own run + * queue below. + */ + if (t == curthread) { + threadlist_addtail(&victims, t); + to_send--; + continue; + } - t->t_cpu = c; - threadlist_addtail(&c->c_runqueue, t); - DEBUG(DB_THREADS, - "Migrated thread %s: cpu %u -> %u", - t->t_name, curcpu->c_number, c->c_number); - to_send--; - if (c->c_isidle) { - /* - * Other processor is idle; send - * interrupt to make sure it unidles. - */ - ipi_send(c, IPI_UNIDLE); - } - } - spinlock_release(&c->c_runqueue_lock); - } + t->t_cpu = c; + threadlist_addtail(&c->c_runqueue, t); + DEBUG(DB_THREADS, "Migrated thread %s: cpu %u -> %u", t->t_name, + curcpu->c_number, c->c_number); + to_send--; + if (c->c_isidle) { + /* + * Other processor is idle; send + * interrupt to make sure it unidles. + */ + ipi_send(c, IPI_UNIDLE); + } + } + spinlock_release(&c->c_runqueue_lock); + } - /* - * Because the code above isn't atomic, the thread counts may have - * changed while we were working and we may end up with leftovers. - * Don't panic; just put them back on our own run queue. - */ - if (!threadlist_isempty(&victims)) { - spinlock_acquire(&curcpu->c_runqueue_lock); - while ((t = threadlist_remhead(&victims)) != NULL) { - threadlist_addtail(&curcpu->c_runqueue, t); - } - spinlock_release(&curcpu->c_runqueue_lock); - } + /* + * Because the code above isn't atomic, the thread counts may have + * changed while we were working and we may end up with leftovers. + * Don't panic; just put them back on our own run queue. + */ + if (!threadlist_isempty(&victims)) { + spinlock_acquire(&curcpu->c_runqueue_lock); + while ((t = threadlist_remhead(&victims)) != NULL) { + threadlist_addtail(&curcpu->c_runqueue, t); + } + spinlock_release(&curcpu->c_runqueue_lock); + } - KASSERT(threadlist_isempty(&victims)); - threadlist_cleanup(&victims); + KASSERT(threadlist_isempty(&victims)); + threadlist_cleanup(&victims); } //////////////////////////////////////////////////////////// @@ -963,30 +912,26 @@ thread_consider_migration(void) * arrangements should be made to free it after the wait channel is * destroyed. */ -struct wchan * -wchan_create(const char *name) -{ - struct wchan *wc; +struct wchan *wchan_create(const char *name) { + struct wchan *wc; - wc = kmalloc(sizeof(*wc)); - if (wc == NULL) { - return NULL; - } - threadlist_init(&wc->wc_threads); - wc->wc_name = name; + wc = kmalloc(sizeof(*wc)); + if (wc == NULL) { + return NULL; + } + threadlist_init(&wc->wc_threads); + wc->wc_name = name; - return wc; + return wc; } /* * Destroy a wait channel. Must be empty and unlocked. * (The corresponding cleanup functions require this.) */ -void -wchan_destroy(struct wchan *wc) -{ - threadlist_cleanup(&wc->wc_threads); - kfree(wc); +void wchan_destroy(struct wchan *wc) { + threadlist_cleanup(&wc->wc_threads); + kfree(wc); } /* @@ -996,97 +941,89 @@ wchan_destroy(struct wchan *wc) * be locked. The call to thread_switch unlocks it; we relock it * before returning. 
*/ -void -wchan_sleep(struct wchan *wc, struct spinlock *lk) -{ - /* may not sleep in an interrupt handler */ - KASSERT(!curthread->t_in_interrupt); +void wchan_sleep(struct wchan *wc, struct spinlock *lk) { + /* may not sleep in an interrupt handler */ + KASSERT(!curthread->t_in_interrupt); - /* must hold the spinlock */ - KASSERT(spinlock_do_i_hold(lk)); + /* must hold the spinlock */ + KASSERT(spinlock_do_i_hold(lk)); - /* must not hold other spinlocks */ - KASSERT(curcpu->c_spinlocks == 1); + /* must not hold other spinlocks */ + KASSERT(curcpu->c_spinlocks == 1); - thread_switch(S_SLEEP, wc, lk); - spinlock_acquire(lk); + thread_switch(S_SLEEP, wc, lk); + spinlock_acquire(lk); } /* * Wake up one thread sleeping on a wait channel. */ -void -wchan_wakeone(struct wchan *wc, struct spinlock *lk) -{ - struct thread *target; +void wchan_wakeone(struct wchan *wc, struct spinlock *lk) { + struct thread *target; - KASSERT(spinlock_do_i_hold(lk)); + KASSERT(spinlock_do_i_hold(lk)); - /* Grab a thread from the channel */ - target = threadlist_remhead(&wc->wc_threads); + /* Grab a thread from the channel */ + target = threadlist_remhead(&wc->wc_threads); - if (target == NULL) { - /* Nobody was sleeping. */ - return; - } + if (target == NULL) { + /* Nobody was sleeping. */ + return; + } - /* - * Note that thread_make_runnable acquires a runqueue lock - * while we're holding LK. This is ok; all spinlocks - * associated with wchans must come before the runqueue locks, - * as we also bridge from the wchan lock to the runqueue lock - * in thread_switch. - */ + /* + * Note that thread_make_runnable acquires a runqueue lock + * while we're holding LK. This is ok; all spinlocks + * associated with wchans must come before the runqueue locks, + * as we also bridge from the wchan lock to the runqueue lock + * in thread_switch. + */ - thread_make_runnable(target, false); + thread_make_runnable(target, false); } /* * Wake up all threads sleeping on a wait channel. */ -void -wchan_wakeall(struct wchan *wc, struct spinlock *lk) -{ - struct thread *target; - struct threadlist list; +void wchan_wakeall(struct wchan *wc, struct spinlock *lk) { + struct thread *target; + struct threadlist list; - KASSERT(spinlock_do_i_hold(lk)); + KASSERT(spinlock_do_i_hold(lk)); - threadlist_init(&list); + threadlist_init(&list); - /* - * Grab all the threads from the channel, moving them to a - * private list. - */ - while ((target = threadlist_remhead(&wc->wc_threads)) != NULL) { - threadlist_addtail(&list, target); - } + /* + * Grab all the threads from the channel, moving them to a + * private list. + */ + while ((target = threadlist_remhead(&wc->wc_threads)) != NULL) { + threadlist_addtail(&list, target); + } - /* - * We could conceivably sort by cpu first to cause fewer lock - * ops and fewer IPIs, but for now at least don't bother. Just - * make each thread runnable. - */ - while ((target = threadlist_remhead(&list)) != NULL) { - thread_make_runnable(target, false); - } + /* + * We could conceivably sort by cpu first to cause fewer lock + * ops and fewer IPIs, but for now at least don't bother. Just + * make each thread runnable. + */ + while ((target = threadlist_remhead(&list)) != NULL) { + thread_make_runnable(target, false); + } - threadlist_cleanup(&list); + threadlist_cleanup(&list); } /* * Return nonzero if there are no threads sleeping on the channel. * This is meant to be used only for diagnostic purposes. 
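The contract spelled out above (hold the spinlock, wchan_sleep() drops it atomically and re-acquires it before returning, wakers hold the same lock) is easiest to see from a caller. The sketch below is a hypothetical counting resource written against that contract; the struct and the resource_* names are invented for illustration, and the header list assumes the stock include layout.

#include <types.h>
#include <lib.h>
#include <spinlock.h>
#include <wchan.h>

struct resource {
	struct spinlock res_lock;
	struct wchan *res_wchan;
	volatile unsigned res_avail;
};

static void resource_init(struct resource *r, unsigned count) {
	spinlock_init(&r->res_lock);
	r->res_wchan = wchan_create("resource");
	KASSERT(r->res_wchan != NULL);
	r->res_avail = count;
}

static void resource_take(struct resource *r) {
	spinlock_acquire(&r->res_lock);
	while (r->res_avail == 0) {
		/* No missed-wakeup race: the lock is held until we are on
		   the channel, and held again by the time this returns. */
		wchan_sleep(r->res_wchan, &r->res_lock);
	}
	r->res_avail--;
	spinlock_release(&r->res_lock);
}

static void resource_put(struct resource *r) {
	spinlock_acquire(&r->res_lock);
	r->res_avail++;
	/* Lock order is fine: wchan spinlocks come before runqueue locks. */
	wchan_wakeone(r->res_wchan, &r->res_lock);
	spinlock_release(&r->res_lock);
}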
*/ -bool -wchan_isempty(struct wchan *wc, struct spinlock *lk) -{ - bool ret; +bool wchan_isempty(struct wchan *wc, struct spinlock *lk) { + bool ret; - KASSERT(spinlock_do_i_hold(lk)); - ret = threadlist_isempty(&wc->wc_threads); + KASSERT(spinlock_do_i_hold(lk)); + ret = threadlist_isempty(&wc->wc_threads); - return ret; + return ret; } //////////////////////////////////////////////////////////// @@ -1098,115 +1035,105 @@ wchan_isempty(struct wchan *wc, struct spinlock *lk) /* * Send an IPI (inter-processor interrupt) to the specified CPU. */ -void -ipi_send(struct cpu *target, int code) -{ - KASSERT(code >= 0 && code < 32); +void ipi_send(struct cpu *target, int code) { + KASSERT(code >= 0 && code < 32); - spinlock_acquire(&target->c_ipi_lock); - target->c_ipi_pending |= (uint32_t)1 << code; - mainbus_send_ipi(target); - spinlock_release(&target->c_ipi_lock); + spinlock_acquire(&target->c_ipi_lock); + target->c_ipi_pending |= (uint32_t)1 << code; + mainbus_send_ipi(target); + spinlock_release(&target->c_ipi_lock); } /* * Send an IPI to all CPUs. */ -void -ipi_broadcast(int code) -{ - unsigned i; - struct cpu *c; +void ipi_broadcast(int code) { + unsigned i; + struct cpu *c; - for (i=0; i < cpuarray_num(&allcpus); i++) { - c = cpuarray_get(&allcpus, i); - if (c != curcpu->c_self) { - ipi_send(c, code); - } - } + for (i = 0; i < cpuarray_num(&allcpus); i++) { + c = cpuarray_get(&allcpus, i); + if (c != curcpu->c_self) { + ipi_send(c, code); + } + } } /* * Send a TLB shootdown IPI to the specified CPU. */ -void -ipi_tlbshootdown(struct cpu *target, const struct tlbshootdown *mapping) -{ - unsigned n; +void ipi_tlbshootdown(struct cpu *target, const struct tlbshootdown *mapping) { + unsigned n; - spinlock_acquire(&target->c_ipi_lock); + spinlock_acquire(&target->c_ipi_lock); - n = target->c_numshootdown; - if (n == TLBSHOOTDOWN_MAX) { - /* - * If you have problems with this panic going off, - * consider: (1) increasing the maximum, (2) putting - * logic here to sleep until space appears (may - * interact awkwardly with VM system locking), (3) - * putting logic here to coalesce requests together, - * and/or (4) improving VM system state tracking to - * reduce the number of unnecessary shootdowns. - */ - panic("ipi_tlbshootdown: Too many shootdowns queued\n"); - } - else { - target->c_shootdown[n] = *mapping; - target->c_numshootdown = n+1; - } + n = target->c_numshootdown; + if (n == TLBSHOOTDOWN_MAX) { + /* + * If you have problems with this panic going off, + * consider: (1) increasing the maximum, (2) putting + * logic here to sleep until space appears (may + * interact awkwardly with VM system locking), (3) + * putting logic here to coalesce requests together, + * and/or (4) improving VM system state tracking to + * reduce the number of unnecessary shootdowns. + */ + panic("ipi_tlbshootdown: Too many shootdowns queued\n"); + } else { + target->c_shootdown[n] = *mapping; + target->c_numshootdown = n + 1; + } - target->c_ipi_pending |= (uint32_t)1 << IPI_TLBSHOOTDOWN; - mainbus_send_ipi(target); + target->c_ipi_pending |= (uint32_t)1 << IPI_TLBSHOOTDOWN; + mainbus_send_ipi(target); - spinlock_release(&target->c_ipi_lock); + spinlock_release(&target->c_ipi_lock); } /* * Handle an incoming interprocessor interrupt. 
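ipi_send() encodes each request as one bit in the target CPU's 32-bit c_ipi_pending word, which is why the code insists on 0 <= code < 32; the handler then tests the bits one by one and clears the word. A standalone model of that encoding follows; the code values are stand-ins, not the kernel's real IPI_* constants.

#include <assert.h>
#include <stdint.h>

enum { CODE_PANIC = 0, CODE_UNIDLE = 2, CODE_SHOOTDOWN = 3 };  /* made up */

int main(void) {
	uint32_t pending = 0;

	/* Sender side: set the bit for each queued request. */
	pending |= (uint32_t)1 << CODE_UNIDLE;
	pending |= (uint32_t)1 << CODE_SHOOTDOWN;

	/* Handler side: test each bit, act on it, then clear the whole word. */
	assert((pending & (1U << CODE_UNIDLE)) != 0);
	assert((pending & (1U << CODE_SHOOTDOWN)) != 0);
	assert((pending & (1U << CODE_PANIC)) == 0);
	pending = 0;

	assert(pending == 0);
	return 0;
}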
*/ -void -interprocessor_interrupt(void) -{ - uint32_t bits; - unsigned i; +void interprocessor_interrupt(void) { + uint32_t bits; + unsigned i; - spinlock_acquire(&curcpu->c_ipi_lock); - bits = curcpu->c_ipi_pending; + spinlock_acquire(&curcpu->c_ipi_lock); + bits = curcpu->c_ipi_pending; - if (bits & (1U << IPI_PANIC)) { - /* panic on another cpu - just stop dead */ - spinlock_release(&curcpu->c_ipi_lock); - cpu_halt(); - } - if (bits & (1U << IPI_OFFLINE)) { - /* offline request */ - spinlock_release(&curcpu->c_ipi_lock); - spinlock_acquire(&curcpu->c_runqueue_lock); - if (!curcpu->c_isidle) { - kprintf("cpu%d: offline: warning: not idle\n", - curcpu->c_number); - } - spinlock_release(&curcpu->c_runqueue_lock); - kprintf("cpu%d: offline.\n", curcpu->c_number); - cpu_halt(); - } - if (bits & (1U << IPI_UNIDLE)) { - /* - * The cpu has already unidled itself to take the - * interrupt; don't need to do anything else. - */ - } - if (bits & (1U << IPI_TLBSHOOTDOWN)) { - /* - * Note: depending on your VM system locking you might - * need to release the ipi lock while calling - * vm_tlbshootdown. - */ - for (i=0; ic_numshootdown; i++) { - vm_tlbshootdown(&curcpu->c_shootdown[i]); - } - curcpu->c_numshootdown = 0; - } + if (bits & (1U << IPI_PANIC)) { + /* panic on another cpu - just stop dead */ + spinlock_release(&curcpu->c_ipi_lock); + cpu_halt(); + } + if (bits & (1U << IPI_OFFLINE)) { + /* offline request */ + spinlock_release(&curcpu->c_ipi_lock); + spinlock_acquire(&curcpu->c_runqueue_lock); + if (!curcpu->c_isidle) { + kprintf("cpu%d: offline: warning: not idle\n", curcpu->c_number); + } + spinlock_release(&curcpu->c_runqueue_lock); + kprintf("cpu%d: offline.\n", curcpu->c_number); + cpu_halt(); + } + if (bits & (1U << IPI_UNIDLE)) { + /* + * The cpu has already unidled itself to take the + * interrupt; don't need to do anything else. + */ + } + if (bits & (1U << IPI_TLBSHOOTDOWN)) { + /* + * Note: depending on your VM system locking you might + * need to release the ipi lock while calling + * vm_tlbshootdown. + */ + for (i = 0; i < curcpu->c_numshootdown; i++) { + vm_tlbshootdown(&curcpu->c_shootdown[i]); + } + curcpu->c_numshootdown = 0; + } - curcpu->c_ipi_pending = 0; - spinlock_release(&curcpu->c_ipi_lock); + curcpu->c_ipi_pending = 0; + spinlock_release(&curcpu->c_ipi_lock); } diff --git a/kern/vm/kmalloc.c b/kern/vm/kmalloc.c index b8a1204..0aad387 100644 --- a/kern/vm/kmalloc.c +++ b/kern/vm/kmalloc.c @@ -36,20 +36,16 @@ * Kernel malloc. */ - /* * Fill a block with 0xdeadbeef. 
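The 0xdeadbeef fill described here is a cheap way to make dangling-pointer bugs loud: every word of a free block gets the pattern, and a later integrity pass can complain if anything else shows up. A standalone model of the fill-and-verify cycle, with a made-up block size:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define BLOCKSIZE 64   /* hypothetical subpage block */

static void poison(void *vptr, size_t len) {
	uint32_t *ptr = vptr;
	size_t i;

	for (i = 0; i < len / sizeof(uint32_t); i++) {
		ptr[i] = 0xdeadbeef;
	}
}

int main(void) {
	uint32_t block[BLOCKSIZE / sizeof(uint32_t)];
	size_t i;

	poison(block, sizeof(block));

	/* A write through a stale pointer would break this check. */
	for (i = 0; i < BLOCKSIZE / sizeof(uint32_t); i++) {
		assert(block[i] == 0xdeadbeef);
	}
	return 0;
}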
*/ -static -void -fill_deadbeef(void *vptr, size_t len) -{ - uint32_t *ptr = vptr; - size_t i; +static void fill_deadbeef(void *vptr, size_t len) { + uint32_t *ptr = vptr; + size_t i; - for (i=0; ipageaddr_and_blocktype & PAGE_FRAME) +#define PR_PAGEADDR(pr) ((pr)->pageaddr_and_blocktype & PAGE_FRAME) #define PR_BLOCKTYPE(pr) ((pr)->pageaddr_and_blocktype & ~PAGE_FRAME) -#define MKPAB(pa, blk) (((pa)&PAGE_FRAME) | ((blk) & ~PAGE_FRAME)) +#define MKPAB(pa, blk) (((pa) & PAGE_FRAME) | ((blk) & ~PAGE_FRAME)) //////////////////////////////////////// @@ -181,7 +177,7 @@ static struct spinlock kmalloc_spinlock = SPINLOCK_INITIALIZER; #define NPAGEREFS_PER_PAGE (PAGE_SIZE / sizeof(struct pageref)) struct pagerefpage { - struct pageref refs[NPAGEREFS_PER_PAGE]; + struct pageref refs[NPAGEREFS_PER_PAGE]; }; /* @@ -192,9 +188,9 @@ struct pagerefpage { #define INUSE_WORDS (NPAGEREFS_PER_PAGE / 32) struct kheap_root { - struct pagerefpage *page; - uint32_t pagerefs_inuse[INUSE_WORDS]; - unsigned numinuse; + struct pagerefpage *page; + uint32_t pagerefs_inuse[INUSE_WORDS]; + unsigned numinuse; }; /* @@ -215,125 +211,116 @@ static struct kheap_root kheaproots[NUM_PAGEREFPAGES]; /* * Allocate a page to hold pagerefs. */ -static -void -allocpagerefpage(struct kheap_root *root) -{ - vaddr_t va; +static void allocpagerefpage(struct kheap_root *root) { + vaddr_t va; - KASSERT(root->page == NULL); + KASSERT(root->page == NULL); - /* - * We release the spinlock while calling alloc_kpages. This - * avoids deadlock if alloc_kpages needs to come back here. - * Note that this means things can change behind our back... - */ - spinlock_release(&kmalloc_spinlock); - va = alloc_kpages(1); - spinlock_acquire(&kmalloc_spinlock); - if (va == 0) { - kprintf("kmalloc: Couldn't get a pageref page\n"); - return; - } - KASSERT(va % PAGE_SIZE == 0); + /* + * We release the spinlock while calling alloc_kpages. This + * avoids deadlock if alloc_kpages needs to come back here. + * Note that this means things can change behind our back... + */ + spinlock_release(&kmalloc_spinlock); + va = alloc_kpages(1); + spinlock_acquire(&kmalloc_spinlock); + if (va == 0) { + kprintf("kmalloc: Couldn't get a pageref page\n"); + return; + } + KASSERT(va % PAGE_SIZE == 0); - if (root->page != NULL) { - /* Oops, somebody else allocated it. */ - spinlock_release(&kmalloc_spinlock); - free_kpages(va); - spinlock_acquire(&kmalloc_spinlock); - /* Once allocated it isn't ever freed. */ - KASSERT(root->page != NULL); - return; - } + if (root->page != NULL) { + /* Oops, somebody else allocated it. */ + spinlock_release(&kmalloc_spinlock); + free_kpages(va); + spinlock_acquire(&kmalloc_spinlock); + /* Once allocated it isn't ever freed. */ + KASSERT(root->page != NULL); + return; + } - root->page = (struct pagerefpage *)va; + root->page = (struct pagerefpage *)va; } /* * Allocate a pageref structure. */ -static -struct pageref * -allocpageref(void) -{ - unsigned i,j; - uint32_t k; - unsigned whichroot; - struct kheap_root *root; +static struct pageref *allocpageref(void) { + unsigned i, j; + uint32_t k; + unsigned whichroot; + struct kheap_root *root; - for (whichroot=0; whichroot < NUM_PAGEREFPAGES; whichroot++) { - root = &kheaproots[whichroot]; - if (root->numinuse >= NPAGEREFS_PER_PAGE) { - continue; - } + for (whichroot = 0; whichroot < NUM_PAGEREFPAGES; whichroot++) { + root = &kheaproots[whichroot]; + if (root->numinuse >= NPAGEREFS_PER_PAGE) { + continue; + } - /* - * This should probably not be a linear search. 
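The MKPAB/PR_PAGEADDR/PR_BLOCKTYPE macros above exploit the fact that a page address has its low bits clear, so the block-type index can ride in those bits and both values fit in one word. A standalone illustration; PAGE_SIZE and PAGE_FRAME are defined locally as stand-ins for the kernel's VM headers, and the accessors are renamed since they operate on a bare word rather than a struct pageref.

#include <assert.h>
#include <stdint.h>

#define PAGE_SIZE  4096
#define PAGE_FRAME (~(uintptr_t)(PAGE_SIZE - 1))

#define MKPAB(pa, blk)   (((pa) & PAGE_FRAME) | ((blk) & ~PAGE_FRAME))
#define PAB_PAGEADDR(w)  ((w) & PAGE_FRAME)
#define PAB_BLOCKTYPE(w) ((w) & ~PAGE_FRAME)

int main(void) {
	uintptr_t pageaddr = (uintptr_t)0x80042000u;  /* page-aligned address */
	uintptr_t blktype = 5;                        /* index into sizes[] */
	uintptr_t packed = MKPAB(pageaddr, blktype);

	/* Both halves unpack intact because they never share bits. */
	assert(PAB_PAGEADDR(packed) == pageaddr);
	assert(PAB_BLOCKTYPE(packed) == blktype);
	return 0;
}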
- */ - for (i=0; ipagerefs_inuse[i]==0xffffffff) { - /* full */ - continue; - } - for (k=1,j=0; k!=0; k<<=1,j++) { - if ((root->pagerefs_inuse[i] & k)==0) { - root->pagerefs_inuse[i] |= k; - root->numinuse++; - if (root->page == NULL) { - allocpagerefpage(root); - } - if (root->page == NULL) { - return NULL; - } - return &root->page->refs[i*32 + j]; - } - } - KASSERT(0); - } - } + /* + * This should probably not be a linear search. + */ + for (i = 0; i < INUSE_WORDS; i++) { + if (root->pagerefs_inuse[i] == 0xffffffff) { + /* full */ + continue; + } + for (k = 1, j = 0; k != 0; k <<= 1, j++) { + if ((root->pagerefs_inuse[i] & k) == 0) { + root->pagerefs_inuse[i] |= k; + root->numinuse++; + if (root->page == NULL) { + allocpagerefpage(root); + } + if (root->page == NULL) { + return NULL; + } + return &root->page->refs[i * 32 + j]; + } + } + KASSERT(0); + } + } - /* ran out */ - return NULL; + /* ran out */ + return NULL; } /* * Release a pageref structure. */ -static -void -freepageref(struct pageref *p) -{ - size_t i, j; - uint32_t k; - unsigned whichroot; - struct kheap_root *root; - struct pagerefpage *page; +static void freepageref(struct pageref *p) { + size_t i, j; + uint32_t k; + unsigned whichroot; + struct kheap_root *root; + struct pagerefpage *page; - for (whichroot=0; whichroot < NUM_PAGEREFPAGES; whichroot++) { - root = &kheaproots[whichroot]; + for (whichroot = 0; whichroot < NUM_PAGEREFPAGES; whichroot++) { + root = &kheaproots[whichroot]; - page = root->page; - if (page == NULL) { - KASSERT(root->numinuse == 0); - continue; - } + page = root->page; + if (page == NULL) { + KASSERT(root->numinuse == 0); + continue; + } - j = p-page->refs; - /* note: j is unsigned, don't test < 0 */ - if (j < NPAGEREFS_PER_PAGE) { - /* on this page */ - i = j/32; - k = ((uint32_t)1) << (j%32); - KASSERT((root->pagerefs_inuse[i] & k) != 0); - root->pagerefs_inuse[i] &= ~k; - KASSERT(root->numinuse > 0); - root->numinuse--; - return; - } - } - /* pageref wasn't on any of the pages */ - KASSERT(0); + j = p - page->refs; + /* note: j is unsigned, don't test < 0 */ + if (j < NPAGEREFS_PER_PAGE) { + /* on this page */ + i = j / 32; + k = ((uint32_t)1) << (j % 32); + KASSERT((root->pagerefs_inuse[i] & k) != 0); + root->pagerefs_inuse[i] &= ~k; + KASSERT(root->numinuse > 0); + root->numinuse--; + return; + } + } + /* pageref wasn't on any of the pages */ + KASSERT(0); } //////////////////////////////////////// @@ -365,74 +352,70 @@ static struct pageref *allbase; /* * Set up the guard values in a block we're about to return. 
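allocpageref() and freepageref() above manage pageref slots with a small bitmap: a slot's index is word*32 + bit, and the inverse split (index/32, 1 << index%32) recovers the word and mask when the slot is released. The same bookkeeping in standalone form, with a made-up table size:

#include <assert.h>
#include <stdint.h>

#define NREFS  128              /* hypothetical number of slots */
#define NWORDS (NREFS / 32)

static uint32_t inuse[NWORDS];

/* Find a clear bit, mark it, and return its slot index; -1 when full. */
static int slot_alloc(void) {
	unsigned i, j;
	uint32_t k;

	for (i = 0; i < NWORDS; i++) {
		if (inuse[i] == 0xffffffff) {
			continue;       /* this word is full */
		}
		for (k = 1, j = 0; k != 0; k <<= 1, j++) {
			if ((inuse[i] & k) == 0) {
				inuse[i] |= k;
				return (int)(i * 32 + j);
			}
		}
	}
	return -1;
}

static void slot_free(unsigned idx) {
	uint32_t k = (uint32_t)1 << (idx % 32);

	assert((inuse[idx / 32] & k) != 0);   /* must be currently in use */
	inuse[idx / 32] &= ~k;
}

int main(void) {
	assert(slot_alloc() == 0);
	assert(slot_alloc() == 1);
	slot_free(0);
	assert(slot_alloc() == 0);            /* freed slot is found again */
	return 0;
}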
*/ -static -void * -establishguardband(void *block, size_t clientsize, size_t blocksize) -{ - vaddr_t lowguard, lowsize, data, enddata, highguard, highsize, i; +static void *establishguardband(void *block, size_t clientsize, + size_t blocksize) { + vaddr_t lowguard, lowsize, data, enddata, highguard, highsize, i; - KASSERT(clientsize + GUARD_OVERHEAD <= blocksize); - KASSERT(clientsize < 65536U); + KASSERT(clientsize + GUARD_OVERHEAD <= blocksize); + KASSERT(clientsize < 65536U); - lowguard = (vaddr_t)block; - lowsize = lowguard + 2; - data = lowsize + 2; - enddata = data + clientsize; - highguard = lowguard + blocksize - 4; - highsize = highguard + 2; + lowguard = (vaddr_t)block; + lowsize = lowguard + 2; + data = lowsize + 2; + enddata = data + clientsize; + highguard = lowguard + blocksize - 4; + highsize = highguard + 2; - *(uint16_t *)lowguard = GUARD_HALFWORD; - *(uint16_t *)lowsize = clientsize; - for (i=data; i smallerblocksize); - KASSERT(clientsize + GUARD_OVERHEAD <= blocksize); - enddata = data + clientsize; - for (i=enddata; i smallerblocksize); + KASSERT(clientsize + GUARD_OVERHEAD <= blocksize); + enddata = data + clientsize; + for (i = enddata; i < highguard; i++) { + KASSERT(*(uint8_t *)i == GUARD_FILLBYTE); + } } #else /* not GUARDS */ @@ -457,16 +440,13 @@ checkguardband(vaddr_t blockaddr, size_t smallerblocksize, size_t blocksize) * The first word of the block is a freelist pointer and should not be * deadbeef; the rest of the block should be only deadbeef. */ -static -void -checkdeadbeef(void *block, size_t blocksize) -{ - uint32_t *ptr = block; - size_t i; +static void checkdeadbeef(void *block, size_t blocksize) { + uint32_t *ptr = block; + size_t i; - for (i=1; i < blocksize/sizeof(uint32_t); i++) { - KASSERT(ptr[i] == 0xdeadbeef); - } + for (i = 1; i < blocksize / sizeof(uint32_t); i++) { + KASSERT(ptr[i] == 0xdeadbeef); + } } #endif /* CHECKBEEF */ @@ -490,80 +470,76 @@ checkdeadbeef(void *block, size_t blocksize) * assertion as a bit in isfree is set twice; if not, a circular * freelist will cause an infinite loop. */ -static -void -checksubpage(struct pageref *pr) -{ - vaddr_t prpage, fla; - struct freelist *fl; - int blktype; - int nfree=0; - size_t blocksize; +static void checksubpage(struct pageref *pr) { + vaddr_t prpage, fla; + struct freelist *fl; + int blktype; + int nfree = 0; + size_t blocksize; #ifdef CHECKGUARDS - const unsigned maxblocks = PAGE_SIZE / SMALLEST_SUBPAGE_SIZE; - const unsigned numfreewords = DIVROUNDUP(maxblocks, 32); - uint32_t isfree[numfreewords], mask; - unsigned numblocks, blocknum, i; - size_t smallerblocksize; + const unsigned maxblocks = PAGE_SIZE / SMALLEST_SUBPAGE_SIZE; + const unsigned numfreewords = DIVROUNDUP(maxblocks, 32); + uint32_t isfree[numfreewords], mask; + unsigned numblocks, blocknum, i; + size_t smallerblocksize; #endif - KASSERT(spinlock_do_i_hold(&kmalloc_spinlock)); + KASSERT(spinlock_do_i_hold(&kmalloc_spinlock)); - if (pr->freelist_offset == INVALID_OFFSET) { - KASSERT(pr->nfree==0); - return; - } + if (pr->freelist_offset == INVALID_OFFSET) { + KASSERT(pr->nfree == 0); + return; + } - prpage = PR_PAGEADDR(pr); - blktype = PR_BLOCKTYPE(pr); - KASSERT(blktype >= 0 && blktype < NSIZES); - blocksize = sizes[blktype]; + prpage = PR_PAGEADDR(pr); + blktype = PR_BLOCKTYPE(pr); + KASSERT(blktype >= 0 && blktype < NSIZES); + blocksize = sizes[blktype]; #ifdef CHECKGUARDS - smallerblocksize = blktype > 0 ? sizes[blktype - 1] : 0; - for (i=0; i 0 ? 
sizes[blktype - 1] : 0; + for (i = 0; i < numfreewords; i++) { + isfree[i] = 0; + } #endif #ifdef __mips__ - KASSERT(prpage >= MIPS_KSEG0); - KASSERT(prpage < MIPS_KSEG1); + KASSERT(prpage >= MIPS_KSEG0); + KASSERT(prpage < MIPS_KSEG1); #endif - KASSERT(pr->freelist_offset < PAGE_SIZE); - KASSERT(pr->freelist_offset % blocksize == 0); + KASSERT(pr->freelist_offset < PAGE_SIZE); + KASSERT(pr->freelist_offset % blocksize == 0); - fla = prpage + pr->freelist_offset; - fl = (struct freelist *)fla; + fla = prpage + pr->freelist_offset; + fl = (struct freelist *)fla; - for (; fl != NULL; fl = fl->next) { - fla = (vaddr_t)fl; - KASSERT(fla >= prpage && fla < prpage + PAGE_SIZE); - KASSERT((fla-prpage) % blocksize == 0); + for (; fl != NULL; fl = fl->next) { + fla = (vaddr_t)fl; + KASSERT(fla >= prpage && fla < prpage + PAGE_SIZE); + KASSERT((fla - prpage) % blocksize == 0); #ifdef CHECKBEEF - checkdeadbeef(fl, blocksize); + checkdeadbeef(fl, blocksize); #endif #ifdef CHECKGUARDS - blocknum = (fla-prpage) / blocksize; - mask = 1U << (blocknum % 32); - KASSERT((isfree[blocknum / 32] & mask) == 0); - isfree[blocknum / 32] |= mask; + blocknum = (fla - prpage) / blocksize; + mask = 1U << (blocknum % 32); + KASSERT((isfree[blocknum / 32] & mask) == 0); + isfree[blocknum / 32] |= mask; #endif - KASSERT(fl->next != fl); - nfree++; - } - KASSERT(nfree==pr->nfree); + KASSERT(fl->next != fl); + nfree++; + } + KASSERT(nfree == pr->nfree); #ifdef CHECKGUARDS - numblocks = PAGE_SIZE / blocksize; - for (i=0; inext_samesize) { - checksubpage(pr); - KASSERT(sc < TOTAL_PAGEREFS); - sc++; - } - } + for (i = 0; i < NSIZES; i++) { + for (pr = sizebases[i]; pr != NULL; pr = pr->next_samesize) { + checksubpage(pr); + KASSERT(sc < TOTAL_PAGEREFS); + sc++; + } + } - for (pr = allbase; pr != NULL; pr = pr->next_all) { - checksubpage(pr); - KASSERT(ac < TOTAL_PAGEREFS); - ac++; - } + for (pr = allbase; pr != NULL; pr = pr->next_all) { + checksubpage(pr); + KASSERT(ac < TOTAL_PAGEREFS); + ac++; + } - KASSERT(sc==ac); + KASSERT(sc == ac); } #else #define checksubpages() @@ -613,8 +586,8 @@ checksubpages(void) #define LABEL_OVERHEAD LABEL_PTROFFSET struct malloclabel { - vaddr_t label; - unsigned generation; + vaddr_t label; + unsigned generation; }; static unsigned mallocgeneration; @@ -622,73 +595,64 @@ static unsigned mallocgeneration; /* * Label a block of memory. 
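The guard bands set up by establishguardband() and verified by checkguardband() bracket every handed-out block: a magic halfword and the client size in front, fill bytes over the unused tail, and another magic halfword plus size at the end, with the client pointer offset past the front pair. Below is a simplified standalone model of that layout; the constants and the 8-byte overhead are stand-ins, not the kernel's configured values.

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define GUARD_HALFWORD 0xdead   /* stand-in magic */
#define FILLBYTE       0xa9     /* stand-in fill */
#define OVERHEAD       8        /* front guard+size pair and back pair */

/* Layout: [u16 guard][u16 size][ client data ][ fill ][u16 guard][u16 size] */
static void *arm_guards(uint8_t *block, size_t clientsize, size_t blocksize) {
	assert(clientsize + OVERHEAD <= blocksize);
	*(uint16_t *)block = GUARD_HALFWORD;
	*(uint16_t *)(block + 2) = (uint16_t)clientsize;
	memset(block + 4 + clientsize, FILLBYTE, blocksize - 8 - clientsize);
	*(uint16_t *)(block + blocksize - 4) = GUARD_HALFWORD;
	*(uint16_t *)(block + blocksize - 2) = (uint16_t)clientsize;
	return block + 4;                    /* the pointer the client sees */
}

static void check_guards(const uint8_t *block, size_t blocksize) {
	size_t clientsize = *(const uint16_t *)(block + 2);
	size_t i;

	assert(*(const uint16_t *)block == GUARD_HALFWORD);
	assert(*(const uint16_t *)(block + blocksize - 4) == GUARD_HALFWORD);
	for (i = 4 + clientsize; i < blocksize - 4; i++) {
		assert(block[i] == FILLBYTE);   /* no overrun into the slack */
	}
}

int main(void) {
	uint32_t words[16];                     /* 64 aligned bytes */
	uint8_t *block = (uint8_t *)words;
	char *p = arm_guards(block, 20, sizeof(words));

	memset(p, 'x', 20);                     /* in-bounds client writes */
	check_guards(block, sizeof(words));
	return 0;
}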
*/ -static -void * -establishlabel(void *block, vaddr_t label) -{ - struct malloclabel *ml; +static void *establishlabel(void *block, vaddr_t label) { + struct malloclabel *ml; - ml = block; - ml->label = label; - ml->generation = mallocgeneration; - ml++; - return ml; + ml = block; + ml->label = label; + ml->generation = mallocgeneration; + ml++; + return ml; } -static -void -dump_subpage(struct pageref *pr, unsigned generation) -{ - unsigned blocksize = sizes[PR_BLOCKTYPE(pr)]; - unsigned numblocks = PAGE_SIZE / blocksize; - unsigned numfreewords = DIVROUNDUP(numblocks, 32); - uint32_t isfree[numfreewords], mask; - vaddr_t prpage; - struct freelist *fl; - vaddr_t blockaddr; - struct malloclabel *ml; - unsigned i; +static void dump_subpage(struct pageref *pr, unsigned generation) { + unsigned blocksize = sizes[PR_BLOCKTYPE(pr)]; + unsigned numblocks = PAGE_SIZE / blocksize; + unsigned numfreewords = DIVROUNDUP(numblocks, 32); + uint32_t isfree[numfreewords], mask; + vaddr_t prpage; + struct freelist *fl; + vaddr_t blockaddr; + struct malloclabel *ml; + unsigned i; - for (i=0; ifreelist_offset); - for (; fl != NULL; fl = fl->next) { - i = ((vaddr_t)fl - prpage) / blocksize; - mask = 1U << (i % 32); - isfree[i / 32] |= mask; - } + prpage = PR_PAGEADDR(pr); + fl = (struct freelist *)(prpage + pr->freelist_offset); + for (; fl != NULL; fl = fl->next) { + i = ((vaddr_t)fl - prpage) / blocksize; + mask = 1U << (i % 32); + isfree[i / 32] |= mask; + } - for (i=0; igeneration != generation) { - continue; - } - kprintf("%5zu bytes at %p, allocated at %p\n", - blocksize, (void *)blockaddr, (void *)ml->label); - } + for (i = 0; i < numblocks; i++) { + mask = 1U << (i % 32); + if (isfree[i / 32] & mask) { + continue; + } + blockaddr = prpage + i * blocksize; + ml = (struct malloclabel *)blockaddr; + if (ml->generation != generation) { + continue; + } + kprintf("%5zu bytes at %p, allocated at %p\n", blocksize, (void *)blockaddr, + (void *)ml->label); + } } -static -void -dump_subpages(unsigned generation) -{ - struct pageref *pr; - int i; +static void dump_subpages(unsigned generation) { + struct pageref *pr; + int i; - kprintf("Remaining allocations from generation %u:\n", generation); - for (i=0; inext_samesize) { - dump_subpage(pr, generation); - } - } + kprintf("Remaining allocations from generation %u:\n", generation); + for (i = 0; i < NSIZES; i++) { + for (pr = sizebases[i]; pr != NULL; pr = pr->next_samesize) { + dump_subpage(pr, generation); + } + } } #else @@ -697,43 +661,37 @@ dump_subpages(unsigned generation) #endif /* LABELS */ -void -kheap_nextgeneration(void) -{ +void kheap_nextgeneration(void) { #ifdef LABELS - spinlock_acquire(&kmalloc_spinlock); - mallocgeneration++; - spinlock_release(&kmalloc_spinlock); + spinlock_acquire(&kmalloc_spinlock); + mallocgeneration++; + spinlock_release(&kmalloc_spinlock); #endif } -void -kheap_dump(void) -{ +void kheap_dump(void) { #ifdef LABELS - /* print the whole thing with interrupts off */ - spinlock_acquire(&kmalloc_spinlock); - dump_subpages(mallocgeneration); - spinlock_release(&kmalloc_spinlock); + /* print the whole thing with interrupts off */ + spinlock_acquire(&kmalloc_spinlock); + dump_subpages(mallocgeneration); + spinlock_release(&kmalloc_spinlock); #else - kprintf("Enable LABELS in kmalloc.c to use this functionality.\n"); + kprintf("Enable LABELS in kmalloc.c to use this functionality.\n"); #endif } -void -kheap_dumpall(void) -{ +void kheap_dumpall(void) { #ifdef LABELS - unsigned i; + unsigned i; - /* print the whole thing with 
interrupts off */ - spinlock_acquire(&kmalloc_spinlock); - for (i=0; i<=mallocgeneration; i++) { - dump_subpages(i); - } - spinlock_release(&kmalloc_spinlock); + /* print the whole thing with interrupts off */ + spinlock_acquire(&kmalloc_spinlock); + for (i = 0; i <= mallocgeneration; i++) { + dump_subpages(i); + } + spinlock_release(&kmalloc_spinlock); #else - kprintf("Enable LABELS in kmalloc.c to use this functionality.\n"); + kprintf("Enable LABELS in kmalloc.c to use this functionality.\n"); #endif } @@ -742,76 +700,70 @@ kheap_dumpall(void) /* * Print the allocated/freed map of a single kernel heap page. */ -static -void -subpage_stats(struct pageref *pr) -{ - vaddr_t prpage, fla; - struct freelist *fl; - int blktype; - unsigned i, n, index; - uint32_t freemap[PAGE_SIZE / (SMALLEST_SUBPAGE_SIZE*32)]; +static void subpage_stats(struct pageref *pr) { + vaddr_t prpage, fla; + struct freelist *fl; + int blktype; + unsigned i, n, index; + uint32_t freemap[PAGE_SIZE / (SMALLEST_SUBPAGE_SIZE * 32)]; - checksubpage(pr); - KASSERT(spinlock_do_i_hold(&kmalloc_spinlock)); + checksubpage(pr); + KASSERT(spinlock_do_i_hold(&kmalloc_spinlock)); - /* clear freemap[] */ - for (i=0; i= 0 && blktype < NSIZES); + prpage = PR_PAGEADDR(pr); + blktype = PR_BLOCKTYPE(pr); + KASSERT(blktype >= 0 && blktype < NSIZES); - /* compute how many bits we need in freemap and assert we fit */ - n = PAGE_SIZE / sizes[blktype]; - KASSERT(n <= 32 * ARRAYCOUNT(freemap)); + /* compute how many bits we need in freemap and assert we fit */ + n = PAGE_SIZE / sizes[blktype]; + KASSERT(n <= 32 * ARRAYCOUNT(freemap)); - if (pr->freelist_offset != INVALID_OFFSET) { - fla = prpage + pr->freelist_offset; - fl = (struct freelist *)fla; + if (pr->freelist_offset != INVALID_OFFSET) { + fla = prpage + pr->freelist_offset; + fl = (struct freelist *)fla; - for (; fl != NULL; fl = fl->next) { - fla = (vaddr_t)fl; - index = (fla-prpage) / sizes[blktype]; - KASSERT(indexnext) { + fla = (vaddr_t)fl; + index = (fla - prpage) / sizes[blktype]; + KASSERT(index < n); + freemap[index / 32] |= (1 << (index % 32)); + } + } - kprintf("at 0x%08lx: size %-4lu %u/%u free\n", - (unsigned long)prpage, (unsigned long) sizes[blktype], - (unsigned) pr->nfree, n); - kprintf(" "); - for (i=0; infree, n); + kprintf(" "); + for (i = 0; i < n; i++) { + int val = (freemap[i / 32] & (1 << (i % 32))) != 0; + kprintf("%c", val ? '.' : '*'); + if (i % 64 == 63 && i < n - 1) { + kprintf("\n "); + } + } + kprintf("\n"); } /* * Print the whole heap. */ -void -kheap_printstats(void) -{ - struct pageref *pr; +void kheap_printstats(void) { + struct pageref *pr; - /* print the whole thing with interrupts off */ - spinlock_acquire(&kmalloc_spinlock); + /* print the whole thing with interrupts off */ + spinlock_acquire(&kmalloc_spinlock); - kprintf("Subpage allocator status:\n"); + kprintf("Subpage allocator status:\n"); - for (pr = allbase; pr != NULL; pr = pr->next_all) { - subpage_stats(pr); - } + for (pr = allbase; pr != NULL; pr = pr->next_all) { + subpage_stats(pr); + } - spinlock_release(&kmalloc_spinlock); + spinlock_release(&kmalloc_spinlock); } //////////////////////////////////////// @@ -819,344 +771,330 @@ kheap_printstats(void) /* * Remove a pageref from both lists that it's on. 
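The generation counter and labels above make leak hunting fairly mechanical: bump the generation before the code you suspect, run it, then dump whatever from that generation is still live, each block tagged with the kmalloc() caller address. A sketch of that usage pattern; the demo function is invented, the declarations are assumed to come from lib.h as usual, and the dump only prints anything when LABELS is enabled in this file.

#include <types.h>
#include <lib.h>

static void leakcheck_demo(void) {
	void *kept, *dropped;

	kheap_nextgeneration();         /* open a fresh generation */

	kept = kmalloc(128);
	dropped = kmalloc(64);
	kfree(kept);                    /* freed: not reported */
	(void)dropped;                  /* "leaked" on purpose */

	/* Prints the surviving 64-byte block with its allocation site. */
	kheap_dump();
}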
*/ -static -void -remove_lists(struct pageref *pr, int blktype) -{ - struct pageref **guy; +static void remove_lists(struct pageref *pr, int blktype) { + struct pageref **guy; - KASSERT(blktype>=0 && blktype= 0 && blktype < NSIZES); - for (guy = &sizebases[blktype]; *guy; guy = &(*guy)->next_samesize) { - checksubpage(*guy); - if (*guy == pr) { - *guy = pr->next_samesize; - break; - } - } + for (guy = &sizebases[blktype]; *guy; guy = &(*guy)->next_samesize) { + checksubpage(*guy); + if (*guy == pr) { + *guy = pr->next_samesize; + break; + } + } - for (guy = &allbase; *guy; guy = &(*guy)->next_all) { - checksubpage(*guy); - if (*guy == pr) { - *guy = pr->next_all; - break; - } - } + for (guy = &allbase; *guy; guy = &(*guy)->next_all) { + checksubpage(*guy); + if (*guy == pr) { + *guy = pr->next_all; + break; + } + } } /* * Given a requested client size, return the block type, that is, the * index into the sizes[] array for the block size to use. */ -static -inline -int blocktype(size_t clientsz) -{ - unsigned i; - for (i=0; inext_samesize) { + for (pr = sizebases[blktype]; pr != NULL; pr = pr->next_samesize) { - /* check for corruption */ - KASSERT(PR_BLOCKTYPE(pr) == blktype); - checksubpage(pr); + /* check for corruption */ + KASSERT(PR_BLOCKTYPE(pr) == blktype); + checksubpage(pr); - if (pr->nfree > 0) { + if (pr->nfree > 0) { - doalloc: /* comes here after getting a whole fresh page */ + doalloc: /* comes here after getting a whole fresh page */ - KASSERT(pr->freelist_offset < PAGE_SIZE); - prpage = PR_PAGEADDR(pr); - fla = prpage + pr->freelist_offset; - fl = (struct freelist *)fla; + KASSERT(pr->freelist_offset < PAGE_SIZE); + prpage = PR_PAGEADDR(pr); + fla = prpage + pr->freelist_offset; + fl = (struct freelist *)fla; - retptr = fl; - fl = fl->next; - pr->nfree--; + retptr = fl; + fl = fl->next; + pr->nfree--; - if (fl != NULL) { - KASSERT(pr->nfree > 0); - fla = (vaddr_t)fl; - KASSERT(fla - prpage < PAGE_SIZE); - pr->freelist_offset = fla - prpage; - } - else { - KASSERT(pr->nfree == 0); - pr->freelist_offset = INVALID_OFFSET; - } + if (fl != NULL) { + KASSERT(pr->nfree > 0); + fla = (vaddr_t)fl; + KASSERT(fla - prpage < PAGE_SIZE); + pr->freelist_offset = fla - prpage; + } else { + KASSERT(pr->nfree == 0); + pr->freelist_offset = INVALID_OFFSET; + } #ifdef GUARDS - retptr = establishguardband(retptr, clientsz, sz); + retptr = establishguardband(retptr, clientsz, sz); #endif #ifdef LABELS - retptr = establishlabel(retptr, label); + retptr = establishlabel(retptr, label); #endif - checksubpages(); + checksubpages(); - spinlock_release(&kmalloc_spinlock); - return retptr; - } - } + spinlock_release(&kmalloc_spinlock); + return retptr; + } + } - /* - * No page of the right size available. - * Make a new one. - * - * We release the spinlock while calling alloc_kpages. This - * avoids deadlock if alloc_kpages needs to come back here. - * Note that this means things can change behind our back... - */ + /* + * No page of the right size available. + * Make a new one. + * + * We release the spinlock while calling alloc_kpages. This + * avoids deadlock if alloc_kpages needs to come back here. + * Note that this means things can change behind our back... + */ - spinlock_release(&kmalloc_spinlock); - prpage = alloc_kpages(1); - if (prpage==0) { - /* Out of memory. */ - kprintf("kmalloc: Subpage allocator couldn't get a page\n"); - return NULL; - } - KASSERT(prpage % PAGE_SIZE == 0); + spinlock_release(&kmalloc_spinlock); + prpage = alloc_kpages(1); + if (prpage == 0) { + /* Out of memory. 
*/ + kprintf("kmalloc: Subpage allocator couldn't get a page\n"); + return NULL; + } + KASSERT(prpage % PAGE_SIZE == 0); #ifdef CHECKBEEF - /* deadbeef the whole page, as it probably starts zeroed */ - fill_deadbeef((void *)prpage, PAGE_SIZE); + /* deadbeef the whole page, as it probably starts zeroed */ + fill_deadbeef((void *)prpage, PAGE_SIZE); #endif - spinlock_acquire(&kmalloc_spinlock); + spinlock_acquire(&kmalloc_spinlock); - pr = allocpageref(); - if (pr==NULL) { - /* Couldn't allocate accounting space for the new page. */ - spinlock_release(&kmalloc_spinlock); - free_kpages(prpage); - kprintf("kmalloc: Subpage allocator couldn't get pageref\n"); - return NULL; - } + pr = allocpageref(); + if (pr == NULL) { + /* Couldn't allocate accounting space for the new page. */ + spinlock_release(&kmalloc_spinlock); + free_kpages(prpage); + kprintf("kmalloc: Subpage allocator couldn't get pageref\n"); + return NULL; + } - pr->pageaddr_and_blocktype = MKPAB(prpage, blktype); - pr->nfree = PAGE_SIZE / sizes[blktype]; + pr->pageaddr_and_blocktype = MKPAB(prpage, blktype); + pr->nfree = PAGE_SIZE / sizes[blktype]; - /* - * Note: fl is volatile because the MIPS toolchain we were - * using in spring 2001 attempted to optimize this loop and - * blew it. Making fl volatile inhibits the optimization. - */ + /* + * Note: fl is volatile because the MIPS toolchain we were + * using in spring 2001 attempted to optimize this loop and + * blew it. Making fl volatile inhibits the optimization. + */ - fla = prpage; - fl = (struct freelist *)fla; - fl->next = NULL; - for (i=1; infree; i++) { - fl = (struct freelist *)(fla + i*sizes[blktype]); - fl->next = (struct freelist *)(fla + (i-1)*sizes[blktype]); - KASSERT(fl != fl->next); - } - fla = (vaddr_t) fl; - pr->freelist_offset = fla - prpage; - KASSERT(pr->freelist_offset == (pr->nfree-1)*sizes[blktype]); + fla = prpage; + fl = (struct freelist *)fla; + fl->next = NULL; + for (i = 1; i < pr->nfree; i++) { + fl = (struct freelist *)(fla + i * sizes[blktype]); + fl->next = (struct freelist *)(fla + (i - 1) * sizes[blktype]); + KASSERT(fl != fl->next); + } + fla = (vaddr_t)fl; + pr->freelist_offset = fla - prpage; + KASSERT(pr->freelist_offset == (pr->nfree - 1) * sizes[blktype]); - pr->next_samesize = sizebases[blktype]; - sizebases[blktype] = pr; + pr->next_samesize = sizebases[blktype]; + sizebases[blktype] = pr; - pr->next_all = allbase; - allbase = pr; + pr->next_all = allbase; + allbase = pr; - /* This is kind of cheesy, but avoids duplicating the alloc code. */ - goto doalloc; + /* This is kind of cheesy, but avoids duplicating the alloc code. */ + goto doalloc; } /* * Free a pointer previously returned from subpage_kmalloc. If the * pointer is not on any heap page we recognize, return -1. 
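The fresh-page path above shows the allocator's central trick: the free list costs no extra memory because each free block stores the next pointer in its own first word, and the fast path in subpage_kmalloc() is just a head pop. A standalone model of building and popping such a list; the page and block sizes are made up, and malloc stands in for alloc_kpages.

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define PAGESIZE  4096
#define BLOCKSIZE 64

struct freelist {
	struct freelist *next;
};

int main(void) {
	uint8_t *page = malloc(PAGESIZE);        /* stand-in for alloc_kpages(1) */
	unsigned nblocks = PAGESIZE / BLOCKSIZE;
	struct freelist *fl, *head;
	void *block;
	unsigned i;

	assert(page != NULL);

	/* Thread the list through the blocks themselves, as the fresh-page
	   loop above does; the last block ends up at the head. */
	fl = (struct freelist *)page;
	fl->next = NULL;
	for (i = 1; i < nblocks; i++) {
		fl = (struct freelist *)(page + i * BLOCKSIZE);
		fl->next = (struct freelist *)(page + (i - 1) * BLOCKSIZE);
	}
	head = fl;

	/* Allocation fast path: pop the head. */
	block = head;
	head = head->next;
	assert((uint8_t *)block == page + (nblocks - 1) * BLOCKSIZE);
	assert((uint8_t *)head == page + (nblocks - 2) * BLOCKSIZE);

	free(page);
	return 0;
}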
*/ -static -int -subpage_kfree(void *ptr) -{ - int blktype; // index into sizes[] that we're using - vaddr_t ptraddr; // same as ptr - struct pageref *pr; // pageref for page we're freeing in - vaddr_t prpage; // PR_PAGEADDR(pr) - vaddr_t fla; // free list entry address - struct freelist *fl; // free list entry - vaddr_t offset; // offset into page +static int subpage_kfree(void *ptr) { + int blktype; // index into sizes[] that we're using + vaddr_t ptraddr; // same as ptr + struct pageref *pr; // pageref for page we're freeing in + vaddr_t prpage; // PR_PAGEADDR(pr) + vaddr_t fla; // free list entry address + struct freelist *fl; // free list entry + vaddr_t offset; // offset into page #ifdef GUARDS - size_t blocksize, smallerblocksize; + size_t blocksize, smallerblocksize; #endif - ptraddr = (vaddr_t)ptr; + ptraddr = (vaddr_t)ptr; #ifdef GUARDS - if (ptraddr % PAGE_SIZE == 0) { - /* - * With guard bands, all client-facing subpage - * pointers are offset by GUARD_PTROFFSET (which is 4) - * from the underlying blocks and are therefore not - * page-aligned. So a page-aligned pointer is not one - * of ours. Catch this up front, as otherwise - * subtracting GUARD_PTROFFSET could give a pointer on - * a page we *do* own, and then we'll panic because - * it's not a valid one. - */ - return -1; - } - ptraddr -= GUARD_PTROFFSET; + if (ptraddr % PAGE_SIZE == 0) { + /* + * With guard bands, all client-facing subpage + * pointers are offset by GUARD_PTROFFSET (which is 4) + * from the underlying blocks and are therefore not + * page-aligned. So a page-aligned pointer is not one + * of ours. Catch this up front, as otherwise + * subtracting GUARD_PTROFFSET could give a pointer on + * a page we *do* own, and then we'll panic because + * it's not a valid one. + */ + return -1; + } + ptraddr -= GUARD_PTROFFSET; #endif #ifdef LABELS - if (ptraddr % PAGE_SIZE == 0) { - /* ditto */ - return -1; - } - ptraddr -= LABEL_PTROFFSET; + if (ptraddr % PAGE_SIZE == 0) { + /* ditto */ + return -1; + } + ptraddr -= LABEL_PTROFFSET; #endif - spinlock_acquire(&kmalloc_spinlock); + spinlock_acquire(&kmalloc_spinlock); - checksubpages(); + checksubpages(); - /* Silence warnings with gcc 4.8 -Og (but not -O2) */ - prpage = 0; - blktype = 0; + /* Silence warnings with gcc 4.8 -Og (but not -O2) */ + prpage = 0; + blktype = 0; - for (pr = allbase; pr; pr = pr->next_all) { - prpage = PR_PAGEADDR(pr); - blktype = PR_BLOCKTYPE(pr); - KASSERT(blktype >= 0 && blktype < NSIZES); + for (pr = allbase; pr; pr = pr->next_all) { + prpage = PR_PAGEADDR(pr); + blktype = PR_BLOCKTYPE(pr); + KASSERT(blktype >= 0 && blktype < NSIZES); - /* check for corruption */ - KASSERT(blktype>=0 && blktype= 0 && blktype < NSIZES); + checksubpage(pr); - if (ptraddr >= prpage && ptraddr < prpage + PAGE_SIZE) { - break; - } - } + if (ptraddr >= prpage && ptraddr < prpage + PAGE_SIZE) { + break; + } + } - if (pr==NULL) { - /* Not on any of our pages - not a subpage allocation */ - spinlock_release(&kmalloc_spinlock); - return -1; - } + if (pr == NULL) { + /* Not on any of our pages - not a subpage allocation */ + spinlock_release(&kmalloc_spinlock); + return -1; + } - offset = ptraddr - prpage; + offset = ptraddr - prpage; - /* Check for proper positioning and alignment */ - if (offset >= PAGE_SIZE || offset % sizes[blktype] != 0) { - panic("kfree: subpage free of invalid addr %p\n", ptr); - } + /* Check for proper positioning and alignment */ + if (offset >= PAGE_SIZE || offset % sizes[blktype] != 0) { + panic("kfree: subpage free of invalid addr %p\n", 
ptr); + } #ifdef GUARDS - blocksize = sizes[blktype]; - smallerblocksize = blktype > 0 ? sizes[blktype - 1] : 0; - checkguardband(ptraddr, smallerblocksize, blocksize); + blocksize = sizes[blktype]; + smallerblocksize = blktype > 0 ? sizes[blktype - 1] : 0; + checkguardband(ptraddr, smallerblocksize, blocksize); #endif - /* - * Clear the block to 0xdeadbeef to make it easier to detect - * uses of dangling pointers. - */ - fill_deadbeef((void *)ptraddr, sizes[blktype]); + /* + * Clear the block to 0xdeadbeef to make it easier to detect + * uses of dangling pointers. + */ + fill_deadbeef((void *)ptraddr, sizes[blktype]); - /* - * We probably ought to check for free twice by seeing if the block - * is already on the free list. But that's expensive, so we don't. - */ + /* + * We probably ought to check for free twice by seeing if the block + * is already on the free list. But that's expensive, so we don't. + */ - fla = prpage + offset; - fl = (struct freelist *)fla; - if (pr->freelist_offset == INVALID_OFFSET) { - fl->next = NULL; - } else { - fl->next = (struct freelist *)(prpage + pr->freelist_offset); + fla = prpage + offset; + fl = (struct freelist *)fla; + if (pr->freelist_offset == INVALID_OFFSET) { + fl->next = NULL; + } else { + fl->next = (struct freelist *)(prpage + pr->freelist_offset); - /* this block should not already be on the free list! */ + /* this block should not already be on the free list! */ #ifdef SLOW - { - struct freelist *fl2; + { + struct freelist *fl2; - for (fl2 = fl->next; fl2 != NULL; fl2 = fl2->next) { - KASSERT(fl2 != fl); - } - } + for (fl2 = fl->next; fl2 != NULL; fl2 = fl2->next) { + KASSERT(fl2 != fl); + } + } #else - /* check just the head */ - KASSERT(fl != fl->next); + /* check just the head */ + KASSERT(fl != fl->next); #endif - } - pr->freelist_offset = offset; - pr->nfree++; + } + pr->freelist_offset = offset; + pr->nfree++; - KASSERT(pr->nfree <= PAGE_SIZE / sizes[blktype]); - if (pr->nfree == PAGE_SIZE / sizes[blktype]) { - /* Whole page is free. */ - remove_lists(pr, blktype); - freepageref(pr); - /* Call free_kpages without kmalloc_spinlock. */ - spinlock_release(&kmalloc_spinlock); - free_kpages(prpage); - } - else { - spinlock_release(&kmalloc_spinlock); - } + KASSERT(pr->nfree <= PAGE_SIZE / sizes[blktype]); + if (pr->nfree == PAGE_SIZE / sizes[blktype]) { + /* Whole page is free. */ + remove_lists(pr, blktype); + freepageref(pr); + /* Call free_kpages without kmalloc_spinlock. */ + spinlock_release(&kmalloc_spinlock); + free_kpages(prpage); + } else { + spinlock_release(&kmalloc_spinlock); + } #ifdef SLOWER /* Don't get the lock unless checksubpages does something. */ - spinlock_acquire(&kmalloc_spinlock); - checksubpages(); - spinlock_release(&kmalloc_spinlock); + spinlock_acquire(&kmalloc_spinlock); + checksubpages(); + spinlock_release(&kmalloc_spinlock); #endif - return 0; + return 0; } // @@ -1166,59 +1104,54 @@ subpage_kfree(void *ptr) * Allocate a block of size SZ. Redirect either to subpage_kmalloc or * alloc_kpages depending on how big SZ is. 
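The free path above pushes the block back as the new list head and, in the cheap (non-SLOW) configuration, at least catches an immediate double free via KASSERT(fl != fl->next). A standalone model of that head push and check:

#include <assert.h>
#include <stddef.h>

struct freelist {
	struct freelist *next;
};

static struct freelist *head;

static void push_free(void *block) {
	struct freelist *fl = block;

	fl->next = head;
	/* Freeing the current head twice in a row makes fl->next == fl. */
	assert(fl != fl->next);
	head = fl;
}

int main(void) {
	struct freelist a, b;    /* stand-ins for two freed blocks */

	push_free(&a);
	push_free(&b);
	assert(head == &b && head->next == &a);

	/* A second push_free(&b) here would trip the assertion. */
	return 0;
}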
*/ -void * -kmalloc(size_t sz) -{ - size_t checksz; +void *kmalloc(size_t sz) { + size_t checksz; #ifdef LABELS - vaddr_t label; + vaddr_t label; #endif #ifdef LABELS #ifdef __GNUC__ - label = (vaddr_t)__builtin_return_address(0); + label = (vaddr_t)__builtin_return_address(0); #else #error "Don't know how to get return address with this compiler" #endif /* __GNUC__ */ #endif /* LABELS */ - checksz = sz + GUARD_OVERHEAD + LABEL_OVERHEAD; - if (checksz >= LARGEST_SUBPAGE_SIZE) { - unsigned long npages; - vaddr_t address; + checksz = sz + GUARD_OVERHEAD + LABEL_OVERHEAD; + if (checksz >= LARGEST_SUBPAGE_SIZE) { + unsigned long npages; + vaddr_t address; - /* Round up to a whole number of pages. */ - npages = (sz + PAGE_SIZE - 1)/PAGE_SIZE; - address = alloc_kpages(npages); - if (address==0) { - return NULL; - } - KASSERT(address % PAGE_SIZE == 0); + /* Round up to a whole number of pages. */ + npages = (sz + PAGE_SIZE - 1) / PAGE_SIZE; + address = alloc_kpages(npages); + if (address == 0) { + return NULL; + } + KASSERT(address % PAGE_SIZE == 0); - return (void *)address; - } + return (void *)address; + } #ifdef LABELS - return subpage_kmalloc(sz, label); + return subpage_kmalloc(sz, label); #else - return subpage_kmalloc(sz); + return subpage_kmalloc(sz); #endif } /* * Free a block previously returned from kmalloc. */ -void -kfree(void *ptr) -{ - /* - * Try subpage first; if that fails, assume it's a big allocation. - */ - if (ptr == NULL) { - return; - } else if (subpage_kfree(ptr)) { - KASSERT((vaddr_t)ptr%PAGE_SIZE==0); - free_kpages((vaddr_t)ptr); - } +void kfree(void *ptr) { + /* + * Try subpage first; if that fails, assume it's a big allocation. + */ + if (ptr == NULL) { + return; + } else if (subpage_kfree(ptr)) { + KASSERT((vaddr_t)ptr % PAGE_SIZE == 0); + free_kpages((vaddr_t)ptr); + } } -
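To make the split concrete: kmalloc() adds the (possibly zero) guard and label overhead to the request, sends anything at or above the largest subpage class straight to the page allocator, and rounds such requests up to whole pages. The same arithmetic in standalone form; the threshold and overhead values below are placeholders, not the kernel's configured ones.

#include <assert.h>
#include <stddef.h>

#define PAGE_SIZE             4096
#define LARGEST_SUBPAGE_SIZE  2048   /* placeholder threshold */
#define OVERHEAD              8      /* placeholder guard+label overhead */

/* Returns 0 for "use the subpage allocator", otherwise the page count. */
static unsigned long route(size_t sz) {
	size_t checksz = sz + OVERHEAD;

	if (checksz >= LARGEST_SUBPAGE_SIZE) {
		return (sz + PAGE_SIZE - 1) / PAGE_SIZE;   /* round up */
	}
	return 0;
}

int main(void) {
	assert(route(100) == 0);    /* small request: subpage allocator */
	assert(route(2040) == 1);   /* overhead pushes it over the threshold */
	assert(route(4096) == 1);   /* exactly one page */
	assert(route(4097) == 2);   /* just over: two pages */
	return 0;
}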