fix: format + don't track build folders

minhtrannhat 2025-03-02 18:08:51 -05:00
parent b77ad042ca
commit c173f2b9b8
Signed by: minhtrannhat
GPG Key ID: E13CFA85C53F8062
8 changed files with 1651 additions and 1862 deletions

.gitignore (vendored)

@@ -1,2 +1,3 @@
 kern/compile/
-build/
+build/*
+**/build
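
Note on the ignore patterns above: a pattern containing a non-trailing slash, like build/*, is anchored to the .gitignore's own directory, so it ignores only the contents of the top-level build directory, while **/build matches a directory named build at any depth. Ignoring alone does not untrack files already in the index; something like git rm -r --cached build (mentioned here only as the usual remedy, not part of this commit) is needed for files that were committed earlier.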

spl.h

@@ -92,18 +92,9 @@ void spllower(int oldipl, int newipl);
 ////////////////////////////////////////////////////////////
 
 SPL_INLINE
-int
-spl0(void)
-{
-        return splx(IPL_NONE);
-}
+int spl0(void) { return splx(IPL_NONE); }
 
 SPL_INLINE
-int
-splhigh(void)
-{
-        return splx(IPL_HIGH);
-}
+int splhigh(void) { return splx(IPL_HIGH); }
 
-
 #endif /* _SPL_H_ */
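
For context, the reformatted spl0() and splhigh() are thin wrappers around splx(), which sets the interrupt priority level and returns the previous one. A minimal usage sketch of this API (hypothetical caller, not code from this commit):

        int spl;

        spl = splhigh();  /* raise to IPL_HIGH; remember the old level */
        /* ... touch state shared with interrupt handlers ... */
        splx(spl);        /* restore the level saved above */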

synch.h

@@ -34,7 +34,6 @@
  * Header file for synchronization primitives.
  */
 
-
 #include <spinlock.h>
 
 /*
@@ -62,7 +61,6 @@ void sem_destroy(struct semaphore *);
 void P(struct semaphore *);
 void V(struct semaphore *);
 
-
 /*
  * Simple lock for mutual exclusion.
  *
@@ -97,7 +95,6 @@ void lock_acquire(struct lock *);
 void lock_release(struct lock *);
 bool lock_do_i_hold(struct lock *);
 
-
 /*
  * Condition variable.
  *
@@ -138,5 +135,4 @@ void cv_wait(struct cv *cv, struct lock *lock);
 void cv_signal(struct cv *cv, struct lock *lock);
 void cv_broadcast(struct cv *cv, struct lock *lock);
 
-
 #endif /* _SYNCH_H_ */
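
Since this header only declares the primitives, a reminder of how they compose may help. A sketch of the canonical condition-variable pattern these declarations support (Mesa-style, so the condition is rechecked in a loop; the predicate name is illustrative):

        lock_acquire(lock);
        while (!data_ready) {      /* hypothetical predicate */
                cv_wait(cv, lock); /* releases lock while asleep, reacquires before returning */
        }
        /* data_ready holds here, with lock held */
        lock_release(lock);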

kprintf.c

@@ -40,7 +40,6 @@
 #include <vfs.h>            // for vfs_sync()
 #include <lamebus/ltrace.h> // for ltrace_stop()
 
-
 /* Flags word for DEBUG() macro. */
 uint32_t dbflags = 0;
 
@@ -50,20 +49,16 @@ static struct lock *kprintf_lock;
 /* Lock for polled kprintfs */
 static struct spinlock kprintf_spinlock;
 
-
 /*
  * Warning: all this has to work from interrupt handlers and when
  * interrupts are disabled.
  */
 
-
 /*
  * Create the kprintf lock. Must be called before creating a second
  * thread or enabling a second CPU.
  */
-void
-kprintf_bootstrap(void)
-{
+void kprintf_bootstrap(void) {
         KASSERT(kprintf_lock == NULL);
 
         kprintf_lock = lock_create("kprintf_lock");
@@ -76,10 +71,7 @@ kprintf_bootstrap(void)
 /*
  * Send characters to the console. Backend for __printf.
  */
-static
-void
-console_send(void *junk, const char *data, size_t len)
-{
+static void console_send(void *junk, const char *data, size_t len) {
         size_t i;
 
         (void)junk;
@@ -92,22 +84,17 @@ console_send(void *junk, const char *data, size_t len)
 /*
  * Printf to the console.
  */
-int
-kprintf(const char *fmt, ...)
-{
+int kprintf(const char *fmt, ...) {
         int chars;
         va_list ap;
         bool dolock;
 
-        dolock = kprintf_lock != NULL
-                && curthread->t_in_interrupt == false
-                && curthread->t_curspl == 0
-                && curcpu->c_spinlocks == 0;
+        dolock = kprintf_lock != NULL && curthread->t_in_interrupt == false &&
+                 curthread->t_curspl == 0 && curcpu->c_spinlocks == 0;
 
         if (dolock) {
                 lock_acquire(kprintf_lock);
-        }
-        else {
+        } else {
                 spinlock_acquire(&kprintf_spinlock);
         }
@@ -117,8 +104,7 @@ kprintf(const char *fmt, ...)
         if (dolock) {
                 lock_release(kprintf_lock);
-        }
-        else {
+        } else {
                 spinlock_release(&kprintf_spinlock);
         }
@@ -130,9 +116,7 @@ kprintf(const char *fmt, ...)
  * passed and then halts the system.
  */
-void
-panic(const char *fmt, ...)
-{
+void panic(const char *fmt, ...) {
         va_list ap;
 
         /*
@@ -202,15 +186,13 @@ panic(const char *fmt, ...)
          * Last resort, just in case.
          */
-        for (;;);
+        for (;;)
+                ;
 }
 
 /*
  * Assertion failures go through this.
  */
-void
-badassert(const char *expr, const char *file, int line, const char *func)
-{
-        panic("Assertion failed: %s, at %s:%d (%s)\n",
-                expr, file, line, func);
+void badassert(const char *expr, const char *file, int line, const char *func) {
+        panic("Assertion failed: %s, at %s:%d (%s)\n", expr, file, line, func);
 }
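
The dolock expression reformatted above is the interesting logic in this file: kprintf may block on kprintf_lock only when sleeping is legal, that is, when the lock already exists, the caller is not in an interrupt handler, interrupts are not disabled (t_curspl == 0), and no spinlocks are held. In any other context it falls back to kprintf_spinlock, which never sleeps.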

main.c

@@ -51,7 +51,6 @@
 #include <version.h>
 #include "autoconf.h" // for pseudoconfig
 
-
 /*
  * These two pieces of data are maintained by the makefiles and build system.
  * buildconfig is the name of the config file the kernel was configured with.
@@ -71,14 +70,10 @@ static const char harvard_copyright[] =
     "Copyright (c) 2000, 2001-2005, 2008-2011, 2013, 2014\n"
     "   President and Fellows of Harvard College. All rights reserved.\n";
 
-
 /*
  * Initial boot sequence.
  */
-static
-void
-boot(void)
-{
+static void boot(void) {
         /*
          * The order of these is important!
          * Don't go changing it without thinking about the consequences.
@@ -101,8 +96,8 @@ boot(void)
         kprintf("%s", harvard_copyright);
         kprintf("\n");
-        kprintf("Put-your-group-name-here's system version %s (%s #%d)\n",
-                GROUP_VERSION, buildconfig, buildversion);
+        kprintf("Minh Tran's system version %s (%s #%d)\n", GROUP_VERSION,
+                buildconfig, buildversion);
         kprintf("\n");
 
         /* Early initialization. */
@@ -143,10 +138,7 @@ boot(void)
 /*
  * Shutdown sequence. Opposite to boot().
  */
-static
-void
-shutdown(void)
-{
+static void shutdown(void) {
         kprintf("Shutting down.\n");
@@ -168,9 +160,7 @@ shutdown(void)
  * not because this is where system call code should go. Other syscall
  * code should probably live in the "syscall" directory.
  */
-int
-sys_reboot(int code)
-{
+int sys_reboot(int code) {
         switch (code) {
         case RB_REBOOT:
         case RB_HALT:
@@ -205,9 +195,7 @@ sys_reboot(int code)
  * Kernel main. Boot up, then fork the menu thread; wait for a reboot
  * request, and then shut down.
  */
-void
-kmain(char *arguments)
-{
+void kmain(char *arguments) {
         boot();
 
         menu(arguments);

synch.c

@@ -44,9 +44,7 @@
 //
 // Semaphore.
 
-struct semaphore *
-sem_create(const char *name, unsigned initial_count)
-{
+struct semaphore *sem_create(const char *name, unsigned initial_count) {
         struct semaphore *sem;
 
         sem = kmalloc(sizeof(*sem));
@@ -73,9 +71,7 @@ sem_create(const char *name, unsigned initial_count)
         return sem;
 }
 
-void
-sem_destroy(struct semaphore *sem)
-{
+void sem_destroy(struct semaphore *sem) {
         KASSERT(sem != NULL);
 
         /* wchan_cleanup will assert if anyone's waiting on it */
@@ -85,9 +81,7 @@ sem_destroy(struct semaphore *sem)
         kfree(sem);
 }
 
-void
-P(struct semaphore *sem)
-{
+void P(struct semaphore *sem) {
         KASSERT(sem != NULL);
 
         /*
@@ -120,9 +114,7 @@ P(struct semaphore *sem)
         spinlock_release(&sem->sem_lock);
 }
 
-void
-V(struct semaphore *sem)
-{
+void V(struct semaphore *sem) {
         KASSERT(sem != NULL);
 
         spinlock_acquire(&sem->sem_lock);
@@ -138,9 +130,7 @@ V(struct semaphore *sem)
 //
 // Lock.
 
-struct lock *
-lock_create(const char *name)
-{
+struct lock *lock_create(const char *name) {
         struct lock *lock;
 
         lock = kmalloc(sizeof(*lock));
@@ -161,9 +151,7 @@ lock_create(const char *name)
         return lock;
 }
 
-void
-lock_destroy(struct lock *lock)
-{
+void lock_destroy(struct lock *lock) {
         KASSERT(lock != NULL);
 
         // add stuff here as needed
@@ -172,9 +160,7 @@ lock_destroy(struct lock *lock)
         kfree(lock);
 }
 
-void
-lock_acquire(struct lock *lock)
-{
+void lock_acquire(struct lock *lock) {
         /* Call this (atomically) before waiting for a lock */
         // HANGMAN_WAIT(&curthread->t_hangman, &lock->lk_hangman);
@@ -186,9 +172,7 @@ lock_acquire(struct lock *lock)
         // HANGMAN_ACQUIRE(&curthread->t_hangman, &lock->lk_hangman);
 }
 
-void
-lock_release(struct lock *lock)
-{
+void lock_release(struct lock *lock) {
         /* Call this (atomically) when the lock is released */
         // HANGMAN_RELEASE(&curthread->t_hangman, &lock->lk_hangman);
@@ -197,9 +181,7 @@ lock_release(struct lock *lock)
         (void)lock; // suppress warning until code gets written
 }
 
-bool
-lock_do_i_hold(struct lock *lock)
-{
+bool lock_do_i_hold(struct lock *lock) {
         // Write this
 
         (void)lock; // suppress warning until code gets written
@@ -211,10 +193,7 @@ lock_do_i_hold(struct lock *lock)
 //
 // CV
 
-
-struct cv *
-cv_create(const char *name)
-{
+struct cv *cv_create(const char *name) {
         struct cv *cv;
 
         cv = kmalloc(sizeof(*cv));
@@ -233,9 +212,7 @@ cv_create(const char *name)
         return cv;
 }
 
-void
-cv_destroy(struct cv *cv)
-{
+void cv_destroy(struct cv *cv) {
         KASSERT(cv != NULL);
 
         // add stuff here as needed
@@ -244,25 +221,19 @@ cv_destroy(struct cv *cv)
         kfree(cv);
 }
 
-void
-cv_wait(struct cv *cv, struct lock *lock)
-{
+void cv_wait(struct cv *cv, struct lock *lock) {
         // Write this
         (void)cv;   // suppress warning until code gets written
         (void)lock; // suppress warning until code gets written
 }
 
-void
-cv_signal(struct cv *cv, struct lock *lock)
-{
+void cv_signal(struct cv *cv, struct lock *lock) {
         // Write this
         (void)cv;   // suppress warning until code gets written
         (void)lock; // suppress warning until code gets written
 }
 
-void
-cv_broadcast(struct cv *cv, struct lock *lock)
-{
+void cv_broadcast(struct cv *cv, struct lock *lock) {
         // Write this
         (void)cv;   // suppress warning until code gets written
         (void)lock; // suppress warning until code gets written
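
For reference, the semaphore half of this file is complete: P() sleeps until the count is positive and decrements it; V() increments it and wakes one sleeper. A usage sketch (hypothetical rendezvous, not code from this commit):

        struct semaphore *done;

        done = sem_create("done", 0); /* count 0: the first P() blocks until a V() */
        /* ... start a worker that calls V(done) when finished ... */
        P(done);                      /* wait here for the worker */
        sem_destroy(done);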

thread.c

@@ -51,7 +51,6 @@
 #include <mainbus.h>
 #include <vnode.h>
 
-
 /* Magic number used as a guard value on kernel thread stacks. */
 #define THREAD_STACK_MAGIC 0xbaadf00d
 
@@ -76,10 +75,7 @@ static struct semaphore *cpu_startup_sem;
  * (sometimes) catch kernel stack overflows. Use thread_checkstack()
  * to test this.
  */
-static
-void
-thread_checkstack_init(struct thread *thread)
-{
+static void thread_checkstack_init(struct thread *thread) {
         ((uint32_t *)thread->t_stack)[0] = THREAD_STACK_MAGIC;
         ((uint32_t *)thread->t_stack)[1] = THREAD_STACK_MAGIC;
         ((uint32_t *)thread->t_stack)[2] = THREAD_STACK_MAGIC;
@@ -96,10 +92,7 @@ thread_checkstack_init(struct thread *thread)
  * cannot be freed (which in turn is the case if the stack is the boot
  * stack, and the thread is the boot thread) this doesn't do anything.
  */
-static
-void
-thread_checkstack(struct thread *thread)
-{
+static void thread_checkstack(struct thread *thread) {
         if (thread->t_stack != NULL) {
                 KASSERT(((uint32_t *)thread->t_stack)[0] == THREAD_STACK_MAGIC);
                 KASSERT(((uint32_t *)thread->t_stack)[1] == THREAD_STACK_MAGIC);
@@ -112,10 +105,7 @@ thread_checkstack(struct thread *thread)
  * Create a thread. This is used both to create a first thread
  * for each CPU and to create subsequent forked threads.
  */
-static
-struct thread *
-thread_create(const char *name)
-{
+static struct thread *thread_create(const char *name) {
         struct thread *thread;
 
         DEBUGASSERT(name != NULL);
@@ -160,9 +150,7 @@ thread_create(const char *name)
  * board config or whatnot) is tracked separately because it is not
  * necessarily anything sane or meaningful.
  */
-struct cpu *
-cpu_create(unsigned hardware_number)
-{
+struct cpu *cpu_create(unsigned hardware_number) {
         struct cpu *c;
         int result;
         char namebuf[16];
@@ -208,8 +196,7 @@ cpu_create(unsigned hardware_number)
                  * make it possible to free the boot stack?)
                  */
                 /*c->c_curthread->t_stack = ... */
-        }
-        else {
+        } else {
                 c->c_curthread->t_stack = kmalloc(STACK_SIZE);
                 if (c->c_curthread->t_stack == NULL) {
                         panic("cpu_create: couldn't allocate stack");
@@ -260,10 +247,7 @@ cpu_create(unsigned hardware_number)
  *
  * (Freeing the stack you're actually using to run is ... inadvisable.)
  */
-static
-void
-thread_destroy(struct thread *thread)
-{
+static void thread_destroy(struct thread *thread) {
         KASSERT(thread != curthread);
         KASSERT(thread->t_state != S_RUN);
@@ -293,10 +277,7 @@ thread_destroy(struct thread *thread)
  *
  * The list of zombies is per-cpu.
  */
-static
-void
-exorcise(void)
-{
+static void exorcise(void) {
         struct thread *z;
 
         while ((z = threadlist_remhead(&curcpu->c_zombies)) != NULL) {
@@ -311,9 +292,7 @@ exorcise(void)
  * possible) to make sure we don't end up letting any other threads
  * run.
  */
-void
-thread_panic(void)
-{
+void thread_panic(void) {
         /*
          * Kill off other CPUs.
          *
@@ -353,9 +332,7 @@ thread_panic(void)
 /*
  * At system shutdown, ask the other CPUs to switch off.
  */
-void
-thread_shutdown(void)
-{
+void thread_shutdown(void) {
         /*
          * Stop the other CPUs.
         *
@@ -368,9 +345,7 @@ thread_shutdown(void)
 /*
  * Thread system initialization.
  */
-void
-thread_bootstrap(void)
-{
+void thread_bootstrap(void) {
         cpuarray_init(&allcpus);
 
         /*
@@ -402,9 +377,7 @@ thread_bootstrap(void)
  * to do anything. The startup thread can just exit; we only need it
  * to be able to get into thread_switch() properly.
  */
-void
-cpu_hatch(unsigned software_number)
-{
+void cpu_hatch(unsigned software_number) {
         char buf[64];
 
         KASSERT(curcpu != NULL);
@@ -423,9 +396,7 @@ cpu_hatch(unsigned software_number)
 /*
  * Start up secondary cpus. Called from boot().
  */
-void
-thread_start_cpus(void)
-{
+void thread_start_cpus(void) {
         char buf[64];
         unsigned i;
@@ -447,10 +418,8 @@ thread_start_cpus(void)
  *
  * targetcpu might be curcpu; it might not be, too.
  */
-static
-void
-thread_make_runnable(struct thread *target, bool already_have_lock)
-{
+static void thread_make_runnable(struct thread *target,
+                                 bool already_have_lock) {
         struct cpu *targetcpu;
 
         /* Lock the run queue of the target thread's cpu. */
@@ -459,8 +428,7 @@ thread_make_runnable(struct thread *target, bool already_have_lock)
         if (already_have_lock) {
                 /* The target thread's cpu should be already locked. */
                 KASSERT(spinlock_do_i_hold(&targetcpu->c_runqueue_lock));
-        }
-        else {
+        } else {
                 spinlock_acquire(&targetcpu->c_runqueue_lock);
         }
@@ -491,12 +459,9 @@ thread_make_runnable(struct thread *target, bool already_have_lock)
  * process is inherited from the caller. It will start on the same CPU
  * as the caller, unless the scheduler intervenes first.
  */
-int
-thread_fork(const char *name,
-            struct proc *proc,
-            void (*entrypoint)(void *data1, unsigned long data2),
-            void *data1, unsigned long data2)
-{
+int thread_fork(const char *name, struct proc *proc,
+                void (*entrypoint)(void *data1, unsigned long data2),
+                void *data1, unsigned long data2) {
         struct thread *newthread;
         int result;
@@ -557,10 +522,8 @@ thread_fork(const char *name,
  * WC, protected by the spinlock LK. Otherwise WC and Lk should be
  * NULL.
  */
-static
-void
-thread_switch(threadstate_t newstate, struct wchan *wc, struct spinlock *lk)
-{
+static void thread_switch(threadstate_t newstate, struct wchan *wc,
                           struct spinlock *lk) {
         struct thread *cur, *next;
         int spl;
@@ -709,7 +672,6 @@ thread_switch(threadstate_t newstate, struct wchan *wc, struct spinlock *lk)
          * thread_startup.
          */
 
-
         /* Clear the wait channel and set the thread state. */
         cur->t_wchan_name = NULL;
         cur->t_state = S_RUN;
@@ -735,10 +697,8 @@ thread_switch(threadstate_t newstate, struct wchan *wc, struct spinlock *lk)
  * thread_switch, the beginning part of this function must match the
  * tail of thread_switch.
  */
-void
-thread_startup(void (*entrypoint)(void *data1, unsigned long data2),
-               void *data1, unsigned long data2)
-{
+void thread_startup(void (*entrypoint)(void *data1, unsigned long data2),
+                    void *data1, unsigned long data2) {
         struct thread *cur;
 
         cur = curthread;
@@ -775,9 +735,7 @@ thread_startup(void (*entrypoint)(void *data1, unsigned long data2),
  *
  * Does not return.
  */
-void
-thread_exit(void)
-{
+void thread_exit(void) {
         struct thread *cur;
 
         cur = curthread;
@@ -803,11 +761,7 @@ thread_exit(void)
 /*
  * Yield the cpu to another process, but stay runnable.
  */
-void
-thread_yield(void)
-{
-        thread_switch(S_READY, NULL, NULL);
-}
+void thread_yield(void) { thread_switch(S_READY, NULL, NULL); }
 
 ////////////////////////////////////////////////////////////
 
@@ -818,9 +772,7 @@ thread_yield(void)
  * the current CPU's run queue by job priority.
  */
-void
-schedule(void)
-{
+void schedule(void) {
         /*
          * You can write this. If we do nothing, threads will run in
          * round-robin fashion.
@@ -844,9 +796,7 @@ schedule(void)
  * System/161 does not (yet) model such cache effects, we'll be very
  * aggressive.
 */
-void
-thread_consider_migration(void)
-{
+void thread_consider_migration(void) {
         unsigned my_count, total_count, one_share, to_send;
         unsigned i, numcpus;
         struct cpu *c;
@@ -917,9 +867,8 @@ thread_consider_migration(void)
                         t->t_cpu = c;
                         threadlist_addtail(&c->c_runqueue, t);
-                        DEBUG(DB_THREADS,
-                              "Migrated thread %s: cpu %u -> %u",
-                              t->t_name, curcpu->c_number, c->c_number);
+                        DEBUG(DB_THREADS, "Migrated thread %s: cpu %u -> %u", t->t_name,
+                              curcpu->c_number, c->c_number);
                         to_send--;
                         if (c->c_isidle) {
                                 /*
@@ -963,9 +912,7 @@ thread_consider_migration(void)
  * arrangements should be made to free it after the wait channel is
  * destroyed.
 */
-struct wchan *
-wchan_create(const char *name)
-{
+struct wchan *wchan_create(const char *name) {
         struct wchan *wc;
 
         wc = kmalloc(sizeof(*wc));
@@ -982,9 +929,7 @@ wchan_create(const char *name)
  * Destroy a wait channel. Must be empty and unlocked.
  * (The corresponding cleanup functions require this.)
 */
-void
-wchan_destroy(struct wchan *wc)
-{
+void wchan_destroy(struct wchan *wc) {
         threadlist_cleanup(&wc->wc_threads);
         kfree(wc);
 }
@@ -996,9 +941,7 @@ wchan_destroy(struct wchan *wc)
  * be locked. The call to thread_switch unlocks it; we relock it
  * before returning.
 */
-void
-wchan_sleep(struct wchan *wc, struct spinlock *lk)
-{
+void wchan_sleep(struct wchan *wc, struct spinlock *lk) {
         /* may not sleep in an interrupt handler */
         KASSERT(!curthread->t_in_interrupt);
@@ -1015,9 +958,7 @@ wchan_sleep(struct wchan *wc, struct spinlock *lk)
 /*
  * Wake up one thread sleeping on a wait channel.
 */
-void
-wchan_wakeone(struct wchan *wc, struct spinlock *lk)
-{
+void wchan_wakeone(struct wchan *wc, struct spinlock *lk) {
         struct thread *target;
 
         KASSERT(spinlock_do_i_hold(lk));
@@ -1044,9 +985,7 @@ wchan_wakeone(struct wchan *wc, struct spinlock *lk)
 /*
  * Wake up all threads sleeping on a wait channel.
 */
-void
-wchan_wakeall(struct wchan *wc, struct spinlock *lk)
-{
+void wchan_wakeall(struct wchan *wc, struct spinlock *lk) {
         struct thread *target;
         struct threadlist list;
@@ -1078,9 +1017,7 @@ wchan_wakeall(struct wchan *wc, struct spinlock *lk)
  * Return nonzero if there are no threads sleeping on the channel.
  * This is meant to be used only for diagnostic purposes.
 */
-bool
-wchan_isempty(struct wchan *wc, struct spinlock *lk)
-{
+bool wchan_isempty(struct wchan *wc, struct spinlock *lk) {
         bool ret;
 
         KASSERT(spinlock_do_i_hold(lk));
@@ -1098,9 +1035,7 @@ wchan_isempty(struct wchan *wc, struct spinlock *lk)
 /*
  * Send an IPI (inter-processor interrupt) to the specified CPU.
 */
-void
-ipi_send(struct cpu *target, int code)
-{
+void ipi_send(struct cpu *target, int code) {
         KASSERT(code >= 0 && code < 32);
 
         spinlock_acquire(&target->c_ipi_lock);
@@ -1112,9 +1047,7 @@ ipi_send(struct cpu *target, int code)
 /*
  * Send an IPI to all CPUs.
 */
-void
-ipi_broadcast(int code)
-{
+void ipi_broadcast(int code) {
         unsigned i;
         struct cpu *c;
@@ -1129,9 +1062,7 @@ ipi_broadcast(int code)
 /*
  * Send a TLB shootdown IPI to the specified CPU.
 */
-void
-ipi_tlbshootdown(struct cpu *target, const struct tlbshootdown *mapping)
-{
+void ipi_tlbshootdown(struct cpu *target, const struct tlbshootdown *mapping) {
         unsigned n;
 
         spinlock_acquire(&target->c_ipi_lock);
@@ -1148,8 +1079,7 @@ ipi_tlbshootdown(struct cpu *target, const struct tlbshootdown *mapping)
                  * reduce the number of unnecessary shootdowns.
                  */
                 panic("ipi_tlbshootdown: Too many shootdowns queued\n");
-        }
-        else {
+        } else {
                 target->c_shootdown[n] = *mapping;
                 target->c_numshootdown = n + 1;
         }
@@ -1163,9 +1093,7 @@ ipi_tlbshootdown(struct cpu *target, const struct tlbshootdown *mapping)
 /*
  * Handle an incoming interprocessor interrupt.
 */
-void
-interprocessor_interrupt(void)
-{
+void interprocessor_interrupt(void) {
         uint32_t bits;
         unsigned i;
@@ -1182,8 +1110,7 @@ interprocessor_interrupt(void)
                 spinlock_release(&curcpu->c_ipi_lock);
                 spinlock_acquire(&curcpu->c_runqueue_lock);
                 if (!curcpu->c_isidle) {
-                        kprintf("cpu%d: offline: warning: not idle\n",
-                                curcpu->c_number);
+                        kprintf("cpu%d: offline: warning: not idle\n", curcpu->c_number);
                 }
                 spinlock_release(&curcpu->c_runqueue_lock);
                 kprintf("cpu%d: offline.\n", curcpu->c_number);

kmalloc.c

@@ -36,14 +36,10 @@
  * Kernel malloc.
  */
 
-
 /*
  * Fill a block with 0xdeadbeef.
  */
-static
-void
-fill_deadbeef(void *vptr, size_t len)
-{
+static void fill_deadbeef(void *vptr, size_t len) {
         uint32_t *ptr = vptr;
         size_t i;
@@ -215,10 +211,7 @@ static struct kheap_root kheaproots[NUM_PAGEREFPAGES];
 /*
  * Allocate a page to hold pagerefs.
  */
-static
-void
-allocpagerefpage(struct kheap_root *root)
-{
+static void allocpagerefpage(struct kheap_root *root) {
         vaddr_t va;
 
         KASSERT(root->page == NULL);
@@ -253,10 +246,7 @@ allocpagerefpage(struct kheap_root *root)
 /*
  * Allocate a pageref structure.
  */
-static
-struct pageref *
-allocpageref(void)
-{
+static struct pageref *allocpageref(void) {
         unsigned i, j;
         uint32_t k;
         unsigned whichroot;
@@ -300,10 +290,7 @@ allocpageref(void)
 /*
  * Release a pageref structure.
 */
-static
-void
-freepageref(struct pageref *p)
-{
+static void freepageref(struct pageref *p) {
         size_t i, j;
         uint32_t k;
         unsigned whichroot;
@@ -365,10 +352,8 @@ static struct pageref *allbase;
 /*
  * Set up the guard values in a block we're about to return.
 */
-static
-void *
-establishguardband(void *block, size_t clientsize, size_t blocksize)
-{
+static void *establishguardband(void *block, size_t clientsize,
+                                size_t blocksize) {
         vaddr_t lowguard, lowsize, data, enddata, highguard, highsize, i;
 
         KASSERT(clientsize + GUARD_OVERHEAD <= blocksize);
@@ -398,10 +383,8 @@ establishguardband(void *block, size_t clientsize, size_t blocksize)
 /*
  * Validate the guard values in an existing block.
 */
-static
-void
-checkguardband(vaddr_t blockaddr, size_t smallerblocksize, size_t blocksize)
-{
+static void checkguardband(vaddr_t blockaddr, size_t smallerblocksize,
+                           size_t blocksize) {
         /*
          * The first two bytes of the block are the lower guard band.
          * The next two bytes are the real size (the size of the
@@ -457,10 +440,7 @@ checkguardband(vaddr_t blockaddr, size_t smallerblocksize, size_t blocksize)
  * The first word of the block is a freelist pointer and should not be
  * deadbeef; the rest of the block should be only deadbeef.
 */
-static
-void
-checkdeadbeef(void *block, size_t blocksize)
-{
+static void checkdeadbeef(void *block, size_t blocksize) {
         uint32_t *ptr = block;
         size_t i;
@@ -490,10 +470,7 @@ checkdeadbeef(void *block, size_t blocksize)
  * assertion as a bit in isfree is set twice; if not, a circular
  * freelist will cause an infinite loop.
 */
-static
-void
-checksubpage(struct pageref *pr)
-{
+static void checksubpage(struct pageref *pr) {
         vaddr_t prpage, fla;
         struct freelist *fl;
         int blktype;
@@ -560,8 +537,7 @@ checksubpage(struct pageref *pr)
         for (i = 0; i < numblocks; i++) {
                 mask = 1U << (i % 32);
                 if ((isfree[i / 32] & mask) == 0) {
-                        checkguardband(prpage + i * blocksize,
-                                       smallerblocksize, blocksize);
+                        checkguardband(prpage + i * blocksize, smallerblocksize, blocksize);
                 }
         }
 #endif
@@ -575,10 +551,7 @@ checksubpage(struct pageref *pr)
  * Run checksubpage on all heap pages. This also checks that the
  * linked lists of pagerefs are more or less intact.
 */
-static
-void
-checksubpages(void)
-{
+static void checksubpages(void) {
         struct pageref *pr;
         int i;
         unsigned sc = 0, ac = 0;
@@ -622,10 +595,7 @@ static unsigned mallocgeneration;
 /*
  * Label a block of memory.
 */
-static
-void *
-establishlabel(void *block, vaddr_t label)
-{
+static void *establishlabel(void *block, vaddr_t label) {
         struct malloclabel *ml;
 
         ml = block;
@@ -635,10 +605,7 @@ establishlabel(void *block, vaddr_t label)
         return ml;
 }
 
-static
-void
-dump_subpage(struct pageref *pr, unsigned generation)
-{
+static void dump_subpage(struct pageref *pr, unsigned generation) {
         unsigned blocksize = sizes[PR_BLOCKTYPE(pr)];
         unsigned numblocks = PAGE_SIZE / blocksize;
         unsigned numfreewords = DIVROUNDUP(numblocks, 32);
@@ -671,15 +638,12 @@ dump_subpage(struct pageref *pr, unsigned generation)
                 if (ml->generation != generation) {
                         continue;
                 }
-                kprintf("%5zu bytes at %p, allocated at %p\n",
-                        blocksize, (void *)blockaddr, (void *)ml->label);
+                kprintf("%5zu bytes at %p, allocated at %p\n", blocksize, (void *)blockaddr,
+                        (void *)ml->label);
         }
 }
 
-static
-void
-dump_subpages(unsigned generation)
-{
+static void dump_subpages(unsigned generation) {
         struct pageref *pr;
         int i;
@@ -697,9 +661,7 @@ dump_subpages(unsigned generation)
 #endif /* LABELS */
 
-void
-kheap_nextgeneration(void)
-{
+void kheap_nextgeneration(void) {
 #ifdef LABELS
         spinlock_acquire(&kmalloc_spinlock);
         mallocgeneration++;
@@ -707,9 +669,7 @@ kheap_nextgeneration(void)
 #endif
 }
 
-void
-kheap_dump(void)
-{
+void kheap_dump(void) {
 #ifdef LABELS
         /* print the whole thing with interrupts off */
         spinlock_acquire(&kmalloc_spinlock);
@@ -720,9 +680,7 @@ kheap_dump(void)
 #endif
 }
 
-void
-kheap_dumpall(void)
-{
+void kheap_dumpall(void) {
 #ifdef LABELS
         unsigned i;
@@ -742,10 +700,7 @@ kheap_dumpall(void)
 /*
  * Print the allocated/freed map of a single kernel heap page.
 */
-static
-void
-subpage_stats(struct pageref *pr)
-{
+static void subpage_stats(struct pageref *pr) {
         vaddr_t prpage, fla;
         struct freelist *fl;
         int blktype;
@@ -780,9 +735,8 @@ subpage_stats(struct pageref *pr)
                 }
         }
 
-        kprintf("at 0x%08lx: size %-4lu %u/%u free\n",
-                (unsigned long)prpage, (unsigned long) sizes[blktype],
-                (unsigned) pr->nfree, n);
+        kprintf("at 0x%08lx: size %-4lu %u/%u free\n", (unsigned long)prpage,
+                (unsigned long)sizes[blktype], (unsigned)pr->nfree, n);
         kprintf("    ");
         for (i = 0; i < n; i++) {
                 int val = (freemap[i / 32] & (1 << (i % 32))) != 0;
@@ -797,9 +751,7 @@ subpage_stats(struct pageref *pr)
 /*
  * Print the whole heap.
 */
-void
-kheap_printstats(void)
-{
+void kheap_printstats(void) {
         struct pageref *pr;
 
         /* print the whole thing with interrupts off */
@@ -819,10 +771,7 @@ kheap_printstats(void)
 /*
  * Remove a pageref from both lists that it's on.
 */
-static
-void
-remove_lists(struct pageref *pr, int blktype)
-{
+static void remove_lists(struct pageref *pr, int blktype) {
         struct pageref **guy;
 
         KASSERT(blktype >= 0 && blktype < NSIZES);
@@ -848,10 +797,7 @@ remove_lists(struct pageref *pr, int blktype)
  * Given a requested client size, return the block type, that is, the
  * index into the sizes[] array for the block size to use.
 */
-static
-inline
-int blocktype(size_t clientsz)
-{
+static inline int blocktype(size_t clientsz) {
         unsigned i;
         for (i = 0; i < NSIZES; i++) {
                 if (clientsz <= sizes[i]) {
@@ -859,8 +805,7 @@ int blocktype(size_t clientsz)
                 }
         }
 
-        panic("Subpage allocator cannot handle allocation of size %zu\n",
-              clientsz);
+        panic("Subpage allocator cannot handle allocation of size %zu\n", clientsz);
 
         // keep compiler happy
         return 0;
@@ -870,14 +815,12 @@ int blocktype(size_t clientsz)
  * Allocate a block of size SZ, where SZ is not large enough to
  * warrant a whole-page allocation.
 */
-static
-void *
-subpage_kmalloc(size_t sz
+static void *subpage_kmalloc(size_t sz
 #ifdef LABELS
-                , vaddr_t label
+                             ,
+                             vaddr_t label
 #endif
-        )
-{
+) {
         unsigned blktype;   // index into sizes[] that we're using
         struct pageref *pr; // pageref for page we're allocating from
         vaddr_t prpage;     // PR_PAGEADDR(pr)
@@ -935,8 +878,7 @@ subpage_kmalloc(size_t sz
                 fla = (vaddr_t)fl;
                 KASSERT(fla - prpage < PAGE_SIZE);
                 pr->freelist_offset = fla - prpage;
-        }
-        else {
+        } else {
                 KASSERT(pr->nfree == 0);
                 pr->freelist_offset = INVALID_OFFSET;
         }
@@ -1021,10 +963,7 @@ subpage_kmalloc(size_t sz
 /*
  * Free a pointer previously returned from subpage_kmalloc. If the
  * pointer is not on any heap page we recognize, return -1.
 */
-static
-int
-subpage_kfree(void *ptr)
-{
+static int subpage_kfree(void *ptr) {
         int blktype;        // index into sizes[] that we're using
         vaddr_t ptraddr;    // same as ptr
         struct pageref *pr; // pageref for page we're freeing in
@@ -1145,8 +1084,7 @@ subpage_kfree(void *ptr)
                 /* Call free_kpages without kmalloc_spinlock. */
                 spinlock_release(&kmalloc_spinlock);
                 free_kpages(prpage);
-        }
-        else {
+        } else {
                 spinlock_release(&kmalloc_spinlock);
         }
@@ -1166,9 +1104,7 @@ subpage_kfree(void *ptr)
 /*
  * Allocate a block of size SZ. Redirect either to subpage_kmalloc or
  * alloc_kpages depending on how big SZ is.
 */
-void *
-kmalloc(size_t sz)
-{
+void *kmalloc(size_t sz) {
         size_t checksz;
 #ifdef LABELS
         vaddr_t label;
@@ -1208,9 +1144,7 @@ kmalloc(size_t sz)
 /*
  * Free a block previously returned from kmalloc.
 */
-void
-kfree(void *ptr)
-{
+void kfree(void *ptr) {
         /*
          * Try subpage first; if that fails, assume it's a big allocation.
         */
@@ -1221,4 +1155,3 @@ kfree(void *ptr)
                 free_kpages((vaddr_t)ptr);
         }
 }
-
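
For intuition about the subpage allocator above: blocktype() returns the index of the first size class in sizes[] that fits the request, so every small allocation is rounded up to a fixed class, and kmalloc redirects anything too large for the table to alloc_kpages. A worked example, assuming an illustrative size table (the real sizes[] is defined earlier in this file):

        /* suppose sizes[] = { 16, 32, 64, 128, ... }: */
        kmalloc(20);        /* 20 > 16 and 20 <= 32: served from the 32-byte class */
        kmalloc(64);        /* exact fit: served from the 64-byte class */
        kmalloc(PAGE_SIZE); /* too big for any class: redirected to alloc_kpages */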