fix: format + dont track build folders

commit c173f2b9b8
parent b77ad042ca

.gitignore (vendored): 3 lines changed
@@ -1,2 +1,3 @@
 kern/compile/
-build/
+build/*
+**/build
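A side note on the new ignore patterns (my reading of gitignore semantics, not part of the commit): a trailing slash only restricts a pattern to directories, it does not anchor it, so the old `build/` already ignored a directory named build at any depth. The replacement pair is broader and more explicit: `build/*`, because it contains a slash, is anchored to the repository root and ignores everything inside the top-level build directory, while `**/build` ignores anything named build anywhere in the tree. For example:

        build/*       matches build/kernel, build/install/...
        **/build      matches build, kern/build, userland/testbin/build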
@@ -92,18 +92,9 @@ void spllower(int oldipl, int newipl);
 ////////////////////////////////////////////////////////////

 SPL_INLINE
-int
-spl0(void)
-{
-        return splx(IPL_NONE);
-}
+int spl0(void) { return splx(IPL_NONE); }

 SPL_INLINE
-int
-splhigh(void)
-{
-        return splx(IPL_HIGH);
-}
+int splhigh(void) { return splx(IPL_HIGH); }

-
 #endif /* _SPL_H_ */
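With this formatting, spl0() and splhigh() read as what they are: thin wrappers around splx(), which sets the current interrupt priority level and returns the previous one. A typical caller saves and restores that value; a minimal illustrative sketch (standard OS/161 spl usage, not part of this commit):

        int s;

        s = splhigh();  /* raise to IPL_HIGH: disable interrupts on this CPU */
        /* ... touch state shared with interrupt handlers ... */
        splx(s);        /* restore the previous priority level */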
@@ -34,7 +34,6 @@
  * Header file for synchronization primitives.
  */

-
 #include <spinlock.h>

 /*

@@ -62,7 +61,6 @@ void sem_destroy(struct semaphore *);
 void P(struct semaphore *);
 void V(struct semaphore *);

-
 /*
  * Simple lock for mutual exclusion.
  *

@@ -97,7 +95,6 @@ void lock_acquire(struct lock *);
 void lock_release(struct lock *);
 bool lock_do_i_hold(struct lock *);

-
 /*
  * Condition variable.
  *

@@ -138,5 +135,4 @@ void cv_wait(struct cv *cv, struct lock *lock);
 void cv_signal(struct cv *cv, struct lock *lock);
 void cv_broadcast(struct cv *cv, struct lock *lock);

-
 #endif /* _SYNCH_H_ */
@@ -40,7 +40,6 @@
 #include <vfs.h>            // for vfs_sync()
 #include <lamebus/ltrace.h> // for ltrace_stop()

-
 /* Flags word for DEBUG() macro. */
 uint32_t dbflags = 0;

@@ -50,20 +49,16 @@ static struct lock *kprintf_lock;
 /* Lock for polled kprintfs */
 static struct spinlock kprintf_spinlock;

-
 /*
  * Warning: all this has to work from interrupt handlers and when
  * interrupts are disabled.
  */

-
 /*
  * Create the kprintf lock. Must be called before creating a second
  * thread or enabling a second CPU.
  */
-void
-kprintf_bootstrap(void)
-{
+void kprintf_bootstrap(void) {
         KASSERT(kprintf_lock == NULL);

         kprintf_lock = lock_create("kprintf_lock");

@@ -76,15 +71,12 @@ kprintf_bootstrap(void)
 /*
  * Send characters to the console. Backend for __printf.
  */
-static
-void
-console_send(void *junk, const char *data, size_t len)
-{
+static void console_send(void *junk, const char *data, size_t len) {
         size_t i;

         (void)junk;

-        for (i=0; i<len; i++) {
+        for (i = 0; i < len; i++) {
                 putch(data[i]);
         }
 }

@@ -92,22 +84,17 @@ console_send(void *junk, const char *data, size_t len)
 /*
  * Printf to the console.
  */
-int
-kprintf(const char *fmt, ...)
-{
+int kprintf(const char *fmt, ...) {
         int chars;
         va_list ap;
         bool dolock;

-        dolock = kprintf_lock != NULL
-                && curthread->t_in_interrupt == false
-                && curthread->t_curspl == 0
-                && curcpu->c_spinlocks == 0;
+        dolock = kprintf_lock != NULL && curthread->t_in_interrupt == false &&
+                 curthread->t_curspl == 0 && curcpu->c_spinlocks == 0;

         if (dolock) {
                 lock_acquire(kprintf_lock);
-        }
-        else {
+        } else {
                 spinlock_acquire(&kprintf_spinlock);
         }

@@ -117,8 +104,7 @@ kprintf(const char *fmt, ...)

         if (dolock) {
                 lock_release(kprintf_lock);
-        }
-        else {
+        } else {
                 spinlock_release(&kprintf_spinlock);
         }

@@ -130,9 +116,7 @@ kprintf(const char *fmt, ...)
  * passed and then halts the system.
  */

-void
-panic(const char *fmt, ...)
-{
+void panic(const char *fmt, ...) {
         va_list ap;

         /*

@@ -202,15 +186,13 @@ panic(const char *fmt, ...)
  * Last resort, just in case.
  */

-        for (;;);
+        for (;;)
+                ;
 }

 /*
  * Assertion failures go through this.
  */
-void
-badassert(const char *expr, const char *file, int line, const char *func)
-{
-        panic("Assertion failed: %s, at %s:%d (%s)\n",
-              expr, file, line, func);
+void badassert(const char *expr, const char *file, int line, const char *func) {
+        panic("Assertion failed: %s, at %s:%d (%s)\n", expr, file, line, func);
 }
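A note on the reflowed dolock expression above (a restatement of what the diff already shows, not new behavior): kprintf() can be called from contexts that must not sleep, and lock_acquire() may sleep, so the sleeping lock is chosen only when every condition holds; otherwise the polled spinlock is used. Illustrative restatement in C:

        /* safe to sleep -- and hence to take kprintf_lock -- only when: */
        bool can_sleep = kprintf_lock != NULL        /* lock exists (post-bootstrap) */
                      && !curthread->t_in_interrupt  /* not in an interrupt handler */
                      && curthread->t_curspl == 0    /* interrupts are enabled */
                      && curcpu->c_spinlocks == 0;   /* no spinlocks are held */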
@@ -51,7 +51,6 @@
 #include <version.h>
 #include "autoconf.h" // for pseudoconfig

-
 /*
  * These two pieces of data are maintained by the makefiles and build system.
  * buildconfig is the name of the config file the kernel was configured with.

@@ -71,14 +70,10 @@ static const char harvard_copyright[] =
     "Copyright (c) 2000, 2001-2005, 2008-2011, 2013, 2014\n"
     " President and Fellows of Harvard College. All rights reserved.\n";

-
 /*
  * Initial boot sequence.
  */
-static
-void
-boot(void)
-{
+static void boot(void) {
         /*
          * The order of these is important!
          * Don't go changing it without thinking about the consequences.

@@ -101,8 +96,8 @@ boot(void)
         kprintf("%s", harvard_copyright);
         kprintf("\n");

-        kprintf("Put-your-group-name-here's system version %s (%s #%d)\n",
-                GROUP_VERSION, buildconfig, buildversion);
+        kprintf("Minh Tran's system version %s (%s #%d)\n", GROUP_VERSION,
+                buildconfig, buildversion);
         kprintf("\n");

         /* Early initialization. */

@@ -143,10 +138,7 @@ boot(void)
 /*
  * Shutdown sequence. Opposite to boot().
  */
-static
-void
-shutdown(void)
-{
+static void shutdown(void) {

         kprintf("Shutting down.\n");

@@ -168,9 +160,7 @@ shutdown(void)
  * not because this is where system call code should go. Other syscall
  * code should probably live in the "syscall" directory.
  */
-int
-sys_reboot(int code)
-{
+int sys_reboot(int code) {
         switch (code) {
         case RB_REBOOT:
         case RB_HALT:

@@ -205,9 +195,7 @@ sys_reboot(int code)
  * Kernel main. Boot up, then fork the menu thread; wait for a reboot
  * request, and then shut down.
  */
-void
-kmain(char *arguments)
-{
+void kmain(char *arguments) {
         boot();

         menu(arguments);
@@ -44,9 +44,7 @@
 //
 // Semaphore.

-struct semaphore *
-sem_create(const char *name, unsigned initial_count)
-{
+struct semaphore *sem_create(const char *name, unsigned initial_count) {
         struct semaphore *sem;

         sem = kmalloc(sizeof(*sem));

@@ -73,9 +71,7 @@ sem_create(const char *name, unsigned initial_count)
         return sem;
 }

-void
-sem_destroy(struct semaphore *sem)
-{
+void sem_destroy(struct semaphore *sem) {
         KASSERT(sem != NULL);

         /* wchan_cleanup will assert if anyone's waiting on it */

@@ -85,9 +81,7 @@ sem_destroy(struct semaphore *sem)
         kfree(sem);
 }

-void
-P(struct semaphore *sem)
-{
+void P(struct semaphore *sem) {
         KASSERT(sem != NULL);

         /*

@@ -120,9 +114,7 @@ P(struct semaphore *sem)
         spinlock_release(&sem->sem_lock);
 }

-void
-V(struct semaphore *sem)
-{
+void V(struct semaphore *sem) {
         KASSERT(sem != NULL);

         spinlock_acquire(&sem->sem_lock);

@@ -138,9 +130,7 @@ V(struct semaphore *sem)
 //
 // Lock.

-struct lock *
-lock_create(const char *name)
-{
+struct lock *lock_create(const char *name) {
         struct lock *lock;

         lock = kmalloc(sizeof(*lock));

@@ -161,9 +151,7 @@ lock_create(const char *name)
         return lock;
 }

-void
-lock_destroy(struct lock *lock)
-{
+void lock_destroy(struct lock *lock) {
         KASSERT(lock != NULL);

         // add stuff here as needed

@@ -172,34 +160,28 @@ lock_destroy(struct lock *lock)
         kfree(lock);
 }

-void
-lock_acquire(struct lock *lock)
-{
+void lock_acquire(struct lock *lock) {
         /* Call this (atomically) before waiting for a lock */
-        //HANGMAN_WAIT(&curthread->t_hangman, &lock->lk_hangman);
+        // HANGMAN_WAIT(&curthread->t_hangman, &lock->lk_hangman);

         // Write this

         (void)lock; // suppress warning until code gets written

         /* Call this (atomically) once the lock is acquired */
-        //HANGMAN_ACQUIRE(&curthread->t_hangman, &lock->lk_hangman);
+        // HANGMAN_ACQUIRE(&curthread->t_hangman, &lock->lk_hangman);
 }

-void
-lock_release(struct lock *lock)
-{
+void lock_release(struct lock *lock) {
         /* Call this (atomically) when the lock is released */
-        //HANGMAN_RELEASE(&curthread->t_hangman, &lock->lk_hangman);
+        // HANGMAN_RELEASE(&curthread->t_hangman, &lock->lk_hangman);

         // Write this

         (void)lock; // suppress warning until code gets written
 }

-bool
-lock_do_i_hold(struct lock *lock)
-{
+bool lock_do_i_hold(struct lock *lock) {
         // Write this

         (void)lock; // suppress warning until code gets written

@@ -211,10 +193,7 @@ lock_do_i_hold(struct lock *lock)
 //
 // CV

-
-struct cv *
-cv_create(const char *name)
-{
+struct cv *cv_create(const char *name) {
         struct cv *cv;

         cv = kmalloc(sizeof(*cv));

@@ -223,7 +202,7 @@ cv_create(const char *name)
         }

         cv->cv_name = kstrdup(name);
-        if (cv->cv_name==NULL) {
+        if (cv->cv_name == NULL) {
                 kfree(cv);
                 return NULL;
         }

@@ -233,9 +212,7 @@ cv_create(const char *name)
         return cv;
 }

-void
-cv_destroy(struct cv *cv)
-{
+void cv_destroy(struct cv *cv) {
         KASSERT(cv != NULL);

         // add stuff here as needed

@@ -244,25 +221,19 @@ cv_destroy(struct cv *cv)
         kfree(cv);
 }

-void
-cv_wait(struct cv *cv, struct lock *lock)
-{
+void cv_wait(struct cv *cv, struct lock *lock) {
         // Write this
         (void)cv; // suppress warning until code gets written
         (void)lock; // suppress warning until code gets written
 }

-void
-cv_signal(struct cv *cv, struct lock *lock)
-{
+void cv_signal(struct cv *cv, struct lock *lock) {
         // Write this
         (void)cv; // suppress warning until code gets written
         (void)lock; // suppress warning until code gets written
 }

-void
-cv_broadcast(struct cv *cv, struct lock *lock)
-{
+void cv_broadcast(struct cv *cv, struct lock *lock) {
         // Write this
         (void)cv; // suppress warning until code gets written
         (void)lock; // suppress warning until code gets written
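The lock and CV bodies above are intentionally left as "// Write this" stubs; the commit only reformats them. For orientation only, one conventional way to complete lock_acquire()/lock_release() in this codebase pairs a spinlock with a wait channel; this is an illustrative sketch, and the field names lk_lock, lk_wchan, and lk_holder are assumptions, not part of this commit:

        void lock_acquire(struct lock *lock) {
                KASSERT(lock != NULL);
                spinlock_acquire(&lock->lk_lock);
                /* sleep until nobody holds the lock; wchan_sleep releases
                   and re-acquires lk_lock around the sleep */
                while (lock->lk_holder != NULL) {
                        wchan_sleep(lock->lk_wchan, &lock->lk_lock);
                }
                lock->lk_holder = curthread;
                spinlock_release(&lock->lk_lock);
        }

        void lock_release(struct lock *lock) {
                KASSERT(lock_do_i_hold(lock));
                spinlock_acquire(&lock->lk_lock);
                lock->lk_holder = NULL;
                wchan_wakeone(lock->lk_wchan, &lock->lk_lock);
                spinlock_release(&lock->lk_lock);
        }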
@@ -51,7 +51,6 @@
 #include <mainbus.h>
 #include <vnode.h>

-
 /* Magic number used as a guard value on kernel thread stacks. */
 #define THREAD_STACK_MAGIC 0xbaadf00d

@@ -76,10 +75,7 @@ static struct semaphore *cpu_startup_sem;
  * (sometimes) catch kernel stack overflows. Use thread_checkstack()
  * to test this.
  */
-static
-void
-thread_checkstack_init(struct thread *thread)
-{
+static void thread_checkstack_init(struct thread *thread) {
         ((uint32_t *)thread->t_stack)[0] = THREAD_STACK_MAGIC;
         ((uint32_t *)thread->t_stack)[1] = THREAD_STACK_MAGIC;
         ((uint32_t *)thread->t_stack)[2] = THREAD_STACK_MAGIC;

@@ -96,15 +92,12 @@ thread_checkstack_init(struct thread *thread)
  * cannot be freed (which in turn is the case if the stack is the boot
  * stack, and the thread is the boot thread) this doesn't do anything.
  */
-static
-void
-thread_checkstack(struct thread *thread)
-{
+static void thread_checkstack(struct thread *thread) {
         if (thread->t_stack != NULL) {
-                KASSERT(((uint32_t*)thread->t_stack)[0] == THREAD_STACK_MAGIC);
-                KASSERT(((uint32_t*)thread->t_stack)[1] == THREAD_STACK_MAGIC);
-                KASSERT(((uint32_t*)thread->t_stack)[2] == THREAD_STACK_MAGIC);
-                KASSERT(((uint32_t*)thread->t_stack)[3] == THREAD_STACK_MAGIC);
+                KASSERT(((uint32_t *)thread->t_stack)[0] == THREAD_STACK_MAGIC);
+                KASSERT(((uint32_t *)thread->t_stack)[1] == THREAD_STACK_MAGIC);
+                KASSERT(((uint32_t *)thread->t_stack)[2] == THREAD_STACK_MAGIC);
+                KASSERT(((uint32_t *)thread->t_stack)[3] == THREAD_STACK_MAGIC);
         }
 }

@@ -112,10 +105,7 @@ thread_checkstack(struct thread *thread)
  * Create a thread. This is used both to create a first thread
  * for each CPU and to create subsequent forked threads.
  */
-static
-struct thread *
-thread_create(const char *name)
-{
+static struct thread *thread_create(const char *name) {
         struct thread *thread;

         DEBUGASSERT(name != NULL);

@@ -160,9 +150,7 @@ thread_create(const char *name)
  * board config or whatnot) is tracked separately because it is not
  * necessarily anything sane or meaningful.
  */
-struct cpu *
-cpu_create(unsigned hardware_number)
-{
+struct cpu *cpu_create(unsigned hardware_number) {
         struct cpu *c;
         int result;
         char namebuf[16];

@@ -208,8 +196,7 @@ cpu_create(unsigned hardware_number)
                  * make it possible to free the boot stack?)
                  */
                 /*c->c_curthread->t_stack = ... */
-        }
-        else {
+        } else {
                 c->c_curthread->t_stack = kmalloc(STACK_SIZE);
                 if (c->c_curthread->t_stack == NULL) {
                         panic("cpu_create: couldn't allocate stack");

@@ -260,10 +247,7 @@ cpu_create(unsigned hardware_number)
  *
  * (Freeing the stack you're actually using to run is ... inadvisable.)
  */
-static
-void
-thread_destroy(struct thread *thread)
-{
+static void thread_destroy(struct thread *thread) {
         KASSERT(thread != curthread);
         KASSERT(thread->t_state != S_RUN);

@@ -293,10 +277,7 @@ thread_destroy(struct thread *thread)
  *
  * The list of zombies is per-cpu.
  */
-static
-void
-exorcise(void)
-{
+static void exorcise(void) {
         struct thread *z;

         while ((z = threadlist_remhead(&curcpu->c_zombies)) != NULL) {

@@ -311,9 +292,7 @@ exorcise(void)
  * possible) to make sure we don't end up letting any other threads
  * run.
  */
-void
-thread_panic(void)
-{
+void thread_panic(void) {
         /*
          * Kill off other CPUs.
          *

@@ -353,9 +332,7 @@ thread_panic(void)
 /*
  * At system shutdown, ask the other CPUs to switch off.
  */
-void
-thread_shutdown(void)
-{
+void thread_shutdown(void) {
         /*
          * Stop the other CPUs.
          *

@@ -368,9 +345,7 @@ thread_shutdown(void)
 /*
  * Thread system initialization.
  */
-void
-thread_bootstrap(void)
-{
+void thread_bootstrap(void) {
         cpuarray_init(&allcpus);

         /*

@@ -402,9 +377,7 @@ thread_bootstrap(void)
  * to do anything. The startup thread can just exit; we only need it
  * to be able to get into thread_switch() properly.
  */
-void
-cpu_hatch(unsigned software_number)
-{
+void cpu_hatch(unsigned software_number) {
         char buf[64];

         KASSERT(curcpu != NULL);

@@ -423,9 +396,7 @@ cpu_hatch(unsigned software_number)
 /*
  * Start up secondary cpus. Called from boot().
  */
-void
-thread_start_cpus(void)
-{
+void thread_start_cpus(void) {
         char buf[64];
         unsigned i;

@@ -435,7 +406,7 @@ thread_start_cpus(void)
         cpu_startup_sem = sem_create("cpu_hatch", 0);
         mainbus_start_cpus();

-        for (i=0; i<cpuarray_num(&allcpus) - 1; i++) {
+        for (i = 0; i < cpuarray_num(&allcpus) - 1; i++) {
                 P(cpu_startup_sem);
         }
         sem_destroy(cpu_startup_sem);

@@ -447,10 +418,8 @@ thread_start_cpus(void)
  *
  * targetcpu might be curcpu; it might not be, too.
  */
-static
-void
-thread_make_runnable(struct thread *target, bool already_have_lock)
-{
+static void thread_make_runnable(struct thread *target,
+                                 bool already_have_lock) {
         struct cpu *targetcpu;

         /* Lock the run queue of the target thread's cpu. */

@@ -459,8 +428,7 @@ thread_make_runnable(struct thread *target, bool already_have_lock)
         if (already_have_lock) {
                 /* The target thread's cpu should be already locked. */
                 KASSERT(spinlock_do_i_hold(&targetcpu->c_runqueue_lock));
-        }
-        else {
+        } else {
                 spinlock_acquire(&targetcpu->c_runqueue_lock);
         }

@@ -491,12 +459,9 @@ thread_make_runnable(struct thread *target, bool already_have_lock)
  * process is inherited from the caller. It will start on the same CPU
  * as the caller, unless the scheduler intervenes first.
  */
-int
-thread_fork(const char *name,
-            struct proc *proc,
-            void (*entrypoint)(void *data1, unsigned long data2),
-            void *data1, unsigned long data2)
-{
+int thread_fork(const char *name, struct proc *proc,
+                void (*entrypoint)(void *data1, unsigned long data2),
+                void *data1, unsigned long data2) {
         struct thread *newthread;
         int result;

@@ -557,10 +522,8 @@ thread_fork(const char *name,
  * WC, protected by the spinlock LK. Otherwise WC and Lk should be
  * NULL.
  */
-static
-void
-thread_switch(threadstate_t newstate, struct wchan *wc, struct spinlock *lk)
-{
+static void thread_switch(threadstate_t newstate, struct wchan *wc,
+                          struct spinlock *lk) {
         struct thread *cur, *next;
         int spl;

@@ -709,7 +672,6 @@ thread_switch(threadstate_t newstate, struct wchan *wc, struct spinlock *lk)
          * thread_startup.
          */

-
         /* Clear the wait channel and set the thread state. */
         cur->t_wchan_name = NULL;
         cur->t_state = S_RUN;

@@ -735,10 +697,8 @@ thread_switch(threadstate_t newstate, struct wchan *wc, struct spinlock *lk)
  * thread_switch, the beginning part of this function must match the
  * tail of thread_switch.
  */
-void
-thread_startup(void (*entrypoint)(void *data1, unsigned long data2),
-               void *data1, unsigned long data2)
-{
+void thread_startup(void (*entrypoint)(void *data1, unsigned long data2),
+                    void *data1, unsigned long data2) {
         struct thread *cur;

         cur = curthread;

@@ -775,9 +735,7 @@ thread_startup(void (*entrypoint)(void *data1, unsigned long data2),
  *
  * Does not return.
  */
-void
-thread_exit(void)
-{
+void thread_exit(void) {
         struct thread *cur;

         cur = curthread;

@@ -803,11 +761,7 @@ thread_exit(void)
 /*
  * Yield the cpu to another process, but stay runnable.
  */
-void
-thread_yield(void)
-{
-        thread_switch(S_READY, NULL, NULL);
-}
+void thread_yield(void) { thread_switch(S_READY, NULL, NULL); }

 ////////////////////////////////////////////////////////////

@@ -818,9 +772,7 @@ thread_yield(void)
  * the current CPU's run queue by job priority.
  */

-void
-schedule(void)
-{
+void schedule(void) {
         /*
          * You can write this. If we do nothing, threads will run in
          * round-robin fashion.

@@ -844,9 +796,7 @@ schedule(void)
  * System/161 does not (yet) model such cache effects, we'll be very
  * aggressive.
  */
-void
-thread_consider_migration(void)
-{
+void thread_consider_migration(void) {
         unsigned my_count, total_count, one_share, to_send;
         unsigned i, numcpus;
         struct cpu *c;

@@ -855,7 +805,7 @@ thread_consider_migration(void)

         my_count = total_count = 0;
         numcpus = cpuarray_num(&allcpus);
-        for (i=0; i<numcpus; i++) {
+        for (i = 0; i < numcpus; i++) {
                 c = cpuarray_get(&allcpus, i);
                 spinlock_acquire(&c->c_runqueue_lock);
                 total_count += c->c_runqueue.tl_count;

@@ -873,13 +823,13 @@ thread_consider_migration(void)
                 to_send = my_count - one_share;
                 threadlist_init(&victims);
                 spinlock_acquire(&curcpu->c_runqueue_lock);
-                for (i=0; i<to_send; i++) {
+                for (i = 0; i < to_send; i++) {
                         t = threadlist_remtail(&curcpu->c_runqueue);
                         threadlist_addhead(&victims, t);
                 }
                 spinlock_release(&curcpu->c_runqueue_lock);

-        for (i=0; i < numcpus && to_send > 0; i++) {
+        for (i = 0; i < numcpus && to_send > 0; i++) {
                 c = cpuarray_get(&allcpus, i);
                 if (c == curcpu->c_self) {
                         continue;

@@ -917,9 +867,8 @@ thread_consider_migration(void)

                 t->t_cpu = c;
                 threadlist_addtail(&c->c_runqueue, t);
-                DEBUG(DB_THREADS,
-                      "Migrated thread %s: cpu %u -> %u",
-                      t->t_name, curcpu->c_number, c->c_number);
+                DEBUG(DB_THREADS, "Migrated thread %s: cpu %u -> %u", t->t_name,
+                      curcpu->c_number, c->c_number);
                 to_send--;
                 if (c->c_isidle) {
                         /*

@@ -963,9 +912,7 @@ thread_consider_migration(void)
  * arrangements should be made to free it after the wait channel is
  * destroyed.
  */
-struct wchan *
-wchan_create(const char *name)
-{
+struct wchan *wchan_create(const char *name) {
         struct wchan *wc;

         wc = kmalloc(sizeof(*wc));

@@ -982,9 +929,7 @@ wchan_create(const char *name)
  * Destroy a wait channel. Must be empty and unlocked.
  * (The corresponding cleanup functions require this.)
  */
-void
-wchan_destroy(struct wchan *wc)
-{
+void wchan_destroy(struct wchan *wc) {
         threadlist_cleanup(&wc->wc_threads);
         kfree(wc);
 }

@@ -996,9 +941,7 @@ wchan_destroy(struct wchan *wc)
  * be locked. The call to thread_switch unlocks it; we relock it
  * before returning.
  */
-void
-wchan_sleep(struct wchan *wc, struct spinlock *lk)
-{
+void wchan_sleep(struct wchan *wc, struct spinlock *lk) {
         /* may not sleep in an interrupt handler */
         KASSERT(!curthread->t_in_interrupt);

@@ -1015,9 +958,7 @@ wchan_sleep(struct wchan *wc, struct spinlock *lk)
 /*
  * Wake up one thread sleeping on a wait channel.
  */
-void
-wchan_wakeone(struct wchan *wc, struct spinlock *lk)
-{
+void wchan_wakeone(struct wchan *wc, struct spinlock *lk) {
         struct thread *target;

         KASSERT(spinlock_do_i_hold(lk));

@@ -1044,9 +985,7 @@ wchan_wakeone(struct wchan *wc, struct spinlock *lk)
 /*
  * Wake up all threads sleeping on a wait channel.
  */
-void
-wchan_wakeall(struct wchan *wc, struct spinlock *lk)
-{
+void wchan_wakeall(struct wchan *wc, struct spinlock *lk) {
         struct thread *target;
         struct threadlist list;

@@ -1078,9 +1017,7 @@ wchan_wakeall(struct wchan *wc, struct spinlock *lk)
  * Return nonzero if there are no threads sleeping on the channel.
  * This is meant to be used only for diagnostic purposes.
  */
-bool
-wchan_isempty(struct wchan *wc, struct spinlock *lk)
-{
+bool wchan_isempty(struct wchan *wc, struct spinlock *lk) {
         bool ret;

         KASSERT(spinlock_do_i_hold(lk));

@@ -1098,9 +1035,7 @@ wchan_isempty(struct wchan *wc, struct spinlock *lk)
 /*
  * Send an IPI (inter-processor interrupt) to the specified CPU.
  */
-void
-ipi_send(struct cpu *target, int code)
-{
+void ipi_send(struct cpu *target, int code) {
         KASSERT(code >= 0 && code < 32);

         spinlock_acquire(&target->c_ipi_lock);

@@ -1112,13 +1047,11 @@ ipi_send(struct cpu *target, int code)
 /*
  * Send an IPI to all CPUs.
  */
-void
-ipi_broadcast(int code)
-{
+void ipi_broadcast(int code) {
         unsigned i;
         struct cpu *c;

-        for (i=0; i < cpuarray_num(&allcpus); i++) {
+        for (i = 0; i < cpuarray_num(&allcpus); i++) {
                 c = cpuarray_get(&allcpus, i);
                 if (c != curcpu->c_self) {
                         ipi_send(c, code);

@@ -1129,9 +1062,7 @@ ipi_broadcast(int code)
 /*
  * Send a TLB shootdown IPI to the specified CPU.
  */
-void
-ipi_tlbshootdown(struct cpu *target, const struct tlbshootdown *mapping)
-{
+void ipi_tlbshootdown(struct cpu *target, const struct tlbshootdown *mapping) {
         unsigned n;

         spinlock_acquire(&target->c_ipi_lock);

@@ -1148,10 +1079,9 @@ ipi_tlbshootdown(struct cpu *target, const struct tlbshootdown *mapping)
                  * reduce the number of unnecessary shootdowns.
                  */
                 panic("ipi_tlbshootdown: Too many shootdowns queued\n");
-        }
-        else {
+        } else {
                 target->c_shootdown[n] = *mapping;
-                target->c_numshootdown = n+1;
+                target->c_numshootdown = n + 1;
         }

         target->c_ipi_pending |= (uint32_t)1 << IPI_TLBSHOOTDOWN;

@@ -1163,9 +1093,7 @@ ipi_tlbshootdown(struct cpu *target, const struct tlbshootdown *mapping)
 /*
  * Handle an incoming interprocessor interrupt.
  */
-void
-interprocessor_interrupt(void)
-{
+void interprocessor_interrupt(void) {
         uint32_t bits;
         unsigned i;

@@ -1182,8 +1110,7 @@ interprocessor_interrupt(void)
                 spinlock_release(&curcpu->c_ipi_lock);
                 spinlock_acquire(&curcpu->c_runqueue_lock);
                 if (!curcpu->c_isidle) {
-                        kprintf("cpu%d: offline: warning: not idle\n",
-                                curcpu->c_number);
+                        kprintf("cpu%d: offline: warning: not idle\n", curcpu->c_number);
                 }
                 spinlock_release(&curcpu->c_runqueue_lock);
                 kprintf("cpu%d: offline.\n", curcpu->c_number);

@@ -1201,7 +1128,7 @@ interprocessor_interrupt(void)
                  * need to release the ipi lock while calling
                  * vm_tlbshootdown.
                  */
-                for (i=0; i<curcpu->c_numshootdown; i++) {
+                for (i = 0; i < curcpu->c_numshootdown; i++) {
                         vm_tlbshootdown(&curcpu->c_shootdown[i]);
                 }
                 curcpu->c_numshootdown = 0;
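The wchan comments above state the sleeping contract: the channel's spinlock must be held on entry to wchan_sleep(), which unlocks it across thread_switch() and relocks it before returning. The caller pattern that follows from this contract (an illustrative sketch, not part of the commit):

        spinlock_acquire(&lk);
        while (!condition) {
                wchan_sleep(wc, &lk); /* drops lk while asleep, re-acquires it */
        }
        /* condition holds and lk is held here */
        spinlock_release(&lk);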
@ -36,18 +36,14 @@
|
|||||||
* Kernel malloc.
|
* Kernel malloc.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Fill a block with 0xdeadbeef.
|
* Fill a block with 0xdeadbeef.
|
||||||
*/
|
*/
|
||||||
static
|
static void fill_deadbeef(void *vptr, size_t len) {
|
||||||
void
|
|
||||||
fill_deadbeef(void *vptr, size_t len)
|
|
||||||
{
|
|
||||||
uint32_t *ptr = vptr;
|
uint32_t *ptr = vptr;
|
||||||
size_t i;
|
size_t i;
|
||||||
|
|
||||||
for (i=0; i<len/sizeof(uint32_t); i++) {
|
for (i = 0; i < len / sizeof(uint32_t); i++) {
|
||||||
ptr[i] = 0xdeadbeef;
|
ptr[i] = 0xdeadbeef;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -126,7 +122,7 @@ fill_deadbeef(void *vptr, size_t len)
|
|||||||
#if PAGE_SIZE == 4096
|
#if PAGE_SIZE == 4096
|
||||||
|
|
||||||
#define NSIZES 8
|
#define NSIZES 8
|
||||||
static const size_t sizes[NSIZES] = { 16, 32, 64, 128, 256, 512, 1024, 2048 };
|
static const size_t sizes[NSIZES] = {16, 32, 64, 128, 256, 512, 1024, 2048};
|
||||||
|
|
||||||
#define SMALLEST_SUBPAGE_SIZE 16
|
#define SMALLEST_SUBPAGE_SIZE 16
|
||||||
#define LARGEST_SUBPAGE_SIZE 2048
|
#define LARGEST_SUBPAGE_SIZE 2048
|
||||||
@ -155,7 +151,7 @@ struct pageref {
|
|||||||
|
|
||||||
#define PR_PAGEADDR(pr) ((pr)->pageaddr_and_blocktype & PAGE_FRAME)
|
#define PR_PAGEADDR(pr) ((pr)->pageaddr_and_blocktype & PAGE_FRAME)
|
||||||
#define PR_BLOCKTYPE(pr) ((pr)->pageaddr_and_blocktype & ~PAGE_FRAME)
|
#define PR_BLOCKTYPE(pr) ((pr)->pageaddr_and_blocktype & ~PAGE_FRAME)
|
||||||
#define MKPAB(pa, blk) (((pa)&PAGE_FRAME) | ((blk) & ~PAGE_FRAME))
|
#define MKPAB(pa, blk) (((pa) & PAGE_FRAME) | ((blk) & ~PAGE_FRAME))
|
||||||
|
|
||||||
////////////////////////////////////////
|
////////////////////////////////////////
|
||||||
|
|
||||||
@ -215,10 +211,7 @@ static struct kheap_root kheaproots[NUM_PAGEREFPAGES];
|
|||||||
/*
|
/*
|
||||||
* Allocate a page to hold pagerefs.
|
* Allocate a page to hold pagerefs.
|
||||||
*/
|
*/
|
||||||
static
|
static void allocpagerefpage(struct kheap_root *root) {
|
||||||
void
|
|
||||||
allocpagerefpage(struct kheap_root *root)
|
|
||||||
{
|
|
||||||
vaddr_t va;
|
vaddr_t va;
|
||||||
|
|
||||||
KASSERT(root->page == NULL);
|
KASSERT(root->page == NULL);
|
||||||
@ -253,16 +246,13 @@ allocpagerefpage(struct kheap_root *root)
|
|||||||
/*
|
/*
|
||||||
* Allocate a pageref structure.
|
* Allocate a pageref structure.
|
||||||
*/
|
*/
|
||||||
static
|
static struct pageref *allocpageref(void) {
|
||||||
struct pageref *
|
unsigned i, j;
|
||||||
allocpageref(void)
|
|
||||||
{
|
|
||||||
unsigned i,j;
|
|
||||||
uint32_t k;
|
uint32_t k;
|
||||||
unsigned whichroot;
|
unsigned whichroot;
|
||||||
struct kheap_root *root;
|
struct kheap_root *root;
|
||||||
|
|
||||||
for (whichroot=0; whichroot < NUM_PAGEREFPAGES; whichroot++) {
|
for (whichroot = 0; whichroot < NUM_PAGEREFPAGES; whichroot++) {
|
||||||
root = &kheaproots[whichroot];
|
root = &kheaproots[whichroot];
|
||||||
if (root->numinuse >= NPAGEREFS_PER_PAGE) {
|
if (root->numinuse >= NPAGEREFS_PER_PAGE) {
|
||||||
continue;
|
continue;
|
||||||
@ -271,13 +261,13 @@ allocpageref(void)
|
|||||||
/*
|
/*
|
||||||
* This should probably not be a linear search.
|
* This should probably not be a linear search.
|
||||||
*/
|
*/
|
||||||
for (i=0; i<INUSE_WORDS; i++) {
|
for (i = 0; i < INUSE_WORDS; i++) {
|
||||||
if (root->pagerefs_inuse[i]==0xffffffff) {
|
if (root->pagerefs_inuse[i] == 0xffffffff) {
|
||||||
/* full */
|
/* full */
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
for (k=1,j=0; k!=0; k<<=1,j++) {
|
for (k = 1, j = 0; k != 0; k <<= 1, j++) {
|
||||||
if ((root->pagerefs_inuse[i] & k)==0) {
|
if ((root->pagerefs_inuse[i] & k) == 0) {
|
||||||
root->pagerefs_inuse[i] |= k;
|
root->pagerefs_inuse[i] |= k;
|
||||||
root->numinuse++;
|
root->numinuse++;
|
||||||
if (root->page == NULL) {
|
if (root->page == NULL) {
|
||||||
@ -286,7 +276,7 @@ allocpageref(void)
|
|||||||
if (root->page == NULL) {
|
if (root->page == NULL) {
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
return &root->page->refs[i*32 + j];
|
return &root->page->refs[i * 32 + j];
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
KASSERT(0);
|
KASSERT(0);
|
||||||
@ -300,17 +290,14 @@ allocpageref(void)
|
|||||||
/*
|
/*
|
||||||
* Release a pageref structure.
|
* Release a pageref structure.
|
||||||
*/
|
*/
|
||||||
static
|
static void freepageref(struct pageref *p) {
|
||||||
void
|
|
||||||
freepageref(struct pageref *p)
|
|
||||||
{
|
|
||||||
size_t i, j;
|
size_t i, j;
|
||||||
uint32_t k;
|
uint32_t k;
|
||||||
unsigned whichroot;
|
unsigned whichroot;
|
||||||
struct kheap_root *root;
|
struct kheap_root *root;
|
||||||
struct pagerefpage *page;
|
struct pagerefpage *page;
|
||||||
|
|
||||||
for (whichroot=0; whichroot < NUM_PAGEREFPAGES; whichroot++) {
|
for (whichroot = 0; whichroot < NUM_PAGEREFPAGES; whichroot++) {
|
||||||
root = &kheaproots[whichroot];
|
root = &kheaproots[whichroot];
|
||||||
|
|
||||||
page = root->page;
|
page = root->page;
|
||||||
@ -319,12 +306,12 @@ freepageref(struct pageref *p)
|
|||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
j = p-page->refs;
|
j = p - page->refs;
|
||||||
/* note: j is unsigned, don't test < 0 */
|
/* note: j is unsigned, don't test < 0 */
|
||||||
if (j < NPAGEREFS_PER_PAGE) {
|
if (j < NPAGEREFS_PER_PAGE) {
|
||||||
/* on this page */
|
/* on this page */
|
||||||
i = j/32;
|
i = j / 32;
|
||||||
k = ((uint32_t)1) << (j%32);
|
k = ((uint32_t)1) << (j % 32);
|
||||||
KASSERT((root->pagerefs_inuse[i] & k) != 0);
|
KASSERT((root->pagerefs_inuse[i] & k) != 0);
|
||||||
root->pagerefs_inuse[i] &= ~k;
|
root->pagerefs_inuse[i] &= ~k;
|
||||||
KASSERT(root->numinuse > 0);
|
KASSERT(root->numinuse > 0);
|
||||||
@ -365,10 +352,8 @@ static struct pageref *allbase;
|
|||||||
/*
|
/*
|
||||||
* Set up the guard values in a block we're about to return.
|
* Set up the guard values in a block we're about to return.
|
||||||
*/
|
*/
|
||||||
static
|
static void *establishguardband(void *block, size_t clientsize,
|
||||||
void *
|
size_t blocksize) {
|
||||||
establishguardband(void *block, size_t clientsize, size_t blocksize)
|
|
||||||
{
|
|
||||||
vaddr_t lowguard, lowsize, data, enddata, highguard, highsize, i;
|
vaddr_t lowguard, lowsize, data, enddata, highguard, highsize, i;
|
||||||
|
|
||||||
KASSERT(clientsize + GUARD_OVERHEAD <= blocksize);
|
KASSERT(clientsize + GUARD_OVERHEAD <= blocksize);
|
||||||
@ -383,10 +368,10 @@ establishguardband(void *block, size_t clientsize, size_t blocksize)
|
|||||||
|
|
||||||
*(uint16_t *)lowguard = GUARD_HALFWORD;
|
*(uint16_t *)lowguard = GUARD_HALFWORD;
|
||||||
*(uint16_t *)lowsize = clientsize;
|
*(uint16_t *)lowsize = clientsize;
|
||||||
for (i=data; i<enddata; i++) {
|
for (i = data; i < enddata; i++) {
|
||||||
*(uint8_t *)i = GUARD_RETBYTE;
|
*(uint8_t *)i = GUARD_RETBYTE;
|
||||||
}
|
}
|
||||||
for (i=enddata; i<highguard; i++) {
|
for (i = enddata; i < highguard; i++) {
|
||||||
*(uint8_t *)i = GUARD_FILLBYTE;
|
*(uint8_t *)i = GUARD_FILLBYTE;
|
||||||
}
|
}
|
||||||
*(uint16_t *)highguard = GUARD_HALFWORD;
|
*(uint16_t *)highguard = GUARD_HALFWORD;
|
||||||
@ -398,10 +383,8 @@ establishguardband(void *block, size_t clientsize, size_t blocksize)
|
|||||||
/*
|
/*
|
||||||
* Validate the guard values in an existing block.
|
* Validate the guard values in an existing block.
|
||||||
*/
|
*/
|
||||||
static
|
static void checkguardband(vaddr_t blockaddr, size_t smallerblocksize,
|
||||||
void
|
size_t blocksize) {
|
||||||
checkguardband(vaddr_t blockaddr, size_t smallerblocksize, size_t blocksize)
|
|
||||||
{
|
|
||||||
/*
|
/*
|
||||||
* The first two bytes of the block are the lower guard band.
|
* The first two bytes of the block are the lower guard band.
|
||||||
* The next two bytes are the real size (the size of the
|
* The next two bytes are the real size (the size of the
|
||||||
@ -430,7 +413,7 @@ checkguardband(vaddr_t blockaddr, size_t smallerblocksize, size_t blocksize)
|
|||||||
KASSERT(clientsize + GUARD_OVERHEAD > smallerblocksize);
|
KASSERT(clientsize + GUARD_OVERHEAD > smallerblocksize);
|
||||||
KASSERT(clientsize + GUARD_OVERHEAD <= blocksize);
|
KASSERT(clientsize + GUARD_OVERHEAD <= blocksize);
|
||||||
enddata = data + clientsize;
|
enddata = data + clientsize;
|
||||||
for (i=enddata; i<highguard; i++) {
|
for (i = enddata; i < highguard; i++) {
|
||||||
KASSERT(*(uint8_t *)i == GUARD_FILLBYTE);
|
KASSERT(*(uint8_t *)i == GUARD_FILLBYTE);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -457,14 +440,11 @@ checkguardband(vaddr_t blockaddr, size_t smallerblocksize, size_t blocksize)
|
|||||||
* The first word of the block is a freelist pointer and should not be
|
* The first word of the block is a freelist pointer and should not be
|
||||||
* deadbeef; the rest of the block should be only deadbeef.
|
* deadbeef; the rest of the block should be only deadbeef.
|
||||||
*/
|
*/
|
||||||
static
|
static void checkdeadbeef(void *block, size_t blocksize) {
|
||||||
void
|
|
||||||
checkdeadbeef(void *block, size_t blocksize)
|
|
||||||
{
|
|
||||||
uint32_t *ptr = block;
|
uint32_t *ptr = block;
|
||||||
size_t i;
|
size_t i;
|
||||||
|
|
||||||
for (i=1; i < blocksize/sizeof(uint32_t); i++) {
|
for (i = 1; i < blocksize / sizeof(uint32_t); i++) {
|
||||||
KASSERT(ptr[i] == 0xdeadbeef);
|
KASSERT(ptr[i] == 0xdeadbeef);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -490,14 +470,11 @@ checkdeadbeef(void *block, size_t blocksize)
|
|||||||
* assertion as a bit in isfree is set twice; if not, a circular
|
* assertion as a bit in isfree is set twice; if not, a circular
|
||||||
* freelist will cause an infinite loop.
|
* freelist will cause an infinite loop.
|
||||||
*/
|
*/
|
||||||
static
|
static void checksubpage(struct pageref *pr) {
|
||||||
void
|
|
||||||
checksubpage(struct pageref *pr)
|
|
||||||
{
|
|
||||||
vaddr_t prpage, fla;
|
vaddr_t prpage, fla;
|
||||||
struct freelist *fl;
|
struct freelist *fl;
|
||||||
int blktype;
|
int blktype;
|
||||||
int nfree=0;
|
int nfree = 0;
|
||||||
size_t blocksize;
|
size_t blocksize;
|
||||||
#ifdef CHECKGUARDS
|
#ifdef CHECKGUARDS
|
||||||
const unsigned maxblocks = PAGE_SIZE / SMALLEST_SUBPAGE_SIZE;
|
const unsigned maxblocks = PAGE_SIZE / SMALLEST_SUBPAGE_SIZE;
|
||||||
@ -510,7 +487,7 @@ checksubpage(struct pageref *pr)
|
|||||||
KASSERT(spinlock_do_i_hold(&kmalloc_spinlock));
|
KASSERT(spinlock_do_i_hold(&kmalloc_spinlock));
|
||||||
|
|
||||||
if (pr->freelist_offset == INVALID_OFFSET) {
|
if (pr->freelist_offset == INVALID_OFFSET) {
|
||||||
KASSERT(pr->nfree==0);
|
KASSERT(pr->nfree == 0);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -521,7 +498,7 @@ checksubpage(struct pageref *pr)
|
|||||||
|
|
||||||
#ifdef CHECKGUARDS
|
#ifdef CHECKGUARDS
|
||||||
smallerblocksize = blktype > 0 ? sizes[blktype - 1] : 0;
|
smallerblocksize = blktype > 0 ? sizes[blktype - 1] : 0;
|
||||||
for (i=0; i<numfreewords; i++) {
|
for (i = 0; i < numfreewords; i++) {
|
||||||
isfree[i] = 0;
|
isfree[i] = 0;
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
@ -540,12 +517,12 @@ checksubpage(struct pageref *pr)
|
|||||||
for (; fl != NULL; fl = fl->next) {
|
for (; fl != NULL; fl = fl->next) {
|
||||||
fla = (vaddr_t)fl;
|
fla = (vaddr_t)fl;
|
||||||
KASSERT(fla >= prpage && fla < prpage + PAGE_SIZE);
|
KASSERT(fla >= prpage && fla < prpage + PAGE_SIZE);
|
||||||
KASSERT((fla-prpage) % blocksize == 0);
|
KASSERT((fla - prpage) % blocksize == 0);
|
||||||
#ifdef CHECKBEEF
|
#ifdef CHECKBEEF
|
||||||
checkdeadbeef(fl, blocksize);
|
checkdeadbeef(fl, blocksize);
|
||||||
#endif
|
#endif
|
||||||
#ifdef CHECKGUARDS
|
#ifdef CHECKGUARDS
|
||||||
blocknum = (fla-prpage) / blocksize;
|
blocknum = (fla - prpage) / blocksize;
|
||||||
mask = 1U << (blocknum % 32);
|
mask = 1U << (blocknum % 32);
|
||||||
KASSERT((isfree[blocknum / 32] & mask) == 0);
|
KASSERT((isfree[blocknum / 32] & mask) == 0);
|
||||||
isfree[blocknum / 32] |= mask;
|
isfree[blocknum / 32] |= mask;
|
||||||
@ -553,15 +530,14 @@ checksubpage(struct pageref *pr)
|
|||||||
KASSERT(fl->next != fl);
|
KASSERT(fl->next != fl);
|
||||||
nfree++;
|
nfree++;
|
||||||
}
|
}
|
||||||
KASSERT(nfree==pr->nfree);
|
KASSERT(nfree == pr->nfree);
|
||||||
|
|
||||||
#ifdef CHECKGUARDS
|
#ifdef CHECKGUARDS
|
||||||
numblocks = PAGE_SIZE / blocksize;
|
numblocks = PAGE_SIZE / blocksize;
|
||||||
for (i=0; i<numblocks; i++) {
|
for (i = 0; i < numblocks; i++) {
|
||||||
mask = 1U << (i % 32);
|
mask = 1U << (i % 32);
|
||||||
if ((isfree[i / 32] & mask) == 0) {
|
if ((isfree[i / 32] & mask) == 0) {
|
||||||
checkguardband(prpage + i * blocksize,
|
checkguardband(prpage + i * blocksize, smallerblocksize, blocksize);
|
||||||
smallerblocksize, blocksize);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
@ -575,17 +551,14 @@ checksubpage(struct pageref *pr)
|
|||||||
* Run checksubpage on all heap pages. This also checks that the
|
* Run checksubpage on all heap pages. This also checks that the
|
||||||
* linked lists of pagerefs are more or less intact.
|
* linked lists of pagerefs are more or less intact.
|
||||||
*/
|
*/
|
||||||
static
|
static void checksubpages(void) {
|
||||||
void
|
|
||||||
checksubpages(void)
|
|
||||||
{
|
|
||||||
struct pageref *pr;
|
struct pageref *pr;
|
||||||
int i;
|
int i;
|
||||||
unsigned sc=0, ac=0;
|
unsigned sc = 0, ac = 0;
|
||||||
|
|
||||||
KASSERT(spinlock_do_i_hold(&kmalloc_spinlock));
|
KASSERT(spinlock_do_i_hold(&kmalloc_spinlock));
|
||||||
|
|
||||||
for (i=0; i<NSIZES; i++) {
|
for (i = 0; i < NSIZES; i++) {
|
||||||
for (pr = sizebases[i]; pr != NULL; pr = pr->next_samesize) {
|
for (pr = sizebases[i]; pr != NULL; pr = pr->next_samesize) {
|
||||||
checksubpage(pr);
|
checksubpage(pr);
|
||||||
KASSERT(sc < TOTAL_PAGEREFS);
|
KASSERT(sc < TOTAL_PAGEREFS);
|
||||||
@ -599,7 +572,7 @@ checksubpages(void)
|
|||||||
ac++;
|
ac++;
|
||||||
}
|
}
|
||||||
|
|
||||||
KASSERT(sc==ac);
|
KASSERT(sc == ac);
|
||||||
}
|
}
|
||||||
#else
|
#else
|
||||||
#define checksubpages()
|
#define checksubpages()
|
||||||
@ -622,10 +595,7 @@ static unsigned mallocgeneration;
|
|||||||
/*
|
/*
|
||||||
* Label a block of memory.
|
* Label a block of memory.
|
||||||
*/
|
*/
|
||||||
static
|
static void *establishlabel(void *block, vaddr_t label) {
|
||||||
void *
|
|
||||||
establishlabel(void *block, vaddr_t label)
|
|
||||||
{
|
|
||||||
struct malloclabel *ml;
|
struct malloclabel *ml;
|
||||||
|
|
||||||
ml = block;
|
ml = block;
|
||||||
@ -635,10 +605,7 @@ establishlabel(void *block, vaddr_t label)
|
|||||||
return ml;
|
return ml;
|
||||||
}
|
}
|
||||||
|
|
||||||
static
|
static void dump_subpage(struct pageref *pr, unsigned generation) {
|
||||||
void
|
|
||||||
dump_subpage(struct pageref *pr, unsigned generation)
|
|
||||||
{
|
|
||||||
unsigned blocksize = sizes[PR_BLOCKTYPE(pr)];
|
unsigned blocksize = sizes[PR_BLOCKTYPE(pr)];
|
||||||
unsigned numblocks = PAGE_SIZE / blocksize;
|
unsigned numblocks = PAGE_SIZE / blocksize;
|
||||||
unsigned numfreewords = DIVROUNDUP(numblocks, 32);
|
unsigned numfreewords = DIVROUNDUP(numblocks, 32);
|
||||||
@ -649,7 +616,7 @@ dump_subpage(struct pageref *pr, unsigned generation)
|
|||||||
struct malloclabel *ml;
|
struct malloclabel *ml;
|
||||||
unsigned i;
|
unsigned i;
|
||||||
|
|
||||||
for (i=0; i<numfreewords; i++) {
|
for (i = 0; i < numfreewords; i++) {
|
||||||
isfree[i] = 0;
|
isfree[i] = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -661,7 +628,7 @@ dump_subpage(struct pageref *pr, unsigned generation)
|
|||||||
isfree[i / 32] |= mask;
|
isfree[i / 32] |= mask;
|
||||||
}
|
}
|
||||||
|
|
||||||
for (i=0; i<numblocks; i++) {
|
for (i = 0; i < numblocks; i++) {
|
||||||
mask = 1U << (i % 32);
|
mask = 1U << (i % 32);
|
||||||
if (isfree[i / 32] & mask) {
|
if (isfree[i / 32] & mask) {
|
||||||
continue;
|
continue;
|
||||||
@ -671,20 +638,17 @@ dump_subpage(struct pageref *pr, unsigned generation)
|
|||||||
if (ml->generation != generation) {
|
if (ml->generation != generation) {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
kprintf("%5zu bytes at %p, allocated at %p\n",
|
kprintf("%5zu bytes at %p, allocated at %p\n", blocksize, (void *)blockaddr,
|
||||||
blocksize, (void *)blockaddr, (void *)ml->label);
|
(void *)ml->label);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static
|
static void dump_subpages(unsigned generation) {
|
||||||
void
|
|
||||||
dump_subpages(unsigned generation)
|
|
||||||
{
|
|
||||||
struct pageref *pr;
|
struct pageref *pr;
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
kprintf("Remaining allocations from generation %u:\n", generation);
|
kprintf("Remaining allocations from generation %u:\n", generation);
|
||||||
for (i=0; i<NSIZES; i++) {
|
for (i = 0; i < NSIZES; i++) {
|
||||||
for (pr = sizebases[i]; pr != NULL; pr = pr->next_samesize) {
|
for (pr = sizebases[i]; pr != NULL; pr = pr->next_samesize) {
|
||||||
dump_subpage(pr, generation);
|
dump_subpage(pr, generation);
|
||||||
}
|
}
|
||||||
@ -697,9 +661,7 @@ dump_subpages(unsigned generation)
|
|||||||
|
|
||||||
#endif /* LABELS */
|
#endif /* LABELS */
|
||||||
|
|
||||||
void
|
void kheap_nextgeneration(void) {
|
||||||
kheap_nextgeneration(void)
|
|
||||||
{
|
|
||||||
#ifdef LABELS
|
#ifdef LABELS
|
||||||
spinlock_acquire(&kmalloc_spinlock);
|
spinlock_acquire(&kmalloc_spinlock);
|
||||||
mallocgeneration++;
|
mallocgeneration++;
|
||||||
@@ -707,9 +669,7 @@ kheap_nextgeneration(void)
 #endif
 }
 
-void
-kheap_dump(void)
-{
+void kheap_dump(void) {
 #ifdef LABELS
 	/* print the whole thing with interrupts off */
 	spinlock_acquire(&kmalloc_spinlock);
@@ -720,15 +680,13 @@ kheap_dump(void)
 #endif
 }
 
-void
-kheap_dumpall(void)
-{
+void kheap_dumpall(void) {
 #ifdef LABELS
 	unsigned i;
 
 	/* print the whole thing with interrupts off */
 	spinlock_acquire(&kmalloc_spinlock);
-	for (i=0; i<=mallocgeneration; i++) {
+	for (i = 0; i <= mallocgeneration; i++) {
 		dump_subpages(i);
 	}
 	spinlock_release(&kmalloc_spinlock);
@@ -742,21 +700,18 @@ kheap_dumpall(void)
 /*
  * Print the allocated/freed map of a single kernel heap page.
  */
-static
-void
-subpage_stats(struct pageref *pr)
-{
+static void subpage_stats(struct pageref *pr) {
 	vaddr_t prpage, fla;
 	struct freelist *fl;
 	int blktype;
 	unsigned i, n, index;
-	uint32_t freemap[PAGE_SIZE / (SMALLEST_SUBPAGE_SIZE*32)];
+	uint32_t freemap[PAGE_SIZE / (SMALLEST_SUBPAGE_SIZE * 32)];
 
 	checksubpage(pr);
 	KASSERT(spinlock_do_i_hold(&kmalloc_spinlock));
 
 	/* clear freemap[] */
-	for (i=0; i<ARRAYCOUNT(freemap); i++) {
+	for (i = 0; i < ARRAYCOUNT(freemap); i++) {
 		freemap[i] = 0;
 	}
 
@@ -774,20 +729,19 @@ subpage_stats(struct pageref *pr)
 
 		for (; fl != NULL; fl = fl->next) {
 			fla = (vaddr_t)fl;
-			index = (fla-prpage) / sizes[blktype];
-			KASSERT(index<n);
-			freemap[index/32] |= (1<<(index%32));
+			index = (fla - prpage) / sizes[blktype];
+			KASSERT(index < n);
+			freemap[index / 32] |= (1 << (index % 32));
 		}
 	}
 
-	kprintf("at 0x%08lx: size %-4lu %u/%u free\n",
-		(unsigned long)prpage, (unsigned long) sizes[blktype],
-		(unsigned) pr->nfree, n);
+	kprintf("at 0x%08lx: size %-4lu %u/%u free\n", (unsigned long)prpage,
+		(unsigned long)sizes[blktype], (unsigned)pr->nfree, n);
 	kprintf(" ");
-	for (i=0; i<n; i++) {
-		int val = (freemap[i/32] & (1<<(i%32)))!=0;
+	for (i = 0; i < n; i++) {
+		int val = (freemap[i / 32] & (1 << (i % 32))) != 0;
 		kprintf("%c", val ? '.' : '*');
-		if (i%64==63 && i<n-1) {
+		if (i % 64 == 63 && i < n - 1) {
 			kprintf("\n ");
 		}
 	}
@@ -797,9 +751,7 @@ subpage_stats(struct pageref *pr)
 /*
  * Print the whole heap.
  */
-void
-kheap_printstats(void)
-{
+void kheap_printstats(void) {
 	struct pageref *pr;
 
 	/* print the whole thing with interrupts off */
@@ -819,13 +771,10 @@ kheap_printstats(void)
 /*
  * Remove a pageref from both lists that it's on.
  */
-static
-void
-remove_lists(struct pageref *pr, int blktype)
-{
+static void remove_lists(struct pageref *pr, int blktype) {
 	struct pageref **guy;
 
-	KASSERT(blktype>=0 && blktype<NSIZES);
+	KASSERT(blktype >= 0 && blktype < NSIZES);
 
 	for (guy = &sizebases[blktype]; *guy; guy = &(*guy)->next_samesize) {
 		checksubpage(*guy);
@@ -848,19 +797,15 @@ remove_lists(struct pageref *pr, int blktype)
  * Given a requested client size, return the block type, that is, the
  * index into the sizes[] array for the block size to use.
 */
-static
-inline
-int blocktype(size_t clientsz)
-{
+static inline int blocktype(size_t clientsz) {
 	unsigned i;
-	for (i=0; i<NSIZES; i++) {
+	for (i = 0; i < NSIZES; i++) {
 		if (clientsz <= sizes[i]) {
 			return i;
 		}
 	}
 
-	panic("Subpage allocator cannot handle allocation of size %zu\n",
-	      clientsz);
+	panic("Subpage allocator cannot handle allocation of size %zu\n", clientsz);
 
 	// keep compiler happy
 	return 0;
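blocktype is a linear first-fit over the size-class table: the smallest sizes[i] that can hold the request wins, and anything beyond the largest class panics, since callers are expected to have routed big requests to the page allocator instead. A standalone sketch with a hypothetical class table (the kernel's real sizes[] may differ):

    #include <stddef.h>

    /* Hypothetical size classes for illustration. */
    static const size_t demo_sizes[] = { 16, 32, 64, 128, 256, 512, 1024, 2048 };
    #define DEMO_NSIZES (sizeof(demo_sizes) / sizeof(demo_sizes[0]))

    /* Return the first class big enough for clientsz, or -1 if none fits. */
    static int demo_blocktype(size_t clientsz) {
        unsigned i;
        for (i = 0; i < DEMO_NSIZES; i++) {
            if (clientsz <= demo_sizes[i]) {
                return (int)i;
            }
        }
        return -1; /* too big for a subpage allocator like this one */
    }

    /* demo_blocktype(24) == 1 (32-byte class); demo_blocktype(4096) == -1. */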
@@ -870,14 +815,12 @@ int blocktype(size_t clientsz)
  * Allocate a block of size SZ, where SZ is not large enough to
  * warrant a whole-page allocation.
  */
-static
-void *
-subpage_kmalloc(size_t sz
+static void *subpage_kmalloc(size_t sz
 #ifdef LABELS
-		, vaddr_t label
+		,
+		vaddr_t label
 #endif
-	)
-{
+	) {
 	unsigned blktype;	// index into sizes[] that we're using
 	struct pageref *pr;	// pageref for page we're allocating from
 	vaddr_t prpage;		// PR_PAGEADDR(pr)
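With LABELS defined, subpage_kmalloc takes an extra vaddr_t label parameter inside the #ifdef; the reformat merely splits the comma and the parameter across two lines. The label is presumably the allocation site's address. A hedged sketch of how a caller could supply one; whether this file actually uses __builtin_return_address is an assumption:

    /* Sketch: one way a kmalloc-style wrapper might obtain a label. */
    void *kmalloc_sketch(size_t sz) {
    #ifdef LABELS
        vaddr_t label = (vaddr_t)__builtin_return_address(0);
        return subpage_kmalloc(sz, label);
    #else
        return subpage_kmalloc(sz);
    #endif
    }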
@@ -935,8 +878,7 @@ subpage_kmalloc(size_t sz
 		fla = (vaddr_t)fl;
 		KASSERT(fla - prpage < PAGE_SIZE);
 		pr->freelist_offset = fla - prpage;
-	}
-	else {
+	} else {
 		KASSERT(pr->nfree == 0);
 		pr->freelist_offset = INVALID_OFFSET;
 	}
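Note the representation: the pageref stores its freelist head as a byte offset from the page base rather than as a pointer, with INVALID_OFFSET meaning the page has no free blocks; the KASSERT above guarantees the offset fits within a page. The implied conversion back to a pointer, sketched:

    /* Sketch of the offset-to-pointer mapping behind pr->freelist_offset. */
    static struct freelist *freelist_head_sketch(vaddr_t prpage, unsigned offset) {
        if (offset == INVALID_OFFSET) {
            return NULL; /* no free blocks on this page */
        }
        return (struct freelist *)(prpage + offset); /* head lives in the page */
    }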
@@ -965,7 +907,7 @@ subpage_kmalloc(size_t sz
 
 	spinlock_release(&kmalloc_spinlock);
 	prpage = alloc_kpages(1);
-	if (prpage==0) {
+	if (prpage == 0) {
 		/* Out of memory. */
 		kprintf("kmalloc: Subpage allocator couldn't get a page\n");
 		return NULL;
@@ -978,7 +920,7 @@ subpage_kmalloc(size_t sz
 	spinlock_acquire(&kmalloc_spinlock);
 
 	pr = allocpageref();
-	if (pr==NULL) {
+	if (pr == NULL) {
 		/* Couldn't allocate accounting space for the new page. */
 		spinlock_release(&kmalloc_spinlock);
 		free_kpages(prpage);
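Both paths above follow the same locking discipline: kmalloc_spinlock is dropped before calling into the page allocator (alloc_kpages here, free_kpages in the failure path), and re-taken before touching the subpage bookkeeping again. The shape in miniature, under the assumption that the page allocator must not be entered with this spinlock held:

    /* Sketch of the release/call/reacquire pattern used around alloc_kpages. */
    static vaddr_t get_page_unlocked_sketch(void) {
        vaddr_t page;

        spinlock_release(&kmalloc_spinlock); /* don't hold it across the call */
        page = alloc_kpages(1);              /* may be slow or take other locks */
        spinlock_acquire(&kmalloc_spinlock); /* re-take before shared state */
        return page;                         /* 0 on failure, as above */
    }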
@@ -998,14 +940,14 @@ subpage_kmalloc(size_t sz
 	fla = prpage;
 	fl = (struct freelist *)fla;
 	fl->next = NULL;
-	for (i=1; i<pr->nfree; i++) {
-		fl = (struct freelist *)(fla + i*sizes[blktype]);
-		fl->next = (struct freelist *)(fla + (i-1)*sizes[blktype]);
+	for (i = 1; i < pr->nfree; i++) {
+		fl = (struct freelist *)(fla + i * sizes[blktype]);
+		fl->next = (struct freelist *)(fla + (i - 1) * sizes[blktype]);
 		KASSERT(fl != fl->next);
 	}
-	fla = (vaddr_t) fl;
+	fla = (vaddr_t)fl;
 	pr->freelist_offset = fla - prpage;
-	KASSERT(pr->freelist_offset == (pr->nfree-1)*sizes[blktype]);
+	KASSERT(pr->freelist_offset == (pr->nfree - 1) * sizes[blktype]);
 
 	pr->next_samesize = sizebases[blktype];
 	sizebases[blktype] = pr;
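The loop above threads the new page's blocks into a freelist back to front: block 0 terminates the list, each later block points at its predecessor, and the head therefore ends up at the highest-addressed block, which the closing KASSERT pins down as offset (nfree - 1) * blocksize. The same construction as a standalone sketch:

    /* Sketch: carve a page into nblocks blocks of blocksize bytes, chained
     * so the list head is the last (highest-addressed) block in the page. */
    struct freelist_sketch { struct freelist_sketch *next; };

    static struct freelist_sketch *build_freelist(vaddr_t page, size_t blocksize,
                                                  unsigned nblocks) {
        struct freelist_sketch *fl = (struct freelist_sketch *)page;
        unsigned i;

        fl->next = NULL; /* block 0 ends the list */
        for (i = 1; i < nblocks; i++) {
            fl = (struct freelist_sketch *)(page + i * blocksize);
            fl->next = (struct freelist_sketch *)(page + (i - 1) * blocksize);
        }
        return fl; /* head: last block in the page */
    }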
@@ -1021,10 +963,7 @@ subpage_kmalloc(size_t sz
  * Free a pointer previously returned from subpage_kmalloc. If the
  * pointer is not on any heap page we recognize, return -1.
  */
-static
-int
-subpage_kfree(void *ptr)
-{
+static int subpage_kfree(void *ptr) {
 	int blktype;		// index into sizes[] that we're using
 	vaddr_t ptraddr;	// same as ptr
 	struct pageref *pr;	// pageref for page we're freeing in
@@ -1075,7 +1014,7 @@ subpage_kfree(void *ptr)
 	KASSERT(blktype >= 0 && blktype < NSIZES);
 
 	/* check for corruption */
-	KASSERT(blktype>=0 && blktype<NSIZES);
+	KASSERT(blktype >= 0 && blktype < NSIZES);
 	checksubpage(pr);
 
 	if (ptraddr >= prpage && ptraddr < prpage + PAGE_SIZE) {
@@ -1083,7 +1022,7 @@ subpage_kfree(void *ptr)
 		}
 	}
 
-	if (pr==NULL) {
+	if (pr == NULL) {
 		/* Not on any of our pages - not a subpage allocation */
 		spinlock_release(&kmalloc_spinlock);
 		return -1;
@@ -1145,8 +1084,7 @@ subpage_kfree(void *ptr)
 		/* Call free_kpages without kmalloc_spinlock. */
 		spinlock_release(&kmalloc_spinlock);
 		free_kpages(prpage);
-	}
-	else {
+	} else {
 		spinlock_release(&kmalloc_spinlock);
 	}
 
@@ -1166,9 +1104,7 @@ subpage_kfree(void *ptr)
  * Allocate a block of size SZ. Redirect either to subpage_kmalloc or
  * alloc_kpages depending on how big SZ is.
  */
-void *
-kmalloc(size_t sz)
-{
+void *kmalloc(size_t sz) {
 	size_t checksz;
 #ifdef LABELS
 	vaddr_t label;
@@ -1188,9 +1124,9 @@ kmalloc(size_t sz)
 		vaddr_t address;
 
 		/* Round up to a whole number of pages. */
-		npages = (sz + PAGE_SIZE - 1)/PAGE_SIZE;
+		npages = (sz + PAGE_SIZE - 1) / PAGE_SIZE;
 		address = alloc_kpages(npages);
-		if (address==0) {
+		if (address == 0) {
 			return NULL;
 		}
 		KASSERT(address % PAGE_SIZE == 0);
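The rounding expression (sz + PAGE_SIZE - 1) / PAGE_SIZE is the standard integer ceiling division: with a 4096-byte page (an assumption for the demo; OS/161's PAGE_SIZE is architecture-defined), sz = 4096 needs 1 page and sz = 4097 needs 2. As a worked check:

    #include <stddef.h>

    #define DEMO_PAGE_SIZE 4096 /* assumed page size for illustration */

    /* Ceiling division: smallest page count whose total covers sz bytes. */
    static size_t pages_needed(size_t sz) {
        return (sz + DEMO_PAGE_SIZE - 1) / DEMO_PAGE_SIZE;
    }

    /* pages_needed(1) == 1, pages_needed(4096) == 1, pages_needed(4097) == 2 */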
@@ -1208,17 +1144,14 @@ kmalloc(size_t sz)
 /*
  * Free a block previously returned from kmalloc.
  */
-void
-kfree(void *ptr)
-{
+void kfree(void *ptr) {
 	/*
 	 * Try subpage first; if that fails, assume it's a big allocation.
 	 */
 	if (ptr == NULL) {
 		return;
 	} else if (subpage_kfree(ptr)) {
-		KASSERT((vaddr_t)ptr%PAGE_SIZE==0);
+		KASSERT((vaddr_t)ptr % PAGE_SIZE == 0);
 		free_kpages((vaddr_t)ptr);
 	}
 }
 
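kfree's dispatch relies on two invariants visible in this diff: subpage_kfree returns nonzero only when the pointer is not on any subpage heap page, and whole-page allocations come from alloc_kpages, whose results kmalloc KASSERTs to be page-aligned. That is why the alignment KASSERT before free_kpages is safe. The routing logic restated as a sketch:

    /* Sketch of kfree's routing decision, restated outside the diff. */
    void kfree_sketch(void *ptr) {
        if (ptr == NULL) {
            return; /* freeing NULL is a no-op */
        }
        if (subpage_kfree(ptr) == 0) {
            return; /* it was a subpage block; all done */
        }
        /* Not a subpage block, so it must be a whole-page allocation,
         * and alloc_kpages only hands out page-aligned addresses. */
        KASSERT((vaddr_t)ptr % PAGE_SIZE == 0);
        free_kpages((vaddr_t)ptr);
    }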
|