Revert "Merging in 1.0.2."

This reverts commit 50cf3276e7.
Geoffrey Challen
2017-01-09 22:52:13 -05:00
parent 50cf3276e7
commit e318e3171e
118 changed files with 3158 additions and 1350 deletions

View File

@@ -126,6 +126,21 @@ free_kpages(vaddr_t addr)
(void)addr;
}
unsigned
int
coremap_used_bytes() {
/* dumbvm doesn't track page allocations. Return 0 so that khu works. */
return 0;
}
void
vm_tlbshootdown_all(void)
{
panic("dumbvm tried to do tlb shootdown?!\n");
}
void
vm_tlbshootdown(const struct tlbshootdown *ts)
{

View File

@@ -30,3 +30,4 @@ options sfs # Always use the file system
#options netfs # You might write this as a project.
options dumbvm # Chewing gum and baling wire.
#options synchprobs # Uncomment to enable ASST1 synchronization problems

View File

@@ -309,6 +309,14 @@ file ../common/libc/string/strlen.c
file ../common/libc/string/strrchr.c
file ../common/libc/string/strtok_r.c
#
# libtest161 shared code and security functions
#
file ../common/libtest161/test161.c
file ../common/libtest161/secure.c
file ../common/libtest161/sha256.c
########################################
# #
# Core kernel source files #
@@ -434,7 +442,19 @@ file test/threadlisttest.c
file test/threadtest.c
file test/tt3.c
file test/synchtest.c
file test/rwtest.c
file test/semunit.c
file test/hmacunit.c
file test/kmalloctest.c
file test/fstest.c
file test/lib.c
optfile net test/nettest.c
defoption synchprobs
optfile synchprobs synchprobs/whalemating.c
optfile synchprobs synchprobs/stoplight.c
optfile synchprobs test/synchprobs.c
defoption automationtest
optfile automationtest test/automationtest.c

View File

@@ -206,7 +206,7 @@ echo "$CONFNAME" $CONFTMP | awk '
#
if [ ! -d "$COMPILEDIR" ]; then
mkdir $COMPILEDIR
mkdir -p $COMPILEDIR
fi
echo -n 'Generating files...'

View File

@@ -35,6 +35,7 @@
#include <threadlist.h>
#include <machine/vm.h> /* for TLBSHOOTDOWN_MAX */
extern unsigned num_cpus;
/*
* Per-cpu structure
@@ -74,21 +75,24 @@ struct cpu {
* Accessed by other cpus.
* Protected by the IPI lock.
*
* TLB shootdown requests made to this CPU are queued in
* c_shootdown[], with c_numshootdown holding the number of
* requests. TLBSHOOTDOWN_MAX is the maximum number that can
* be queued at once, which is machine-dependent.
* If c_numshootdown is -1 (TLBSHOOTDOWN_ALL), all mappings
* should be invalidated. This is used if more than
* TLBSHOOTDOWN_MAX mappings are going to be invalidated at
* once. TLBSHOOTDOWN_MAX is MD and chosen based on when it
* becomes more efficient just to flush the whole TLB.
*
* The contents of struct tlbshootdown are also machine-
* dependent and might reasonably be either an address space
* and vaddr pair, or a paddr, or something else.
* struct tlbshootdown is machine-dependent and might
* reasonably be either an address space and vaddr pair, or a
* paddr, or something else.
*/
uint32_t c_ipi_pending; /* One bit for each IPI number */
struct tlbshootdown c_shootdown[TLBSHOOTDOWN_MAX];
unsigned c_numshootdown;
int c_numshootdown;
struct spinlock c_ipi_lock;
};
#define TLBSHOOTDOWN_ALL (-1)
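/*
 * Illustrative sketch, not part of this header: a VM system unmapping a
 * range of pages on another CPU could send one request per page and let
 * ipi_tlbshootdown collapse the queue to TLBSHOOTDOWN_ALL once it fills,
 * so callers need not track TLBSHOOTDOWN_MAX themselves.  The ts_vaddr
 * field name is an assumption; the real struct tlbshootdown is MD.
 *
 *	struct tlbshootdown ts;
 *	for (i = 0; i < npages; i++) {
 *		ts.ts_vaddr = base + i * PAGE_SIZE;
 *		ipi_tlbshootdown(target, &ts);
 *	}
 */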
/*
* Initialization functions.
*

View File

@@ -129,6 +129,8 @@ uint32_t random(void);
void *kmalloc(size_t size);
void kfree(void *ptr);
void kheap_printstats(void);
void kheap_printused(void);
unsigned long kheap_getused(void);
void kheap_nextgeneration(void);
void kheap_dump(void);
void kheap_dumpall(void);

View File

@@ -44,10 +44,10 @@
* internally.
*/
struct semaphore {
char *sem_name;
char *sem_name;
struct wchan *sem_wchan;
struct spinlock sem_lock;
volatile unsigned sem_count;
volatile unsigned sem_count;
};
struct semaphore *sem_create(const char *name, unsigned initial_count);
@@ -137,5 +137,40 @@ void cv_wait(struct cv *cv, struct lock *lock);
void cv_signal(struct cv *cv, struct lock *lock);
void cv_broadcast(struct cv *cv, struct lock *lock);
/*
* Reader-writer locks.
*
* When the lock is created, no thread should be holding it. Likewise,
* when the lock is destroyed, no thread should be holding it.
*
* The name field is for easier debugging. A copy of the name is
* (should be) made internally.
*/
struct rwlock {
char *rwlock_name;
// add what you need here
// (don't forget to mark things volatile as needed)
};
struct rwlock * rwlock_create(const char *);
void rwlock_destroy(struct rwlock *);
/*
* Operations:
* rwlock_acquire_read - Get the lock for reading. Multiple threads can
* hold the lock for reading at the same time.
* rwlock_release_read - Free the lock.
* rwlock_acquire_write - Get the lock for writing. Only one thread can
* hold the write lock at one time.
* rwlock_release_write - Free the write lock.
*
* These operations must be atomic. You get to write them.
*/
void rwlock_acquire_read(struct rwlock *);
void rwlock_release_read(struct rwlock *);
void rwlock_acquire_write(struct rwlock *);
void rwlock_release_write(struct rwlock *);
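/*
 * Illustrative sketch only -- not the assignment solution and not part of
 * this header.  One common shape layers the rwlock on a lock, a CV, and a
 * reader count; the field names rw_lock, rw_cv, rw_readers, and rw_writer
 * are assumptions for the example.
 *
 *	void
 *	rwlock_acquire_read(struct rwlock *rw)
 *	{
 *		lock_acquire(rw->rw_lock);
 *		while (rw->rw_writer) {
 *			cv_wait(rw->rw_cv, rw->rw_lock);
 *		}
 *		rw->rw_readers++;
 *		lock_release(rw->rw_lock);
 *	}
 *
 *	void
 *	rwlock_release_read(struct rwlock *rw)
 *	{
 *		lock_acquire(rw->rw_lock);
 *		KASSERT(rw->rw_readers > 0);
 *		rw->rw_readers--;
 *		if (rw->rw_readers == 0) {
 *			cv_broadcast(rw->rw_cv, rw->rw_lock);
 *		}
 *		lock_release(rw->rw_lock);
 *	}
 *
 * The write side mirrors this: a writer waits until rw_readers == 0 and
 * rw_writer is false, sets rw_writer, and broadcasts on release.  Note
 * that this simple version can starve writers.
 */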
#endif /* _SYNCH_H_ */

View File

@@ -30,6 +30,13 @@
#ifndef _TEST_H_
#define _TEST_H_
/* Get __PF() for declaring printf-like functions. */
#include <cdefs.h>
#include <kern/secret.h>
#include "opt-synchprobs.h"
#include "opt-automationtest.h"
/*
* Declarations for test code and other miscellaneous high-level
* functions.
@@ -52,8 +59,18 @@ int threadtest2(int, char **);
int threadtest3(int, char **);
int semtest(int, char **);
int locktest(int, char **);
int locktest2(int, char **);
int locktest3(int, char **);
int cvtest(int, char **);
int cvtest2(int, char **);
int cvtest3(int, char **);
int cvtest4(int, char **);
int cvtest5(int, char **);
int rwtest(int, char **);
int rwtest2(int, char **);
int rwtest3(int, char **);
int rwtest4(int, char **);
int rwtest5(int, char **);
/* semaphore unit tests */
int semu1(int, char **);
@@ -88,11 +105,15 @@ int longstress(int, char **);
int createstress(int, char **);
int printfile(int, char **);
/* HMAC/hash tests */
int hmacu1(int, char**);
/* other tests */
int kmalloctest(int, char **);
int kmallocstress(int, char **);
int kmalloctest3(int, char **);
int kmalloctest4(int, char **);
int kmalloctest5(int, char **);
int nettest(int, char **);
/* Routine for running a user-level program. */
@@ -104,5 +125,75 @@ void menu(char *argstr);
/* The main function, called from start.S. */
void kmain(char *bootstring);
#if OPT_SYNCHPROBS
/*
* Synchronization driver primitives.
*/
void male_start(uint32_t);
void male_end(uint32_t);
void female_start(uint32_t);
void female_end(uint32_t);
void matchmaker_start(uint32_t);
void matchmaker_end(uint32_t);
int whalemating(int, char **);
void inQuadrant(int, uint32_t);
void leaveIntersection(uint32_t);
int stoplight(int, char **);
/*
* Synchronization problem primitives.
*/
/*
* whalemating.c.
*/
void whalemating_init(void);
void whalemating_cleanup(void);
void male(uint32_t);
void female(uint32_t);
void matchmaker(uint32_t);
/*
* stoplight.c.
*/
void gostraight(uint32_t, uint32_t);
void turnleft(uint32_t, uint32_t);
void turnright(uint32_t, uint32_t);
void stoplight_init(void);
void stoplight_cleanup(void);
#endif
/*
* Automation tests for detecting kernel deadlocks and livelocks.
*/
#if OPT_AUTOMATIONTEST
int dltest(int, char **);
int ll1test(int, char **);
int ll16test(int, char **);
#endif
void random_yielder(uint32_t);
void random_spinner(uint32_t);
/*
* kprintf variants that do not (or only) print during automated testing.
*/
#ifdef SECRET_TESTING
#define kprintf_t(...) kprintf(__VA_ARGS__)
#define kprintf_n(...) silent(__VA_ARGS__)
#else
#define kprintf_t(...) silent(__VA_ARGS__)
#define kprintf_n(...) kprintf(__VA_ARGS__)
#endif
static inline void silent(const char * fmt, ...) { (void)fmt; };
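/*
 * Usage sketch: test code prints its human-readable output with kprintf_n
 * and its progress/trace output with kprintf_t, e.g.
 *
 *	kprintf_n("Starting sem1...\n");	// shown on normal runs
 *	kprintf_t(".");				// shown only under SECRET_TESTING
 *
 * so exactly one of the two expands to a real kprintf in any given build.
 */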
#endif /* _TEST_H_ */

View File

@@ -48,6 +48,7 @@ struct cpu;
/* Size of kernel stacks; must be power of 2 */
#define STACK_SIZE 4096
#define MAX_NAME_LENGTH 64
/* Mask for extracting the stack base address of a kernel stack pointer */
#define STACK_MASK (~(vaddr_t)(STACK_SIZE-1))
@@ -70,7 +71,16 @@ struct thread {
* These go up front so they're easy to get to even if the
* debugger is messed up.
*/
char *t_name; /* Name of this thread */
/*
* Name of this thread. It used to be dynamically allocated with kmalloc,
* but because the name is not freed until exorcise, that caused small
* variations in the amount of available memory. A fixed-size buffer gives
* more predictable behavior at the cost of a small amount of memory
* overhead and the inability to give threads huge names.
*/
char t_name[MAX_NAME_LENGTH];
const char *t_wchan_name; /* Name of wait channel, if sleeping */
threadstate_t t_state; /* State this thread is in */
@@ -168,5 +178,7 @@ void schedule(void);
*/
void thread_consider_migration(void);
extern unsigned thread_count;
void thread_wait_for_count(unsigned);
#endif /* _THREAD_H_ */

View File

@@ -34,7 +34,7 @@
* Leave this alone, so we can tell what version of the OS/161 base
* code we gave you.
*/
#define BASE_VERSION "2.0.2"
#define BASE_VERSION "2.0.1"
/*
* Change this as you see fit in the course of hacking the system.

View File

@@ -156,13 +156,6 @@ int vfs_getcwd(struct uio *buf);
* vfs_unmount - Unmount the filesystem presently mounted on the
* specified device.
*
* vfs_swapon - Look up DEVNAME and mark it as a swap device,
* returning a vnode. Similar to vfs_mount.
*
* vfs_swapoff - Unmark DEVNAME as a swap device. The vnode
* previously returned by vfs_swapon should be
* decref'd first. Similar to vfs_unmount.
*
* vfs_unmountall - Unmount all mounted filesystems.
*/
@@ -179,8 +172,6 @@ int vfs_mount(const char *devname, void *data,
struct device *dev,
struct fs **result));
int vfs_unmount(const char *devname);
int vfs_swapon(const char *devname, struct vnode **result);
int vfs_swapoff(const char *devname);
int vfs_unmountall(void);
/*

View File

@@ -55,7 +55,15 @@ int vm_fault(int faulttype, vaddr_t faultaddress);
vaddr_t alloc_kpages(unsigned npages);
void free_kpages(vaddr_t addr);
/*
* Return amount of memory (in bytes) used by allocated coremap pages. If
* there are ongoing allocations, this value could change after it is returned
* to the caller. But it should have been correct at some point in time.
*/
unsigned int coremap_used_bytes(void);
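/*
 * Illustrative sketch of one way a real VM might satisfy this interface
 * (dumbvm just returns 0).  The coremap[] array, coremap_npages, the
 * cm_allocated field, and coremap_lock are assumptions for the example;
 * a production version would more likely keep a running counter updated
 * on alloc/free.
 *
 *	unsigned int
 *	coremap_used_bytes(void)
 *	{
 *		unsigned int i, used = 0;
 *
 *		spinlock_acquire(&coremap_lock);
 *		for (i = 0; i < coremap_npages; i++) {
 *			if (coremap[i].cm_allocated) {
 *				used += PAGE_SIZE;
 *			}
 *		}
 *		spinlock_release(&coremap_lock);
 *		return used;
 *	}
 */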
/* TLB shootdown handling called from interprocessor_interrupt */
void vm_tlbshootdown_all(void);
void vm_tlbshootdown(const struct tlbshootdown *);

View File

@@ -39,6 +39,8 @@
#include <mainbus.h>
#include <vfs.h> // for vfs_sync()
#include <lamebus/ltrace.h> // for ltrace_stop()
#include <kern/secret.h>
#include <test.h>
/* Flags word for DEBUG() macro. */
@@ -90,13 +92,14 @@ console_send(void *junk, const char *data, size_t len)
}
/*
* Printf to the console.
* kprintf and tprintf helper function.
*/
static
inline
int
kprintf(const char *fmt, ...)
__kprintf(const char *fmt, va_list ap)
{
int chars;
va_list ap;
bool dolock;
dolock = kprintf_lock != NULL
@@ -111,9 +114,7 @@ kprintf(const char *fmt, ...)
spinlock_acquire(&kprintf_spinlock);
}
va_start(ap, fmt);
chars = __vprintf(console_send, NULL, fmt, ap);
va_end(ap);
if (dolock) {
lock_release(kprintf_lock);
@@ -125,6 +126,22 @@ kprintf(const char *fmt, ...)
return chars;
}
/*
* Printf to the console.
*/
int
kprintf(const char *fmt, ...)
{
int chars;
va_list ap;
va_start(ap, fmt);
chars = __kprintf(fmt, ap);
va_end(ap);
return chars;
}
/*
* panic() is for fatal errors. It prints the printf arguments it's
* passed and then halts the system.

View File

@@ -48,6 +48,7 @@
#include <device.h>
#include <syscall.h>
#include <test.h>
#include <kern/test161.h>
#include <version.h>
#include "autoconf.h" // for pseudoconfig
@@ -127,6 +128,7 @@ boot(void)
vm_bootstrap();
kprintf_bootstrap();
thread_start_cpus();
test161_bootstrap();
/* Default bootfs - but ignore failure, in case emu0 doesn't exist */
vfs_setbootfs("emu0");

View File

@@ -41,8 +41,11 @@
#include <sfs.h>
#include <syscall.h>
#include <test.h>
#include <prompt.h>
#include "opt-sfs.h"
#include "opt-net.h"
#include "opt-synchprobs.h"
#include "opt-automationtest.h"
/*
* In-kernel menu and command dispatcher.
@@ -114,6 +117,7 @@ common_prog(int nargs, char **args)
{
struct proc *proc;
int result;
unsigned tc;
/* Create a process for the new program to run in. */
proc = proc_create_runprogram(args[0] /* name */);
@@ -121,6 +125,8 @@ common_prog(int nargs, char **args)
return ENOMEM;
}
tc = thread_count;
result = thread_fork(args[0] /* thread name */,
proc /* new process */,
cmd_progthread /* thread function */,
@@ -136,6 +142,10 @@ common_prog(int nargs, char **args)
* once you write the code for handling that.
*/
// Wait for all threads to finish cleanup, otherwise khu will be a bit behind,
// especially once swapping is enabled.
thread_wait_for_count(tc);
return 0;
}
@@ -373,6 +383,18 @@ cmd_kheapstats(int nargs, char **args)
return 0;
}
static
int
cmd_kheapused(int nargs, char **args)
{
(void)nargs;
(void)args;
kheap_printused();
return 0;
}
static
int
cmd_kheapgeneration(int nargs, char **args)
@@ -466,16 +488,31 @@ static const char *testmenu[] = {
"[km2] kmalloc stress test ",
"[km3] Large kmalloc test ",
"[km4] Multipage kmalloc test ",
"[km5] kmalloc coremap alloc test ",
"[tt1] Thread test 1 ",
"[tt2] Thread test 2 ",
"[tt3] Thread test 3 ",
#if OPT_NET
"[net] Network test ",
#endif
"[sy1] Semaphore test ",
"[sy2] Lock test (1) ",
"[sy3] CV test (1) ",
"[sy4] CV test #2 (1) ",
"[sem1] Semaphore test ",
"[lt1] Lock test 1 (1) ",
"[lt2] Lock test 2 (1*) ",
"[lt3] Lock test 3 (1*) ",
"[cvt1] CV test 1 (1) ",
"[cvt2] CV test 2 (1) ",
"[cvt3] CV test 3 (1*) ",
"[cvt4] CV test 4 (1*) ",
"[cvt5] CV test 5 (1) ",
"[rwt1] RW lock test (1?) ",
"[rwt2] RW lock test 2 (1?) ",
"[rwt3] RW lock test 3 (1?) ",
"[rwt4] RW lock test 4 (1?) ",
"[rwt5] RW lock test 5 (1?) ",
#if OPT_SYNCHPROBS
"[sp1] Whalemating test (1) ",
"[sp2] Stoplight test (1) ",
#endif
"[semu1-22] Semaphore unit tests ",
"[fs1] Filesystem test ",
"[fs2] FS read stress ",
@@ -483,6 +520,7 @@ static const char *testmenu[] = {
"[fs4] FS write stress 2 ",
"[fs5] FS long stress ",
"[fs6] FS create stress ",
"[hm1] HMAC unit test ",
NULL
};
@@ -496,15 +534,41 @@ cmd_testmenu(int n, char **a)
showmenu("OS/161 tests menu", testmenu);
kprintf(" (1) These tests will fail until you finish the "
"synch assignment.\n");
kprintf(" (*) These tests will panic on success.\n");
kprintf(" (?) These tests are left to you to implement.\n");
kprintf("\n");
return 0;
}
#if OPT_AUTOMATIONTEST
static const char *automationmenu[] = {
"[dl] Deadlock test (*) ",
"[ll1] Livelock test (1 thread) ",
"[ll16] Livelock test (16 threads) ",
NULL
};
static
int
cmd_automationmenu(int n, char **a)
{
(void)n;
(void)a;
showmenu("OS/161 automation tests menu", automationmenu);
kprintf(" (*) These tests require locks.\n");
kprintf("\n");
return 0;
}
#endif
static const char *mainmenu[] = {
"[?o] Operations menu ",
"[?t] Tests menu ",
"[kh] Kernel heap stats ",
"[khu] Kernel heap usage ",
"[khgen] Next kernel heap generation ",
"[khdump] Dump kernel heap ",
"[q] Quit and shut down ",
@@ -536,6 +600,9 @@ static struct {
{ "help", cmd_mainmenu },
{ "?o", cmd_opsmenu },
{ "?t", cmd_testmenu },
#if OPT_AUTOMATIONTEST
{ "?a", cmd_automationmenu },
#endif
/* operations */
{ "s", cmd_shell },
@@ -554,6 +621,7 @@ static struct {
/* stats */
{ "kh", cmd_kheapstats },
{ "khu", cmd_kheapused },
{ "khgen", cmd_kheapgeneration },
{ "khdump", cmd_kheapdump },
@@ -566,18 +634,33 @@ static struct {
{ "km2", kmallocstress },
{ "km3", kmalloctest3 },
{ "km4", kmalloctest4 },
{ "km5", kmalloctest5 },
#if OPT_NET
{ "net", nettest },
#endif
{ "tt1", threadtest },
{ "tt2", threadtest2 },
{ "tt3", threadtest3 },
{ "sy1", semtest },
/* synchronization assignment tests */
{ "sy2", locktest },
{ "sy3", cvtest },
{ "sy4", cvtest2 },
{ "sem1", semtest },
{ "lt1", locktest },
{ "lt2", locktest2 },
{ "lt3", locktest3 },
{ "cvt1", cvtest },
{ "cvt2", cvtest2 },
{ "cvt3", cvtest3 },
{ "cvt4", cvtest4 },
{ "cvt5", cvtest5 },
{ "rwt1", rwtest },
{ "rwt2", rwtest2 },
{ "rwt3", rwtest3 },
{ "rwt4", rwtest4 },
{ "rwt5", rwtest5 },
#if OPT_SYNCHPROBS
{ "sp1", whalemating },
{ "sp2", stoplight },
#endif
/* semaphore unit tests */
{ "semu1", semu1 },
@@ -611,6 +694,16 @@ static struct {
{ "fs5", longstress },
{ "fs6", createstress },
/* HMAC unit tests */
{ "hm1", hmacu1 },
#if OPT_AUTOMATIONTEST
/* automation tests */
{ "dl", dltest },
{ "ll1", ll1test },
{ "ll16", ll16test },
#endif
{ NULL, NULL }
};
@@ -724,7 +817,11 @@ menu(char *args)
menu_execute(args, 1);
while (1) {
kprintf("OS/161 kernel [? for menu]: ");
/*
* Defined in overwrite.h. If you want to change the kernel prompt, please
do it in that file. Otherwise automated testing will break.
*/
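/*
 * (KERNEL_PROMPT is presumably just the string that used to be hard-coded
 * here, i.e. something like
 *	#define KERNEL_PROMPT "OS/161 kernel [? for menu]: "
 * -- overwrite.h is not shown in this diff, so that exact definition is an
 * assumption.)
 */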
kprintf(KERNEL_PROMPT);
kgets(buf, sizeof(buf));
menu_execute(buf, 0);
}

View File

@@ -33,13 +33,19 @@
#include <types.h>
#include <kern/errno.h>
#include <lib.h>
#include <cpu.h>
#include <thread.h>
#include <synch.h>
#include <vm.h> /* for PAGE_SIZE */
#include <test.h>
#include <kern/test161.h>
#include <mainbus.h>
#include "opt-dumbvm.h"
// from arch/mips/vm/ram.c
extern vaddr_t firstfree;
////////////////////////////////////////////////////////////
// km1/km2
@@ -58,6 +64,12 @@
#define ITEMSIZE 997
#define NTHREADS 8
#define PROGRESS(iter) do { \
if ((iter % 100) == 0) { \
kprintf("."); \
} \
} while (0)
static
void
kmallocthread(void *sm, unsigned long num)
@@ -69,15 +81,16 @@ kmallocthread(void *sm, unsigned long num)
int i;
for (i=0; i<NTRIES; i++) {
PROGRESS(i);
ptr = kmalloc(ITEMSIZE);
if (ptr==NULL) {
if (sem) {
kprintf("thread %lu: kmalloc returned NULL\n",
num);
goto done;
panic("kmalloc test failed");
}
kprintf("kmalloc returned null; test failed.\n");
goto done;
panic("kmalloc test failed");
}
if (oldptr2) {
kfree(oldptr2);
@@ -85,7 +98,7 @@ kmallocthread(void *sm, unsigned long num)
oldptr2 = oldptr;
oldptr = ptr;
}
done:
if (oldptr2) {
kfree(oldptr2);
}
@@ -105,7 +118,8 @@ kmalloctest(int nargs, char **args)
kprintf("Starting kmalloc test...\n");
kmallocthread(NULL, 0);
kprintf("kmalloc test done\n");
kprintf("\n");
success(TEST161_SUCCESS, SECRET, "km1");
return 0;
}
@@ -140,7 +154,8 @@ kmallocstress(int nargs, char **args)
}
sem_destroy(sem);
kprintf("kmalloc stress test done\n");
kprintf("\n");
success(TEST161_SUCCESS, SECRET, "km2");
return 0;
}
@@ -252,6 +267,7 @@ kmalloctest3(int nargs, char **args)
curpos = 0;
cursizeindex = 0;
for (i=0; i<numptrs; i++) {
PROGRESS(i);
cursize = sizes[cursizeindex];
ptr = ptrblocks[curblock][curpos];
KASSERT(ptr != NULL);
@@ -282,13 +298,15 @@ kmalloctest3(int nargs, char **args)
/* Free the lower tier. */
for (i=0; i<numptrblocks; i++) {
PROGRESS(i);
KASSERT(ptrblocks[i] != NULL);
kfree(ptrblocks[i]);
}
/* Free the upper tier. */
kfree(ptrblocks);
kprintf("kmalloctest3: passed\n");
kprintf("\n");
success(TEST161_SUCCESS, SECRET, "km3");
return 0;
}
@@ -300,20 +318,24 @@ void
kmalloctest4thread(void *sm, unsigned long num)
{
#define NUM_KM4_SIZES 5
#define ITERATIONS 50
static const unsigned sizes[NUM_KM4_SIZES] = { 1, 3, 5, 2, 4 };
struct semaphore *sem = sm;
void *ptrs[NUM_KM4_SIZES];
unsigned p, q;
unsigned i;
unsigned i, j, k;
uint32_t magic;
for (i=0; i<NUM_KM4_SIZES; i++) {
ptrs[i] = NULL;
}
p = 0;
q = NUM_KM4_SIZES / 2;
magic = random();
for (i=0; i<NTRIES; i++) {
PROGRESS(i);
if (ptrs[q] != NULL) {
kfree(ptrs[q]);
ptrs[q] = NULL;
@@ -324,6 +346,24 @@ kmalloctest4thread(void *sm, unsigned long num)
"allocating %u pages failed\n",
num, sizes[p]);
}
// Write to each page of the allocated memory and make sure nothing
// overwrites it.
for (k = 0; k < sizes[p]; k++) {
*((uint32_t *)ptrs[p] + k*PAGE_SIZE/sizeof(uint32_t)) = magic;
}
for (j = 0; j < ITERATIONS; j++) {
random_yielder(4);
for (k = 0; k < sizes[p]; k++) {
uint32_t actual = *((uint32_t *)ptrs[p] + k*PAGE_SIZE/sizeof(uint32_t));
if (actual != magic) {
panic("km4: expected %u got %u. Your VM is broken!",
magic, actual);
}
}
}
magic++;
p = (p + 1) % NUM_KM4_SIZES;
q = (q + 1) % NUM_KM4_SIZES;
}
@@ -375,6 +415,193 @@ kmalloctest4(int nargs, char **args)
}
sem_destroy(sem);
kprintf("Multipage kmalloc test done\n");
kprintf("\n");
success(TEST161_SUCCESS, SECRET, "km4");
return 0;
}
static inline
void
km5_usage()
{
kprintf("usage: km5 [--avail <num_pages>] [--kernel <num_pages>]\n");
}
/*
* Allocate and free all physical memory a number of times. Along the way, we
* check coremap_used_bytes to make sure it's reporting the number we're
* expecting.
*/
int
kmalloctest5(int nargs, char **args)
{
#define KM5_ITERATIONS 5
// We're expecting an even number of arguments, less arg[0].
if (nargs > 5 || (nargs % 2) == 0) {
km5_usage();
return 0;
}
unsigned avail_page_slack = 0, kernel_page_limit = 0;
int arg = 1;
while (arg < nargs) {
if (strcmp(args[arg], "--avail") == 0) {
arg++;
avail_page_slack = atoi(args[arg++]);
} else if (strcmp(args[arg], "--kernel") == 0) {
arg++;
kernel_page_limit = atoi(args[arg++]);
} else {
km5_usage();
return 0;
}
}
#if OPT_DUMBVM
kprintf("(This test will not work with dumbvm)\n");
#endif
// First, we need to figure out how much memory we're running with and how
// much space it will take up if we maintain a pointer to each allocated
// page. We do something similar to km3 - for 32 bit systems with
// PAGE_SIZE == 4096, we can store 1024 pointers on a page. We keep an array
// of page size blocks of pointers which in total can hold enough pointers
// for each page of available physical memory.
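// Worked example of the sizing (numbers only, no new behavior): with
// PAGE_SIZE == 4096 and 4-byte pointers, ptrs_per_page == 1024.  A full
// 16 MB sys161 instance has at most 16 MB / 4 KB == 4096 allocatable
// pages, so num_ptr_blocks == 4096 / 1024 == 4, matching the
// "(sys161 16M max => 4 blocks)" note below.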
unsigned orig_used, ptrs_per_page, num_ptr_blocks, max_pages;
unsigned total_ram, avail_ram, magic, orig_magic, known_pages;
ptrs_per_page = PAGE_SIZE / sizeof(void *);
total_ram = mainbus_ramsize();
avail_ram = total_ram - (uint32_t)(firstfree - MIPS_KSEG0);
max_pages = (avail_ram + PAGE_SIZE-1) / PAGE_SIZE;
num_ptr_blocks = (max_pages + ptrs_per_page-1) / ptrs_per_page;
// The array can go on the stack, we won't have that many
// (sys161 16M max => 4 blocks)
void **ptrs[num_ptr_blocks];
for (unsigned i = 0; i < num_ptr_blocks; i++) {
ptrs[i] = kmalloc(PAGE_SIZE);
if (ptrs[i] == NULL) {
panic("Can't allocate ptr page!");
}
bzero(ptrs[i], PAGE_SIZE);
}
kprintf("km5 --> phys ram: %uk avail ram: %uk (%u pages) ptr blocks: %u\n", total_ram/1024,
avail_ram/1024, max_pages, num_ptr_blocks);
// Initially, there must be at least 1 page allocated for each thread stack,
// one page for kmalloc for this thread struct, plus what we just allocated.
// This probably isn't the GLB, but it's a decent lower bound.
orig_used = coremap_used_bytes();
known_pages = num_cpus + num_ptr_blocks + 1;
if (orig_used < known_pages * PAGE_SIZE) {
panic ("Not enough pages initially allocated");
}
if ((orig_used % PAGE_SIZE) != 0) {
panic("Coremap used bytes should be a multiple of PAGE_SIZE");
}
// Test for kernel bloat.
if (kernel_page_limit > 0) {
uint32_t kpages = (total_ram - avail_ram + PAGE_SIZE) / PAGE_SIZE;
if (kpages > kernel_page_limit) {
panic("You're kernel is bloated! Max allowed pages: %d, used pages: %d",
kernel_page_limit, kpages);
}
}
orig_magic = magic = random();
for (int i = 0; i < KM5_ITERATIONS; i++) {
// Step 1: allocate all physical memory, with checks along the way
unsigned int block, pos, oom, pages, used, prev;
void *page;
block = pos = oom = pages = used = 0;
prev = orig_used;
while (pages < max_pages+1) {
PROGRESS(pages);
page = kmalloc(PAGE_SIZE);
if (page == NULL) {
oom = 1;
break;
}
// Make sure we can write to the page
*(uint32_t *)page = magic++;
// Make sure the number of used bytes is going up, and by increments of PAGE_SIZE
used = coremap_used_bytes();
if (used != prev + PAGE_SIZE) {
panic("Allocation not equal to PAGE_SIZE. prev: %u used: %u", prev, used);
}
prev = used;
ptrs[block][pos] = page;
pos++;
if (pos >= ptrs_per_page) {
pos = 0;
block++;
}
pages++;
}
// Step 2: Check that we were able to allocate a reasonable number of pages
unsigned expected;
if (avail_page_slack > 0 ) {
// max avail pages, minus what we can prove is already allocated, minus some slack
expected = max_pages - (known_pages + avail_page_slack);
} else {
// At the very least, just so we know things are working.
expected = 3;
}
if (pages < expected) {
panic("Expected to allocate at least %d pages, only allocated %d",
expected, pages);
}
// We tried to allocate 1 more page than is available in physical memory. That
// should fail unless you're swapping out kernel pages, which you should
// probably not be doing.
if (!oom) {
panic("Allocated more pages than physical memory. Are you swapping kernel pages?");
}
// Step 3: free everything and check that we're back to where we started
for (block = 0; block < num_ptr_blocks; block++) {
for (pos = 0; pos < ptrs_per_page; pos++) {
if (ptrs[block][pos] != NULL) {
// Make sure we got unique addresses
if ((*(uint32_t *)ptrs[block][pos]) != orig_magic++) {
panic("km5: expected %u got %u - your VM is broken!",
orig_magic-1, (*(uint32_t *)ptrs[block][pos]));
}
kfree(ptrs[block][pos]);
}
}
}
// Check that we're back to where we started
used = coremap_used_bytes();
if (used != orig_used) {
panic("orig (%u) != used (%u)", orig_used, used);
}
}
//Clean up the pointer blocks
for (unsigned i = 0; i < num_ptr_blocks; i++) {
kfree(ptrs[i]);
}
kprintf("\n");
success(TEST161_SUCCESS, SECRET, "km5");
return 0;
}

View File

@@ -29,6 +29,9 @@
/*
* Synchronization test code.
*
* All the contents of this file are overwritten during automated
* testing. Please consider this before changing anything in this file.
*/
#include <types.h>
@@ -37,185 +40,303 @@
#include <thread.h>
#include <synch.h>
#include <test.h>
#include <kern/test161.h>
#include <spinlock.h>
#define CREATELOOPS 8
#define NSEMLOOPS 63
#define NLOCKLOOPS 120
#define NCVLOOPS 5
#define NTHREADS 32
#define SYNCHTEST_YIELDER_MAX 16
static volatile unsigned long testval1;
static volatile unsigned long testval2;
static volatile unsigned long testval3;
static struct semaphore *testsem;
static struct lock *testlock;
static struct cv *testcv;
static struct semaphore *donesem;
static volatile int32_t testval4;
static struct semaphore *testsem = NULL;
static struct semaphore *testsem2 = NULL;
static struct lock *testlock = NULL;
static struct lock *testlock2 = NULL;
static struct cv *testcv = NULL;
static struct semaphore *donesem = NULL;
struct spinlock status_lock;
static bool test_status = TEST161_FAIL;
static unsigned long semtest_current;
static
void
inititems(void)
{
if (testsem==NULL) {
testsem = sem_create("testsem", 2);
if (testsem == NULL) {
panic("synchtest: sem_create failed\n");
}
}
if (testlock==NULL) {
testlock = lock_create("testlock");
if (testlock == NULL) {
panic("synchtest: lock_create failed\n");
}
}
if (testcv==NULL) {
testcv = cv_create("testlock");
if (testcv == NULL) {
panic("synchtest: cv_create failed\n");
}
}
if (donesem==NULL) {
donesem = sem_create("donesem", 0);
if (donesem == NULL) {
panic("synchtest: sem_create failed\n");
}
bool
failif(bool condition) {
if (condition) {
spinlock_acquire(&status_lock);
test_status = TEST161_FAIL;
spinlock_release(&status_lock);
}
return condition;
}
static
void
semtestthread(void *junk, unsigned long num)
{
int i;
(void)junk;
int i;
random_yielder(4);
/*
* Only one of these should print at a time.
*/
P(testsem);
kprintf("Thread %2lu: ", num);
semtest_current = num;
kprintf_n("Thread %2lu: ", num);
for (i=0; i<NSEMLOOPS; i++) {
kprintf("%c", (int)num+64);
kprintf_t(".");
kprintf_n("%2lu", num);
random_yielder(4);
failif((semtest_current != num));
}
kprintf("\n");
kprintf_n("\n");
V(donesem);
}
int
semtest(int nargs, char **args)
{
int i, result;
(void)nargs;
(void)args;
inititems();
kprintf("Starting semaphore test...\n");
kprintf("If this hangs, it's broken: ");
int i, result;
kprintf_n("Starting sem1...\n");
for (i=0; i<CREATELOOPS; i++) {
kprintf_t(".");
testsem = sem_create("testsem", 2);
if (testsem == NULL) {
panic("sem1: sem_create failed\n");
}
donesem = sem_create("donesem", 0);
if (donesem == NULL) {
panic("sem1: sem_create failed\n");
}
if (i != CREATELOOPS - 1) {
sem_destroy(testsem);
sem_destroy(donesem);
}
}
spinlock_init(&status_lock);
test_status = TEST161_SUCCESS;
kprintf_n("If this hangs, it's broken: ");
P(testsem);
P(testsem);
kprintf("ok\n");
kprintf_n("OK\n");
kprintf_t(".");
for (i=0; i<NTHREADS; i++) {
kprintf_t(".");
result = thread_fork("semtest", NULL, semtestthread, NULL, i);
if (result) {
panic("semtest: thread_fork failed: %s\n",
panic("sem1: thread_fork failed: %s\n",
strerror(result));
}
}
for (i=0; i<NTHREADS; i++) {
kprintf_t(".");
V(testsem);
P(donesem);
}
/* so we can run it again */
V(testsem);
V(testsem);
sem_destroy(testsem);
sem_destroy(donesem);
testsem = donesem = NULL;
kprintf_t("\n");
success(test_status, SECRET, "sem1");
kprintf("Semaphore test done.\n");
return 0;
}
static
void
fail(unsigned long num, const char *msg)
{
kprintf("thread %lu: Mismatch on %s\n", num, msg);
kprintf("Test failed\n");
lock_release(testlock);
V(donesem);
thread_exit();
}
static
void
locktestthread(void *junk, unsigned long num)
{
int i;
(void)junk;
int i;
for (i=0; i<NLOCKLOOPS; i++) {
kprintf_t(".");
lock_acquire(testlock);
random_yielder(4);
testval1 = num;
testval2 = num*num;
testval3 = num%3;
if (testval2 != testval1*testval1) {
fail(num, "testval2/testval1");
goto fail;
}
random_yielder(4);
if (testval2%3 != (testval3*testval3)%3) {
fail(num, "testval2/testval3");
goto fail;
}
random_yielder(4);
if (testval3 != testval1%3) {
fail(num, "testval3/testval1");
goto fail;
}
random_yielder(4);
if (testval1 != num) {
fail(num, "testval1/num");
goto fail;
}
random_yielder(4);
if (testval2 != num*num) {
fail(num, "testval2/num");
goto fail;
}
random_yielder(4);
if (testval3 != num%3) {
fail(num, "testval3/num");
goto fail;
}
random_yielder(4);
if (!(lock_do_i_hold(testlock))) {
goto fail;
}
random_yielder(4);
lock_release(testlock);
}
/* Check for solutions that don't track ownership properly */
for (i=0; i<NLOCKLOOPS; i++) {
kprintf_t(".");
if (lock_do_i_hold(testlock)) {
goto fail2;
}
}
V(donesem);
return;
fail:
lock_release(testlock);
fail2:
failif(true);
V(donesem);
return;
}
int
locktest(int nargs, char **args)
{
int i, result;
(void)nargs;
(void)args;
inititems();
kprintf("Starting lock test...\n");
int i, result;
kprintf_n("Starting lt1...\n");
for (i=0; i<CREATELOOPS; i++) {
kprintf_t(".");
testlock = lock_create("testlock");
if (testlock == NULL) {
panic("lt1: lock_create failed\n");
}
donesem = sem_create("donesem", 0);
if (donesem == NULL) {
panic("lt1: sem_create failed\n");
}
if (i != CREATELOOPS - 1) {
lock_destroy(testlock);
sem_destroy(donesem);
}
}
spinlock_init(&status_lock);
test_status = TEST161_SUCCESS;
for (i=0; i<NTHREADS; i++) {
result = thread_fork("synchtest", NULL, locktestthread,
NULL, i);
kprintf_t(".");
result = thread_fork("synchtest", NULL, locktestthread, NULL, i);
if (result) {
panic("locktest: thread_fork failed: %s\n",
strerror(result));
panic("lt1: thread_fork failed: %s\n", strerror(result));
}
}
for (i=0; i<NTHREADS; i++) {
kprintf_t(".");
P(donesem);
}
kprintf("Lock test done.\n");
lock_destroy(testlock);
sem_destroy(donesem);
testlock = NULL;
donesem = NULL;
kprintf_t("\n");
success(test_status, SECRET, "lt1");
return 0;
}
int
locktest2(int nargs, char **args) {
(void)nargs;
(void)args;
kprintf_n("Starting lt2...\n");
kprintf_n("(This test panics on success!)\n");
testlock = lock_create("testlock");
if (testlock == NULL) {
panic("lt2: lock_create failed\n");
}
secprintf(SECRET, "Should panic...", "lt2");
lock_release(testlock);
/* Should not get here on success. */
success(TEST161_FAIL, SECRET, "lt2");
lock_destroy(testlock);
testlock = NULL;
return 0;
}
int
locktest3(int nargs, char **args) {
(void)nargs;
(void)args;
kprintf_n("Starting lt3...\n");
kprintf_n("(This test panics on success!)\n");
testlock = lock_create("testlock");
if (testlock == NULL) {
panic("lt3: lock_create failed\n");
}
secprintf(SECRET, "Should panic...", "lt3");
lock_acquire(testlock);
lock_destroy(testlock);
/* Should not get here on success. */
success(TEST161_FAIL, SECRET, "lt3");
testlock = NULL;
return 0;
}
@@ -224,35 +345,38 @@ static
void
cvtestthread(void *junk, unsigned long num)
{
(void)junk;
int i;
volatile int j;
struct timespec ts1, ts2;
(void)junk;
for (i=0; i<NCVLOOPS; i++) {
kprintf_t(".");
lock_acquire(testlock);
while (testval1 != num) {
testval2 = 0;
random_yielder(4);
gettime(&ts1);
cv_wait(testcv, testlock);
gettime(&ts2);
random_yielder(4);
/* ts2 -= ts1 */
timespec_sub(&ts2, &ts1, &ts2);
/* Require at least 2000 cpu cycles (we're at 25 MHz) */
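/* At 25 MHz a cycle is 40 ns, so 2000 cycles = 40*2000 = 80000 ns. */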
if (ts2.tv_sec == 0 && ts2.tv_nsec < 40*2000) {
kprintf("cv_wait took only %u ns\n",
ts2.tv_nsec);
kprintf("That's too fast... you must be "
"busy-looping\n");
kprintf_n("cv_wait took only %u ns\n", ts2.tv_nsec);
kprintf_n("That's too fast... you must be busy-looping\n");
failif(true);
V(donesem);
thread_exit();
}
testval2 = 0xFFFFFFFF;
}
kprintf("Thread %lu\n", num);
testval1 = (testval1 + NTHREADS - 1)%NTHREADS;
testval2 = num;
/*
* loop a little while to make sure we can measure the
@@ -260,7 +384,13 @@ cvtestthread(void *junk, unsigned long num)
*/
for (j=0; j<3000; j++);
random_yielder(4);
cv_broadcast(testcv, testlock);
random_yielder(4);
failif((testval1 != testval2));
kprintf_n("Thread %lu\n", testval2);
testval1 = (testval1 + NTHREADS - 1) % NTHREADS;
lock_release(testlock);
}
V(donesem);
@@ -269,30 +399,57 @@ cvtestthread(void *junk, unsigned long num)
int
cvtest(int nargs, char **args)
{
int i, result;
(void)nargs;
(void)args;
inititems();
kprintf("Starting CV test...\n");
kprintf("Threads should print out in reverse order.\n");
int i, result;
kprintf_n("Starting cvt1...\n");
for (i=0; i<CREATELOOPS; i++) {
kprintf_t(".");
testlock = lock_create("testlock");
if (testlock == NULL) {
panic("cvt1: lock_create failed\n");
}
testcv = cv_create("testcv");
if (testcv == NULL) {
panic("cvt1: cv_create failed\n");
}
donesem = sem_create("donesem", 0);
if (donesem == NULL) {
panic("cvt1: sem_create failed\n");
}
if (i != CREATELOOPS - 1) {
lock_destroy(testlock);
cv_destroy(testcv);
sem_destroy(donesem);
}
}
spinlock_init(&status_lock);
test_status = TEST161_SUCCESS;
testval1 = NTHREADS-1;
for (i=0; i<NTHREADS; i++) {
result = thread_fork("synchtest", NULL, cvtestthread, NULL, i);
kprintf_t(".");
result = thread_fork("cvt1", NULL, cvtestthread, NULL, (long unsigned) i);
if (result) {
panic("cvtest: thread_fork failed: %s\n",
strerror(result));
panic("cvt1: thread_fork failed: %s\n", strerror(result));
}
}
for (i=0; i<NTHREADS; i++) {
kprintf_t(".");
P(donesem);
}
kprintf("CV test done\n");
lock_destroy(testlock);
cv_destroy(testcv);
sem_destroy(donesem);
testlock = NULL;
testcv = NULL;
donesem = NULL;
kprintf_t("\n");
success(test_status, SECRET, "cvt1");
return 0;
}
@@ -318,19 +475,28 @@ static
void
sleepthread(void *junk1, unsigned long junk2)
{
unsigned i, j;
(void)junk1;
(void)junk2;
unsigned i, j;
random_yielder(4);
for (j=0; j<NLOOPS; j++) {
kprintf_t(".");
for (i=0; i<NCVS; i++) {
lock_acquire(testlocks[i]);
random_yielder(4);
V(gatesem);
random_yielder(4);
spinlock_acquire(&status_lock);
testval4++;
spinlock_release(&status_lock);
cv_wait(testcvs[i], testlocks[i]);
random_yielder(4);
lock_release(testlocks[i]);
}
kprintf("sleepthread: %u\n", j);
kprintf_n("sleepthread: %u\n", j);
}
V(exitsem);
}
@@ -339,19 +505,28 @@ static
void
wakethread(void *junk1, unsigned long junk2)
{
unsigned i, j;
(void)junk1;
(void)junk2;
unsigned i, j;
random_yielder(4);
for (j=0; j<NLOOPS; j++) {
kprintf_t(".");
for (i=0; i<NCVS; i++) {
random_yielder(4);
P(gatesem);
random_yielder(4);
lock_acquire(testlocks[i]);
random_yielder(4);
testval4--;
failif((testval4 != 0));
cv_signal(testcvs[i], testlocks[i]);
random_yielder(4);
lock_release(testlocks[i]);
}
kprintf("wakethread: %u\n", j);
kprintf_n("wakethread: %u\n", j);
}
V(exitsem);
}
@@ -359,30 +534,44 @@ wakethread(void *junk1, unsigned long junk2)
int
cvtest2(int nargs, char **args)
{
unsigned i;
int result;
(void)nargs;
(void)args;
unsigned i;
int result;
kprintf_n("Starting cvt2...\n");
for (i=0; i<CREATELOOPS; i++) {
kprintf_t(".");
gatesem = sem_create("gatesem", 0);
if (gatesem == NULL) {
panic("cvt2: sem_create failed\n");
}
exitsem = sem_create("exitsem", 0);
if (exitsem == NULL) {
panic("cvt2: sem_create failed\n");
}
if (i != CREATELOOPS - 1) {
sem_destroy(gatesem);
sem_destroy(exitsem);
}
}
for (i=0; i<NCVS; i++) {
kprintf_t(".");
testlocks[i] = lock_create("cvtest2 lock");
testcvs[i] = cv_create("cvtest2 cv");
}
gatesem = sem_create("gatesem", 0);
exitsem = sem_create("exitsem", 0);
spinlock_init(&status_lock);
test_status = TEST161_SUCCESS;
kprintf("cvtest2...\n");
result = thread_fork("cvtest2", NULL, sleepthread, NULL, 0);
result = thread_fork("cvt2", NULL, sleepthread, NULL, 0);
if (result) {
panic("cvtest2: thread_fork failed\n");
panic("cvt2: thread_fork failed\n");
}
result = thread_fork("cvtest2", NULL, wakethread, NULL, 0);
result = thread_fork("cvt2", NULL, wakethread, NULL, 0);
if (result) {
panic("cvtest2: thread_fork failed\n");
panic("cvt2: thread_fork failed\n");
}
P(exitsem);
P(exitsem);
@@ -390,12 +579,194 @@ cvtest2(int nargs, char **args)
sem_destroy(gatesem);
exitsem = gatesem = NULL;
for (i=0; i<NCVS; i++) {
kprintf_t(".");
lock_destroy(testlocks[i]);
cv_destroy(testcvs[i]);
testlocks[i] = NULL;
testcvs[i] = NULL;
}
kprintf("cvtest2 done\n");
kprintf_t("\n");
success(test_status, SECRET, "cvt2");
return 0;
}
int
cvtest3(int nargs, char **args) {
(void)nargs;
(void)args;
kprintf_n("Starting cvt3...\n");
kprintf_n("(This test panics on success!)\n");
testlock = lock_create("testlock");
if (testlock == NULL) {
panic("cvt3: lock_create failed\n");
}
testcv = cv_create("testcv");
if (testcv == NULL) {
panic("cvt3: cv_create failed\n");
}
secprintf(SECRET, "Should panic...", "cvt3");
cv_wait(testcv, testlock);
/* Should not get here on success. */
success(TEST161_FAIL, SECRET, "cvt3");
lock_destroy(testlock);
cv_destroy(testcv);
testcv = NULL;
testlock = NULL;
return 0;
}
int
cvtest4(int nargs, char **args) {
(void)nargs;
(void)args;
kprintf_n("Starting cvt4...\n");
kprintf_n("(This test panics on success!)\n");
testlock = lock_create("testlock");
if (testlock == NULL) {
panic("cvt4: lock_create failed\n");
}
testcv = cv_create("testcv");
if (testcv == NULL) {
panic("cvt4: cv_create failed\n");
}
secprintf(SECRET, "Should panic...", "cvt4");
cv_broadcast(testcv, testlock);
/* Should not get here on success. */
success(TEST161_FAIL, SECRET, "cvt4");
lock_destroy(testlock);
cv_destroy(testcv);
testcv = NULL;
testlock = NULL;
return 0;
}
static
void
sleeperthread(void *junk1, unsigned long junk2) {
(void)junk1;
(void)junk2;
random_yielder(4);
lock_acquire(testlock);
random_yielder(4);
failif((testval1 != 0));
testval1 = 1;
cv_signal(testcv, testlock);
random_yielder(4);
cv_wait(testcv, testlock);
failif((testval1 != 3));
testval1 = 4;
random_yielder(4);
lock_release(testlock);
random_yielder(4);
V(exitsem);
}
static
void
wakerthread(void *junk1, unsigned long junk2) {
(void)junk1;
(void)junk2;
random_yielder(4);
lock_acquire(testlock2);
failif((testval1 != 2));
testval1 = 3;
random_yielder(4);
cv_signal(testcv, testlock2);
random_yielder(4);
lock_release(testlock2);
random_yielder(4);
V(exitsem);
}
int
cvtest5(int nargs, char **args) {
(void)nargs;
(void)args;
int result;
kprintf_n("Starting cvt5...\n");
testlock = lock_create("testlock");
if (testlock == NULL) {
panic("cvt5: lock_create failed\n");
}
testlock2 = lock_create("testlock2");
if (testlock2 == NULL) {
panic("cvt5: lock_create failed\n");
}
testcv = cv_create("testcv");
if (testcv == NULL) {
panic("cvt5: cv_create failed\n");
}
exitsem = sem_create("exitsem", 0);
if (exitsem == NULL) {
panic("cvt5: sem_create failed\n");
}
spinlock_init(&status_lock);
test_status = TEST161_SUCCESS;
testval1 = 0;
lock_acquire(testlock);
lock_acquire(testlock2);
result = thread_fork("cvt5", NULL, sleeperthread, NULL, 0);
if (result) {
panic("cvt5: thread_fork failed\n");
}
result = thread_fork("cvt5", NULL, wakerthread, NULL, 0);
if (result) {
panic("cvt5: thread_fork failed\n");
}
random_yielder(4);
cv_wait(testcv, testlock);
failif((testval1 != 1));
testval1 = 2;
random_yielder(4);
lock_release(testlock);
random_yielder(4);
lock_release(testlock2);
P(exitsem);
P(exitsem);
failif((testval1 != 4));
sem_destroy(exitsem);
cv_destroy(testcv);
lock_destroy(testlock2);
lock_destroy(testlock);
success(test_status, SECRET, "cvt5");
exitsem = NULL;
testcv = NULL;
testlock2 = NULL;
testlock = NULL;
testsem2 = NULL;
testsem = NULL;
return 0;
}

View File

@@ -66,10 +66,7 @@ fakethread_create(const char *name)
}
/* ignore most of the fields, zero everything for tidiness */
bzero(t, sizeof(*t));
t->t_name = kstrdup(name);
if (t->t_name == NULL) {
panic("threadlisttest: Out of memory\n");
}
strcpy(t->t_name, name);
t->t_stack = FAKE_MAGIC;
threadlistnode_init(&t->t_listnode, t);
return t;
@@ -84,7 +81,6 @@ fakethread_destroy(struct thread *t)
{
KASSERT(t->t_stack == FAKE_MAGIC);
threadlistnode_cleanup(&t->t_listnode);
kfree(t->t_name);
kfree(t);
}

View File

@@ -47,18 +47,18 @@
struct semaphore *
sem_create(const char *name, unsigned initial_count)
{
struct semaphore *sem;
struct semaphore *sem;
sem = kmalloc(sizeof(*sem));
if (sem == NULL) {
return NULL;
}
sem = kmalloc(sizeof(*sem));
if (sem == NULL) {
return NULL;
}
sem->sem_name = kstrdup(name);
if (sem->sem_name == NULL) {
kfree(sem);
return NULL;
}
sem->sem_name = kstrdup(name);
if (sem->sem_name == NULL) {
kfree(sem);
return NULL;
}
sem->sem_wchan = wchan_create(sem->sem_name);
if (sem->sem_wchan == NULL) {
@@ -68,39 +68,39 @@ sem_create(const char *name, unsigned initial_count)
}
spinlock_init(&sem->sem_lock);
sem->sem_count = initial_count;
sem->sem_count = initial_count;
return sem;
return sem;
}
void
sem_destroy(struct semaphore *sem)
{
KASSERT(sem != NULL);
KASSERT(sem != NULL);
/* wchan_cleanup will assert if anyone's waiting on it */
spinlock_cleanup(&sem->sem_lock);
wchan_destroy(sem->sem_wchan);
kfree(sem->sem_name);
kfree(sem);
kfree(sem->sem_name);
kfree(sem);
}
void
P(struct semaphore *sem)
{
KASSERT(sem != NULL);
KASSERT(sem != NULL);
/*
* May not block in an interrupt handler.
*
* For robustness, always check, even if we can actually
* complete the P without blocking.
*/
KASSERT(curthread->t_in_interrupt == false);
/*
* May not block in an interrupt handler.
*
* For robustness, always check, even if we can actually
* complete the P without blocking.
*/
KASSERT(curthread->t_in_interrupt == false);
/* Use the semaphore spinlock to protect the wchan as well. */
spinlock_acquire(&sem->sem_lock);
while (sem->sem_count == 0) {
while (sem->sem_count == 0) {
/*
*
* Note that we don't maintain strict FIFO ordering of
@@ -114,21 +114,21 @@ P(struct semaphore *sem)
* ordering?
*/
wchan_sleep(sem->sem_wchan, &sem->sem_lock);
}
KASSERT(sem->sem_count > 0);
sem->sem_count--;
}
KASSERT(sem->sem_count > 0);
sem->sem_count--;
spinlock_release(&sem->sem_lock);
}
void
V(struct semaphore *sem)
{
KASSERT(sem != NULL);
KASSERT(sem != NULL);
spinlock_acquire(&sem->sem_lock);
sem->sem_count++;
KASSERT(sem->sem_count > 0);
sem->sem_count++;
KASSERT(sem->sem_count > 0);
wchan_wakeone(sem->sem_wchan, &sem->sem_lock);
spinlock_release(&sem->sem_lock);
@@ -141,59 +141,59 @@ V(struct semaphore *sem)
struct lock *
lock_create(const char *name)
{
struct lock *lock;
struct lock *lock;
lock = kmalloc(sizeof(*lock));
if (lock == NULL) {
return NULL;
}
lock = kmalloc(sizeof(*lock));
if (lock == NULL) {
return NULL;
}
lock->lk_name = kstrdup(name);
if (lock->lk_name == NULL) {
kfree(lock);
return NULL;
}
lock->lk_name = kstrdup(name);
if (lock->lk_name == NULL) {
kfree(lock);
return NULL;
}
// add stuff here as needed
// add stuff here as needed
return lock;
return lock;
}
void
lock_destroy(struct lock *lock)
{
KASSERT(lock != NULL);
KASSERT(lock != NULL);
// add stuff here as needed
// add stuff here as needed
kfree(lock->lk_name);
kfree(lock);
kfree(lock->lk_name);
kfree(lock);
}
void
lock_acquire(struct lock *lock)
{
// Write this
// Write this
(void)lock; // suppress warning until code gets written
(void)lock; // suppress warning until code gets written
}
void
lock_release(struct lock *lock)
{
// Write this
// Write this
(void)lock; // suppress warning until code gets written
(void)lock; // suppress warning until code gets written
}
bool
lock_do_i_hold(struct lock *lock)
{
// Write this
// Write this
(void)lock; // suppress warning until code gets written
(void)lock; // suppress warning until code gets written
return true; // dummy until code gets written
return true; // dummy until code gets written
}
////////////////////////////////////////////////////////////
@@ -204,47 +204,47 @@ lock_do_i_hold(struct lock *lock)
struct cv *
cv_create(const char *name)
{
struct cv *cv;
struct cv *cv;
cv = kmalloc(sizeof(*cv));
if (cv == NULL) {
return NULL;
}
cv = kmalloc(sizeof(*cv));
if (cv == NULL) {
return NULL;
}
cv->cv_name = kstrdup(name);
if (cv->cv_name==NULL) {
kfree(cv);
return NULL;
}
cv->cv_name = kstrdup(name);
if (cv->cv_name==NULL) {
kfree(cv);
return NULL;
}
// add stuff here as needed
// add stuff here as needed
return cv;
return cv;
}
void
cv_destroy(struct cv *cv)
{
KASSERT(cv != NULL);
KASSERT(cv != NULL);
// add stuff here as needed
// add stuff here as needed
kfree(cv->cv_name);
kfree(cv);
kfree(cv->cv_name);
kfree(cv);
}
void
cv_wait(struct cv *cv, struct lock *lock)
{
// Write this
(void)cv; // suppress warning until code gets written
(void)lock; // suppress warning until code gets written
// Write this
(void)cv; // suppress warning until code gets written
(void)lock; // suppress warning until code gets written
}
void
cv_signal(struct cv *cv, struct lock *lock)
{
// Write this
// Write this
(void)cv; // suppress warning until code gets written
(void)lock; // suppress warning until code gets written
}

View File

@@ -65,10 +65,16 @@ struct wchan {
DECLARRAY(cpu, static __UNUSED inline);
DEFARRAY(cpu, static __UNUSED inline);
static struct cpuarray allcpus;
unsigned num_cpus;
/* Used to wait for secondary CPUs to come online. */
static struct semaphore *cpu_startup_sem;
/* Used to synchronize exit cleanup. */
unsigned thread_count = 0;
static struct spinlock thread_count_lock = SPINLOCK_INITIALIZER;
static struct wchan *thread_count_wchan;
////////////////////////////////////////////////////////////
/*
@@ -119,17 +125,16 @@ thread_create(const char *name)
struct thread *thread;
DEBUGASSERT(name != NULL);
if (strlen(name) >= MAX_NAME_LENGTH) {
return NULL;
}
thread = kmalloc(sizeof(*thread));
if (thread == NULL) {
return NULL;
}
thread->t_name = kstrdup(name);
if (thread->t_name == NULL) {
kfree(thread);
return NULL;
}
strcpy(thread->t_name, name);
thread->t_wchan_name = "NEW";
thread->t_state = S_READY;
@@ -256,6 +261,9 @@ cpu_create(unsigned hardware_number)
* Nor can it be called on a running thread.
*
* (Freeing the stack you're actually using to run is ... inadvisable.)
*
* Thread destroy should finish the process of cleaning up a thread started by
* thread_exit.
*/
static
void
@@ -264,11 +272,6 @@ thread_destroy(struct thread *thread)
KASSERT(thread != curthread);
KASSERT(thread->t_state != S_RUN);
/*
* If you add things to struct thread, be sure to clean them up
* either here or in thread_exit(). (And not both...)
*/
/* Thread subsystem fields */
KASSERT(thread->t_proc == NULL);
if (thread->t_stack != NULL) {
@@ -280,7 +283,6 @@ thread_destroy(struct thread *thread)
/* sheer paranoia */
thread->t_wchan_name = "DESTROYED";
kfree(thread->t_name);
kfree(thread);
}
@@ -411,8 +413,6 @@ cpu_hatch(unsigned software_number)
spl0();
cpu_identify(buf, sizeof(buf));
kprintf("cpu%u: %s\n", software_number, buf);
V(cpu_startup_sem);
thread_exit();
}
@@ -430,13 +430,26 @@ thread_start_cpus(void)
kprintf("cpu0: %s\n", buf);
cpu_startup_sem = sem_create("cpu_hatch", 0);
thread_count_wchan = wchan_create("thread_count");
mainbus_start_cpus();
for (i=0; i<cpuarray_num(&allcpus) - 1; i++) {
num_cpus = cpuarray_num(&allcpus);
for (i=0; i<num_cpus - 1; i++) {
P(cpu_startup_sem);
}
sem_destroy(cpu_startup_sem);
if (i == 0) {
kprintf("1 CPU online\n");
} else {
kprintf("%d CPUs online\n", i + 1);
}
cpu_startup_sem = NULL;
// Gross hack to deal with os/161 "idle" threads. Hardcode the thread count
// to 1 so the inc/dec properly works in thread_[fork/exit]. The one thread
// is the cpu0 boot thread (menu), which is the only thread that hasn't
// exited yet.
thread_count = 1;
}
/*
@@ -465,7 +478,7 @@ thread_make_runnable(struct thread *target, bool already_have_lock)
target->t_state = S_READY;
threadlist_addtail(&targetcpu->c_runqueue, target);
if (targetcpu->c_isidle && targetcpu != curcpu->c_self) {
if (targetcpu->c_isidle) {
/*
* Other processor is idle; send interrupt to make
* sure it unidles.
@@ -535,6 +548,11 @@ thread_fork(const char *name,
*/
newthread->t_iplhigh_count++;
spinlock_acquire(&thread_count_lock);
++thread_count;
wchan_wakeall(thread_count_wchan, &thread_count_lock);
spinlock_release(&thread_count_lock);
/* Set up the switchframe so entrypoint() gets called */
switchframe_init(newthread, entrypoint, data1, data2);
@@ -770,6 +788,13 @@ thread_startup(void (*entrypoint)(void *data1, unsigned long data2),
* should be cleaned up right away. The rest has to wait until
* thread_destroy is called from exorcise().
*
* Note that any dynamically-allocated structures that can vary in size from
* thread to thread should be cleaned up here, not in thread_destroy. This is
* because the last thread left on each core runs the idle loop and does not
* get cleaned up until new threads are created. Differences in the amount of
* memory used by different threads after thread_exit will make it look like
your kernel is leaking memory and cause some of the test161 checks to fail.
*
* Does not return.
*/
void
@@ -791,8 +816,16 @@ thread_exit(void)
/* Check the stack guard band. */
thread_checkstack(cur);
// Decrement the thread count and notify anyone interested.
if (thread_count) {
spinlock_acquire(&thread_count_lock);
--thread_count;
wchan_wakeall(thread_count_wchan, &thread_count_lock);
spinlock_release(&thread_count_lock);
}
/* Interrupts off on this processor */
splhigh();
splhigh();
thread_switch(S_ZOMBIE, NULL, NULL);
panic("braaaaaaaiiiiiiiiiiinssssss\n");
}
@@ -1106,9 +1139,6 @@ ipi_send(struct cpu *target, int code)
spinlock_release(&target->c_ipi_lock);
}
/*
* Send an IPI to all CPUs.
*/
void
ipi_broadcast(int code)
{
@@ -1123,28 +1153,16 @@ ipi_broadcast(int code)
}
}
/*
* Send a TLB shootdown IPI to the specified CPU.
*/
void
ipi_tlbshootdown(struct cpu *target, const struct tlbshootdown *mapping)
{
unsigned n;
int n;
spinlock_acquire(&target->c_ipi_lock);
n = target->c_numshootdown;
if (n == TLBSHOOTDOWN_MAX) {
/*
* If you have problems with this panic going off,
* consider: (1) increasing the maximum, (2) putting
* logic here to sleep until space appears (may
* interact awkwardly with VM system locking), (3)
* putting logic here to coalesce requests together,
* and/or (4) improving VM system state tracking to
* reduce the number of unnecessary shootdowns.
*/
panic("ipi_tlbshootdown: Too many shootdowns queued\n");
target->c_numshootdown = TLBSHOOTDOWN_ALL;
}
else {
target->c_shootdown[n] = *mapping;
@@ -1157,14 +1175,11 @@ ipi_tlbshootdown(struct cpu *target, const struct tlbshootdown *mapping)
spinlock_release(&target->c_ipi_lock);
}
/*
* Handle an incoming interprocessor interrupt.
*/
void
interprocessor_interrupt(void)
{
uint32_t bits;
unsigned i;
int i;
spinlock_acquire(&curcpu->c_ipi_lock);
bits = curcpu->c_ipi_pending;
@@ -1183,7 +1198,6 @@ interprocessor_interrupt(void)
curcpu->c_number);
}
spinlock_release(&curcpu->c_runqueue_lock);
kprintf("cpu%d: offline.\n", curcpu->c_number);
cpu_halt();
}
if (bits & (1U << IPI_UNIDLE)) {
@@ -1193,13 +1207,13 @@ interprocessor_interrupt(void)
*/
}
if (bits & (1U << IPI_TLBSHOOTDOWN)) {
/*
* Note: depending on your VM system locking you might
* need to release the ipi lock while calling
* vm_tlbshootdown.
*/
for (i=0; i<curcpu->c_numshootdown; i++) {
vm_tlbshootdown(&curcpu->c_shootdown[i]);
if (curcpu->c_numshootdown == TLBSHOOTDOWN_ALL) {
vm_tlbshootdown_all();
}
else {
for (i=0; i<curcpu->c_numshootdown; i++) {
vm_tlbshootdown(&curcpu->c_shootdown[i]);
}
}
curcpu->c_numshootdown = 0;
}
@@ -1207,3 +1221,15 @@ interprocessor_interrupt(void)
curcpu->c_ipi_pending = 0;
spinlock_release(&curcpu->c_ipi_lock);
}
/*
* Wait for the thread count to equal tc.
*/
void thread_wait_for_count(unsigned tc)
{
spinlock_acquire(&thread_count_lock);
while (thread_count != tc) {
wchan_sleep(thread_count_wchan, &thread_count_lock);
}
spinlock_release(&thread_count_lock);
}

View File

@@ -82,9 +82,6 @@ struct knowndev {
struct fs *kd_fs;
};
/* A placeholder for kd_fs for devices used as swap */
#define SWAP_FS ((struct fs *)-1)
DECLARRAY(knowndev, static __UNUSED inline);
DEFARRAY(knowndev, static __UNUSED inline);
@@ -163,7 +160,7 @@ vfs_sync(void)
num = knowndevarray_num(knowndevs);
for (i=0; i<num; i++) {
dev = knowndevarray_get(knowndevs, i);
if (dev->kd_fs != NULL && dev->kd_fs != SWAP_FS) {
if (dev->kd_fs != NULL) {
/*result =*/ FSOP_SYNC(dev->kd_fs);
}
}
@@ -198,7 +195,7 @@ vfs_getroot(const char *devname, struct vnode **ret)
* and DEVNAME names the device, return ENXIO.
*/
if (kd->kd_fs != NULL && kd->kd_fs != SWAP_FS) {
if (kd->kd_fs!=NULL) {
const char *volname;
volname = FSOP_GETVOLNAME(kd->kd_fs);
@@ -347,7 +344,7 @@ badnames(const char *n1, const char *n2, const char *n3)
for (i=0; i<num; i++) {
kd = knowndevarray_get(knowndevs, i);
if (kd->kd_fs != NULL && kd->kd_fs != SWAP_FS) {
if (kd->kd_fs) {
volname = FSOP_GETVOLNAME(kd->kd_fs);
if (samestring3(volname, n1, n2, n3)) {
return 1;
@@ -545,7 +542,6 @@ vfs_mount(const char *devname, void *data,
}
KASSERT(fs != NULL);
KASSERT(fs != SWAP_FS);
kd->kd_fs = fs;
@@ -557,59 +553,6 @@ vfs_mount(const char *devname, void *data,
return 0;
}
/*
* Like mount, but for attaching swap. Hands back the raw device
* vnode. Unlike mount tolerates a trailing colon on the device name,
* to avoid student-facing confusion.
*/
int
vfs_swapon(const char *devname, struct vnode **ret)
{
char *myname = NULL;
size_t len;
struct knowndev *kd;
int result;
len = strlen(devname);
if (len > 0 && devname[len - 1] == ':') {
/* tolerate trailing :, e.g. lhd0: rather than lhd0 */
myname = kstrdup(devname);
if (myname == NULL) {
return ENOMEM;
}
myname[len - 1] = 0;
devname = myname;
}
vfs_biglock_acquire();
result = findmount(devname, &kd);
if (result) {
goto out;
}
if (kd->kd_fs != NULL) {
result = EBUSY;
goto out;
}
KASSERT(kd->kd_rawname != NULL);
KASSERT(kd->kd_device != NULL);
kprintf("vfs: Swap attached to %s\n", kd->kd_name);
kd->kd_fs = SWAP_FS;
VOP_INCREF(kd->kd_vnode);
*ret = kd->kd_vnode;
out:
vfs_biglock_release();
if (myname != NULL) {
kfree(myname);
}
return result;
}
/*
* Unmount a filesystem/device by name.
* First calls FSOP_SYNC on the filesystem; then calls FSOP_UNMOUNT.
@@ -627,7 +570,7 @@ vfs_unmount(const char *devname)
goto fail;
}
if (kd->kd_fs == NULL || kd->kd_fs == SWAP_FS) {
if (kd->kd_fs == NULL) {
result = EINVAL;
goto fail;
}
@@ -657,43 +600,6 @@ vfs_unmount(const char *devname)
return result;
}
/*
* Detach swap. Like unmount.
*
* (Provided for completeness; there is no real need to remove swap
* explicitly prior to shutting down, except perhaps when swapping to
* things that themselves want a clean shutdown, like RAIDs.)
*/
int
vfs_swapoff(const char *devname)
{
struct knowndev *kd;
int result;
vfs_biglock_acquire();
result = findmount(devname, &kd);
if (result) {
goto fail;
}
if (kd->kd_fs != SWAP_FS) {
result = EINVAL;
goto fail;
}
kprintf("vfs: Swap detached from %s:\n", kd->kd_name);
/* drop it */
kd->kd_fs = NULL;
KASSERT(result==0);
fail:
vfs_biglock_release();
return result;
}
/*
* Global unmount function.
*/
@@ -717,11 +623,6 @@ vfs_unmountall(void)
/* not mounted */
continue;
}
if (dev->kd_fs == SWAP_FS) {
/* just drop it */
dev->kd_fs = NULL;
continue;
}
kprintf("vfs: Unmounting %s:\n", dev->kd_name);

View File

@@ -129,8 +129,7 @@ vnode_decref(struct vnode *vn)
void
vnode_check(struct vnode *v, const char *opstr)
{
/* not safe, and not really needed to check constant fields */
/*vfs_biglock_acquire();*/
vfs_biglock_acquire();
if (v == NULL) {
panic("vnode_check: vop_%s: null vnode\n", opstr);
@@ -174,5 +173,5 @@ vnode_check(struct vnode *v, const char *opstr)
}
spinlock_release(&v->vn_countlock);
/*vfs_biglock_release();*/
vfs_biglock_release();
}

View File

@@ -31,6 +31,8 @@
#include <lib.h>
#include <spinlock.h>
#include <vm.h>
#include <kern/test161.h>
#include <test.h>
/*
* Kernel malloc.
@@ -743,8 +745,8 @@ kheap_dumpall(void)
* Print the allocated/freed map of a single kernel heap page.
*/
static
void
subpage_stats(struct pageref *pr)
unsigned long
subpage_stats(struct pageref *pr, bool quiet)
{
vaddr_t prpage, fla;
struct freelist *fl;
@@ -780,18 +782,21 @@ subpage_stats(struct pageref *pr)
}
}
kprintf("at 0x%08lx: size %-4lu %u/%u free\n",
(unsigned long)prpage, (unsigned long) sizes[blktype],
(unsigned) pr->nfree, n);
kprintf(" ");
for (i=0; i<n; i++) {
int val = (freemap[i/32] & (1<<(i%32)))!=0;
kprintf("%c", val ? '.' : '*');
if (i%64==63 && i<n-1) {
kprintf("\n ");
if (!quiet) {
kprintf("at 0x%08lx: size %-4lu %u/%u free\n",
(unsigned long)prpage, (unsigned long) sizes[blktype],
(unsigned) pr->nfree, n);
kprintf(" ");
for (i=0; i<n; i++) {
int val = (freemap[i/32] & (1<<(i%32)))!=0;
kprintf("%c", val ? '.' : '*');
if (i%64==63 && i<n-1) {
kprintf("\n ");
}
}
kprintf("\n");
}
kprintf("\n");
return ((unsigned long)sizes[blktype] * (n - (unsigned) pr->nfree));
}
/*
@@ -808,12 +813,55 @@ kheap_printstats(void)
kprintf("Subpage allocator status:\n");
for (pr = allbase; pr != NULL; pr = pr->next_all) {
subpage_stats(pr);
subpage_stats(pr, false);
}
spinlock_release(&kmalloc_spinlock);
}
/*
* Return the number of used bytes.
*/
unsigned long
kheap_getused(void) {
struct pageref *pr;
unsigned long total = 0;
unsigned int num_pages = 0, coremap_bytes = 0;
/* compute with interrupts off */
spinlock_acquire(&kmalloc_spinlock);
for (pr = allbase; pr != NULL; pr = pr->next_all) {
total += subpage_stats(pr, true);
num_pages++;
}
coremap_bytes = coremap_used_bytes();
// Don't double-count the pages we're using for subpage allocation;
// we've already accounted for the used portion.
if (coremap_bytes > 0) {
total += coremap_bytes - (num_pages * PAGE_SIZE);
}
spinlock_release(&kmalloc_spinlock);
return total;
}
/*
* Print number of used bytes.
*/
void
kheap_printused(void)
{
char total_string[32];
snprintf(total_string, sizeof(total_string), "%lu", kheap_getused());
secprintf(SECRET, total_string, "khu");
}
////////////////////////////////////////
/*
@@ -967,7 +1015,7 @@ subpage_kmalloc(size_t sz
prpage = alloc_kpages(1);
if (prpage==0) {
/* Out of memory. */
kprintf("kmalloc: Subpage allocator couldn't get a page\n");
silent("kmalloc: Subpage allocator couldn't get a page\n");
return NULL;
}
KASSERT(prpage % PAGE_SIZE == 0);