Revert "Merging in 1.0.2."

This reverts commit 50cf3276e7.
Geoffrey Challen
2017-01-09 22:52:13 -05:00
parent 50cf3276e7
commit e318e3171e
118 changed files with 3158 additions and 1350 deletions

kern/thread/synch.c

@@ -47,18 +47,18 @@
struct semaphore *
sem_create(const char *name, unsigned initial_count)
{
struct semaphore *sem;
sem = kmalloc(sizeof(*sem));
if (sem == NULL) {
return NULL;
}
sem->sem_name = kstrdup(name);
if (sem->sem_name == NULL) {
kfree(sem);
return NULL;
}
sem->sem_wchan = wchan_create(sem->sem_name);
if (sem->sem_wchan == NULL) {
@@ -68,39 +68,39 @@ sem_create(const char *name, unsigned initial_count)
}
spinlock_init(&sem->sem_lock);
sem->sem_count = initial_count;
return sem;
}
void
sem_destroy(struct semaphore *sem)
{
KASSERT(sem != NULL);
/* wchan_cleanup will assert if anyone's waiting on it */
spinlock_cleanup(&sem->sem_lock);
wchan_destroy(sem->sem_wchan);
kfree(sem->sem_name);
kfree(sem);
}
void
P(struct semaphore *sem)
{
KASSERT(sem != NULL);
/*
* May not block in an interrupt handler.
*
* For robustness, always check, even if we can actually
* complete the P without blocking.
*/
KASSERT(curthread->t_in_interrupt == false);
/* Use the semaphore spinlock to protect the wchan as well. */
spinlock_acquire(&sem->sem_lock);
while (sem->sem_count == 0) {
/*
*
* Note that we don't maintain strict FIFO ordering of
@@ -114,21 +114,21 @@ P(struct semaphore *sem)
* ordering?
*/
wchan_sleep(sem->sem_wchan, &sem->sem_lock);
}
KASSERT(sem->sem_count > 0);
sem->sem_count--;
spinlock_release(&sem->sem_lock);
}
void
V(struct semaphore *sem)
{
KASSERT(sem != NULL);
spinlock_acquire(&sem->sem_lock);
sem->sem_count++;
KASSERT(sem->sem_count > 0);
wchan_wakeone(sem->sem_wchan, &sem->sem_lock);
spinlock_release(&sem->sem_lock);
@@ -141,59 +141,59 @@ V(struct semaphore *sem)
struct lock *
lock_create(const char *name)
{
struct lock *lock;
lock = kmalloc(sizeof(*lock));
if (lock == NULL) {
return NULL;
}
lock->lk_name = kstrdup(name);
if (lock->lk_name == NULL) {
kfree(lock);
return NULL;
}
// add stuff here as needed
return lock;
}
void
lock_destroy(struct lock *lock)
{
KASSERT(lock != NULL);
// add stuff here as needed
kfree(lock->lk_name);
kfree(lock);
}
void
lock_acquire(struct lock *lock)
{
// Write this
(void)lock; // suppress warning until code gets written
}
void
lock_release(struct lock *lock)
{
// Write this
(void)lock; // suppress warning until code gets written
}
bool
lock_do_i_hold(struct lock *lock)
{
// Write this
(void)lock; // suppress warning until code gets written
return true; // dummy until code gets written
}
////////////////////////////////////////////////////////////
@@ -204,47 +204,47 @@ lock_do_i_hold(struct lock *lock)
struct cv *
cv_create(const char *name)
{
struct cv *cv;
cv = kmalloc(sizeof(*cv));
if (cv == NULL) {
return NULL;
}
cv->cv_name = kstrdup(name);
if (cv->cv_name==NULL) {
kfree(cv);
return NULL;
}
// add stuff here as needed
return cv;
}
void
cv_destroy(struct cv *cv)
{
KASSERT(cv != NULL);
// add stuff here as needed
kfree(cv->cv_name);
kfree(cv);
}
void
cv_wait(struct cv *cv, struct lock *lock)
{
// Write this
(void)cv; // suppress warning until code gets written
(void)lock; // suppress warning until code gets written
}
void
cv_signal(struct cv *cv, struct lock *lock)
{
// Write this
(void)cv; // suppress warning until code gets written
(void)lock; // suppress warning until code gets written
}
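
The comment in P() above leaves strict FIFO ordering as an exercise. One possible answer, sketched here purely for illustration (it is not part of this commit, and the fifo_semaphore type and every fs_* field are invented names), is a ticket scheme built on the same spinlock/wchan primitives used above: each P() draws a ticket, and a count can only be consumed by the ticket currently being served, so a late arrival can never overtake a sleeping thread.

        /*
         * Hypothetical strict-FIFO semaphore sketch (illustration only).
         * Creation/destruction are omitted; assume fs_lock and fs_wchan
         * are initialized the same way sem_create does above.
         */
        struct fifo_semaphore {
                struct spinlock fs_lock;
                struct wchan *fs_wchan;
                unsigned fs_count;        /* available resources */
                unsigned fs_next_ticket;  /* next ticket to hand out */
                unsigned fs_now_serving;  /* ticket allowed to take a count */
        };

        void
        fifo_P(struct fifo_semaphore *fs)
        {
                unsigned ticket;

                KASSERT(curthread->t_in_interrupt == false);

                spinlock_acquire(&fs->fs_lock);
                ticket = fs->fs_next_ticket++;
                /* Wait until it is both our turn and a count is available. */
                while (ticket != fs->fs_now_serving || fs->fs_count == 0) {
                        wchan_sleep(fs->fs_wchan, &fs->fs_lock);
                }
                fs->fs_count--;
                fs->fs_now_serving++;
                /* Wake everyone; only the next ticket holder can proceed. */
                wchan_wakeall(fs->fs_wchan, &fs->fs_lock);
                spinlock_release(&fs->fs_lock);
        }

        void
        fifo_V(struct fifo_semaphore *fs)
        {
                spinlock_acquire(&fs->fs_lock);
                fs->fs_count++;
                wchan_wakeall(fs->fs_wchan, &fs->fs_lock);
                spinlock_release(&fs->fs_lock);
        }

The wakeall is deliberately coarse: it trades a thundering herd for simplicity, since only the thread whose ticket matches fs_now_serving will get past the while loop.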
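The lock_acquire/lock_release/lock_do_i_hold stubs above ("Write this") usually end up following the same wchan pattern as the semaphore. A hedged sketch of one common approach, assuming struct lock is extended with three invented fields (lk_lock, lk_wchan, lk_holder) that lock_create would initialize and lock_destroy clean up:

        /* Hypothetical sketch only; not part of this commit. */
        void
        lock_acquire(struct lock *lock)
        {
                KASSERT(lock != NULL);
                KASSERT(!lock_do_i_hold(lock));
                KASSERT(curthread->t_in_interrupt == false);

                spinlock_acquire(&lock->lk_lock);
                /* Sleep until no one holds the lock, then take it. */
                while (lock->lk_holder != NULL) {
                        wchan_sleep(lock->lk_wchan, &lock->lk_lock);
                }
                lock->lk_holder = curthread;
                spinlock_release(&lock->lk_lock);
        }

        void
        lock_release(struct lock *lock)
        {
                KASSERT(lock != NULL);
                KASSERT(lock_do_i_hold(lock));

                spinlock_acquire(&lock->lk_lock);
                lock->lk_holder = NULL;
                /* Hand the lock to one waiter, if any. */
                wchan_wakeone(lock->lk_wchan, &lock->lk_lock);
                spinlock_release(&lock->lk_lock);
        }

        bool
        lock_do_i_hold(struct lock *lock)
        {
                /* Safe without the spinlock: lk_holder can only equal
                   curthread if we set it ourselves in lock_acquire. */
                return (lock->lk_holder == curthread);
        }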

kern/thread/thread.c

@@ -65,10 +65,16 @@ struct wchan {
DECLARRAY(cpu, static __UNUSED inline);
DEFARRAY(cpu, static __UNUSED inline);
static struct cpuarray allcpus;
+ unsigned num_cpus;
/* Used to wait for secondary CPUs to come online. */
static struct semaphore *cpu_startup_sem;
+ /* Used to synchronize exit cleanup. */
+ unsigned thread_count = 0;
+ static struct spinlock thread_count_lock = SPINLOCK_INITIALIZER;
+ static struct wchan *thread_count_wchan;
////////////////////////////////////////////////////////////
/*
@@ -119,17 +125,16 @@ thread_create(const char *name)
struct thread *thread;
DEBUGASSERT(name != NULL);
+ if (strlen(name) > MAX_NAME_LENGTH) {
+ return NULL;
+ }
thread = kmalloc(sizeof(*thread));
if (thread == NULL) {
return NULL;
}
- thread->t_name = kstrdup(name);
- if (thread->t_name == NULL) {
- kfree(thread);
- return NULL;
- }
+ strcpy(thread->t_name, name);
thread->t_wchan_name = "NEW";
thread->t_state = S_READY;
@@ -256,6 +261,9 @@ cpu_create(unsigned hardware_number)
* Nor can it be called on a running thread.
*
* (Freeing the stack you're actually using to run is ... inadvisable.)
+ *
+ * Thread destroy should finish the process of cleaning up a thread started by
+ * thread_exit.
*/
static
void
@@ -264,11 +272,6 @@ thread_destroy(struct thread *thread)
KASSERT(thread != curthread);
KASSERT(thread->t_state != S_RUN);
- /*
- * If you add things to struct thread, be sure to clean them up
- * either here or in thread_exit(). (And not both...)
- */
/* Thread subsystem fields */
KASSERT(thread->t_proc == NULL);
if (thread->t_stack != NULL) {
@@ -280,7 +283,6 @@ thread_destroy(struct thread *thread)
/* sheer paranoia */
thread->t_wchan_name = "DESTROYED";
- kfree(thread->t_name);
kfree(thread);
}
@@ -411,8 +413,6 @@ cpu_hatch(unsigned software_number)
spl0();
- cpu_identify(buf, sizeof(buf));
- kprintf("cpu%u: %s\n", software_number, buf);
V(cpu_startup_sem);
thread_exit();
}
@@ -430,13 +430,26 @@ thread_start_cpus(void)
kprintf("cpu0: %s\n", buf);
cpu_startup_sem = sem_create("cpu_hatch", 0);
+ thread_count_wchan = wchan_create("thread_count");
mainbus_start_cpus();
- for (i=0; i<cpuarray_num(&allcpus) - 1; i++) {
+ num_cpus = cpuarray_num(&allcpus);
+ for (i=0; i<num_cpus - 1; i++) {
P(cpu_startup_sem);
}
sem_destroy(cpu_startup_sem);
+ if (i == 0) {
+ kprintf("1 CPU online\n");
+ } else {
+ kprintf("%d CPUs online\n", i + 1);
+ }
cpu_startup_sem = NULL;
+ // Gross hack to deal with os/161 "idle" threads. Hardcode the thread count
+ // to 1 so the inc/dec properly works in thread_[fork/exit]. The one thread
+ // is the cpu0 boot thread (menu), which is the only thread that hasn't
+ // exited yet.
+ thread_count = 1;
}
/*
@@ -465,7 +478,7 @@ thread_make_runnable(struct thread *target, bool already_have_lock)
target->t_state = S_READY;
threadlist_addtail(&targetcpu->c_runqueue, target);
- if (targetcpu->c_isidle && targetcpu != curcpu->c_self) {
+ if (targetcpu->c_isidle) {
/*
* Other processor is idle; send interrupt to make
* sure it unidles.
@@ -535,6 +548,11 @@ thread_fork(const char *name,
*/
newthread->t_iplhigh_count++;
+ spinlock_acquire(&thread_count_lock);
+ ++thread_count;
+ wchan_wakeall(thread_count_wchan, &thread_count_lock);
+ spinlock_release(&thread_count_lock);
/* Set up the switchframe so entrypoint() gets called */
switchframe_init(newthread, entrypoint, data1, data2);
@@ -770,6 +788,13 @@ thread_startup(void (*entrypoint)(void *data1, unsigned long data2),
* should be cleaned up right away. The rest has to wait until
* thread_destroy is called from exorcise().
*
+ * Note that any dynamically-allocated structures that can vary in size from
+ * thread to thread should be cleaned up here, not in thread_destroy. This is
+ * because the last thread left on each core runs the idle loop and does not
+ * get cleaned up until new threads are created. Differences in the amount of
+ * memory used by different threads after thread_exit will make it look like
+ * your kernel is leaking memory and cause some of the test161 checks to fail.
+ *
* Does not return.
*/
void
@@ -791,8 +816,16 @@ thread_exit(void)
/* Check the stack guard band. */
thread_checkstack(cur);
+ // Decrement the thread count and notify anyone interested.
+ if (thread_count) {
+ spinlock_acquire(&thread_count_lock);
+ --thread_count;
+ wchan_wakeall(thread_count_wchan, &thread_count_lock);
+ spinlock_release(&thread_count_lock);
+ }
/* Interrupts off on this processor */
splhigh();
thread_switch(S_ZOMBIE, NULL, NULL);
panic("braaaaaaaiiiiiiiiiiinssssss\n");
}
@@ -1106,9 +1139,6 @@ ipi_send(struct cpu *target, int code)
spinlock_release(&target->c_ipi_lock);
}
- /*
- * Send an IPI to all CPUs.
- */
void
ipi_broadcast(int code)
{
@@ -1123,28 +1153,16 @@ ipi_broadcast(int code)
}
}
- /*
- * Send a TLB shootdown IPI to the specified CPU.
- */
void
ipi_tlbshootdown(struct cpu *target, const struct tlbshootdown *mapping)
{
- unsigned n;
+ int n;
spinlock_acquire(&target->c_ipi_lock);
n = target->c_numshootdown;
if (n == TLBSHOOTDOWN_MAX) {
- /*
- * If you have problems with this panic going off,
- * consider: (1) increasing the maximum, (2) putting
- * logic here to sleep until space appears (may
- * interact awkwardly with VM system locking), (3)
- * putting logic here to coalesce requests together,
- * and/or (4) improving VM system state tracking to
- * reduce the number of unnecessary shootdowns.
- */
- panic("ipi_tlbshootdown: Too many shootdowns queued\n");
+ target->c_numshootdown = TLBSHOOTDOWN_ALL;
}
else {
target->c_shootdown[n] = *mapping;
@@ -1157,14 +1175,11 @@ ipi_tlbshootdown(struct cpu *target, const struct tlbshootdown *mapping)
spinlock_release(&target->c_ipi_lock);
}
- /*
- * Handle an incoming interprocessor interrupt.
- */
void
interprocessor_interrupt(void)
{
uint32_t bits;
- unsigned i;
+ int i;
spinlock_acquire(&curcpu->c_ipi_lock);
bits = curcpu->c_ipi_pending;
@@ -1183,7 +1198,6 @@ interprocessor_interrupt(void)
curcpu->c_number);
}
spinlock_release(&curcpu->c_runqueue_lock);
kprintf("cpu%d: offline.\n", curcpu->c_number);
cpu_halt();
}
if (bits & (1U << IPI_UNIDLE)) {
@@ -1193,13 +1207,13 @@ interprocessor_interrupt(void)
*/
}
if (bits & (1U << IPI_TLBSHOOTDOWN)) {
- /*
- * Note: depending on your VM system locking you might
- * need to release the ipi lock while calling
- * vm_tlbshootdown.
- */
- for (i=0; i<curcpu->c_numshootdown; i++) {
- vm_tlbshootdown(&curcpu->c_shootdown[i]);
+ if (curcpu->c_numshootdown == TLBSHOOTDOWN_ALL) {
+ vm_tlbshootdown_all();
+ }
+ else {
+ for (i=0; i<curcpu->c_numshootdown; i++) {
+ vm_tlbshootdown(&curcpu->c_shootdown[i]);
+ }
+ }
curcpu->c_numshootdown = 0;
}
@@ -1207,3 +1221,15 @@ interprocessor_interrupt(void)
curcpu->c_ipi_pending = 0;
spinlock_release(&curcpu->c_ipi_lock);
}
+ /*
+ * Wait for the thread count to equal tc.
+ */
+ void thread_wait_for_count(unsigned tc)
+ {
+ spinlock_acquire(&thread_count_lock);
+ while (thread_count != tc) {
+ wchan_sleep(thread_count_wchan, &thread_count_lock);
+ }
+ spinlock_release(&thread_count_lock);
+ }
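
A hypothetical caller (no caller appears in this diff): because thread_start_cpus above seeds thread_count to 1 for the cpu0 boot (menu) thread, code such as a shutdown path could block until every forked thread has passed through thread_exit by waiting for the count to drop back to 1.

        /* Hypothetical usage, not shown in this commit. */
        thread_wait_for_count(1);   /* only the boot (menu) thread remains */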