Initial Spring 2016 commit.

This commit is contained in:
Geoffrey Challen
2015-12-23 00:50:04 +00:00
commit cafa9f5690
732 changed files with 92195 additions and 0 deletions

121
kern/thread/clock.c Normal file
View File

@@ -0,0 +1,121 @@
/*
* Copyright (c) 2000, 2001, 2002, 2003, 2004, 2005, 2008, 2009
* The President and Fellows of Harvard College.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <types.h>
#include <lib.h>
#include <cpu.h>
#include <wchan.h>
#include <clock.h>
#include <thread.h>
#include <current.h>
/*
* Time handling.
*
* This is pretty primitive. A real kernel will typically have some
* kind of support for scheduling callbacks to happen at specific
* points in the future, usually with more resolution than one second.
*
* A real kernel also has to maintain the time of day; in OS/161 we
* skimp on that because we have a known-good hardware clock.
*/
/*
* Timing constants. These should be tuned along with any work done on
* the scheduler.
*/
#define SCHEDULE_HARDCLOCKS 4 /* Reschedule every 4 hardclocks. */
#define MIGRATE_HARDCLOCKS 16 /* Migrate every 16 hardclocks. */
/*
* Once a second, everything waiting on lbolt is awakened by CPU 0.
*/
static struct wchan *lbolt;
static struct spinlock lbolt_lock;
/*
* Setup.
*/
/*
 * Boot-time setup for the clock code: create the lbolt wait channel
 * and initialize the spinlock that guards it.
 */
void
hardclock_bootstrap(void)
{
	lbolt = wchan_create("lbolt");
	if (lbolt == NULL) {
		panic("Couldn't create lbolt\n");
	}
	spinlock_init(&lbolt_lock);
}
/*
* This is called once per second, on one processor, by the timer
* code.
*/
void
timerclock(void)
{
	/*
	 * Wake everything sleeping on lbolt (see clocksleep). The
	 * lbolt spinlock is held across the wakeup because
	 * wchan_wakeall takes the wchan's associated lock.
	 */
	spinlock_acquire(&lbolt_lock);
	wchan_wakeall(lbolt, &lbolt_lock);
	spinlock_release(&lbolt_lock);
}
/*
* This is called HZ times a second (on each processor) by the timer
* code.
*/
void
hardclock(void)
{
	/*
	 * Collect statistics here as desired.
	 */
	curcpu->c_hardclocks++;

	/* Every MIGRATE_HARDCLOCKS ticks, consider moving threads off-cpu. */
	if ((curcpu->c_hardclocks % MIGRATE_HARDCLOCKS) == 0) {
		thread_consider_migration();
	}
	/* Every SCHEDULE_HARDCLOCKS ticks, rerun the scheduler. */
	if ((curcpu->c_hardclocks % SCHEDULE_HARDCLOCKS) == 0) {
		schedule();
	}
	/* Yield the CPU on every tick so other ready threads can run. */
	thread_yield();
}
/*
* Suspend execution for n seconds.
*/
/*
 * Suspend execution for num_secs seconds by sleeping on lbolt that
 * many times; timerclock() broadcasts on lbolt once per second.
 * Non-positive values return immediately.
 */
void
clocksleep(int num_secs)
{
	int i;

	spinlock_acquire(&lbolt_lock);
	for (i = 0; i < num_secs; i++) {
		wchan_sleep(lbolt, &lbolt_lock);
	}
	spinlock_release(&lbolt_lock);
}

148
kern/thread/spinlock.c Normal file
View File

@@ -0,0 +1,148 @@
/*
* Copyright (c) 2009
* The President and Fellows of Harvard College.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/* Make sure to build out-of-line versions of inline functions */
#define SPINLOCK_INLINE /* empty */
#define MEMBAR_INLINE /* empty */
#include <types.h>
#include <lib.h>
#include <cpu.h>
#include <spl.h>
#include <spinlock.h>
#include <membar.h>
#include <current.h> /* for curcpu */
/*
* Spinlocks.
*/
/*
* Initialize spinlock.
*/
/*
 * Initialize a spinlock: no holder, lock word clear.
 */
void
spinlock_init(struct spinlock *splk)
{
	splk->splk_holder = NULL;
	spinlock_data_set(&splk->splk_lock, 0);
}
/*
* Clean up spinlock.
*/
/*
 * Clean up a spinlock. It must be unheld: lock word clear and no
 * recorded holder. (There is no storage to release; this only
 * asserts the lock is in a destroyable state.)
 */
void
spinlock_cleanup(struct spinlock *splk)
{
	KASSERT(spinlock_data_get(&splk->splk_lock) == 0);
	KASSERT(splk->splk_holder == NULL);
}
/*
* Get the lock.
*
* First disable interrupts (otherwise, if we get a timer interrupt we
* might come back to this lock and deadlock), then use a machine-level
* atomic operation to wait for the lock to be free.
*/
/*
 * Get the lock.
 *
 * Interrupts are disabled first (otherwise a timer interrupt could
 * re-enter this lock on the same CPU and deadlock); then we spin
 * using a machine-level atomic test-and-set until the lock is ours.
 */
void
spinlock_acquire(struct spinlock *splk)
{
	struct cpu *mycpu = NULL;

	splraise(IPL_NONE, IPL_HIGH);

	/* this must work before curcpu initialization */
	if (CURCPU_EXISTS()) {
		mycpu = curcpu->c_self;
		/* Recursive acquisition on one CPU can never succeed. */
		if (splk->splk_holder == mycpu) {
			panic("Deadlock on spinlock %p\n", splk);
		}
		mycpu->c_spinlocks++;
	}

	/*
	 * Test-test-and-set: spin on a plain read until the lock word
	 * looks free, then attempt the atomic test-and-set, which
	 * writes 1 into the lock word and returns the previous value.
	 * A previous value of 0 means the lock was unheld and is now
	 * ours; otherwise go back to the read loop. Reading first
	 * reduces bus contention.
	 */
	do {
		while (spinlock_data_get(&splk->splk_lock) != 0) {
			/* spin */
		}
	} while (spinlock_data_testandset(&splk->splk_lock) != 0);

	membar_store_any();
	splk->splk_holder = mycpu;
}
/*
* Release the lock.
*/
void
spinlock_release(struct spinlock *splk)
{
	/* this must work before curcpu initialization */
	if (CURCPU_EXISTS()) {
		/* Only the holding CPU may release; counts must balance. */
		KASSERT(splk->splk_holder == curcpu->c_self);
		KASSERT(curcpu->c_spinlocks > 0);
		curcpu->c_spinlocks--;
	}

	/*
	 * Clear the holder, then the memory barrier, then the lock
	 * word: the membar orders the holder store before the store
	 * that actually frees the lock.
	 */
	splk->splk_holder = NULL;
	membar_any_store();
	spinlock_data_set(&splk->splk_lock, 0);
	/* Drop our IPL_HIGH reference (may re-enable interrupts). */
	spllower(IPL_HIGH, IPL_NONE);
}
/*
* Check if the current cpu holds the lock.
*/
/*
 * Check if the current cpu holds the lock. Before curcpu is
 * initialized, always report true.
 */
bool
spinlock_do_i_hold(struct spinlock *splk)
{
	/* Assume we can read splk_holder atomically enough for this to work */
	return !CURCPU_EXISTS() || splk->splk_holder == curcpu->c_self;
}

160
kern/thread/spl.c Normal file
View File

@@ -0,0 +1,160 @@
/*
* Copyright (c) 2009
* The President and Fellows of Harvard College.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/* Make sure to build out-of-line versions of spl inline functions */
#define SPL_INLINE /* empty */
#include <types.h>
#include <lib.h>
#include <cpu.h>
#include <spl.h>
#include <thread.h>
#include <current.h>
/*
* Machine-independent interrupt handling functions.
*
* Traditionally, all this code is machine-dependent.
*
* However.
*
* Since on OS/161 we don't support interrupt levels on any platform,
* all we require under this logic is cpu_irqoff() and cpu_irqon()
* that explicitly turn interrupts off and on.
*
* If we had multiple interrupt levels, the number of levels would in
* general be different on different platforms (depending on hardware
* requirements and hardware capabilities) so things would get more
* complicated -- but nearly all of this code could remain MI.
*/
/*
* Raise and lower the interrupt priority level.
*
* Each spinlock acquisition can raise and lower the priority level
* independently. The spl calls also raise and lower the priority
* level independently of the spinlocks. This is necessary because in
* general spinlock acquisitions and releases don't nest perfectly,
* and don't necessarily nest with respect to spl calls either.
*
* For example:
*
* struct spinlock red, blue;
* int s;
*
* spinlock_acquire(&red);
* s = splhigh();
* spinlock_acquire(&blue);
* splx(s);
* spinlock_release(&red);
* spinlock_release(&blue);
*
* In order to make this work we need to count the number of times
* IPL_HIGH (or, if we had multiple interrupt priority levels, each
* level independently) has been raised. Interrupts go off on the
* first raise, and go on again only on the last lower.
*
* curthread->t_iplhigh_count is used to track this.
*/
void
splraise(int oldspl, int newspl)
{
	struct thread *cur = curthread;

	/* only one priority level, only one valid args configuration */
	KASSERT(oldspl == IPL_NONE);
	KASSERT(newspl == IPL_HIGH);

	if (!CURCPU_EXISTS()) {
		/* before curcpu initialization; interrupts are off anyway */
		return;
	}

	/*
	 * The first raise actually turns interrupts off; subsequent
	 * nested raises just bump the count. Disable before
	 * incrementing so the count never claims protection we don't
	 * yet have.
	 */
	if (cur->t_iplhigh_count == 0) {
		cpu_irqoff();
	}
	cur->t_iplhigh_count++;
}
void
spllower(int oldspl, int newspl)
{
	struct thread *cur = curthread;

	/* only one priority level, only one valid args configuration */
	KASSERT(oldspl == IPL_HIGH);
	KASSERT(newspl == IPL_NONE);

	if (!CURCPU_EXISTS()) {
		/* before curcpu initialization; interrupts are off anyway */
		return;
	}

	/*
	 * Only the last (outermost) lower re-enables interrupts;
	 * nested lowers just decrement the count.
	 */
	cur->t_iplhigh_count--;
	if (cur->t_iplhigh_count == 0) {
		cpu_irqon();
	}
}
/*
* Disable or enable interrupts and adjust curspl setting. Return old
* spl level.
*/
int
splx(int spl)
{
	struct thread *cur = curthread;
	int ret;

	if (!CURCPU_EXISTS()) {
		/* before curcpu initialization; interrupts are off anyway */
		return spl;
	}

	if (cur->t_curspl < spl) {
		/* turning interrupts off: disable first, then record */
		splraise(cur->t_curspl, spl);
		ret = cur->t_curspl;
		cur->t_curspl = spl;
	}
	else if (cur->t_curspl > spl) {
		/* turning interrupts on: record first, then enable */
		ret = cur->t_curspl;
		cur->t_curspl = spl;
		spllower(ret, spl);
	}
	else {
		/* do nothing */
		ret = spl;
	}

	/* return the previous spl level so callers can restore it later */
	return ret;
}

258
kern/thread/synch.c Normal file
View File

@@ -0,0 +1,258 @@
/*
* Copyright (c) 2000, 2001, 2002, 2003, 2004, 2005, 2008, 2009
* The President and Fellows of Harvard College.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* Synchronization primitives.
* The specifications of the functions are in synch.h.
*/
#include <types.h>
#include <lib.h>
#include <spinlock.h>
#include <wchan.h>
#include <thread.h>
#include <current.h>
#include <synch.h>
////////////////////////////////////////////////////////////
//
// Semaphore.
/*
 * Allocate and initialize a semaphore with the given name and
 * initial count. Returns NULL on allocation failure; partially
 * constructed state is unwound via the goto chain below.
 */
struct semaphore *
sem_create(const char *name, unsigned initial_count)
{
	struct semaphore *sem;

	sem = kmalloc(sizeof(*sem));
	if (sem == NULL) {
		goto fail;
	}
	sem->sem_name = kstrdup(name);
	if (sem->sem_name == NULL) {
		goto fail_sem;
	}
	sem->sem_wchan = wchan_create(sem->sem_name);
	if (sem->sem_wchan == NULL) {
		goto fail_name;
	}

	spinlock_init(&sem->sem_lock);
	sem->sem_count = initial_count;
	return sem;

fail_name:
	kfree(sem->sem_name);
fail_sem:
	kfree(sem);
fail:
	return NULL;
}
/*
 * Destroy a semaphore and release its resources. No thread may be
 * waiting on it (wchan_destroy will assert otherwise).
 */
void
sem_destroy(struct semaphore *sem)
{
	KASSERT(sem != NULL);

	wchan_destroy(sem->sem_wchan);
	spinlock_cleanup(&sem->sem_lock);
	kfree(sem->sem_name);
	kfree(sem);
}
void
P(struct semaphore *sem)
{
	KASSERT(sem != NULL);

	/*
	 * May not block in an interrupt handler.
	 *
	 * For robustness, always check, even if we can actually
	 * complete the P without blocking.
	 */
	KASSERT(curthread->t_in_interrupt == false);

	/* Use the semaphore spinlock to protect the wchan as well. */
	spinlock_acquire(&sem->sem_lock);
	while (sem->sem_count == 0) {
		/*
		 * Sleep until a V() wakes us, then recheck the count:
		 * another thread may have consumed the semaphore
		 * between the wakeup and our reacquiring the lock.
		 *
		 * Note that we don't maintain strict FIFO ordering of
		 * threads going through the semaphore; that is, we
		 * might "get" it on the first try even if other
		 * threads are waiting. Apparently according to some
		 * textbooks semaphores must for some reason have
		 * strict ordering. Too bad. :-)
		 *
		 * Exercise: how would you implement strict FIFO
		 * ordering?
		 */
		wchan_sleep(sem->sem_wchan, &sem->sem_lock);
	}
	KASSERT(sem->sem_count > 0);
	sem->sem_count--;
	spinlock_release(&sem->sem_lock);
}
void
V(struct semaphore *sem)
{
	KASSERT(sem != NULL);

	spinlock_acquire(&sem->sem_lock);
	sem->sem_count++;
	KASSERT(sem->sem_count > 0);
	/* Wake one waiter (if any) while still holding the spinlock. */
	wchan_wakeone(sem->sem_wchan, &sem->sem_lock);
	spinlock_release(&sem->sem_lock);
}
////////////////////////////////////////////////////////////
//
// Lock.
/*
 * Allocate and name a new lock. Returns NULL on allocation failure.
 * (The synchronization state itself is still course scaffolding.)
 */
struct lock *
lock_create(const char *name)
{
	struct lock *lock;
	char *name_copy;

	lock = kmalloc(sizeof(*lock));
	if (lock == NULL) {
		return NULL;
	}

	name_copy = kstrdup(name);
	if (name_copy == NULL) {
		kfree(lock);
		return NULL;
	}
	lock->lk_name = name_copy;

	// add stuff here as needed

	return lock;
}
/*
 * Free a lock and its name. Callers are responsible for ensuring it
 * is no longer in use; once lock_acquire is implemented, state
 * checks belong here too.
 */
void
lock_destroy(struct lock *lock)
{
	KASSERT(lock != NULL);

	// add stuff here as needed

	kfree(lock->lk_name);
	kfree(lock);
}
/*
 * Unimplemented course scaffolding: see synch.h for the required
 * semantics. Currently a no-op.
 */
void
lock_acquire(struct lock *lock)
{
	// Write this
	(void)lock; // suppress warning until code gets written
}
/*
 * Unimplemented course scaffolding: see synch.h for the required
 * semantics. Currently a no-op.
 */
void
lock_release(struct lock *lock)
{
	// Write this
	(void)lock; // suppress warning until code gets written
}
/*
 * Unimplemented course scaffolding: see synch.h for the required
 * semantics. Currently always reports true so KASSERTs built on it
 * do not fire before the lock is implemented.
 */
bool
lock_do_i_hold(struct lock *lock)
{
	// Write this
	(void)lock; // suppress warning until code gets written

	return true; // dummy until code gets written
}
////////////////////////////////////////////////////////////
//
// CV
/*
 * Allocate and name a new condition variable. Returns NULL on
 * allocation failure. (The wait machinery is still scaffolding.)
 */
struct cv *
cv_create(const char *name)
{
	struct cv *cv;
	char *name_copy;

	cv = kmalloc(sizeof(*cv));
	if (cv == NULL) {
		return NULL;
	}

	name_copy = kstrdup(name);
	if (name_copy == NULL) {
		kfree(cv);
		return NULL;
	}
	cv->cv_name = name_copy;

	// add stuff here as needed

	return cv;
}
/*
 * Free a condition variable and its name. Callers must ensure no
 * thread is still waiting on it.
 */
void
cv_destroy(struct cv *cv)
{
	KASSERT(cv != NULL);

	// add stuff here as needed

	kfree(cv->cv_name);
	kfree(cv);
}
/*
 * Unimplemented course scaffolding: see synch.h for the required
 * semantics. Currently a no-op.
 */
void
cv_wait(struct cv *cv, struct lock *lock)
{
	// Write this
	(void)cv; // suppress warning until code gets written
	(void)lock; // suppress warning until code gets written
}
/*
 * Unimplemented course scaffolding: see synch.h for the required
 * semantics. Currently a no-op.
 */
void
cv_signal(struct cv *cv, struct lock *lock)
{
	// Write this
	(void)cv; // suppress warning until code gets written
	(void)lock; // suppress warning until code gets written
}
/*
 * Unimplemented course scaffolding: see synch.h for the required
 * semantics. Currently a no-op.
 */
void
cv_broadcast(struct cv *cv, struct lock *lock)
{
	// Write this
	(void)cv; // suppress warning until code gets written
	(void)lock; // suppress warning until code gets written
}

1191
kern/thread/thread.c Normal file

File diff suppressed because it is too large Load Diff

240
kern/thread/threadlist.c Normal file
View File

@@ -0,0 +1,240 @@
/*
* Copyright (c) 2000, 2001, 2002, 2003, 2004, 2005, 2008, 2009
* The President and Fellows of Harvard College.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* Thread list functions, rather dull.
*/
#include <types.h>
#include <lib.h>
#include <thread.h>
#include <threadlist.h>
/*
 * Prepare a threadlistnode for use: not on any list, back-pointing
 * at its owning thread.
 */
void
threadlistnode_init(struct threadlistnode *tln, struct thread *t)
{
	DEBUGASSERT(tln != NULL);
	KASSERT(t != NULL);

	tln->tln_self = t;
	tln->tln_prev = NULL;
	tln->tln_next = NULL;
}
/*
 * Check that a threadlistnode is fit to be discarded: it must be off
 * any list (both link pointers NULL) and must still identify its
 * owning thread. There is no storage to release.
 */
void
threadlistnode_cleanup(struct threadlistnode *tln)
{
	DEBUGASSERT(tln != NULL);
	KASSERT(tln->tln_next == NULL);
	KASSERT(tln->tln_prev == NULL);
	KASSERT(tln->tln_self != NULL);
}
/*
 * Set up an empty threadlist. The list uses dummy head and tail
 * sentinel nodes linked to each other, so insertions and removals
 * never need to special-case the ends of the list.
 */
void
threadlist_init(struct threadlist *tl)
{
	DEBUGASSERT(tl != NULL);

	tl->tl_count = 0;

	tl->tl_head.tln_prev = NULL;
	tl->tl_head.tln_next = &tl->tl_tail;
	tl->tl_head.tln_self = NULL;

	tl->tl_tail.tln_prev = &tl->tl_head;
	tl->tl_tail.tln_next = NULL;
	tl->tl_tail.tln_self = NULL;
}
/*
 * Check that a threadlist may be discarded: the sentinels must be in
 * the pristine linked-together state threadlist_init left them in,
 * and the list must be empty. There is no storage to release.
 */
void
threadlist_cleanup(struct threadlist *tl)
{
	DEBUGASSERT(tl != NULL);
	DEBUGASSERT(tl->tl_head.tln_next == &tl->tl_tail);
	DEBUGASSERT(tl->tl_head.tln_prev == NULL);
	DEBUGASSERT(tl->tl_tail.tln_next == NULL);
	DEBUGASSERT(tl->tl_tail.tln_prev == &tl->tl_head);
	DEBUGASSERT(tl->tl_head.tln_self == NULL);
	DEBUGASSERT(tl->tl_tail.tln_self == NULL);

	KASSERT(threadlist_isempty(tl));
	KASSERT(tl->tl_count == 0);

	/* nothing (else) to do */
}
/*
 * True if the list holds no threads.
 */
bool
threadlist_isempty(struct threadlist *tl)
{
	DEBUGASSERT(tl != NULL);

	return tl->tl_count == 0;
}
////////////////////////////////////////////////////////////
// internal
/*
* Do insertion. Doesn't update tl_count.
*/
/*
 * Link T's node into the list immediately after ONLIST. Does not
 * update tl_count; callers do that.
 */
static
void
threadlist_insertafternode(struct threadlistnode *onlist, struct thread *t)
{
	struct threadlistnode *node = &t->t_listnode;

	/* The node must not already be on a list. */
	DEBUGASSERT(node->tln_prev == NULL);
	DEBUGASSERT(node->tln_next == NULL);

	node->tln_next = onlist->tln_next;
	node->tln_prev = onlist;
	node->tln_next->tln_prev = node;
	node->tln_prev->tln_next = node;
}
/*
* Do insertion. Doesn't update tl_count.
*/
/*
 * Link T's node into the list immediately before ONLIST. Does not
 * update tl_count; callers do that.
 */
static
void
threadlist_insertbeforenode(struct thread *t, struct threadlistnode *onlist)
{
	struct threadlistnode *node = &t->t_listnode;

	/* The node must not already be on a list. */
	DEBUGASSERT(node->tln_prev == NULL);
	DEBUGASSERT(node->tln_next == NULL);

	node->tln_next = onlist;
	node->tln_prev = onlist->tln_prev;
	node->tln_next->tln_prev = node;
	node->tln_prev->tln_next = node;
}
/*
* Do removal. Doesn't update tl_count.
*/
/*
 * Unlink a node from its list and null both link pointers to mark it
 * off-list. Does not update tl_count; callers do that.
 */
static
void
threadlist_removenode(struct threadlistnode *tln)
{
	struct threadlistnode *before, *after;

	DEBUGASSERT(tln != NULL);
	DEBUGASSERT(tln->tln_prev != NULL);
	DEBUGASSERT(tln->tln_next != NULL);

	before = tln->tln_prev;
	after = tln->tln_next;
	before->tln_next = after;
	after->tln_prev = before;

	tln->tln_prev = NULL;
	tln->tln_next = NULL;
}
////////////////////////////////////////////////////////////
// public
/*
 * Insert a thread at the front of the list.
 */
void
threadlist_addhead(struct threadlist *tl, struct thread *t)
{
	DEBUGASSERT(tl != NULL);
	DEBUGASSERT(t != NULL);

	tl->tl_count++;
	threadlist_insertafternode(&tl->tl_head, t);
}
/*
 * Insert a thread at the back of the list.
 */
void
threadlist_addtail(struct threadlist *tl, struct thread *t)
{
	DEBUGASSERT(tl != NULL);
	DEBUGASSERT(t != NULL);

	tl->tl_count++;
	threadlist_insertbeforenode(t, &tl->tl_tail);
}
/*
 * Detach and return the first thread on the list, or NULL if the
 * list is empty.
 */
struct thread *
threadlist_remhead(struct threadlist *tl)
{
	struct threadlistnode *first;

	DEBUGASSERT(tl != NULL);

	first = tl->tl_head.tln_next;
	if (first->tln_next == NULL) {
		/* only the tail sentinel follows the head: list is empty */
		return NULL;
	}
	threadlist_removenode(first);
	DEBUGASSERT(tl->tl_count > 0);
	tl->tl_count--;
	return first->tln_self;
}
/*
 * Detach and return the last thread on the list, or NULL if the
 * list is empty.
 */
struct thread *
threadlist_remtail(struct threadlist *tl)
{
	struct threadlistnode *last;

	DEBUGASSERT(tl != NULL);

	last = tl->tl_tail.tln_prev;
	if (last->tln_prev == NULL) {
		/* only the head sentinel precedes the tail: list is empty */
		return NULL;
	}
	threadlist_removenode(last);
	DEBUGASSERT(tl->tl_count > 0);
	tl->tl_count--;
	return last->tln_self;
}
void
threadlist_insertafter(struct threadlist *tl,
struct thread *onlist, struct thread *addee)
{
threadlist_insertafternode(&onlist->t_listnode, addee);
tl->tl_count++;
}
void
threadlist_insertbefore(struct threadlist *tl,
struct thread *addee, struct thread *onlist)
{
threadlist_insertbeforenode(addee, &onlist->t_listnode);
tl->tl_count++;
}
void
threadlist_remove(struct threadlist *tl, struct thread *t)
{
threadlist_removenode(&t->t_listnode);
DEBUGASSERT(tl->tl_count > 0);
tl->tl_count--;
}