Initial Spring 2016 commit.
433
kern/arch/mips/vm/dumbvm.c
Normal file
@@ -0,0 +1,433 @@
/*
 * Copyright (c) 2000, 2001, 2002, 2003, 2004, 2005, 2008, 2009
 *	The President and Fellows of Harvard College.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <types.h>
#include <kern/errno.h>
#include <lib.h>
#include <spl.h>
#include <cpu.h>
#include <spinlock.h>
#include <proc.h>
#include <current.h>
#include <mips/tlb.h>
#include <addrspace.h>
#include <vm.h>

/*
 * Dumb MIPS-only "VM system" that is intended to only be just barely
 * enough to struggle off the ground. You should replace all of this
 * code while doing the VM assignment. In fact, starting in that
 * assignment, this file is not included in your kernel!
 *
 * NOTE: it's been found over the years that students often begin on
 * the VM assignment by copying dumbvm.c and trying to improve it.
 * This is not recommended. dumbvm is (more or less intentionally) not
 * a good design reference. The first recommendation would be: do not
 * look at dumbvm at all. The second recommendation would be: if you
 * do, be sure to review it from the perspective of comparing it to
 * what a VM system is supposed to do, and understanding what corners
 * it's cutting (there are many) and why, and more importantly, how.
 */

/* under dumbvm, always have 72k of user stack */
/* (this must be > 64K so argument blocks of size ARG_MAX will fit) */
#define DUMBVM_STACKPAGES    18
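/*
 * Editorial note, not part of the original commit: with the usual 4 KB
 * MIPS page size, 18 pages give 18 * 4096 = 73728 bytes (72 KB), which
 * clears the 64 KB ARG_MAX bound mentioned above. A hypothetical
 * compile-time check could make that relationship explicit:
 *
 *	_Static_assert(DUMBVM_STACKPAGES * PAGE_SIZE > 64 * 1024,
 *	               "user stack must be larger than ARG_MAX");
 */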

/*
 * Wrap ram_stealmem in a spinlock.
 */
static struct spinlock stealmem_lock = SPINLOCK_INITIALIZER;

void
vm_bootstrap(void)
{
	/* Do nothing. */
}

/*
 * Check if we're in a context that can sleep. While most of the
 * operations in dumbvm don't in fact sleep, in a real VM system many
 * of them would. In those, assert that sleeping is ok. This helps
 * avoid the situation where syscall-layer code that works ok with
 * dumbvm starts blowing up during the VM assignment.
 */
static
void
dumbvm_can_sleep(void)
{
	if (CURCPU_EXISTS()) {
		/* must not hold spinlocks */
		KASSERT(curcpu->c_spinlocks == 0);

		/* must not be in an interrupt handler */
		KASSERT(curthread->t_in_interrupt == 0);
	}
}

static
paddr_t
getppages(unsigned long npages)
{
	paddr_t addr;

	spinlock_acquire(&stealmem_lock);

	addr = ram_stealmem(npages);

	spinlock_release(&stealmem_lock);
	return addr;
}

/* Allocate/free some kernel-space virtual pages */
vaddr_t
alloc_kpages(unsigned npages)
{
	paddr_t pa;

	dumbvm_can_sleep();
	pa = getppages(npages);
	if (pa==0) {
		return 0;
	}
	return PADDR_TO_KVADDR(pa);
}
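/*
 * Editorial note, not part of the original commit: PADDR_TO_KVADDR works
 * here because kseg0 direct-maps the first 512 MB of physical memory into
 * kernel virtual addresses, roughly
 *
 *	kvaddr = paddr + MIPS_KSEG0	(where MIPS_KSEG0 is 0x80000000)
 *
 * so pages handed out by alloc_kpages() never need TLB entries of their
 * own. The real macro lives in the MIPS vm headers; the line above is a
 * sketch of the arithmetic, not a copy of it.
 */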

void
free_kpages(vaddr_t addr)
{
	/* nothing - leak the memory. */

	(void)addr;
}

void
vm_tlbshootdown_all(void)
{
	panic("dumbvm tried to do tlb shootdown?!\n");
}

void
vm_tlbshootdown(const struct tlbshootdown *ts)
{
	(void)ts;
	panic("dumbvm tried to do tlb shootdown?!\n");
}

int
vm_fault(int faulttype, vaddr_t faultaddress)
{
	vaddr_t vbase1, vtop1, vbase2, vtop2, stackbase, stacktop;
	paddr_t paddr;
	int i;
	uint32_t ehi, elo;
	struct addrspace *as;
	int spl;

	faultaddress &= PAGE_FRAME;

	DEBUG(DB_VM, "dumbvm: fault: 0x%x\n", faultaddress);

	switch (faulttype) {
	    case VM_FAULT_READONLY:
		/* We always create pages read-write, so we can't get this */
		panic("dumbvm: got VM_FAULT_READONLY\n");
	    case VM_FAULT_READ:
	    case VM_FAULT_WRITE:
		break;
	    default:
		return EINVAL;
	}

	if (curproc == NULL) {
		/*
		 * No process. This is probably a kernel fault early
		 * in boot. Return EFAULT so as to panic instead of
		 * getting into an infinite faulting loop.
		 */
		return EFAULT;
	}

	as = proc_getas();
	if (as == NULL) {
		/*
		 * No address space set up. This is probably also a
		 * kernel fault early in boot.
		 */
		return EFAULT;
	}

	/* Assert that the address space has been set up properly. */
	KASSERT(as->as_vbase1 != 0);
	KASSERT(as->as_pbase1 != 0);
	KASSERT(as->as_npages1 != 0);
	KASSERT(as->as_vbase2 != 0);
	KASSERT(as->as_pbase2 != 0);
	KASSERT(as->as_npages2 != 0);
	KASSERT(as->as_stackpbase != 0);
	KASSERT((as->as_vbase1 & PAGE_FRAME) == as->as_vbase1);
	KASSERT((as->as_pbase1 & PAGE_FRAME) == as->as_pbase1);
	KASSERT((as->as_vbase2 & PAGE_FRAME) == as->as_vbase2);
	KASSERT((as->as_pbase2 & PAGE_FRAME) == as->as_pbase2);
	KASSERT((as->as_stackpbase & PAGE_FRAME) == as->as_stackpbase);

	vbase1 = as->as_vbase1;
	vtop1 = vbase1 + as->as_npages1 * PAGE_SIZE;
	vbase2 = as->as_vbase2;
	vtop2 = vbase2 + as->as_npages2 * PAGE_SIZE;
	stackbase = USERSTACK - DUMBVM_STACKPAGES * PAGE_SIZE;
	stacktop = USERSTACK;

	if (faultaddress >= vbase1 && faultaddress < vtop1) {
		paddr = (faultaddress - vbase1) + as->as_pbase1;
	}
	else if (faultaddress >= vbase2 && faultaddress < vtop2) {
		paddr = (faultaddress - vbase2) + as->as_pbase2;
	}
	else if (faultaddress >= stackbase && faultaddress < stacktop) {
		paddr = (faultaddress - stackbase) + as->as_stackpbase;
	}
	else {
		return EFAULT;
	}

	/* make sure it's page-aligned */
	KASSERT((paddr & PAGE_FRAME) == paddr);

	/* Disable interrupts on this CPU while frobbing the TLB. */
	spl = splhigh();

	for (i=0; i<NUM_TLB; i++) {
		tlb_read(&ehi, &elo, i);
		if (elo & TLBLO_VALID) {
			continue;
		}
		ehi = faultaddress;
		elo = paddr | TLBLO_DIRTY | TLBLO_VALID;
		DEBUG(DB_VM, "dumbvm: 0x%x -> 0x%x\n", faultaddress, paddr);
		tlb_write(ehi, elo, i);
		splx(spl);
		return 0;
	}

	kprintf("dumbvm: Ran out of TLB entries - cannot handle page fault\n");
	splx(spl);
	return EFAULT;
}

struct addrspace *
as_create(void)
{
	struct addrspace *as = kmalloc(sizeof(struct addrspace));
	if (as==NULL) {
		return NULL;
	}

	as->as_vbase1 = 0;
	as->as_pbase1 = 0;
	as->as_npages1 = 0;
	as->as_vbase2 = 0;
	as->as_pbase2 = 0;
	as->as_npages2 = 0;
	as->as_stackpbase = 0;

	return as;
}

void
as_destroy(struct addrspace *as)
{
	dumbvm_can_sleep();
	kfree(as);
}

void
as_activate(void)
{
	int i, spl;
	struct addrspace *as;

	as = proc_getas();
	if (as == NULL) {
		return;
	}

	/* Disable interrupts on this CPU while frobbing the TLB. */
	spl = splhigh();

	for (i=0; i<NUM_TLB; i++) {
		tlb_write(TLBHI_INVALID(i), TLBLO_INVALID(), i);
	}

	splx(spl);
}

void
as_deactivate(void)
{
	/* nothing */
}

int
as_define_region(struct addrspace *as, vaddr_t vaddr, size_t sz,
		 int readable, int writeable, int executable)
{
	size_t npages;

	dumbvm_can_sleep();

	/* Align the region. First, the base... */
	sz += vaddr & ~(vaddr_t)PAGE_FRAME;
	vaddr &= PAGE_FRAME;

	/* ...and now the length. */
	sz = (sz + PAGE_SIZE - 1) & PAGE_FRAME;

	npages = sz / PAGE_SIZE;
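	/*
	 * Editorial example, not part of the original commit: with 4 KB
	 * pages, a request of vaddr = 0x400010 and sz = 0x20 folds the
	 * base offset 0x10 into the length (sz becomes 0x30), rounds
	 * vaddr down to 0x400000, rounds sz up to 0x1000, and therefore
	 * maps one whole page (npages = 1).
	 */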

	/* We don't use these - all pages are read-write */
	(void)readable;
	(void)writeable;
	(void)executable;

	if (as->as_vbase1 == 0) {
		as->as_vbase1 = vaddr;
		as->as_npages1 = npages;
		return 0;
	}

	if (as->as_vbase2 == 0) {
		as->as_vbase2 = vaddr;
		as->as_npages2 = npages;
		return 0;
	}

	/*
	 * Support for more than two regions is not available.
	 */
	kprintf("dumbvm: Warning: too many regions\n");
	return ENOSYS;
}

static
void
as_zero_region(paddr_t paddr, unsigned npages)
{
	bzero((void *)PADDR_TO_KVADDR(paddr), npages * PAGE_SIZE);
}

int
as_prepare_load(struct addrspace *as)
{
	KASSERT(as->as_pbase1 == 0);
	KASSERT(as->as_pbase2 == 0);
	KASSERT(as->as_stackpbase == 0);

	dumbvm_can_sleep();

	as->as_pbase1 = getppages(as->as_npages1);
	if (as->as_pbase1 == 0) {
		return ENOMEM;
	}

	as->as_pbase2 = getppages(as->as_npages2);
	if (as->as_pbase2 == 0) {
		return ENOMEM;
	}

	as->as_stackpbase = getppages(DUMBVM_STACKPAGES);
	if (as->as_stackpbase == 0) {
		return ENOMEM;
	}

	as_zero_region(as->as_pbase1, as->as_npages1);
	as_zero_region(as->as_pbase2, as->as_npages2);
	as_zero_region(as->as_stackpbase, DUMBVM_STACKPAGES);

	return 0;
}

int
as_complete_load(struct addrspace *as)
{
	dumbvm_can_sleep();
	(void)as;
	return 0;
}

int
as_define_stack(struct addrspace *as, vaddr_t *stackptr)
{
	KASSERT(as->as_stackpbase != 0);

	*stackptr = USERSTACK;
	return 0;
}

int
as_copy(struct addrspace *old, struct addrspace **ret)
{
	struct addrspace *new;

	dumbvm_can_sleep();

	new = as_create();
	if (new==NULL) {
		return ENOMEM;
	}

	new->as_vbase1 = old->as_vbase1;
	new->as_npages1 = old->as_npages1;
	new->as_vbase2 = old->as_vbase2;
	new->as_npages2 = old->as_npages2;

	/* (Mis)use as_prepare_load to allocate some physical memory. */
	if (as_prepare_load(new)) {
		as_destroy(new);
		return ENOMEM;
	}

	KASSERT(new->as_pbase1 != 0);
	KASSERT(new->as_pbase2 != 0);
	KASSERT(new->as_stackpbase != 0);

	memmove((void *)PADDR_TO_KVADDR(new->as_pbase1),
		(const void *)PADDR_TO_KVADDR(old->as_pbase1),
		old->as_npages1*PAGE_SIZE);

	memmove((void *)PADDR_TO_KVADDR(new->as_pbase2),
		(const void *)PADDR_TO_KVADDR(old->as_pbase2),
		old->as_npages2*PAGE_SIZE);

	memmove((void *)PADDR_TO_KVADDR(new->as_stackpbase),
		(const void *)PADDR_TO_KVADDR(old->as_stackpbase),
		DUMBVM_STACKPAGES*PAGE_SIZE);

	*ret = new;
	return 0;
}
153
kern/arch/mips/vm/ram.c
Normal file
@@ -0,0 +1,153 @@
/*
 * Copyright (c) 2000, 2001, 2002, 2003, 2004, 2005, 2008, 2009
 *	The President and Fellows of Harvard College.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <types.h>
#include <lib.h>
#include <vm.h>
#include <mainbus.h>


vaddr_t firstfree;		/* first free virtual address; set by start.S */

static paddr_t firstpaddr;	/* address of first free physical page */
static paddr_t lastpaddr;	/* one past end of last free physical page */

/*
 * Called very early in system boot to figure out how much physical
 * RAM is available.
 */
void
ram_bootstrap(void)
{
	size_t ramsize;

	/* Get size of RAM. */
	ramsize = mainbus_ramsize();

	/*
	 * This is the same as the last physical address, as long as
	 * we have less than 512 megabytes of memory. If we had more,
	 * we wouldn't be able to access it all through kseg0 and
	 * everything would get a lot more complicated. This is not a
	 * case we are going to worry about.
	 */
	if (ramsize > 512*1024*1024) {
		ramsize = 512*1024*1024;
	}

	lastpaddr = ramsize;

	/*
	 * Get first free virtual address from where start.S saved it.
	 * Convert to physical address.
	 */
	firstpaddr = firstfree - MIPS_KSEG0;

	kprintf("%uk physical memory available\n",
		(lastpaddr-firstpaddr)/1024);
}
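/*
 * Editorial note, not part of the original commit: kseg0 covers the
 * virtual range 0x80000000-0x9fffffff and direct-maps the first 512 MB
 * of physical memory, which is why ramsize is clamped above. The
 * firstpaddr computation is simply the reverse of that mapping,
 *
 *	paddr = kvaddr - MIPS_KSEG0	(MIPS_KSEG0 is 0x80000000)
 *
 * so if start.S left firstfree at, say, 0x80040000, the first stealable
 * physical page would begin at 0x00040000 (256 KB into RAM).
 */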

/*
 * This function is for allocating physical memory prior to VM
 * initialization.
 *
 * The pages it hands back will not be reported to the VM system when
 * the VM system calls ram_getsize(). If it's desired to free up these
 * pages later on after bootup is complete, some mechanism for adding
 * them to the VM system's page management must be implemented.
 * Alternatively, one can do enough VM initialization early so that
 * this function is never needed.
 *
 * Note: while the error return value of 0 is a legal physical address,
 * it's not a legal *allocatable* physical address, because it's the
 * page with the exception handlers on it.
 *
 * This function should not be called once the VM system is initialized,
 * so it is not synchronized.
 */
paddr_t
ram_stealmem(unsigned long npages)
{
	size_t size;
	paddr_t paddr;

	size = npages * PAGE_SIZE;

	if (firstpaddr + size > lastpaddr) {
		return 0;
	}

	paddr = firstpaddr;
	firstpaddr += size;

	return paddr;
}

/*
 * This function is intended to be called by the VM system when it
 * initializes in order to find out what memory it has available to
 * manage. Physical memory begins at physical address 0 and ends with
 * the address returned by this function. We assume that physical
 * memory is contiguous. This is not universally true, but is true on
 * the MIPS platforms we intend to run on.
 *
 * lastpaddr is constant once set by ram_bootstrap(), so this function
 * need not be synchronized.
 *
 * It is recommended, however, that this function be used only to
 * initialize the VM system, after which the VM system should take
 * charge of knowing what memory exists.
 */
paddr_t
ram_getsize(void)
{
	return lastpaddr;
}

/*
 * This function is intended to be called by the VM system when it
 * initializes in order to find out what memory it has available to
 * manage.
 *
 * It can only be called once, and once called ram_stealmem() will
 * no longer work, as that would invalidate the result it returned
 * and lead to multiple things using the same memory.
 *
 * This function should not be called once the VM system is initialized,
 * so it is not synchronized.
 */
paddr_t
ram_getfirstfree(void)
{
	paddr_t ret;

	ret = firstpaddr;
	firstpaddr = lastpaddr = 0;
	return ret;
}
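
/*
 * Editorial sketch, not part of the original commit: a hypothetical VM
 * bootstrap might consume this interface roughly as follows. The names
 * below (examplevm_bootstrap and its locals) are illustrative only, and
 * the block is guarded out so it is never built.
 */
#if 0
static void
examplevm_bootstrap(void)
{
	paddr_t first, last;
	unsigned long npages;

	/* Fetch the size first: ram_getfirstfree() zeroes the bookkeeping. */
	last = ram_getsize();
	first = ram_getfirstfree();	/* ram_stealmem() is unusable after this */
	npages = (last - first) / PAGE_SIZE;

	/* Hand the page range [first, last) to a real page allocator here. */
	(void)npages;
}
#endif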
204
kern/arch/mips/vm/tlb-mips161.S
Normal file
@@ -0,0 +1,204 @@
/*
 * Copyright (c) 2000, 2001, 2002, 2003, 2004, 2005, 2008, 2009
 *	The President and Fellows of Harvard College.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <kern/mips/regdefs.h>
#include <mips/specialreg.h>

/*
 * TLB handling for the MIPS-161.
 *
 * The MIPS-161 uses the simpler MIPS-1 (r2000/r3000) TLB rather
 * than the paired-page TLB of later MIPS models.
 *
 * However, we handle MIPS32 pipeline hazards. If you want to run on
 * a real MIPS-1, change the ssnops to plain nops and check where and
 * how many you need in the matching processor docs.
 *
 * (ssnop means "superscalar nop"; it exists because the pipeline
 * hazards require a fixed number of cycles, and a superscalar CPU can
 * potentially issue arbitrarily many nops in one cycle.)
 */

	.text
	.set noreorder
	.set mips32		/* so we can use ssnop */

/*
 * tlb_random: use the "tlbwr" instruction to write a TLB entry
 * into a (very pseudo-) random slot in the TLB.
 *
 * Pipeline hazard: must wait between setting entryhi/lo and
 * doing the tlbwr. Use two cycles; some processors may vary.
 */
	.globl tlb_random
	.type tlb_random,@function
	.ent tlb_random
tlb_random:
	mtc0 a0, c0_entryhi		/* store the passed entry into the */
	mtc0 a1, c0_entrylo		/* tlb entry registers */
	ssnop				/* wait for pipeline hazard */
	ssnop
	tlbwr				/* do it */
	j ra
	nop
	.end tlb_random

/*
 * tlb_write: use the "tlbwi" instruction to write a TLB entry
 * into a selected slot in the TLB.
 *
 * Pipeline hazard: must wait between setting entryhi/lo and
 * doing the tlbwi. Use two cycles; some processors may vary.
 */
	.text
	.globl tlb_write
	.type tlb_write,@function
	.ent tlb_write
tlb_write:
	mtc0 a0, c0_entryhi		/* store the passed entry into the */
	mtc0 a1, c0_entrylo		/* tlb entry registers */
	sll t0, a2, CIN_INDEXSHIFT	/* shift the passed index into place */
	mtc0 t0, c0_index		/* store the shifted index into the index register */
	ssnop				/* wait for pipeline hazard */
	ssnop
	tlbwi				/* do it */
	j ra
	nop
	.end tlb_write

/*
 * tlb_read: use the "tlbr" instruction to read a TLB entry
 * from a selected slot in the TLB.
 *
 * Pipeline hazard: must wait between setting c0_index and
 * doing the tlbr. Use two cycles; some processors may vary.
 * Similarly, three more cycles before reading c0_entryhi/lo.
 */
	.text
	.globl tlb_read
	.type tlb_read,@function
	.ent tlb_read
tlb_read:
	sll t0, a2, CIN_INDEXSHIFT	/* shift the passed index into place */
	mtc0 t0, c0_index		/* store the shifted index into the index register */
	ssnop				/* wait for pipeline hazard */
	ssnop
	tlbr				/* do it */
	ssnop				/* wait for pipeline hazard */
	ssnop
	ssnop
	mfc0 t0, c0_entryhi		/* get the tlb entry out of the */
	mfc0 t1, c0_entrylo		/* tlb entry registers */
	sw t0, 0(a0)			/* store through the passed pointer */
	j ra
	sw t1, 0(a1)			/* store (in delay slot) */
	.end tlb_read

/*
 * tlb_probe: use the "tlbp" instruction to find the index in the
 * TLB of a TLB entry matching the relevant parts of the one supplied.
 *
 * Pipeline hazard: must wait between setting c0_entryhi/lo and
 * doing the tlbp. Use two cycles; some processors may vary.
 * Similarly, two more cycles before reading c0_index.
 */
	.text
	.globl tlb_probe
	.type tlb_probe,@function
	.ent tlb_probe
tlb_probe:
	mtc0 a0, c0_entryhi		/* store the passed entry into the */
	mtc0 a1, c0_entrylo		/* tlb entry registers */
	ssnop				/* wait for pipeline hazard */
	ssnop
	tlbp				/* do it */
	ssnop				/* wait for pipeline hazard */
	ssnop
	mfc0 t0, c0_index		/* fetch the index back in t0 */

	/*
	 * If the high bit (CIN_P) of c0_index is set, the probe failed.
	 * The high bit is not set <--> c0_index (now in t0) >= 0.
	 */

	bgez t0, 1f			/* did probe succeed? if so, skip forward */
	nop				/* delay slot */
	addi v0, z0, -1			/* set return value to -1 to indicate failure */
	j ra				/* done */
	nop				/* delay slot */

1:
	/* succeeded - get the index field from the index register value */
	andi t1, t0, CIN_INDEX		/* mask off the field */
	j ra				/* done */
	sra v0, t1, CIN_INDEXSHIFT	/* shift it (in delay slot) */
	.end tlb_probe

/*
 * tlb_reset
 *
 * Initialize the TLB. At processor startup, the TLB state is completely
 * undefined. So be sure to avoid creating any duplicates. Also make sure
 * that the initialization entries don't duplicate the INVALID entries
 * defined in tlb.h. (This way you can write the invalid entries in
 * without having to use tlbp to find out if they're going to cause dups.)
 *
 * This function is not defined in tlb.h because it's only called from
 * start.S.
 *
 * Pipeline hazards are as above.
 */
	.text
	.globl tlb_reset
	.type tlb_reset,@function
	.ent tlb_reset
tlb_reset:
	li t0, 0			/* t0 <- tlb index number (shifted) */
	li t1, 0x81000000		/* t1 <- tlb reset vaddr */
1:
	mtc0 $0, c0_entrylo		/* set up proposed tlb entry for reset */
	mtc0 t1, c0_entryhi
	ssnop				/* wait for pipeline hazard */
	ssnop
	tlbp				/* check if it already exists */
	ssnop				/* wait for pipeline hazard */
	ssnop
	mfc0 t2, c0_index
	bgez t2, 1b			/* if it does, loop back */
	addiu t1, t1, 0x1000		/* next vaddr (in delay slot) */
	mtc0 t0, c0_index		/* doesn't exist, set index to write to */
	ssnop				/* wait for pipeline hazard */
	ssnop
	addiu t0, t0, 0x100		/* next tlb index (shifted) */
	bne t0, 0x4000, 1b		/* if it's not the last tlb index, loop */
	tlbwi				/* write tlb entry (in delay slot) */
	j ra				/* done */
	nop				/* delay slot */
	.end tlb_reset