Initial Spring 2016 commit.
This commit is contained in:

New file: kern/arch/mips/locore/cache-mips161.S (49 lines)
@@ -0,0 +1,49 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2009
|
||||
* The President and Fellows of Harvard College.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
* 3. Neither the name of the University nor the names of its contributors
|
||||
* may be used to endorse or promote products derived from this software
|
||||
* without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY AND CONTRIBUTORS ``AS IS'' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#include <kern/mips/regdefs.h>
|
||||
|
||||
/*
 * Cache flushing code for the System/161 MIPS variant, which
 * (for now at least) has magically coherent caches. Almost all
 * real MIPS processors require explicit cache control of one
 * form or another; it can be quite a nuisance. (It is particularly
 * nasty on the MIPS-1, that is, r2000/r3000.)
 */

        .text
        .set noreorder

/*
 * void mips_flushicache(void)
 *
 * Flush the instruction cache. On System/161 the caches are
 * coherent, so there is nothing to do; just return to the caller.
 * Clobbers: nothing.
 */
        .globl mips_flushicache
        .type mips_flushicache,@function
        .ent mips_flushicache
mips_flushicache:
        j ra                            /* nothing to do; return */
        nop                             /* branch delay slot */
        .end mips_flushicache
|
New file: kern/arch/mips/locore/exception-mips1.S (355 lines)
@@ -0,0 +1,355 @@
|
||||
/*
|
||||
* Copyright (c) 2000, 2001, 2002, 2003, 2004, 2005, 2008, 2009
|
||||
* The President and Fellows of Harvard College.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
* 3. Neither the name of the University nor the names of its contributors
|
||||
* may be used to endorse or promote products derived from this software
|
||||
* without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY AND CONTRIBUTORS ``AS IS'' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#include <kern/mips/regdefs.h>
|
||||
#include <mips/specialreg.h>
|
||||
|
||||
/*
|
||||
* Entry points for exceptions.
|
||||
*
|
||||
* MIPS-1 (r2000/r3000) style exception handling, with the "rfe"
|
||||
* instruction rather than "eret", and the three sets of status bits.
|
||||
*/
|
||||
|
||||
|
||||
/*
 * Do not allow the assembler to use $1 (at), because we need to be
 * able to save it.
 */
        .set noat
        .set noreorder

/*
 * UTLB exception handler.
 *
 * This code is copied to address 0x80000000, where the MIPS processor
 * automatically invokes it.
 *
 * To avoid colliding with the other exception code, it must not
 * exceed 128 bytes (32 instructions).
 *
 * This is the special entry point for the fast-path TLB refill for
 * faults in the user address space. We don't implement fast-path TLB
 * refill by default. Note that if you do, you either need to make
 * sure the refill code doesn't fault or write extra code in
 * common_exception to tidy up after such faults.
 */

        .text
        .globl mips_utlb_handler
        .type mips_utlb_handler,@function
        .ent mips_utlb_handler
mips_utlb_handler:
        j common_exception              /* Don't need to do anything special */
        nop                             /* Delay slot */
        .globl mips_utlb_end
mips_utlb_end:
        .end mips_utlb_handler
|
||||
|
||||
/*
 * General exception handler.
 *
 * This code is copied to address 0x80000080, where
 * the MIPS processor automatically invokes it.
 */

        .text
        .globl mips_general_handler
        .type mips_general_handler,@function
        .ent mips_general_handler
mips_general_handler:
        j common_exception              /* Don't need to do anything special */
        nop                             /* Delay slot */
        .globl mips_general_end
mips_general_end:
        .end mips_general_handler

/* This keeps gdb from conflating common_exception and mips_general_end */
        nop                             /* padding */
|
||||
|
||||
|
||||
/*
 * Shared exception code for both handlers.
 *
 * Entered with interrupts off (the processor turns them off on trap),
 * k0/k1 free for our use, and all other registers holding the trapped
 * context. Builds a trap frame on the kernel stack, calls
 * mips_trap(struct trapframe *), then restores the context and
 * returns with jr/rfe.
 */

        .text
        .type common_exception,@function
        .ent common_exception
common_exception:
        mfc0 k0, c0_status              /* Get status register */
        andi k0, k0, CST_KUp            /* Check the we-were-in-user-mode bit */
        beq k0, $0, 1f                  /* If clear, from kernel, already have stack */
        nop                             /* delay slot */

        /* Coming from user mode - find kernel stack */
        mfc0 k1, c0_context             /* we keep the CPU number here */
        srl k1, k1, CTX_PTBASESHIFT     /* shift it to get just the CPU number */
        sll k1, k1, 2                   /* shift it back to make an array index */
        lui k0, %hi(cpustacks)          /* get base address of cpustacks[] */
        addu k0, k0, k1                 /* index it */
        move k1, sp                     /* Save previous stack pointer in k1 */
        b 2f                            /* Skip to common code */
        lw sp, %lo(cpustacks)(k0)       /* Load kernel stack pointer (in delay slot) */
1:
        /* Coming from kernel mode - just save previous stuff */
        move k1, sp                     /* Save previous stack in k1 */
2:
        /*
         * At this point:
         *      Interrupts are off. (The processor did this for us.)
         *      k1 contains the old stack pointer.
         *      sp points into the kernel stack.
         *      All other registers are untouched.
         * (curthread is loaded into s7 further down, at label 3,
         * and only when we trapped from user mode.)
         */

        /*
         * Allocate stack space for 37 words to hold the trap frame,
         * plus four more words for a minimal argument block, plus
         * one more for proper (64-bit) stack alignment.
         * 42 words * 4 bytes = 168 bytes.
         */
        addi sp, sp, -168

        /*
         * Save general registers.
         * We exclude k0/k1, which the kernel is free to clobber (and which
         * we already have clobbered), and $0, whose value is fixed.
         *
         * The order here must match mips/include/trapframe.h.
         *
         * gdb disassembles this code to try to figure out what registers
         * are where, and it isn't very bright. So in order to make gdb be
         * able to trace the stack back through here, we play some silly
         * games.
         *
         * In particular:
         *    (1) We store the return address register into the epc slot,
         *        which makes gdb think it's the return address slot. Then
         *        we store the real epc value over that.
         *    (2) We store the current sp into the sp slot, which makes gdb
         *        think it's the stack pointer slot. Then we store the real
         *        value.
         *    (3) gdb also assumes that saved registers in a function are
         *        saved in order. This is why we put epc where it is, and
         *        handle the real value of ra afterwards.
         *    (4) Because gdb will think we're saving k0 and k1, we need to
         *        leave slots for them in the trap frame, even though the
         *        stuff we save there is useless.
         *
         * This logic has not been tested against a recent gdb and has
         * probably bitrotted. Someone(TM) should figure out what gdb
         * currently expects -- or maybe even patch gdb to understand a
         * better form of this that doesn't waste so many cycles.
         */
        sw ra, 160(sp)                  /* dummy for gdb */
        sw s8, 156(sp)                  /* save s8 */
        sw sp, 152(sp)                  /* dummy for gdb */
        sw gp, 148(sp)                  /* save gp */
        sw k1, 144(sp)                  /* dummy for gdb */
        sw k0, 140(sp)                  /* dummy for gdb */

        sw k1, 152(sp)                  /* real saved sp */
        nop                             /* delay slot for store */

        mfc0 k1, c0_epc                 /* Copr.0 reg 13 == PC for exception */
        sw k1, 160(sp)                  /* real saved PC */

        sw t9, 136(sp)
        sw t8, 132(sp)
        sw s7, 128(sp)
        sw s6, 124(sp)
        sw s5, 120(sp)
        sw s4, 116(sp)
        sw s3, 112(sp)
        sw s2, 108(sp)
        sw s1, 104(sp)
        sw s0, 100(sp)
        sw t7, 96(sp)
        sw t6, 92(sp)
        sw t5, 88(sp)
        sw t4, 84(sp)
        sw t3, 80(sp)
        sw t2, 76(sp)
        sw t1, 72(sp)
        sw t0, 68(sp)
        sw a3, 64(sp)
        sw a2, 60(sp)
        sw a1, 56(sp)
        sw a0, 52(sp)
        sw v1, 48(sp)
        sw v0, 44(sp)
        sw AT, 40(sp)

        sw ra, 36(sp)

        /*
         * Save special registers.
         */
        mfhi t0
        mflo t1
        sw t0, 32(sp)
        sw t1, 28(sp)

        /*
         * Save remaining exception context information.
         */

        mfc0 t2, c0_status              /* Copr.0 reg 11 == status */
        sw t2, 20(sp)
        mfc0 t3, c0_vaddr               /* Copr.0 reg 8 == faulting vaddr */
        sw t3, 16(sp)
        mfc0 t4, c0_cause
        sw t4, 24(sp)                   /* Copr.0 reg 13 == exception cause */

        /*
         * Pretend to save $0 for gdb's benefit.
         */
        sw $0, 12(sp)

        /*
         * Load the curthread register if coming from user mode.
         */
        andi k0, t2, CST_KUp            /* Check the we-were-in-user-mode bit */
        beq k0, $0, 3f                  /* If clear, were in kernel, skip ahead */
        nop                             /* delay slot */

        mfc0 k1, c0_context             /* we keep the CPU number here */
        srl k1, k1, CTX_PTBASESHIFT     /* shift it to get just the CPU number */
        sll k1, k1, 2                   /* shift it back to make an array index */
        lui k0, %hi(cputhreads)         /* get base address of cputhreads[] */
        addu k0, k0, k1                 /* index it */
        lw s7, %lo(cputhreads)(k0)      /* Load curthread value */
3:

        /*
         * Load the kernel GP value.
         */
        la gp, _gp

        /*
         * Prepare to call mips_trap(struct trapframe *)
         */

        addiu a0, sp, 16                /* set argument - pointer to the trapframe */
        jal mips_trap                   /* call it */
        nop                             /* delay slot */

        /* Something must be here or gdb doesn't find the stack frame. */
        nop

        /*
         * Now restore stuff and return from the exception.
         * Interrupts should be off.
         */
exception_return:

        /* 16(sp)    no need to restore tf_vaddr */
        lw t0, 20(sp)                   /* load status register value into t0 */
        nop                             /* load delay slot */
        mtc0 t0, c0_status              /* store it back to coprocessor 0 */
        /* 24(sp)    no need to restore tf_cause */

        /* restore special registers */
        lw t1, 28(sp)
        lw t0, 32(sp)
        mtlo t1
        mthi t0

        /* load the general registers */
        lw ra, 36(sp)

        lw AT, 40(sp)
        lw v0, 44(sp)
        lw v1, 48(sp)
        lw a0, 52(sp)
        lw a1, 56(sp)
        lw a2, 60(sp)
        lw a3, 64(sp)
        lw t0, 68(sp)
        lw t1, 72(sp)
        lw t2, 76(sp)
        lw t3, 80(sp)
        lw t4, 84(sp)
        lw t5, 88(sp)
        lw t6, 92(sp)
        lw t7, 96(sp)
        lw s0, 100(sp)
        lw s1, 104(sp)
        lw s2, 108(sp)
        lw s3, 112(sp)
        lw s4, 116(sp)
        lw s5, 120(sp)
        lw s6, 124(sp)
        lw s7, 128(sp)
        lw t8, 132(sp)
        lw t9, 136(sp)

        /* 140(sp)   "saved" k0 was dummy garbage anyway */
        /* 144(sp)   "saved" k1 was dummy garbage anyway */

        lw gp, 148(sp)                  /* restore gp */
        /* 152(sp)   stack pointer - below */
        lw s8, 156(sp)                  /* restore s8 */
        lw k0, 160(sp)                  /* fetch exception return PC into k0 */

        lw sp, 152(sp)                  /* fetch saved sp (must be last) */

        /* done */
        jr k0                           /* jump back */
        rfe                             /* in delay slot */
        .end common_exception
|
||||
|
||||
/*
 * Code to enter user mode for the first time.
 * Does not return.
 *
 * This is called from mips_usermode().
 * Interrupts on this processor should be off.
 */

        .text
        .globl asm_usermode
        .type asm_usermode,@function
        .ent asm_usermode
asm_usermode:
        /*
         * a0 is the address of a trapframe to use for exception "return".
         * It's allocated on our stack.
         *
         * Move it to the stack pointer - we don't need the actual stack
         * position any more. (When we come back from usermode, cpustacks[]
         * will be used to reinitialize our stack pointer, and that was
         * set by mips_usermode.)
         *
         * Then just jump to the exception return code above.
         * The -16 offset undoes the +16 that common_exception applied
         * when it passed the trapframe pointer to mips_trap.
         */

        j exception_return
        addiu sp, a0, -16               /* in delay slot */
        .end asm_usermode
|
New file: kern/arch/mips/locore/trap.c (438 lines)
@@ -0,0 +1,438 @@
|
||||
/*
|
||||
* Copyright (c) 2000, 2001, 2002, 2003, 2004, 2005, 2008, 2009
|
||||
* The President and Fellows of Harvard College.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
* 3. Neither the name of the University nor the names of its contributors
|
||||
* may be used to endorse or promote products derived from this software
|
||||
* without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY AND CONTRIBUTORS ``AS IS'' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#include <types.h>
|
||||
#include <signal.h>
|
||||
#include <lib.h>
|
||||
#include <mips/specialreg.h>
|
||||
#include <mips/trapframe.h>
|
||||
#include <cpu.h>
|
||||
#include <spl.h>
|
||||
#include <thread.h>
|
||||
#include <current.h>
|
||||
#include <vm.h>
|
||||
#include <mainbus.h>
|
||||
#include <syscall.h>
|
||||
|
||||
|
||||
/* in exception-*.S */
|
||||
extern __DEAD void asm_usermode(struct trapframe *tf);
|
||||
|
||||
/* called only from assembler, so not declared in a header */
|
||||
void mips_trap(struct trapframe *tf);
|
||||
|
||||
|
||||
/* Names for trap codes */
|
||||
#define NTRAPCODES 13
|
||||
static const char *const trapcodenames[NTRAPCODES] = {
|
||||
"Interrupt",
|
||||
"TLB modify trap",
|
||||
"TLB miss on load",
|
||||
"TLB miss on store",
|
||||
"Address error on load",
|
||||
"Address error on store",
|
||||
"Bus error on code",
|
||||
"Bus error on data",
|
||||
"System call",
|
||||
"Break instruction",
|
||||
"Illegal instruction",
|
||||
"Coprocessor unusable",
|
||||
"Arithmetic overflow",
|
||||
};
|
||||
|
||||
/*
|
||||
* Function called when user-level code hits a fatal fault.
|
||||
*/
|
||||
static
|
||||
void
|
||||
kill_curthread(vaddr_t epc, unsigned code, vaddr_t vaddr)
|
||||
{
|
||||
int sig = 0;
|
||||
|
||||
KASSERT(code < NTRAPCODES);
|
||||
switch (code) {
|
||||
case EX_IRQ:
|
||||
case EX_IBE:
|
||||
case EX_DBE:
|
||||
case EX_SYS:
|
||||
/* should not be seen */
|
||||
KASSERT(0);
|
||||
sig = SIGABRT;
|
||||
break;
|
||||
case EX_MOD:
|
||||
case EX_TLBL:
|
||||
case EX_TLBS:
|
||||
sig = SIGSEGV;
|
||||
break;
|
||||
case EX_ADEL:
|
||||
case EX_ADES:
|
||||
sig = SIGBUS;
|
||||
break;
|
||||
case EX_BP:
|
||||
sig = SIGTRAP;
|
||||
break;
|
||||
case EX_RI:
|
||||
sig = SIGILL;
|
||||
break;
|
||||
case EX_CPU:
|
||||
sig = SIGSEGV;
|
||||
break;
|
||||
case EX_OVF:
|
||||
sig = SIGFPE;
|
||||
break;
|
||||
}
|
||||
|
||||
/*
|
||||
* You will probably want to change this.
|
||||
*/
|
||||
|
||||
kprintf("Fatal user mode trap %u sig %d (%s, epc 0x%x, vaddr 0x%x)\n",
|
||||
code, sig, trapcodenames[code], epc, vaddr);
|
||||
panic("I don't know how to handle this\n");
|
||||
}
|
||||
|
||||
/*
|
||||
* General trap (exception) handling function for mips.
|
||||
* This is called by the assembly-language exception handler once
|
||||
* the trapframe has been set up.
|
||||
*/
|
||||
void
|
||||
mips_trap(struct trapframe *tf)
|
||||
{
|
||||
uint32_t code;
|
||||
/*bool isutlb; -- not used */
|
||||
bool iskern;
|
||||
int spl;
|
||||
|
||||
/* The trap frame is supposed to be 37 registers long. */
|
||||
KASSERT(sizeof(struct trapframe)==(37*4));
|
||||
|
||||
/*
|
||||
* Extract the exception code info from the register fields.
|
||||
*/
|
||||
code = (tf->tf_cause & CCA_CODE) >> CCA_CODESHIFT;
|
||||
/*isutlb = (tf->tf_cause & CCA_UTLB) != 0;*/
|
||||
iskern = (tf->tf_status & CST_KUp) == 0;
|
||||
|
||||
KASSERT(code < NTRAPCODES);
|
||||
|
||||
/* Make sure we haven't run off our stack */
|
||||
if (curthread != NULL && curthread->t_stack != NULL) {
|
||||
KASSERT((vaddr_t)tf > (vaddr_t)curthread->t_stack);
|
||||
KASSERT((vaddr_t)tf < (vaddr_t)(curthread->t_stack
|
||||
+ STACK_SIZE));
|
||||
}
|
||||
|
||||
/* Interrupt? Call the interrupt handler and return. */
|
||||
if (code == EX_IRQ) {
|
||||
int old_in;
|
||||
bool doadjust;
|
||||
|
||||
old_in = curthread->t_in_interrupt;
|
||||
curthread->t_in_interrupt = 1;
|
||||
|
||||
/*
|
||||
* The processor has turned interrupts off; if the
|
||||
* currently recorded interrupt state is interrupts on
|
||||
* (spl of 0), adjust the recorded state to match, and
|
||||
* restore after processing the interrupt.
|
||||
*
|
||||
* How can we get an interrupt if the recorded state
|
||||
* is interrupts off? Well, as things currently stand
|
||||
* when the CPU finishes idling it flips interrupts on
|
||||
* and off to allow things to happen, but leaves
|
||||
* curspl high while doing so.
|
||||
*
|
||||
* While we're here, assert that the interrupt
|
||||
* handling code hasn't leaked a spinlock or an
|
||||
* splhigh().
|
||||
*/
|
||||
|
||||
if (curthread->t_curspl == 0) {
|
||||
KASSERT(curthread->t_curspl == 0);
|
||||
KASSERT(curthread->t_iplhigh_count == 0);
|
||||
curthread->t_curspl = IPL_HIGH;
|
||||
curthread->t_iplhigh_count++;
|
||||
doadjust = true;
|
||||
}
|
||||
else {
|
||||
doadjust = false;
|
||||
}
|
||||
|
||||
mainbus_interrupt(tf);
|
||||
|
||||
if (doadjust) {
|
||||
KASSERT(curthread->t_curspl == IPL_HIGH);
|
||||
KASSERT(curthread->t_iplhigh_count == 1);
|
||||
curthread->t_iplhigh_count--;
|
||||
curthread->t_curspl = 0;
|
||||
}
|
||||
|
||||
curthread->t_in_interrupt = old_in;
|
||||
goto done2;
|
||||
}
|
||||
|
||||
/*
|
||||
* The processor turned interrupts off when it took the trap.
|
||||
*
|
||||
* While we're in the kernel, and not actually handling an
|
||||
* interrupt, restore the interrupt state to where it was in
|
||||
* the previous context, which may be low (interrupts on).
|
||||
*
|
||||
* Do this by forcing splhigh(), which may do a redundant
|
||||
* cpu_irqoff() but forces the stored MI interrupt state into
|
||||
* sync, then restoring the previous state.
|
||||
*/
|
||||
spl = splhigh();
|
||||
splx(spl);
|
||||
|
||||
/* Syscall? Call the syscall handler and return. */
|
||||
if (code == EX_SYS) {
|
||||
/* Interrupts should have been on while in user mode. */
|
||||
KASSERT(curthread->t_curspl == 0);
|
||||
KASSERT(curthread->t_iplhigh_count == 0);
|
||||
|
||||
DEBUG(DB_SYSCALL, "syscall: #%d, args %x %x %x %x\n",
|
||||
tf->tf_v0, tf->tf_a0, tf->tf_a1, tf->tf_a2, tf->tf_a3);
|
||||
|
||||
syscall(tf);
|
||||
goto done;
|
||||
}
|
||||
|
||||
/*
|
||||
* Ok, it wasn't any of the really easy cases.
|
||||
* Call vm_fault on the TLB exceptions.
|
||||
* Panic on the bus error exceptions.
|
||||
*/
|
||||
switch (code) {
|
||||
case EX_MOD:
|
||||
if (vm_fault(VM_FAULT_READONLY, tf->tf_vaddr)==0) {
|
||||
goto done;
|
||||
}
|
||||
break;
|
||||
case EX_TLBL:
|
||||
if (vm_fault(VM_FAULT_READ, tf->tf_vaddr)==0) {
|
||||
goto done;
|
||||
}
|
||||
break;
|
||||
case EX_TLBS:
|
||||
if (vm_fault(VM_FAULT_WRITE, tf->tf_vaddr)==0) {
|
||||
goto done;
|
||||
}
|
||||
break;
|
||||
case EX_IBE:
|
||||
case EX_DBE:
|
||||
/*
|
||||
* This means you loaded invalid TLB entries, or
|
||||
* touched invalid parts of the direct-mapped
|
||||
* segments. These are serious kernel errors, so
|
||||
* panic.
|
||||
*
|
||||
* The MIPS won't even tell you what invalid address
|
||||
* caused the bus error.
|
||||
*/
|
||||
panic("Bus error exception, PC=0x%x\n", tf->tf_epc);
|
||||
break;
|
||||
}
|
||||
|
||||
/*
|
||||
* If we get to this point, it's a fatal fault - either it's
|
||||
* one of the other exceptions, like illegal instruction, or
|
||||
* it was a page fault we couldn't handle.
|
||||
*/
|
||||
|
||||
if (!iskern) {
|
||||
/*
|
||||
* Fatal fault in user mode.
|
||||
* Kill the current user process.
|
||||
*/
|
||||
kill_curthread(tf->tf_epc, code, tf->tf_vaddr);
|
||||
goto done;
|
||||
}
|
||||
|
||||
/*
|
||||
* Fatal fault in kernel mode.
|
||||
*
|
||||
* If pcb_badfaultfunc is set, we do not panic; badfaultfunc is
|
||||
* set by copyin/copyout and related functions to signify that
|
||||
* the addresses they're accessing are userlevel-supplied and
|
||||
* not trustable. What we actually want to do is resume
|
||||
* execution at the function pointed to by badfaultfunc. That's
|
||||
* going to be "copyfail" (see copyinout.c), which longjmps
|
||||
* back to copyin/copyout or wherever and returns EFAULT.
|
||||
*
|
||||
* Note that we do not just *call* this function, because that
|
||||
* won't necessarily do anything. We want the control flow
|
||||
* that is currently executing in copyin (or whichever), and
|
||||
* is stopped while we process the exception, to *teleport* to
|
||||
* copyfail.
|
||||
*
|
||||
* This is accomplished by changing tf->tf_epc and returning
|
||||
* from the exception handler.
|
||||
*/
|
||||
|
||||
if (curthread != NULL &&
|
||||
curthread->t_machdep.tm_badfaultfunc != NULL) {
|
||||
tf->tf_epc = (vaddr_t) curthread->t_machdep.tm_badfaultfunc;
|
||||
goto done;
|
||||
}
|
||||
|
||||
/*
|
||||
* Really fatal kernel-mode fault.
|
||||
*/
|
||||
|
||||
kprintf("panic: Fatal exception %u (%s) in kernel mode\n", code,
|
||||
trapcodenames[code]);
|
||||
kprintf("panic: EPC 0x%x, exception vaddr 0x%x\n",
|
||||
tf->tf_epc, tf->tf_vaddr);
|
||||
|
||||
panic("I can't handle this... I think I'll just die now...\n");
|
||||
|
||||
done:
|
||||
/*
|
||||
* Turn interrupts off on the processor, without affecting the
|
||||
* stored interrupt state.
|
||||
*/
|
||||
cpu_irqoff();
|
||||
done2:
|
||||
|
||||
/*
|
||||
* The boot thread can get here (e.g. on interrupt return) but
|
||||
* since it doesn't go to userlevel, it can't be returning to
|
||||
* userlevel, so there's no need to set cputhreads[] and
|
||||
* cpustacks[]. Just return.
|
||||
*/
|
||||
if (curthread->t_stack == NULL) {
|
||||
return;
|
||||
}
|
||||
|
||||
cputhreads[curcpu->c_number] = (vaddr_t)curthread;
|
||||
cpustacks[curcpu->c_number] = (vaddr_t)curthread->t_stack + STACK_SIZE;
|
||||
|
||||
/*
|
||||
* This assertion will fail if either
|
||||
* (1) curthread->t_stack is corrupted, or
|
||||
* (2) the trap frame is somehow on the wrong kernel stack.
|
||||
*
|
||||
* If cpustacks[] is corrupted, the next trap back to the
|
||||
* kernel will (most likely) hang the system, so it's better
|
||||
* to find out now.
|
||||
*/
|
||||
KASSERT(SAME_STACK(cpustacks[curcpu->c_number]-1, (vaddr_t)tf));
|
||||
}
|
||||
|
||||
/*
|
||||
* Function for entering user mode.
|
||||
*
|
||||
* This should not be used by threads returning from traps - they
|
||||
* should just return from mips_trap(). It should be used by threads
|
||||
* entering user mode for the first time - whether the child thread in
|
||||
* a fork(), or into a brand-new address space after exec(), or when
|
||||
* starting the first userlevel program.
|
||||
*
|
||||
* It works by jumping into the exception return code.
|
||||
*
|
||||
* mips_usermode is common code for this. It cannot usefully be called
|
||||
* outside the mips port, but should be called from one of the
|
||||
* following places:
|
||||
* - enter_new_process, for use by exec and equivalent.
|
||||
* - enter_forked_process, in syscall.c, for use by fork.
|
||||
*/
|
||||
void
|
||||
mips_usermode(struct trapframe *tf)
|
||||
{
|
||||
|
||||
/*
|
||||
* Interrupts should be off within the kernel while entering
|
||||
* user mode. However, while in user mode, interrupts should
|
||||
* be on. To interact properly with the spl-handling logic
|
||||
* above, we explicitly call spl0() and then call cpu_irqoff().
|
||||
*/
|
||||
spl0();
|
||||
cpu_irqoff();
|
||||
|
||||
cputhreads[curcpu->c_number] = (vaddr_t)curthread;
|
||||
cpustacks[curcpu->c_number] = (vaddr_t)curthread->t_stack + STACK_SIZE;
|
||||
|
||||
/*
|
||||
* This assertion will fail if either
|
||||
* (1) cpustacks[] is corrupted, or
|
||||
* (2) the trap frame is not on our own kernel stack, or
|
||||
* (3) the boot thread tries to enter user mode.
|
||||
*
|
||||
* If cpustacks[] is corrupted, the next trap back to the
|
||||
* kernel will (most likely) hang the system, so it's better
|
||||
* to find out now.
|
||||
*
|
||||
* It's necessary for the trap frame used here to be on the
|
||||
* current thread's own stack. It cannot correctly be on
|
||||
* either another thread's stack or in the kernel heap.
|
||||
* (Exercise: why?)
|
||||
*/
|
||||
KASSERT(SAME_STACK(cpustacks[curcpu->c_number]-1, (vaddr_t)tf));
|
||||
|
||||
/*
|
||||
* This actually does it. See exception-*.S.
|
||||
*/
|
||||
asm_usermode(tf);
|
||||
}
|
||||
|
||||
/*
|
||||
* enter_new_process: go to user mode after loading an executable.
|
||||
*
|
||||
* Performs the necessary initialization so that the user program will
|
||||
* get the arguments supplied in argc/argv (note that argv must be a
|
||||
* user-level address) and the environment pointer env (ditto), and
|
||||
* begin executing at the specified entry point. The stack pointer is
|
||||
* initialized from the stackptr argument. Note that passing argc/argv
|
||||
* may use additional stack space on some other platforms (but not on
|
||||
* mips).
|
||||
*
|
||||
* Unless you implement execve() that passes environments around, just
|
||||
* pass NULL for the environment.
|
||||
*
|
||||
* Works by creating an ersatz trapframe.
|
||||
*/
|
||||
void
|
||||
enter_new_process(int argc, userptr_t argv, userptr_t env,
|
||||
vaddr_t stack, vaddr_t entry)
|
||||
{
|
||||
struct trapframe tf;
|
||||
|
||||
bzero(&tf, sizeof(tf));
|
||||
|
||||
tf.tf_status = CST_IRQMASK | CST_IEp | CST_KUp;
|
||||
tf.tf_epc = entry;
|
||||
tf.tf_a0 = argc;
|
||||
tf.tf_a1 = (vaddr_t)argv;
|
||||
tf.tf_a2 = (vaddr_t)env;
|
||||
tf.tf_sp = stack;
|
||||
|
||||
mips_usermode(&tf);
|
||||
}
|
Reference in New Issue
Block a user