// xv6-65oo2/kernel/riscv.h

// which hart (core) is this?
static inline uint64
r_mhartid()
{
uint64 x;
asm volatile("csrr %0, mhartid" : "=r" (x) );
return x;
}
// Machine Status Register, mstatus
#define MSTATUS_MPP_MASK (3L << 11) // previous mode.
#define MSTATUS_MPP_M (3L << 11)
#define MSTATUS_MPP_S (1L << 11)
#define MSTATUS_MPP_U (0L << 11)
#define MSTATUS_MIE (1L << 3) // machine-mode interrupt enable.
static inline uint64
r_mstatus()
{
uint64 x;
asm volatile("csrr %0, mstatus" : "=r" (x) );
return x;
}
static inline void
w_mstatus(uint64 x)
{
asm volatile("csrw mstatus, %0" : : "r" (x));
}
// machine exception program counter, holds the
// instruction address to which a return from
// exception will go.
static inline void
w_mepc(uint64 x)
{
asm volatile("csrw mepc, %0" : : "r" (x));
}
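// Illustrative sketch (not part of this header): machine-mode boot code
// typically uses mstatus.MPP and mepc together to drop into supervisor
// mode. enter_supervisor() is a hypothetical helper; xv6's real boot
// sequence lives in start.c.
static inline void
enter_supervisor(void (*entry)(void))
{
  uint64 x = r_mstatus();
  x &= ~MSTATUS_MPP_MASK;    // clear the previous-privilege-mode field
  x |= MSTATUS_MPP_S;        // pretend the previous mode was supervisor
  w_mstatus(x);
  w_mepc((uint64)entry);     // mret will jump here...
  asm volatile("mret");      // ...running in supervisor mode
}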
// Supervisor Status Register, sstatus
#define SSTATUS_SPP (1L << 8) // Previous mode, 1=Supervisor, 0=User
#define SSTATUS_SPIE (1L << 5) // Supervisor Previous Interrupt Enable
#define SSTATUS_UPIE (1L << 4) // User Previous Interrupt Enable
#define SSTATUS_SIE (1L << 1) // Supervisor Interrupt Enable
#define SSTATUS_UIE (1L << 0) // User Interrupt Enable
static inline uint64
r_sstatus()
{
uint64 x;
asm volatile("csrr %0, sstatus" : "=r" (x) );
return x;
}
static inline void
w_sstatus(uint64 x)
{
asm volatile("csrw sstatus, %0" : : "r" (x));
}
// Supervisor Interrupt Pending
static inline uint64
r_sip()
{
uint64 x;
asm volatile("csrr %0, sip" : "=r" (x) );
return x;
}
static inline void
w_sip(uint64 x)
{
asm volatile("csrw sip, %0" : : "r" (x));
}
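// Example (illustrative): a trap handler can acknowledge a pending
// supervisor software interrupt by clearing sip's SSIP bit (bit 1):
//   w_sip(r_sip() & ~2);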
// Supervisor Interrupt Enable
#define SIE_SEIE (1L << 9) // external
#define SIE_STIE (1L << 5) // timer
#define SIE_SSIE (1L << 1) // software
static inline uint64
r_sie()
{
uint64 x;
asm volatile("csrr %0, sie" : "=r" (x) );
return x;
}
static inline void
w_sie(uint64 x)
{
asm volatile("csrw sie, %0" : : "r" (x));
}
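// Example (illustrative): supervisor-mode setup typically turns on all
// three interrupt sources at once:
//   w_sie(r_sie() | SIE_SEIE | SIE_STIE | SIE_SSIE);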
// Machine-mode Interrupt Enable
#define MIE_MEIE (1L << 11) // external
#define MIE_MTIE (1L << 7) // timer
#define MIE_MSIE (1L << 3) // software
static inline uint64
r_mie()
{
uint64 x;
asm volatile("csrr %0, mie" : "=r" (x) );
return x;
}
static inline void
w_mie(uint64 x)
{
asm volatile("csrw mie, %0" : : "r" (x));
}
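// Example (illustrative): machine-mode timer setup would enable the
// machine timer interrupt with:
//   w_mie(r_mie() | MIE_MTIE);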
// supervisor exception program counter, holds the
// instruction address to which a return from
// exception will go.
static inline void
w_sepc(uint64 x)
{
asm volatile("csrw sepc, %0" : : "r" (x));
}
static inline uint64
r_sepc()
{
uint64 x;
asm volatile("csrr %0, sepc" : "=r" (x) );
return x;
}
// Machine Exception Delegation
static inline uint64
r_medeleg()
{
uint64 x;
asm volatile("csrr %0, medeleg" : "=r" (x) );
return x;
}
static inline void
w_medeleg(uint64 x)
{
asm volatile("csrw medeleg, %0" : : "r" (x));
}
// Machine Interrupt Delegation
static inline uint64
r_mideleg()
{
uint64 x;
asm volatile("csrr %0, mideleg" : "=r" (x) );
return x;
}
static inline void
w_mideleg(uint64 x)
{
asm volatile("csrw mideleg, %0" : : "r" (x));
}
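// Example (illustrative): to let supervisor mode handle all exceptions and
// interrupts, machine-mode boot code can delegate everything:
//   w_medeleg(0xffff);
//   w_mideleg(0xffff);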
// Supervisor Trap-Vector Base Address
// low two bits are mode.
static inline void
w_stvec(uint64 x)
{
asm volatile("csrw stvec, %0" : : "r" (x));
}
static inline uint64
r_stvec()
{
uint64 x;
asm volatile("csrr %0, stvec" : "=r" (x) );
return x;
}
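// Example (illustrative): because the low two bits of stvec select the
// vectoring mode, the handler address must be 4-byte aligned; writing it
// directly leaves the mode bits 0, i.e. direct mode:
//   w_stvec((uint64)kernelvec);   // kernelvec stands for the trap entry point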
// Machine-mode interrupt vector
static inline void
w_mtvec(uint64 x)
{
asm volatile("csrw mtvec, %0" : : "r" (x));
}
// use riscv's sv39 page table scheme.
#define SATP_SV39 (8L << 60)
#define MAKE_SATP(pagetable) (SATP_SV39 | (((uint64)pagetable) >> 12))
// supervisor address translation and protection;
// holds the address of the page table.
static inline void
w_satp(uint64 x)
{
asm volatile("csrw satp, %0" : : "r" (x));
}
static inline uint64
r_satp()
{
uint64 x;
asm volatile("csrr %0, satp" : "=r" (x) );
return x;
}
// Supervisor Scratch register, for early trap handler in trampoline.S.
static inline void
w_sscratch(uint64 x)
{
asm volatile("csrw sscratch, %0" : : "r" (x));
}
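// Machine-mode Scratch register; xv6's machine-mode timer interrupt
// handler keeps a small per-hart scratch area here.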
static inline void
w_mscratch(uint64 x)
{
asm volatile("csrw mscratch, %0" : : "r" (x));
}
// Supervisor Trap Cause
static inline uint64
r_scause()
{
uint64 x;
asm volatile("csrr %0, scause" : "=r" (x) );
return x;
}
// Supervisor Trap Value
static inline uint64
r_stval()
{
uint64 x;
asm volatile("csrr %0, stval" : "=r" (x) );
return x;
}
// Machine-mode Counter-Enable
static inline void
w_mcounteren(uint64 x)
{
asm volatile("csrw mcounteren, %0" : : "r" (x));
}
static inline uint64
r_mcounteren()
{
uint64 x;
asm volatile("csrr %0, mcounteren" : "=r" (x) );
return x;
}
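// Example (illustrative): setting mcounteren's TM bit (bit 1) lets
// supervisor-mode code read the time CSR without trapping:
//   w_mcounteren(r_mcounteren() | 2);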
// the time CSR, a read-only shadow of the machine-mode timer counter (mtime).
static inline uint64
r_time()
{
uint64 x;
asm volatile("csrr %0, time" : "=r" (x) );
return x;
}
// enable device interrupts
static inline void
intr_on()
{
w_sstatus(r_sstatus() | SSTATUS_SIE);
}
// disable device interrupts
static inline void
intr_off()
{
w_sstatus(r_sstatus() & ~SSTATUS_SIE);
}
// are device interrupts enabled?
static inline int
intr_get()
{
uint64 x = r_sstatus();
return (x & SSTATUS_SIE) != 0;
}
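// Example (illustrative): lock code typically saves and restores the
// interrupt state around a critical section (xv6 wraps this pattern,
// with nesting counts, in push_off()/pop_off() in spinlock.c):
//   int old = intr_get();
//   intr_off();
//   // ... critical section ...
//   if(old)
//     intr_on();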
static inline uint64
r_sp()
{
uint64 x;
asm volatile("mv %0, sp" : "=r" (x) );
return x;
}
// read and write tp, the thread pointer, which holds
// this core's hartid (core number), the index into cpus[].
static inline uint64
r_tp()
{
uint64 x;
asm volatile("mv %0, tp" : "=r" (x) );
return x;
}
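// Example (illustrative): per-CPU code indexes a per-hart array with this
// value; xv6's mycpu() in proc.c does roughly:
//   struct cpu *c = &cpus[r_tp()];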
static inline void
w_tp(uint64 x)
{
asm volatile("mv tp, %0" : : "r" (x));
}
static inline uint64
r_ra()
{
uint64 x;
asm volatile("mv %0, ra" : "=r" (x) );
return x;
}
// flush the TLB.
static inline void
sfence_vma()
{
// the zero, zero means flush all TLB entries.
asm volatile("sfence.vma zero, zero");
}
#define PGSIZE 4096 // bytes per page
#define PGSHIFT 12 // bits of offset within a page
#define PGROUNDUP(sz) (((sz)+PGSIZE-1) & ~(PGSIZE-1))
#define PGROUNDDOWN(a) (((a)) & ~(PGSIZE-1))
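// Example: PGROUNDUP(5000) == 8192 and PGROUNDDOWN(5000) == 4096, since
// 5000 falls between the page boundaries 4096 and 8192.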
#define PTE_V (1L << 0) // valid
#define PTE_R (1L << 1)
#define PTE_W (1L << 2)
#define PTE_X (1L << 3)
#define PTE_U (1L << 4) // 1 -> user can access
// shift a physical address to the right place for a PTE.
#define PA2PTE(pa) ((((uint64)pa) >> 12) << 10)
#define PTE2PA(pte) (((pte) >> 10) << 12)
#define PTE_FLAGS(pte) ((pte) & 0x3FF)
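// Example: a PTE keeps the physical page number in bits 10..53 and the
// flag bits in bits 0..9, so for a page-aligned pa,
//   PTE2PA(PA2PTE(pa)) == pa
// and PTE_FLAGS() discards the page number, keeping only the flags.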
// extract the three 9-bit page table indices from a virtual address.
#define PXMASK 0x1FF // 9 bits
#define PXSHIFT(level) (PGSHIFT+(9*(level)))
#define PX(level, va) ((((uint64) (va)) >> PXSHIFT(level)) & PXMASK)
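// Illustrative sketch (not xv6's actual walk()): a lookup descends the
// three levels of an Sv39 page table with these macros:
//   for(int level = 2; level > 0; level--){
//     pte_t *pte = &pagetable[PX(level, va)];
//     if((*pte & PTE_V) == 0)
//       return 0;                       // or allocate a new page-table page
//     pagetable = (pagetable_t)PTE2PA(*pte);
//   }
//   return &pagetable[PX(0, va)];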
// one beyond the highest possible virtual address.
// MAXVA is actually one bit less than the max allowed by
// Sv39, to avoid having to sign-extend virtual addresses
// that have the high bit set.
#define MAXVA (1L << (9 + 9 + 9 + 12 - 1))
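// i.e. MAXVA == 1L << 38 == 0x4000000000, giving 256 GiB of virtual address space.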
typedef uint64 pte_t;
typedef uint64 *pagetable_t; // 512 PTEs
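// Illustrative sketch (not part of xv6): installing a root page table on
// the current hart combines MAKE_SATP with TLB flushes, roughly as
// kvminithart() does in vm.c. The helper name is hypothetical.
static inline void
example_load_pagetable(pagetable_t pagetable)
{
  // wait for any prior page-table updates to complete
  sfence_vma();
  // point satp at the root page-table page and select Sv39 translation
  w_satp(MAKE_SATP(pagetable));
  // discard stale translations cached from the old page table
  sfence_vma();
}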