page table allocation functionality
This commit is contained in:
parent
fad7eaea3a
commit
b94c694061
2 changed files with 55 additions and 17 deletions
|
|
@ -10,6 +10,9 @@ void init_gdt();
|
|||
void init_idt();
|
||||
|
||||
// --- paging ---
|
||||
//
|
||||
// We don't use the PAT functionality. We set all bits (writethrough, cache_disable, PAT) to 0.
|
||||
// For access protection we only use MTRRs.
|
||||
|
||||
void init_paging(void);
|
||||
|
||||
|
|
@ -62,6 +65,8 @@ bool pt_map_range(struct vpn virt, struct ppn phys, uint64_t num_pages,
|
|||
bool pt_map_range_current(struct vpn virt, struct ppn phys, uint64_t num_pages,
|
||||
bool writable, bool supervisor, bool global);
|
||||
|
||||
void pt_free(struct ppn root);
|
||||
|
||||
struct mem_range {
|
||||
struct vpn vpn_start;
|
||||
struct pt_entry entry_start;
|
||||
|
|
|
|||
|
|
@ -3,7 +3,8 @@
|
|||
#include "x86_64/asm.h"
|
||||
#include "x86_64/mem.h"
|
||||
|
||||
#include "std.h" // remove later, this is only for interrupt handler
|
||||
#include "std.h"
|
||||
#include "ram.h"
|
||||
|
||||
static uint64_t gdt[3];
|
||||
|
||||
|
|
@ -102,6 +103,24 @@ void init_idt() {
|
|||
|
||||
// --- paging ---
|
||||
|
||||
// Sanity-check the paging state the early boot code left us in.
// In 64-bit mode both CR0.PG and CR4.PAE must already be set; this
// function only verifies, it does not change any control registers.
// Matches the prototype `void init_paging(void);` in the header
// (the old `()` definition left the parameter list unspecified pre-C23).
void init_paging(void) {
    // assert paging enabled (CR0.PG, bit 31)
    ASSERT((get_cr0() >> 31) & 1ull);
    // assert PAE enabled (CR4.PAE, bit 5 — can never be clear in 64-bit mode)
    ASSERT((get_cr4() >> 5) & 1ull);
    // PSE (CR4 bit 4) is ignored in long mode, so it is deliberately not checked
    // ASSERT((get_cr4() >> 4) & 1ull);

    // TODO check that everything is setup correctly
    // - CR0.WP
    // - Long-Mode Active (EFER.LMA)
    // - PAT index 000 points to default strategy
    // - See if NX bits are used and decide if we want to
    // - See if MPK is used and decide if we want to
    // - SMEP/SMAP?
    // - ...
}
|
||||
|
||||
#define MSR_PAT 0x277
|
||||
|
||||
enum page_attr get_pa(uint8_t index) {
|
||||
|
|
@ -117,21 +136,6 @@ enum page_attr get_pa(uint8_t index) {
|
|||
return value;
|
||||
}
|
||||
|
||||
// Program PAT entry `index` (0..7) with memory type `attr`.
// Each PAT entry occupies one byte of the IA32_PAT MSR; only the low
// 3 bits of that byte carry the type, so just those bits are replaced.
void set_pa(uint8_t index, enum page_attr attr) {
    ASSERT(index < 8);
    // accept only the defined PAT encodings
    bool valid = (attr == PA_UC)
              || (attr == PA_WC)
              || (attr == PA_WT)
              || (attr == PA_WP)
              || (attr == PA_WB)
              || (attr == PA_UCMINUS);
    ASSERT(valid);

    unsigned shift = index << 3; // byte offset of this entry within the MSR
    uint64_t pat = readmsr(MSR_PAT);
    // clear the 3-bit type field, then insert the new encoding
    pat = (pat & ~(0x7ull << shift)) | ((uint64_t)attr << shift);
    writemsr(MSR_PAT, pat);
}
|
||||
|
||||
uint64_t pt_entry_pack(const struct pt_entry *ent_in) {
|
||||
uint64_t retval = (uint64_t)ent_in->present
|
||||
| ((uint64_t)ent_in->writable) << 1
|
||||
|
|
@ -257,6 +261,9 @@ static struct ppn get_cr3_ppn(void) {
|
|||
|
||||
#define NUM_LEVELS 4
|
||||
|
||||
// TODO for inspection, we need to accumulate permissions over all levels
|
||||
// (upper supervisor bit will mean lower PTs are also supervisor, even without their bit set)
|
||||
|
||||
static uint64_t *pt_get_leaf_ptr(struct vpn vpn, struct ppn cr3, bool alloc) {
|
||||
uint64_t va_value = va_to_value(va_from_vpn(vpn));
|
||||
int level = NUM_LEVELS;
|
||||
|
|
@ -277,7 +284,22 @@ static uint64_t *pt_get_leaf_ptr(struct vpn vpn, struct ppn cr3, bool alloc) {
|
|||
|
||||
if (!ent.present) {
|
||||
if (alloc) {
|
||||
TODO();
|
||||
bool success = ram_alloc_frame(&ent.ppn, RAM_PAGE_NORMAL);
|
||||
ASSERT(success);
|
||||
ent.level = level;
|
||||
ent.present = true;
|
||||
// maximum privileges in upper level
|
||||
ent.writable = true;
|
||||
ent.supervisor = false;
|
||||
ent.writethrough = false;
|
||||
ent.cache_disable = false;
|
||||
ent.accessed = false;
|
||||
ent.dirty = false;
|
||||
ent.global = false; // TODO should this be true for lower global mappings? probably
|
||||
ent.hugepage = false;
|
||||
ent.pat_bit = false;
|
||||
// add into page table
|
||||
*entry_ptr = pt_entry_pack(&ent);
|
||||
} else {
|
||||
return NULL;
|
||||
}
|
||||
|
|
@ -329,6 +351,11 @@ bool pt_map_single(struct vpn virt, struct ppn phys,
|
|||
// disable caching bits because we want to use MTRRs
|
||||
ent.pat_bit = false;
|
||||
|
||||
// additional sanity check to avoid double mapping
|
||||
struct pt_entry ent;
|
||||
pt_entry_unpack(*leaf_ptr, 1, &ent);
|
||||
ASSERT(!ent.present);
|
||||
|
||||
*leaf_ptr = pt_entry_pack(&ent);
|
||||
return true;
|
||||
}
|
||||
|
|
@ -345,6 +372,7 @@ bool pt_map_range(struct vpn virt, struct ppn phys, uint64_t num_pages,
|
|||
{
|
||||
for (uint64_t i = 0; i < num_pages; i++) {
|
||||
// TODO error handling: what to do if it fails in the middle?
|
||||
// TODO huge pages
|
||||
pt_map_single(virt, phys, writable, supervisor, global, cr3);
|
||||
virt = vpn_from_pagenum(vpn_to_pagenum(virt) + 1);
|
||||
phys = ppn_from_pagenum(ppn_to_pagenum(phys) + 1);
|
||||
|
|
@ -358,6 +386,11 @@ bool pt_map_range_current(struct vpn virt, struct ppn phys, uint64_t num_pages,
|
|||
return pt_map_range(virt, phys, num_pages, writable, supervisor, global, get_cr3_ppn());
|
||||
}
|
||||
|
||||
// Free the page-table tree rooted at `root`.
// Unimplemented stub: TODO() fires if this is ever called.
void pt_free(struct ppn root) {
    // this assumes single ownership (presumably: sub-tables are not shared
    // with any other root — TODO confirm once this is implemented)
    TODO();
}
|
||||
|
||||
// --- range finder ---
|
||||
|
||||
void mem_range_print(const struct mem_range *mr) {
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue