change interfaces to get ranges into a buffer instead of printing them

uosfz 2025-03-16 01:13:48 +01:00
parent 3932df92ad
commit 84f80b9029
Signed by: uosfz
SSH key fingerprint: SHA256:FlktuluyhTQg3jHZNLKwxOOC5hbfrUXM0tz3IA3lGJo
3 changed files with 105 additions and 79 deletions

Changed file 1 of 3

@@ -29,6 +29,21 @@ void pt_entry_unpack(uint64_t ent_in, struct pt_entry *ent_out);
 bool pt_translate(struct va va, struct ppn cr3, struct pa *pa_out);
 bool pt_translate_current(struct va va, struct pa *pa_out);
 void pt_entry_print(const struct pt_entry *ent);
-void pt_print_ranges();
+
+struct mem_range {
+    struct vpn vpn_start;
+    struct pt_entry entry_start;
+    uint64_t npages;
+};
+
+struct mem_range_buf {
+    struct mem_range *ptr;
+    uint64_t next_entry;
+    uint64_t num_entries;
+};
+
+void mem_range_print(const struct mem_range *mr);
+void pt_get_ranges(struct mem_range_buf *buf_out);
 
 #endif
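For orientation, here is a minimal sketch of how a caller might use the buffer-based interface declared above. The wrapper function dump_ranges and the capacity of 16 are illustrative only; the kernel's own use in _start (second changed file) follows the same pattern. The declarations above are assumed to be in scope.

static struct mem_range ranges[16];            // illustrative capacity

static void dump_ranges(void) {
    struct mem_range_buf buf = { .ptr = ranges, .next_entry = 0, .num_entries = 16 };
    pt_get_ranges(&buf);                       // walk the current page tables into the buffer
    for (uint64_t i = 0; i < buf.next_entry; i++) {
        mem_range_print(&buf.ptr[i]);          // one contiguous mapping per entry
    }
}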

Changed file 2 of 3

@@ -77,6 +77,8 @@ void check_initrd() {
     putln();
 }
 
+struct mem_range range_ptr[10];
+
 /******************************************
  * Entry point, called by BOOTBOOT Loader *
  ******************************************/
@@ -134,7 +136,11 @@ void _start() {
     __asm__("int $0x80" ::);
     printf("Test after interrupt\n");
 
-    pt_print_ranges();
+    struct mem_range_buf buf_out = { .ptr = range_ptr, .next_entry = 0, .num_entries = 10 };
+    pt_get_ranges(&buf_out);
+    for (uint64_t i = 0; i < buf_out.next_entry; i++) {
+        mem_range_print(&buf_out.ptr[i]);
+    }
 
     // get mmapents
     for (MMapEnt *mmap_ent = &bootboot.mmap;

Changed file 3 of 3

@@ -145,79 +145,6 @@ void pt_entry_print(const struct pt_entry *ent) {
     printf("}\n");
 }
 
-#define CR4_LA57 12
-
-static struct vpn arange_vpn_start;
-static struct pt_entry arange_entry_start;
-static uint64_t arange_npages = 0;
-
-static void arange_print() {
-    uint64_t virt_canonical = va_to_canonical(va_from_vpn(arange_vpn_start));
-    uint64_t phys_start = pa_to_value(pa_from_ppn(arange_entry_start.ppn));
-    printf("virt: %p .. %p\n",
-           virt_canonical,
-           virt_canonical + (arange_npages << 12));
-    printf("phys: %p .. %p\n",
-           phys_start,
-           phys_start + (arange_npages << 12));
-    printf("attr: %c, %c, %s, %s, %s | npages: %lu, size: %lu]\n\n",
-           arange_entry_start.writable ? 'w' : 'r',
-           arange_entry_start.supervisor ? 's' : 'u',
-           arange_entry_start.writethrough ? "wt" : "wb",
-           arange_entry_start.cache_disable ? "cached:n" : "cached:y",
-           arange_entry_start.global ? "global:y" : "global:n",
-           arange_npages,
-           arange_npages << 12);
-}
-
-#define LOWEST_LEVEL(depth, num_levels) ((depth) == (num_levels) - 1)
-#define LEAF(depth, num_levels, ent) (LOWEST_LEVEL(depth, num_levels) || (ent).page_attr_table_low)
-
-static void pt_print_ranges_rec(struct ppn ppn, int depth, int num_levels, uint64_t virt_prev) {
-    struct pt_entry ent;
-    for (uint64_t i = 0; i < 512; i++) {
-        uint64_t entry = *(uint64_t*)pa_to_pointer(pa_from_ppn_with_offset(ppn, i * 8));
-        pt_entry_unpack(entry, &ent);
-        if (!ent.present) {
-            continue;
-        }
-        uint64_t virt_part = i << (12 + 9*(num_levels - depth - 1));
-        uint64_t virt_new = virt_prev | virt_part;
-        if (LEAF(depth, num_levels, ent)) {
-            struct vpn curr_vpn = vpn_from_aligned_va(va_from_value(virt_new));
-            // for huge pages this is > 1
-            uint64_t num_pages_covered = 1ull << (9 * (num_levels - depth - 1));
-            if (arange_npages == 0) {
-                arange_vpn_start = curr_vpn;
-                arange_entry_start = ent;
-                arange_npages = num_pages_covered;
-            } else {
-                if (vpn_to_pagenum(curr_vpn) == vpn_to_pagenum(arange_vpn_start) + arange_npages
-                    && ppn_to_pagenum(ent.ppn) == ppn_to_pagenum(arange_entry_start.ppn) + arange_npages
-                    && ent.writable == arange_entry_start.writable
-                    && ent.supervisor == arange_entry_start.supervisor
-                    && ent.writethrough == arange_entry_start.writethrough
-                    && ent.cache_disable == arange_entry_start.cache_disable
-                    && ent.global == arange_entry_start.global)
-                    // TODO should PAT also be same?
-                {
-                    arange_npages += num_pages_covered;
-                } else {
-                    // print last range and start new one
-                    arange_print();
-                    arange_vpn_start = vpn_from_aligned_va(va_from_value(virt_new));
-                    arange_entry_start = ent;
-                    arange_npages = num_pages_covered;
-                }
-            }
-        } else {
-            pt_print_ranges_rec(ent.ppn, depth + 1, num_levels, virt_new);
-        }
-    }
-}
-
 static uint64_t *pt_get_leaf_ptr(struct vpn vpn, struct ppn cr3, int num_levels, bool alloc) {
     ASSERT(num_levels == 4);
@@ -306,7 +233,84 @@ bool pt_map_range(struct vpn virt, struct ppn phys, uint64_t num_pages,
     return true;
 }
 
-void pt_print_ranges() {
+// --- range finder ---
+
+static struct mem_range curr_range;
+
+void mem_range_print(const struct mem_range *mr) {
+    uint64_t virt_canonical = va_to_canonical(va_from_vpn(mr->vpn_start));
+    uint64_t phys_start = pa_to_value(pa_from_ppn(mr->entry_start.ppn));
+    printf("virt: %p .. %p\n",
+           virt_canonical,
+           virt_canonical + (mr->npages << 12));
+    printf("phys: %p .. %p\n",
+           phys_start,
+           phys_start + (mr->npages << 12));
+    printf("attr: %c, %c, %s, %s, %s | npages: %lu, size: %lu\n\n",
+           mr->entry_start.writable ? 'w' : 'r',
+           mr->entry_start.supervisor ? 's' : 'u',
+           mr->entry_start.writethrough ? "wt" : "wb",
+           mr->entry_start.cache_disable ? "cached:n" : "cached:y",
+           mr->entry_start.global ? "global:y" : "global:n",
+           mr->npages,
+           mr->npages << 12);
+}
+
+#define LOWEST_LEVEL(depth, num_levels) ((depth) == (num_levels) - 1)
+#define LEAF(depth, num_levels, ent) (LOWEST_LEVEL(depth, num_levels) || (ent).page_attr_table_low)
+
+static void pt_get_ranges_rec(struct ppn ppn, int depth, int num_levels, uint64_t virt_prev,
+                              struct mem_range_buf *buf_out)
+{
+    struct pt_entry ent;
+    for (uint64_t i = 0; i < 512; i++) {
+        uint64_t entry = *(uint64_t*)pa_to_pointer(pa_from_ppn_with_offset(ppn, i << 3));
+        pt_entry_unpack(entry, &ent);
+        if (!ent.present) {
+            continue;
+        }
+        uint64_t virt_part = i << (12 + 9*(num_levels - depth - 1));
+        uint64_t virt_new = virt_prev | virt_part;
+        if (LEAF(depth, num_levels, ent)) {
+            struct vpn curr_vpn = vpn_from_aligned_va(va_from_value(virt_new));
+            // for huge pages this is > 1
+            uint64_t num_pages_covered = 1ull << (9 * (num_levels - depth - 1));
+            if (curr_range.npages == 0) {
+                curr_range.vpn_start = curr_vpn;
+                curr_range.entry_start = ent;
+                curr_range.npages = num_pages_covered;
+            } else {
+                if (vpn_to_pagenum(curr_vpn) == vpn_to_pagenum(curr_range.vpn_start) + curr_range.npages
+                    && ppn_to_pagenum(ent.ppn) == ppn_to_pagenum(curr_range.entry_start.ppn) + curr_range.npages
+                    && ent.writable == curr_range.entry_start.writable
+                    && ent.supervisor == curr_range.entry_start.supervisor
+                    && ent.writethrough == curr_range.entry_start.writethrough
+                    && ent.cache_disable == curr_range.entry_start.cache_disable
+                    && ent.global == curr_range.entry_start.global)
+                    // TODO should PAT also be same?
+                {
+                    curr_range.npages += num_pages_covered;
+                } else {
+                    // emit the last range and start a new one
+                    ASSERT(buf_out->next_entry < buf_out->num_entries);
+                    buf_out->ptr[buf_out->next_entry++] = curr_range;
+                    curr_range.vpn_start = curr_vpn;
+                    curr_range.entry_start = ent;
+                    curr_range.npages = num_pages_covered;
+                }
+            }
+        } else {
+            pt_get_ranges_rec(ent.ppn, depth + 1, num_levels, virt_new, buf_out);
+        }
+    }
+}
+
+#define CR4_LA57 12
+
+void pt_get_ranges(struct mem_range_buf *buf_out) {
     uint64_t cr3;
     uint64_t cr4;
     __asm__("mov %%cr3, %0" : "=r"(cr3)::);
@@ -320,7 +324,8 @@ void pt_print_ranges() {
     struct ppn ppn = ppn_from_aligned_pa(pa_from_value(cr3 & 0x000ffffffffff000ull));
 
-    pt_print_ranges_rec(ppn, 0, levels, 0);
-    // print last range
-    arange_print();
+    pt_get_ranges_rec(ppn, 0, levels, 0, buf_out);
+    // append the final accumulated range
+    ASSERT(buf_out->next_entry < buf_out->num_entries);
+    buf_out->ptr[buf_out->next_entry++] = curr_range;
 }
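As a quick sanity check on the address arithmetic in pt_get_ranges_rec, the following standalone sketch (assuming 4-level paging with 4 KiB base pages, matching the ASSERT(num_levels == 4) elsewhere in the file) reproduces the per-depth coverage: an entry at depth d contributes virtual-address bits starting at position 12 + 9*(num_levels - d - 1), and a leaf at that depth spans 2^(9*(num_levels - d - 1)) base pages, which is exactly the num_pages_covered added to the running range.

#include <stdint.h>
#include <stdio.h>

// Standalone check of the shift arithmetic used by pt_get_ranges_rec
// for a 4-level table with 4 KiB base pages.
int main(void) {
    const int num_levels = 4;
    for (int depth = 0; depth < num_levels; depth++) {
        int shift = 12 + 9 * (num_levels - depth - 1);   // virt_part = i << shift
        uint64_t pages = 1ull << (9 * (num_levels - depth - 1));
        uint64_t bytes = 1ull << shift;
        printf("depth %d: shift %2d, %9llu pages, %13llu bytes per entry\n",
               depth, shift, (unsigned long long)pages, (unsigned long long)bytes);
    }
    return 0;   // 512 GiB, 1 GiB, 2 MiB, 4 KiB coverage for depths 0..3
}

This is also why the LEAF macro tests a bit above the lowest level: bit 7 of a PDE/PDPTE is the PS (page size) flag, which the unpacked entry apparently exposes as page_attr_table_low, so a set bit there marks a 2 MiB or 1 GiB leaf whose num_pages_covered is greater than one.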