Compare commits

...

13 commits

11 changed files with 540 additions and 0 deletions

.gitignore (vendored) · 2 additions

@@ -4,3 +4,5 @@ build/
drive.img
/compile_commands.json
/opt
/.cache
/.clang-format


@@ -34,6 +34,8 @@ KERNEL_SOURCES := \
    src/tar.c \
    src/time.c \
    src/std.c \
    src/slab.c \
    src/nodevec.c \
    $(KERNEL_SOURCES_$(ARCH)) \
# end of kernel sources list


@@ -48,6 +48,8 @@ struct pa pa_from_pt_with_idx(struct ppn ppn, unsigned int idx);
uint64_t pa_to_value(struct pa pa);
void *pa_to_pointer(struct pa pa);
uint64_t pa_offset(struct pa pa);
// only pass pointers here that have been created with `pa_to_pointer`
struct pa pa_from_pointer(void *ptr);
struct ppn ppn_from_pagenum(uint64_t pagenum);
struct ppn ppn_from_aligned_pa(struct pa pa);

include/nodevec.h (new file) · 51 additions

@@ -0,0 +1,51 @@
#ifndef KARLOS_NODEVEC_H
#define KARLOS_NODEVEC_H

#include <stddef.h>
#include <stdbool.h>

struct nvec_node {
    struct nvec_node *prev;
    struct nvec_node *next;
    size_t len;
    size_t cap;
};
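
// Node layout (assuming 4 KiB pages): the header is followed in the same
// page by up to `cap` elements of `obj_size` bytes each:
//
//   [struct nvec_node][elem 0][elem 1]...[elem cap-1]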

struct nvec {
    struct nvec_node *head;
    struct nvec_node *tail;
    size_t obj_size;
    size_t len;
};

/// Initializes an empty `nvec`.
void nvec_init(struct nvec *nv, size_t obj_size);

/// Removes all elements from the `nvec` and frees the associated memory.
void nvec_clear(struct nvec *nv);

/// Returns the number of elements in the `nvec`.
size_t nvec_len(const struct nvec *nv);

/// Returns a pointer to the element at `idx`, or `NULL` if the index is invalid.
void *nvec_get(struct nvec *nv, size_t idx);

/// Reserves space for a new element at the end of the `nvec` and returns a pointer to it.
void *nvec_push(struct nvec *nv);

/// Copies the last element to `obj_buffer` and removes it from the `nvec`.
/// `obj_buffer` may be `NULL`, in which case the element is simply removed.
/// Returns whether an element has been popped.
bool nvec_pop(struct nvec *nv, void *obj_buffer);

// --- invariant checking ---

void nvec_check(const struct nvec *nv);

// --- iteration ---

struct nvec_iter {
    struct nvec_node *curr_node;
    size_t obj_size;
    size_t idx;
};

void nvec_iter_init(struct nvec_iter *it, struct nvec *nv);
void *nvec_iter_next(struct nvec_iter *it);

#endif
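
A minimal usage sketch of the `nvec` API (not part of the diff; `struct point` and `points_demo` are hypothetical):

#include "nodevec.h"

struct point { int x, y; };

void points_demo(void) {
    struct nvec points;
    nvec_init(&points, sizeof(struct point));

    // nvec_push only reserves the slot; the caller fills it in
    struct point *p = nvec_push(&points);
    p->x = 1;
    p->y = 2;

    // indexed access walks the node list
    struct point *first = nvec_get(&points, 0);
    first->y += 1;

    // iteration visits elements in insertion order
    struct nvec_iter it;
    nvec_iter_init(&it, &points);
    for (struct point *q = nvec_iter_next(&it); q != NULL; q = nvec_iter_next(&it)) {
        q->x *= 2;
    }

    // pop copies the element out before removing it
    struct point last;
    bool popped = nvec_pop(&points, &last);
    (void)popped;

    nvec_clear(&points); // returns all pages to the kernel
}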


@@ -7,6 +7,9 @@
#include <stdbool.h>
#include <address.h>
#define PAGE_SIZE 4096
#define PAGE_BASE(ptr) (void *)((intptr_t)(ptr) & ~(intptr_t)0xfff)
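// e.g. PAGE_BASE((void *)0x1234) == (void *)0x1000 with 4 KiB pages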
enum frame_size {
    RAM_PAGE_NORMAL = 0,
    RAM_PAGE_LARGE = 1,
@@ -28,4 +31,7 @@ bool ram_alloc_buffer(struct ppn *ppn_out, struct ram_buffer_requirements req);
bool ram_alloc_buffer_zeroed(struct ppn *ppn_out, struct ram_buffer_requirements req);
void ram_free(struct ppn ppn);
// convenience interfaces
void *ram_alloc_page_zeroed_asserted(void);
#endif

include/slab.h (new file) · 60 additions

@@ -0,0 +1,60 @@
#ifndef KARLOS_SLAB_H
#define KARLOS_SLAB_H

#include <paging.h>
#include <stddef.h>
#include <stdint.h>

/// Per-cache descriptor: one cache per object kind.
/// Usually this is allocated statically somewhere.
struct slab_cache {
    size_t obj_size;            // bytes per object
    struct slab *empty_slabs;   // slabs with all slots free
    struct slab *partial_slabs; // slabs with some free slots
    struct slab *full_slabs;    // slabs with no free slots
    void (*constructor)(void *obj);
    void (*destructor)(void *obj);
};

/// Per-slab descriptor.
/// Lives at the start of the page it describes.
struct slab {
    struct free_stack *free_stack; // stack of free slots in this page
    size_t free_count;             // how many slots are free right now
    struct slab_cache *cache;      // backwards link to the containing cache
    struct slab *next;             // link to the next slab in the cache list
    uint64_t magic;                // guard against objects outside of any slab
};

/// Linking data structure.
struct free_stack {
    uint64_t magic; // guard against buffer overflows
    struct free_stack *next;
};
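
// Page layout (assuming 4 KiB pages): each slot is an object immediately
// followed by its free_stack node, so a free finds the node at obj + obj_size:
//
//   [struct slab][obj 0][free_stack 0][obj 1][free_stack 1]...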

/// Initializes the cache without allocating any memory.
/// `obj_size` must be > 0.
/// `constructor` and `destructor` may be `NULL`, in which case allocated objects are not initialized.
void slab_cache_init(
    struct slab_cache *cache,
    size_t obj_size,
    void (*constructor)(void *),
    void (*destructor)(void *));

/// Allocates one element from the cache.
/// Panics on memory exhaustion. Never returns `NULL`.
void *slab_cache_alloc(struct slab_cache *cache);

/// Frees an element previously allocated with `slab_cache_alloc`.
/// Panics if `obj` is `NULL`.
void slab_cache_free(void *obj);

/// Returns all empty pages the cache currently holds to the kernel.
/// Partial and full pages remain untouched.
void slab_cache_return_unused_mem(struct slab_cache *cache);

/// Destroys the cache, returning all of its empty pages to the kernel.
/// Panics if the cache still contains allocated objects.
void slab_cache_destroy(struct slab_cache *cache);

#endif /* KARLOS_SLAB_H */
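
A minimal usage sketch of the slab API (not part of the diff; `struct inode` and the surrounding functions are hypothetical):

#include "slab.h"

struct inode { uint64_t ino; uint32_t mode; };

static struct slab_cache inode_cache;

void inode_cache_setup(void) {
    // NULL constructor/destructor: objects come back uninitialized
    slab_cache_init(&inode_cache, sizeof(struct inode), NULL, NULL);
}

void inode_demo(void) {
    struct inode *node = slab_cache_alloc(&inode_cache); // never NULL; panics on OOM
    node->ino = 42;
    // no cache argument needed: the slab header is found via PAGE_BASE(node)
    slab_cache_free(node);
    // optionally hand fully empty pages back to the kernel
    slab_cache_return_unused_mem(&inode_cache);
}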


@@ -37,6 +37,8 @@
#include "console.h"
#include "cpu.h"
#include "paging.h"
#include "slab.h"
#include "nodevec.h"
#include "x86_64/cmos.h"
#include "x86_64/ps2_driver.h"

src/nodevec.c (new file) · 177 additions

@@ -0,0 +1,177 @@
#include "nodevec.h"
#include "address.h"
#include "ram.h"
#include "std.h"
void nvec_init(struct nvec *nv, size_t obj_size) {
ASSERT(nv != NULL);
ASSERT(obj_size > 0);
ASSERT(obj_size < PAGE_SIZE - sizeof(struct nvec_node));
nv->head = NULL;
nv->tail = NULL;
nv->obj_size = obj_size;
nv->len = 0;
}

void nvec_clear(struct nvec *nv) {
    ASSERT(nv != NULL);
    struct nvec_node *curr_node = nv->head;
    while (curr_node != NULL) {
        struct nvec_node *next = curr_node->next;
        ram_free(ppn_from_aligned_pa(pa_from_pointer(curr_node)));
        curr_node = next;
    }
    nv->head = NULL;
    nv->tail = NULL;
    nv->len = 0;
}

size_t nvec_len(const struct nvec *nv) {
    ASSERT(nv != NULL);
    return nv->len;
}

static void *node_get(struct nvec_node *node, size_t obj_size, size_t idx) {
    ASSERT(node != NULL);
    ASSERT(idx < node->len);
    return (char *)node + sizeof(struct nvec_node) + idx * obj_size;
}

static bool nvec_goto(struct nvec *nv, size_t idx, struct nvec_node **node_out, size_t *idx_out) {
    ASSERT(nv != NULL);
    ASSERT(node_out != NULL);
    ASSERT(idx_out != NULL);
    size_t len_seen = 0;
    for (struct nvec_node *curr_node = nv->head; curr_node != NULL; curr_node = curr_node->next) {
        if (idx < len_seen + curr_node->len) {
            *node_out = curr_node;
            *idx_out = idx - len_seen;
            return true;
        }
        len_seen += curr_node->len;
    }
    return false;
}

void *nvec_get(struct nvec *nv, size_t idx) {
    ASSERT(nv != NULL);
    struct nvec_node *node;
    size_t inner_idx;
    if (nvec_goto(nv, idx, &node, &inner_idx)) {
        return node_get(node, nv->obj_size, inner_idx);
    } else {
        return NULL;
    }
}

static struct nvec_node *node_new(size_t obj_size) {
    struct nvec_node *node = ram_alloc_page_zeroed_asserted();
    node->prev = NULL;
    node->next = NULL;
    node->len = 0;
    node->cap = (PAGE_SIZE - sizeof(struct nvec_node)) / obj_size;
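    // e.g. with obj_size == 8 and a 32-byte header (x86_64): (4096 - 32) / 8 == 508 slots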
    return node;
}

/// Only call this function if the node is not full.
static void *node_push(struct nvec_node *node, size_t obj_size) {
    ASSERT(node != NULL);
    ASSERT(node->len < node->cap);
    void *pos = (char *)node + sizeof(struct nvec_node) + node->len * obj_size;
    node->len += 1;
    return pos;
}

void *nvec_push(struct nvec *nv) {
    ASSERT(nv != NULL);
    if (nv->tail == NULL) {
        ASSERT(nv->head == NULL);
        ASSERT(nv->len == 0);
        struct nvec_node *node = node_new(nv->obj_size);
        nv->head = nv->tail = node;
    } else if (nv->tail->len == nv->tail->cap) {
        struct nvec_node *node = node_new(nv->obj_size);
        node->prev = nv->tail;
        node->next = NULL;
        nv->tail->next = node;
        nv->tail = node;
    }
    nv->len += 1;
    return node_push(nv->tail, nv->obj_size);
}

bool nvec_pop(struct nvec *nv, void *obj_buffer) {
    ASSERT(nv != NULL);
    if (nv->tail == NULL) {
        return false;
    }
    ASSERT(nv->tail->len > 0);
    if (obj_buffer != NULL) {
        memcpy(obj_buffer, node_get(nv->tail, nv->obj_size, nv->tail->len - 1), nv->obj_size);
    }
    struct nvec_node *tail = nv->tail;
    nv->len -= 1;
    nv->tail->len -= 1;
    if (nv->tail->len == 0) {
        // the tail node is now empty: unlink it and return its page
        if (nv->tail->prev != NULL) {
            nv->tail->prev->next = NULL;
        }
        nv->tail = nv->tail->prev;
        if (nv->tail == NULL) {
            nv->head = NULL;
        }
        ram_free(ppn_from_aligned_pa(pa_from_pointer(tail)));
    }
    return true;
}

// --- invariant checking ---

void nvec_check(const struct nvec *nv) {
    ASSERT(nv != NULL);
    ASSERT(nv->obj_size <= PAGE_SIZE - sizeof(struct nvec_node));
    if (nv->head == NULL || nv->tail == NULL || nv->len == 0) {
        ASSERT(nv->head == NULL);
        ASSERT(nv->tail == NULL);
        ASSERT(nv->len == 0);
    }
    size_t cnt = 0;
    struct nvec_node *prev = NULL;
    for (struct nvec_node *curr_node = nv->head; curr_node != NULL; curr_node = curr_node->next) {
        cnt += curr_node->len;
        ASSERT(curr_node->prev == prev);
        prev = curr_node;
    }
    ASSERT(prev == nv->tail);
    ASSERT(cnt == nv->len);
}

// --- iteration ---

void nvec_iter_init(struct nvec_iter *it, struct nvec *nv) {
    ASSERT(it != NULL);
    ASSERT(nv != NULL);
    it->curr_node = nv->head;
    it->obj_size = nv->obj_size;
    it->idx = 0;
}

void *nvec_iter_next(struct nvec_iter *it) {
    ASSERT(it != NULL);
    if (it->curr_node == NULL) {
        return NULL;
    }
    // current node exhausted: advance to the next one
    if (it->idx == it->curr_node->len) {
        it->curr_node = it->curr_node->next;
        it->idx = 0;
    }
    if (it->curr_node == NULL) {
        return NULL;
    }
    void *pos = node_get(it->curr_node, it->obj_size, it->idx);
    it->idx += 1;
    return pos;
}


@@ -1,3 +1,4 @@
#include "address.h"
#include "std.h"
#include "ram.h"
#include "bootboot.h"
@@ -433,3 +434,10 @@ ram_init(void)
    struct ppn ppn_ignored;
    ram_alloc_frame(&ppn_ignored, RAM_PAGE_NORMAL);
}

// convenience functions

void *ram_alloc_page_zeroed_asserted(void) {
    struct ppn ppn;
    // keep the allocation outside of ASSERT so it cannot be compiled away
    bool ok = ram_alloc_frame_zeroed(&ppn, RAM_PAGE_NORMAL);
    ASSERT(ok);
    return pa_to_pointer(pa_from_ppn(ppn));
}

src/slab.c (new file) · 225 additions

@@ -0,0 +1,225 @@
#include "slab.h"
#include "address.h"
#include "ram.h"
#include "std.h"
#define EFF_SPACE_IN_SLAB (PAGE_SIZE - sizeof(struct slab))
#define SLOT_SIZE(obj_size) ((obj_size) + sizeof(struct free_stack))
#define SLOTS_PER_SLAB(obj_size) (EFF_SPACE_IN_SLAB / SLOT_SIZE(obj_size))
#define ROUND_UP_8(sz) (((sz) + 7) & ~(size_t)0x7)
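// e.g. assuming 8-byte pointers (sizeof(struct slab) == 40): obj_size 24
// gives SLOT_SIZE 40 and SLOTS_PER_SLAB (4096 - 40) / 40 == 101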

void slab_cache_init(
    struct slab_cache *cache,
    size_t obj_size,
    void (*constructor)(void *),
    void (*destructor)(void *))
{
    ASSERT(cache != NULL);
    ASSERT(obj_size > 0);
    ASSERT(SLOT_SIZE(obj_size) <= EFF_SPACE_IN_SLAB);
    cache->obj_size = ROUND_UP_8(obj_size);
    cache->empty_slabs = NULL;
    cache->partial_slabs = NULL;
    cache->full_slabs = NULL;
    cache->constructor = constructor;
    cache->destructor = destructor;
}
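
// the magic values are ASCII: "slabmage" and "hi stack"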
#define SLAB_MAGIC 0x736c61626d616765
#define STACK_MAGIC 0x686920737461636b

static void slab_cache_new_slab(struct slab_cache *cache) {
    ASSERT(cache != NULL);
    void *raw = ram_alloc_page_zeroed_asserted();
    struct slab *as_slab = raw;
    as_slab->free_count = SLOTS_PER_SLAB(cache->obj_size);
    // build the free-slot stack in the bytes after the header
    char *slots_base = (char *)raw + sizeof(struct slab);
    ASSERT((intptr_t)slots_base % 8 == 0);
    struct free_stack *head = NULL;
    for (size_t i = 0; i < as_slab->free_count; i++) {
        // the stack structure lives after the corresponding object
        void *obj = slots_base + i * SLOT_SIZE(cache->obj_size);
        if (cache->constructor) {
            cache->constructor(obj);
        }
        struct free_stack *node = (struct free_stack *)((char *)obj + cache->obj_size);
        node->magic = STACK_MAGIC;
        node->next = head;
        head = node;
    }
    as_slab->free_stack = head;
    as_slab->cache = cache;
    as_slab->magic = SLAB_MAGIC;
    // link the new slab into the cache's empty_slabs list
    as_slab->next = cache->empty_slabs;
    cache->empty_slabs = as_slab;
}

static void *slab_obj_alloc(struct slab *slab) {
    ASSERT(slab != NULL);
    ASSERT(slab->free_stack != NULL && slab->free_count > 0);
    slab->free_count--;
    void *free = slab->free_stack;
    slab->free_stack = slab->free_stack->next;
    // the free_stack node sits at the end of the slot; step back to the object's start
    return (char *)free - slab->cache->obj_size;
}

void *slab_cache_alloc(struct slab_cache *cache) {
    ASSERT(cache != NULL);
    if (cache->partial_slabs != NULL) {
        struct slab *slab = cache->partial_slabs;
        void *obj = slab_obj_alloc(slab);
        // if that drained the slab, move it to the full list
        if (slab->free_stack == NULL) {
            cache->partial_slabs = slab->next;
            slab->next = cache->full_slabs;
            cache->full_slabs = slab;
        }
        return obj;
    }
    if (cache->empty_slabs == NULL) {
        slab_cache_new_slab(cache);
    }
    struct slab *slab = cache->empty_slabs;
    // move the previously empty slab to the partial or full list
    cache->empty_slabs = slab->next;
    if (SLOTS_PER_SLAB(cache->obj_size) > 1) {
        slab->next = cache->partial_slabs;
        cache->partial_slabs = slab;
    } else {
        slab->next = cache->full_slabs;
        cache->full_slabs = slab;
    }
    return slab_obj_alloc(slab);
}

static void slab_cache_remove_slab_from_list(
    struct slab_cache *cache,
    struct slab *slab,
    struct slab **list_ptr)
{
    ASSERT(cache != NULL);
    ASSERT(slab != NULL);
    ASSERT(list_ptr != NULL);
    struct slab *prev = NULL;
    for (struct slab *curr = *list_ptr; curr != NULL; curr = curr->next) {
        if (curr == slab) {
            if (prev != NULL) {
                prev->next = slab->next;
            } else {
                // slab was the first element of the list
                *list_ptr = slab->next;
            }
            return;
        }
        prev = curr;
    }
    UNREACHABLE();
}

static void slab_obj_free(struct slab *slab, void *obj) {
    ASSERT(slab != NULL);
    ASSERT(obj != NULL);
    ASSERT(slab->free_count < SLOTS_PER_SLAB(slab->cache->obj_size));
    ASSERT(PAGE_BASE(obj) == (void *)slab);
    struct free_stack *node = (struct free_stack *)((char *)obj + slab->cache->obj_size);
    ASSERT(node->magic == STACK_MAGIC);
    // NOTE: the memory is not zeroed here; objects stay in their constructed
    // state until the destructor runs when the page is returned
    node->next = slab->free_stack;
    slab->free_stack = node;
    slab->free_count += 1;
}

static void slab_cache_return_slab_to_kernel(
    struct slab_cache *cache,
    struct slab *slab)
{
    ASSERT(cache != NULL);
    ASSERT(slab != NULL);
    ASSERT(slab->free_count == SLOTS_PER_SLAB(cache->obj_size));
    char *slots_base = (char *)slab + sizeof(struct slab);
    ASSERT((intptr_t)slots_base % 8 == 0);
    for (size_t i = 0; i < slab->free_count; i++) {
        void *obj = slots_base + i * SLOT_SIZE(cache->obj_size);
        if (cache->destructor) {
            cache->destructor(obj);
        }
    }
    ram_free(ppn_from_aligned_pa(pa_from_pointer(slab)));
}

void slab_cache_free(void *obj) {
    ASSERT(obj != NULL);
    ASSERT((intptr_t)obj % 8 == 0);
    struct slab *slab = PAGE_BASE(obj);
    ASSERT(slab->magic == SLAB_MAGIC);
    bool was_partial_before = slab->free_stack != NULL;
    slab_obj_free(slab, obj);
    // the slab was either full or partial before;
    // afterwards it is either partial or empty
    if (slab->free_count == SLOTS_PER_SLAB(slab->cache->obj_size)) {
        // empty now
        if (was_partial_before) {
            slab_cache_remove_slab_from_list(slab->cache, slab, &slab->cache->partial_slabs);
        } else {
            slab_cache_remove_slab_from_list(slab->cache, slab, &slab->cache->full_slabs);
        }
        // policy: keep at most one empty slab; anything beyond that
        // is returned to the kernel immediately
        if (slab->cache->empty_slabs != NULL) {
            slab_cache_return_slab_to_kernel(slab->cache, slab);
        } else {
            slab->next = NULL;
            slab->cache->empty_slabs = slab;
        }
    } else {
        // partial now
        if (!was_partial_before) {
            slab_cache_remove_slab_from_list(slab->cache, slab, &slab->cache->full_slabs);
            slab->next = slab->cache->partial_slabs;
            slab->cache->partial_slabs = slab;
        }
    }
}

void slab_cache_return_unused_mem(struct slab_cache *cache) {
    ASSERT(cache != NULL);
    struct slab *slab = cache->empty_slabs;
    while (slab != NULL) {
        struct slab *next_slab = slab->next; // read the next pointer before freeing the slab
        slab_cache_return_slab_to_kernel(cache, slab);
        slab = next_slab;
    }
    // clear the list head so it does not dangle into freed pages
    cache->empty_slabs = NULL;
}

void slab_cache_destroy(struct slab_cache *cache) {
    ASSERT(cache != NULL);
    ASSERT(cache->partial_slabs == NULL);
    ASSERT(cache->full_slabs == NULL);
    slab_cache_return_unused_mem(cache);
}


@@ -125,6 +125,11 @@ uint64_t pa_offset(struct pa pa) {
    return pa.value & 0xfff;
}
struct pa pa_from_pointer(void *ptr) {
    struct va va = va_from_canonical((uint64_t)ptr);
    return pa_from_value(va_to_value(va) - identity_mapping_start);
}

struct ppn ppn_from_pagenum(uint64_t pagenum) {
    ASSERT(pagenum < (1ull << 24));
    return (struct ppn){ .pagenum = pagenum };