feat(mem): harden slab allocator and fix paging race conditions
commit f5a79e451a
parent 77744464e3

4 changed files with 37 additions and 8 deletions
@@ -44,6 +44,7 @@ static MemBlock *block_list = _bootstrap_blocks;
 static int block_capacity = BLOCK_LIST_INITIAL_CAPACITY;
 static int block_count = 0;
 static bool on_heap = false;
+static bool growing = false;
 
 static size_t memory_pool_size = 0;
 static size_t total_allocated = 0;
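Note: the new file-scope growing flag replaces the function-local static of the same name that is removed from grow_block_list further down, so that insert_block_at (next hunk) can also consult it.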
@@ -90,8 +91,12 @@ static uint32_t get_timestamp(void) {
 }
 
 static bool insert_block_at(int idx, void *addr, size_t size, bool allocated, uint32_t id) {
-    if (block_count >= block_capacity && !grow_block_list())
-        return false;
+    // Proactive growth: If we're within 10 slots of full, grow the list now.
+    // This ensures we always have metadata space to split blocks even during a nested kmalloc.
+    if (block_count >= block_capacity - 10 && !growing) {
+        grow_block_list();
+    }
+    if (block_count >= block_capacity) return false;
     for (int j = block_count; j > idx; j--)
         block_list[j] = block_list[j - 1];
     block_list[idx] = (MemBlock){
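Note: keeping roughly ten spare metadata slots means that when grow_block_list later calls _kmalloc_locked for the enlarged array, any block split that allocation triggers can still record its metadata without having to grow the list again mid-grow.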
@ -192,7 +197,6 @@ static void _kfree_locked(void *ptr) {
|
||||||
// _kmalloc_locked can call grow_block_list again if the block list fills
|
// _kmalloc_locked can call grow_block_list again if the block list fills
|
||||||
// during the allocation of the new array, causing infinite recursion without this flag.
|
// during the allocation of the new array, causing infinite recursion without this flag.
|
||||||
static bool grow_block_list(void) {
|
static bool grow_block_list(void) {
|
||||||
static bool growing = false;
|
|
||||||
if (growing) return false;
|
if (growing) return false;
|
||||||
growing = true;
|
growing = true;
|
||||||
|
|
||||||
|
|
@@ -200,12 +204,17 @@ static bool grow_block_list(void) {
     MemBlock *nl = (MemBlock *)_kmalloc_locked((size_t)new_cap * sizeof(MemBlock), 8);
     if (!nl) { growing = false; return false; }
 
-    if (on_heap) _kfree_locked(block_list);
     mem_memcpy(nl, block_list, (size_t)block_count * sizeof(MemBlock));
 
+    MemBlock *old_ptr = block_list;
+    bool old_on_heap = on_heap;
+
     block_list = nl;
     block_capacity = new_cap;
     on_heap = true;
     growing = false;
+
+    if (old_on_heap) _kfree_locked(old_ptr);
     return true;
 }
+
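Note: the old code freed block_list before copying out of it; the new order copies first, switches block_list, block_capacity, and on_heap over to the new array, clears the growing flag, and only then frees the old array via the saved old_ptr/old_on_heap, so the metadata is never read after it has been handed back to the allocator.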
@@ -350,8 +359,20 @@ static void *slab_alloc(int cls) {
     serial_write(" page="); k_itoa_hex((uint64_t)page, b); serial_write(b);
     serial_write(" fl="); k_itoa_hex((uint64_t)obj, b); serial_write(b);
     serial_write("\n");
+
+    // Remove the corrupted page from the list to avoid hitting it again
+    if (cache->pages == page) {
+        cache->pages = page->next;
+    } else {
+        SlabPage *prev = cache->pages;
+        while (prev && prev->next != page) prev = prev->next;
+        if (prev) prev->next = page->next;
+    }
+
     page->free_count = 0;
     page->freelist = NULL;
+    page->next = NULL; // Isolate it
+
     page = slab_new_page(cls);
     if (!page) return NULL;
     page->next = cache->pages;
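Note: the corrupted page is unlinked from cache->pages and isolated (free_count = 0, freelist = NULL, next = NULL) rather than returned to the pool, effectively leaking it so its memory is never handed out again, and a fresh slab page is pushed onto the front of the list in its place.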
@@ -452,6 +473,15 @@ void *kmalloc(size_t size) {
     return kmalloc_aligned(size, 8);
 }
 
+// kcalloc ensures memory is zeroed, which is critical for many kernel and library
+// structures (like lwIP PCBs) that assume a null-initialized state.
+void *kcalloc(size_t n, size_t size) {
+    size_t total = n * size;
+    void *ptr = kmalloc(total);
+    if (ptr) mem_memset(ptr, 0, total);
+    return ptr;
+}
+
 void kfree(void *ptr) {
     if (!ptr || !initialized) return;
     uint64_t rflags = spinlock_acquire_irqsave(&mm_lock);
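One thing the new kcalloc does not do is guard n * size against overflow. Below is a minimal hardened sketch, not part of this commit: kcalloc_checked is a hypothetical name, and the mem_memset signature is assumed from its use in the hunk above; only kmalloc's prototype is confirmed by the header change further down.

#include <stddef.h>
#include <stdint.h>

extern void *kmalloc(size_t size);                         /* prototype from the header hunk below */
extern void *mem_memset(void *dst, int value, size_t n);   /* assumed signature */

/* Hypothetical overflow-checked variant of the kcalloc added above. */
void *kcalloc_checked(size_t n, size_t size) {
    if (size != 0 && n > SIZE_MAX / size)   /* n * size would wrap around size_t */
        return NULL;
    size_t total = n * size;
    void *ptr = kmalloc(total);
    if (ptr) mem_memset(ptr, 0, total);
    return ptr;
}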
@@ -11,7 +11,7 @@
 
 // SLAB_CLASSES and SLAB_MAX_SIZE must stay in sync with the slab_sizes[] table in memory_manager.c.
 #define DEFAULT_POOL_SIZE (128 * 1024 * 1024)
-#define BLOCK_LIST_INITIAL_CAPACITY 64
+#define BLOCK_LIST_INITIAL_CAPACITY 1024
 #define SLAB_CLASSES 7
 #define SLAB_MAX_SIZE 512
 
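Note: BLOCK_LIST_INITIAL_CAPACITY also presumably sizes the static _bootstrap_blocks array that block_list starts out pointing at, so raising it from 64 to 1024 trades a somewhat larger static footprint (1024 * sizeof(MemBlock)) for far fewer early grow_block_list calls.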
@@ -44,6 +44,7 @@ typedef struct {
 void memory_manager_init_from_memmap(struct limine_memmap_response *memmap);
 
 void* kmalloc(size_t size);
+void* kcalloc(size_t n, size_t size);
 // alignment must be a power of 2. Requests <= 512 B with alignment <= 8 are served by the slab allocator.
 void* kmalloc_aligned(size_t size, size_t alignment);
 void kfree(void *ptr);
@@ -135,9 +135,6 @@ void paging_destroy_user_pml4_phys(uint64_t pml4_phys) {
 
     for (int pt_idx = 0; pt_idx < 512; pt_idx++) {
         if (pt->entries[pt_idx] & PT_PRESENT) {
-            uint64_t phys_addr = pt->entries[pt_idx] & PT_ADDR_MASK;
-            extern void kfree(void* ptr);
-            kfree((void*)p2v(phys_addr));
         }
     }
     extern void kfree(void* ptr);
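Note: paging_destroy_user_pml4_phys no longer kfrees the physical frame behind every present PTE; per the new header comment in the last hunk, the function now reclaims only the page-table structures themselves. The dropped code handed p2v(phys_addr) to kfree, which is only safe if every mapped frame originally came from kmalloc.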
@@ -35,6 +35,7 @@ uint64_t paging_create_user_pml4_phys(void);
 
 void paging_switch_directory(uint64_t pml4_phys);
 
+// Destroys a user page directory, reclaiming all physical memory used for page table structures.
 void paging_destroy_user_pml4_phys(uint64_t pml4_phys);
 
 void paging_init(void);