port: tinyvmem

parent 5bbc163ff3
commit 9c367ca8f1
@@ -3,26 +3,26 @@
-Wextra
-Werror
-fcolor-diagnostics
-Isrc/kernel/klibs
-Isrc/kernel/archs
-I.cutekit/extern/cute-engineering
-Isrc/kernel/klibs/stdc-shim
-Isrc/libs
-D__ck_freestanding__
-I.cutekit/extern/cute-engineering
-Isrc/kernel/klibs
-Isrc/kernel/klibs/stdc-shim
-Isrc/kernel/archs
-D__ck_toolchain_value=clang
-D__ck_bits_value=64
-D__ck_loader_value=limine
-D__ck_bits_64__
-D__ck_arch_value=x86_64
-D__ck_sys_kernel__
-D__ck_abi_sysv__
-D__ck_freestanding__
-D__ck_loader_limine__
-D__ck_arch_x86_64__
-D__ck_sys_value=kernel
-D__ck_abi_value=sysv
-D__ck_toolchain_clang__
-D__ck_encoding_value=utf8
-D__ck_arch_x86_64__
-D__ck_arch_value=x86_64
-D__ck_toolchain_clang__
-D__ck_abi_value=sysv
-D__ck_encoding_utf8__
-D__ck_sys_value=kernel
-D__ck_sys_kernel__
-D__ck_loader_value=limine
-ffreestanding
-fno-stack-protector
-mno-80387
@@ -32,4 +32,5 @@
-mno-sse2
-mno-red-zone
-Dauto=__auto_type
-g
-Isrc/kernel/klibs
@@ -1,5 +1,6 @@
#include <dbg/log.h>
#include <hal.h>
#include <tinyvmem/tinyvmem.h>

#include "pmm.h"
#include "pre-sched.h"
src/kernel/klibs/tinyvmem/manifest.json (new file, 9 lines)

@@ -0,0 +1,9 @@
{
    "$schema": "https://schemas.cute.engineering/stable/cutekit.manifest.component.v1",
    "type": "lib",
    "id": "mem",
    "requires": [
        "dbg",
        "stdc-shim"
    ]
}
src/kernel/klibs/tinyvmem/tinyvmem.c (new file, 624 lines)

@@ -0,0 +1,624 @@
/*
 * Public Domain implementation of the VMem Resource Allocator
 *
 * See: Adams, A. and Bonwick, J. (2001). Magazines and Vmem: Extending the Slab
 * Allocator to Many CPUs and Arbitrary Resources.
 * More implementation details are available in "tinyvmem.h"
 */

#include <string.h>
#include <sys/queue.h>

#include "tinyvmem.h"

#ifndef __ck_sys_kernel__
# include <assert.h>
# include <stdio.h>
# include <stdlib.h>
# define vmem_printf printf
# define ASSERT assert
# define vmem_alloc_pages(x) malloc((x) * 4096)
#else
# include <dbg/log.h>
# include <hal.h>
# include "../../core/pmm.h"
# define vmem_printf(...) ((void)0)
# define ASSERT(x) ((x) ? (void)0 : _ASSERT(__FILE__, __LINE__, #x))

static void _ASSERT(char file[static 1], size_t lineno, char expr[static 1])
{
    error$("Assertion failed: %s:%zu: %s", file, lineno, expr);
    hal_panic();
}

static void *vmem_alloc_pages(size_t x)
{
    PmmObj obj = pmm_alloc(x);
    if (obj.base == 0)
    {
        return NULL;
    }
    return (void *)hal_mmap_l2h(obj.base);
}

#endif

#define ARR_SIZE(x) (sizeof(x) / sizeof(*x))
#define VMEM_ADDR_MIN 0
#define VMEM_ADDR_MAX (~(uintptr_t)0)

/* Assuming FREELISTS_N is 64,
 * we can calculate the freelist index by subtracting the leading zero count from 64.
 * For example, take the size 4096: clzl(4096) is 51, and 64 - 51 is 13.
 * We then subtract 1 from 13 because 2^13 equals 8192 (we want the list whose sizes start at 2^12 = 4096).
 */
#define GET_LIST(size) (FREELISTS_N - __builtin_clzl(size) - 1)
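/* Worked example (illustrative, assuming a 64-bit long):
 *   GET_LIST(4096)  == 64 - 51 - 1 == 12, and 2^12 == 4096;
 *   GET_LIST(12288) == 64 - 50 - 1 == 13, since 2^13 <= 12288 < 2^14.
 * In other words, GET_LIST(size) is the index of the highest set bit of `size`. */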

#define VMEM_ALIGNUP(addr, align) \
    (((addr) + (align)-1) & ~((align)-1))
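/* e.g. VMEM_ALIGNUP(0x1234, 0x1000) == 0x2000; `align` is assumed to be a power of two */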

#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#define MAX(a, b) (((a) > (b)) ? (a) : (b))

/* We need to keep a global freelist of segments, because allocating virtual memory (e.g. allocating a segment) requires segments to describe it (kernel only).
   In non-kernel code, this is handled by the host's `malloc` and `free` standard library functions. */
static VmemSegment static_segs[128];
static VmemSegList free_segs = LIST_HEAD_INITIALIZER(free_segs);
static int nfreesegs = 0;

[[maybe_unused]] static const char *seg_type_str[] = {
    "allocated",
    "free",
    "span",
};

#ifdef __ck_sys_kernel__

void vmem_lock(void);
void vmem_unlock(void);

#else
# define vmem_lock()
# define vmem_unlock()
#endif
static VmemSegment *seg_alloc(void)
{
    /* TODO: when bootstrapped, allocate boundary tags dynamically as described in the paper */
    VmemSegment *vsp;

    vmem_lock();
    ASSERT(!LIST_EMPTY(&free_segs));
    vsp = LIST_FIRST(&free_segs);
    LIST_REMOVE(vsp, seglist);
    nfreesegs--;
    vmem_unlock();

    return vsp;
}

static void seg_free(VmemSegment *seg)
{
    vmem_lock();
    LIST_INSERT_HEAD(&free_segs, seg, seglist);
    nfreesegs++;
    vmem_unlock();
}
static int repopulate_segments(void)
{
    struct
    {
        VmemSegment segs[64];
    } *segblock;
    size_t i;

    if (nfreesegs >= 128)
        return 0;

    /* Add 64 new segments */
    segblock = vmem_alloc_pages(1);
    ASSERT(segblock != NULL);

    for (i = 0; i < ARR_SIZE(segblock->segs); i++)
    {
        seg_free(&segblock->segs[i]);
    }

    return 0;
}
static int seg_fit(VmemSegment *segment, size_t size, size_t align, size_t phase, size_t nocross, uintptr_t minaddr, uintptr_t maxaddr, uintptr_t *addrp)
{
    uintptr_t start, end;
    ASSERT(size > 0);
    ASSERT(segment->size >= size);

    start = MAX(segment->base, minaddr);
    end = MIN(segment->base + segment->size, maxaddr);

    if (start > end)
        return -VMEM_ERR_NO_MEM;

    /* Phase is the offset from the alignment boundary.
     * For example, if `start` is 260, `phase` is 8 and `align` is 64, we do the following calculation:
     * ALIGN_UP(260 - 8, 64) = 256. 256 + 8 = 264. (264 % 64) is 8, as requested.
     */
    start = VMEM_ALIGNUP(start - phase, align) + phase;

    /* If for some reason `start` ends up smaller than the segment base, we need to bump it up by `align`.
     * This can happen if, for example, we find that `start` is 0 and segment->base is 0x1000.
     * In this case, `align` is 0x1000. */
    if (start < segment->base)
    {
        start += align;
    }

    ASSERT(nocross == 0 && "Not implemented (yet)");

    /* Ensure that `end` is bigger than `start` and we found a segment of the proper size */
    if (start <= end && (end - start) >= size)
    {
        *addrp = start;
        return 0;
    }

    return -VMEM_ERR_NO_MEM;
}
static uint64_t murmur64(uint64_t h)
{
    h ^= h >> 33;
    h *= 0xff51afd7ed558ccdL;
    h ^= h >> 33;
    h *= 0xc4ceb9fe1a85ec53L;
    h ^= h >> 33;
    return h;
}
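/* NOTE: this is the 64-bit finalizer ("fmix64") from MurmurHash3, used here as a
 * cheap integer mixer to spread addresses across the hashtable buckets below. */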

static VmemSegList *hashtable_for_addr(Vmem *vmem, uintptr_t addr)
{
    /* Hash the address and get the remainder */
    uintptr_t idx = murmur64(addr) % ARR_SIZE(vmem->hashtable);
    return &vmem->hashtable[idx];
}

static void hashtab_insert(Vmem *vmem, VmemSegment *seg)
{
    LIST_INSERT_HEAD(hashtable_for_addr(vmem, seg->base), seg, seglist);
}

static VmemSegList *freelist_for_size(Vmem *vmem, size_t size)
{
    return &vmem->freelist[GET_LIST(size) - 1];
}
static int vmem_contains(Vmem *vmp, void *address, size_t size)
{
    VmemSegment *seg;
    uintptr_t start = (uintptr_t)address;
    uintptr_t end = start + size;

    TAILQ_FOREACH(seg, &vmp->segqueue, segqueue)
    {
        if (start >= seg->base && end <= seg->base + seg->size)
        {
            return true;
        }
    }
    return false;
}
static void vmem_add_to_freelist(Vmem *vm, VmemSegment *seg)
{
    LIST_INSERT_HEAD(freelist_for_size(vm, seg->size), seg, seglist);
}

static void vmem_insert_segment(Vmem *vm, VmemSegment *seg, VmemSegment *prev)
{
    TAILQ_INSERT_AFTER(&vm->segqueue, prev, seg, segqueue);
}
static VmemSegment *vmem_add_internal(Vmem *vmem, void *base, size_t size, bool import)
{
    VmemSegment *newspan, *newfree;

    newspan = seg_alloc();

    ASSERT(newspan);

    newspan->base = (uintptr_t)base;
    newspan->size = size;
    newspan->type = SEGMENT_SPAN;
    newspan->imported = import;

    newfree = seg_alloc();

    ASSERT(newfree);

    newfree->base = (uintptr_t)base;
    newfree->size = size;
    newfree->type = SEGMENT_FREE;

    TAILQ_INSERT_TAIL(&vmem->segqueue, newspan, segqueue);
    vmem_insert_segment(vmem, newfree, newspan);
    vmem_add_to_freelist(vmem, newfree);

    return newfree;
}
static int vmem_import(Vmem *vmp, size_t size, int vmflag)
{
    void *addr;
    VmemSegment *new_seg;
    if (!vmp->alloc)
        return -VMEM_ERR_NO_MEM;

    addr = vmp->alloc(vmp->source, size, vmflag);

    if (!addr)
        return -VMEM_ERR_NO_MEM;

    new_seg = vmem_add_internal(vmp, addr, size, true);

    if (!new_seg)
    {
        vmp->free(vmp->source, addr, size);
        return -VMEM_ERR_NO_MEM;
    }

    return 0;
}
int vmem_init(Vmem *ret, char *name, void *base, size_t size, size_t quantum, VmemAlloc *afunc, VmemFree *ffunc, Vmem *source, size_t qcache_max, int vmflag)
{
    size_t i;

    /* Copy the name, including its NUL terminator */
    memcpy(ret->name, name, strlen(name) + 1);

    ret->base = base;
    ret->size = size;
    ret->quantum = quantum;
    ret->alloc = afunc;
    ret->free = ffunc;
    ret->source = source;
    ret->qcache_max = qcache_max;
    ret->vmflag = vmflag;

    /* Statistics start at zero; vmem_add() below accounts for the initial span */
    ret->stat.free = 0;
    ret->stat.total = 0;
    ret->stat.in_use = 0;
    ret->stat.import = 0;

    LIST_INIT(&ret->spanlist);
    TAILQ_INIT(&ret->segqueue);

    for (i = 0; i < ARR_SIZE(ret->freelist); i++)
    {
        LIST_INIT(&ret->freelist[i]);
    }

    for (i = 0; i < ARR_SIZE(ret->hashtable); i++)
    {
        LIST_INIT(&ret->hashtable[i]);
    }

    /* Add initial span */
    if (!source && size)
        vmem_add(ret, base, size, vmflag);

    return 0;
}
void vmem_destroy(Vmem *vmp)
{
    VmemSegment *seg;
    size_t i;

    for (i = 0; i < ARR_SIZE(vmp->hashtable); i++)
        ASSERT(LIST_EMPTY(&vmp->hashtable[i]));

    TAILQ_FOREACH(seg, &vmp->segqueue, segqueue)
    {
        seg_free(seg);
    }
}
void *vmem_add(Vmem *vmp, void *addr, size_t size, int vmflag)
{
    ASSERT(!vmem_contains(vmp, addr, size));

    vmp->stat.free += size;
    vmp->stat.total += size;
    (void)vmflag;
    return vmem_add_internal(vmp, addr, size, false);
}
void *vmem_xalloc(Vmem *vmp, size_t size, size_t align, size_t phase,
                  size_t nocross, void *minaddr, void *maxaddr, int vmflag)
{
    VmemSegList *first_list = freelist_for_size(vmp, size), *end = &vmp->freelist[FREELISTS_N], *list = NULL;
    VmemSegment *new_seg = NULL, *new_seg2 = NULL, *seg = NULL;
    uintptr_t start = 0;
    void *ret = NULL;

    ASSERT(nocross == 0 && "Not implemented yet");

    /* If we don't want a specific alignment, we can just use the quantum */
    /* FIXME: What if `align` is not quantum-aligned? Maybe add an ASSERT()? */
    if (align == 0)
    {
        align = vmp->quantum;
    }

    if (!(vmflag & VM_BOOTSTRAP))
        ASSERT(repopulate_segments() == 0);

    /* Allocate the new segments */
    /* NOTE: new_seg2 might be unused; in that case, it is freed */
    new_seg = seg_alloc();
    new_seg2 = seg_alloc();

    ASSERT(new_seg && new_seg2);

    while (true)
    {
        if (vmflag & VM_INSTANTFIT) /* VM_INSTANTFIT */
        {
            /* If the size is not a power of two, use freelist[n+1] instead of freelist[n] */
            if ((size & (size - 1)) != 0)
            {
                first_list++;
            }

            /* We just get the first segment from the list. This ensures constant-time allocation.
             * Note that we do not need to check the size of the segments because they are guaranteed to be big enough (see freelist_for_size).
             */
            for (list = first_list; list < end; list++)
            {
                seg = LIST_FIRST(list);
                if (seg != NULL)
                {
                    if (seg_fit(seg, size, align, phase, nocross, (uintptr_t)minaddr, (uintptr_t)maxaddr, &start) == 0)
                        goto found;
                }
            }
        }
        else if (vmflag & VM_BESTFIT) /* VM_BESTFIT */
        {
            /* TODO: Should we bother going through the entire list to find the absolute best fit? */

            /* We go through every segment in every list until we find the smallest free segment that can satisfy the allocation */
            for (list = first_list; list < end; list++)
                LIST_FOREACH(seg, list, seglist)
                {
                    if (seg->size >= size)
                    {
                        /* Try to make the segment fit */
                        if (seg_fit(seg, size, align, phase, nocross, (uintptr_t)minaddr, (uintptr_t)maxaddr, &start) == 0)
                            goto found;
                    }
                }
        }
        else if (vmflag & VM_NEXTFIT)
        {
            ASSERT(!"TODO: implement nextfit");
        }

        if (vmem_import(vmp, size, vmflag) == 0)
        {
            continue;
        }

        ASSERT(!"Allocation failed");
        return NULL;
    }

found:
    ASSERT(seg != NULL);
    ASSERT(seg->type == SEGMENT_FREE);
    ASSERT(seg->size >= size);

    /* Remove the segment from the freelist; it may be added back once modified */
    LIST_REMOVE(seg, seglist);

    if (seg->base != start)
    {
        /* If `start` is not the base of the segment, we need to create another segment:
         * new_seg2 is a free segment that spans [base, start).
         * We also need to make `seg` start at `start` and reduce its size.
         * For example, if we allocate the segment [0x100, 0x1000] in a [0, 0x10000] span, we need to split [0, 0x10000] into
         * [0x0, 0x100] (free), [0x100, 0x1000] (allocated), [0x1000, 0x10000] (free). In this case, `base` is 0 and `start` is 0x100.
         * This creates a free segment of size 0x100-0 that starts at 0.
         */
        new_seg2->type = SEGMENT_FREE;
        new_seg2->base = seg->base;
        new_seg2->size = start - seg->base;

        /* Make `seg` start at `start`; following the example, this makes `seg->base` 0x100 */
        seg->base = start;

        /* Since we offset the segment, we need to reduce `seg`'s size accordingly */
        seg->size -= new_seg2->size;

        vmem_add_to_freelist(vmp, new_seg2);

        /* Put this new segment before the allocated segment */
        vmem_insert_segment(vmp, new_seg2, TAILQ_PREV(seg, VmemSegQueue, segqueue));

        /* Ensure it doesn't get freed */
        new_seg2 = NULL;
    }

    ASSERT(seg->base == start);

    if (seg->size != size && (seg->size - size) > vmp->quantum - 1)
    {
        /* In the case where the segment's size is bigger than the requested size, we need to split the segment into two:
         * one free part of size `seg->size - size` and another, allocated, one of size `size`. For example, if we want to allocate [0, 0x1000]
         * and the segment is [0, 0x10000], we have to create a new segment, [0, 0x1000], and offset the current segment by `size`, ending up with:
         * [0, 0x1000] (allocated), [0x1000, 0x10000] (free) */
        new_seg->type = SEGMENT_ALLOCATED;
        new_seg->base = seg->base;
        new_seg->size = size;

        /* Offset the segment */
        seg->base += size;
        seg->size -= size;

        /* Add it back to the freelist */
        vmem_add_to_freelist(vmp, seg);

        /* Put this new allocated segment before the segment */
        vmem_insert_segment(vmp, new_seg, TAILQ_PREV(seg, VmemSegQueue, segqueue));

        hashtab_insert(vmp, new_seg);
    }
    else
    {
        seg->type = SEGMENT_ALLOCATED;
        hashtab_insert(vmp, seg);
        seg_free(new_seg);
        new_seg = seg;
    }

    if (new_seg2 != NULL)
        seg_free(new_seg2);

    ASSERT(new_seg->size >= size);

    vmp->stat.free -= new_seg->size;
    vmp->stat.in_use += new_seg->size;

    new_seg->type = SEGMENT_ALLOCATED;

    ret = (void *)new_seg->base;

    return ret;
}
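/* Worked example (hypothetical numbers, quantum 0x1000): xalloc(size=0x1000, align=0x2000)
 * against a free segment [0x1000, 0x6000) picks start = 0x2000; the segment is then split into
 * [0x1000, 0x2000) (free, new_seg2), [0x2000, 0x3000) (allocated, new_seg) and
 * [0x3000, 0x6000) (free, the original seg, reinserted into its freelist). */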

void *vmem_alloc(Vmem *vmp, size_t size, int vmflag)
{
    return vmem_xalloc(vmp, size, 0, 0, 0, (void *)VMEM_ADDR_MIN, (void *)VMEM_ADDR_MAX, vmflag);
}
void vmem_xfree(Vmem *vmp, void *addr, size_t size)
{
    VmemSegment *seg, *neighbor;
    VmemSegList *list;

    list = hashtable_for_addr(vmp, (uintptr_t)addr);

    LIST_FOREACH(seg, list, seglist)
    {
        if (seg->base == (uintptr_t)addr)
        {
            break;
        }
    }

    ASSERT(seg != NULL);
    ASSERT(seg->size == size);

    /* Remove the segment from the hashtable */
    LIST_REMOVE(seg, seglist);

    /* Coalesce to the right */
    neighbor = TAILQ_NEXT(seg, segqueue);

    if (neighbor && neighbor->type == SEGMENT_FREE)
    {
        /* Remove our neighbor since we're merging with it */
        LIST_REMOVE(neighbor, seglist);

        TAILQ_REMOVE(&vmp->segqueue, neighbor, segqueue);

        seg->size += neighbor->size;

        seg_free(neighbor);
    }

    /* Coalesce to the left */
    neighbor = TAILQ_PREV(seg, VmemSegQueue, segqueue);

    if (neighbor->type == SEGMENT_FREE)
    {
        LIST_REMOVE(neighbor, seglist);
        TAILQ_REMOVE(&vmp->segqueue, neighbor, segqueue);

        seg->size += neighbor->size;
        seg->base = neighbor->base;

        seg_free(neighbor);
    }

    neighbor = TAILQ_PREV(seg, VmemSegQueue, segqueue);

    ASSERT(neighbor->type == SEGMENT_SPAN || neighbor->type == SEGMENT_ALLOCATED);

    seg->type = SEGMENT_FREE;

    if (vmp->free != NULL && neighbor->type == SEGMENT_SPAN && neighbor->imported == true && neighbor->size == seg->size)
    {
        uintptr_t span_addr = seg->base;
        size_t span_size = seg->size;

        TAILQ_REMOVE(&vmp->segqueue, seg, segqueue);
        seg_free(seg);
        TAILQ_REMOVE(&vmp->segqueue, neighbor, segqueue);
        seg_free(neighbor);

        vmp->free(vmp->source, (void *)span_addr, span_size);
    }
    else
    {
        vmem_add_to_freelist(vmp, seg);
    }

    vmp->stat.in_use -= size;
    vmp->stat.free += size;
}
void vmem_free(Vmem *vmp, void *addr, size_t size)
{
    vmem_xfree(vmp, addr, size);
}
void vmem_dump(Vmem *vmp)
{
    VmemSegment *span;
    size_t i;

    vmem_printf("-- VMem arena \"%s\" segments --\n", vmp->name);

    TAILQ_FOREACH(span, &vmp->segqueue, segqueue)
    {
        vmem_printf("[0x%lx, 0x%lx] (%s)",
                    span->base, span->base + span->size, seg_type_str[span->type]);
        if (span->imported)
            vmem_printf(" (imported)");
        vmem_printf("\n");
    }

    vmem_printf("Hashtable:\n");

    for (i = 0; i < ARR_SIZE(vmp->hashtable); i++)
        LIST_FOREACH(span, &vmp->hashtable[i], seglist)
        {
            vmem_printf("%lx: [address: %p, size %p]\n", murmur64(span->base), (void *)span->base, (void *)span->size);
        }
    vmem_printf("Stat:\n");
    vmem_printf("- in_use: %zu\n", vmp->stat.in_use);
    vmem_printf("- free: %zu\n", vmp->stat.free);
    vmem_printf("- total: %zu\n", vmp->stat.total);
}
void vmem_bootstrap(void)
{
    size_t i;
    for (i = 0; i < ARR_SIZE(static_segs); i++)
    {
        seg_free(&static_segs[i]);
    }
}
src/kernel/klibs/tinyvmem/tinyvmem.h (new file, 157 lines)

@@ -0,0 +1,157 @@
/* Implementation of the VMem resource allocator,
   as described in https://www.usenix.org/legacy/event/usenix01/full_papers/bonwick/bonwick.pdf
*/

#ifndef _VMEM_H
#define _VMEM_H
#include <limits.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/queue.h>

/* Directs vmem to use the smallest
   free segment that can satisfy the allocation. This
   policy tends to minimize fragmentation of very
   small, precious resources (cited from paper) */
#define VM_BESTFIT (1 << 0)

/* Directs vmem to provide a
   good approximation to best-fit in guaranteed
   constant time. This is the default allocation policy. (cited from paper) */
#define VM_INSTANTFIT (1 << 1)

/* Directs vmem to use the next free
   segment after the one previously allocated. This is
   useful for things like process IDs, where we want
   to cycle through all the IDs before reusing them. (cited from paper) */
#define VM_NEXTFIT (1 << 2)

#define VM_SLEEP (1 << 3)
#define VM_NOSLEEP (1 << 4)

/* Used to eliminate cyclic dependencies when refilling the segment freelist:
   we need to allocate new segments, but to allocate new segments, we need to refill the list. This flag ensures that no refilling occurs. */
#define VM_BOOTSTRAP (1 << 5)

#define VMEM_ERR_NO_MEM 1
struct vmem;

/* Vmem allows one arena to import its resources from
   another. vmem_create() specifies the source arena,
   and the functions to allocate and free from that source. The arena imports new spans as needed, and gives
   them back when all their segments have been freed. (cited from paper) These types describe those functions.
*/
typedef void *VmemAlloc(struct vmem *vmem, size_t size, int flags);
typedef void VmemFree(struct vmem *vmem, void *addr, size_t size);
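/* Example (illustrative, not part of this header): vmem_alloc() and vmem_free()
   below match these shapes exactly, so a sub-arena can import spans directly
   from a parent arena:

       Vmem sub;
       vmem_init(&sub, "sub", NULL, 0, 0x1000,
                 vmem_alloc, vmem_free, &parent, 0, 0);
*/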

/* We can't use boundary tags because the resource we're managing is not necessarily memory.
   To counter this, we can use *external boundary tags*. For each segment in the arena,
   we allocate a boundary tag to manage it. */

/* sizeof(void *) * CHAR_BIT (8) freelists provide a freelist for every power-of-two length that can fit within the host's virtual address space (64-bit) */
#define FREELISTS_N (sizeof(void *) * CHAR_BIT)
#define HASHTABLES_N 16
typedef struct vmem_segment
{
    enum
    {
        SEGMENT_ALLOCATED,
        SEGMENT_FREE,
        SEGMENT_SPAN
    } type;

    bool imported; /* True if imported */

    uintptr_t base; /* Base address of the segment */
    uintptr_t size; /* Size of the segment */

    /* clang-format off */
    TAILQ_ENTRY(vmem_segment) segqueue; /* Points to Vmem::segqueue */
    LIST_ENTRY(vmem_segment) seglist; /* If free, points to Vmem::freelist; if allocated, points to Vmem::hashtable; else Vmem::spanlist */
    /* clang-format on */

} VmemSegment;

typedef LIST_HEAD(VmemSegList, vmem_segment) VmemSegList;
typedef TAILQ_HEAD(VmemSegQueue, vmem_segment) VmemSegQueue;
/* Statistics about a Vmem arena. NOTE: this isn't described in the original paper and was added by me. Inspired by Illumos' and Solaris' vmem_kstat_t */
typedef struct
{
    size_t in_use; /* Memory in use */
    size_t import; /* Imported memory */
    size_t total;  /* Total memory in the arena */
    size_t alloc;  /* Number of allocations */
    size_t free;   /* Free memory */
} VmemStat;
/* Description of an arena, a collection of resources. An arena is simply a set of integers. */
typedef struct vmem
{
    char name[64];       /* Descriptive name for debugging purposes */
    void *base;          /* Start of initial span */
    size_t size;         /* Size of initial span */
    size_t quantum;      /* Unit of currency */
    VmemAlloc *alloc;    /* Import alloc function */
    VmemFree *free;      /* Import free function */
    struct vmem *source; /* Import arena */
    size_t qcache_max;   /* Maximum size to cache */
    int vmflag;          /* VM_SLEEP or VM_NOSLEEP */

    VmemSegQueue segqueue;
    VmemSegList freelist[FREELISTS_N];   /* Power-of-two freelists. freelist[n] contains all free segments whose sizes are in the range [2^n, 2^(n+1)) */
    VmemSegList hashtable[HASHTABLES_N]; /* Allocated segments */
    VmemSegList spanlist;                /* Span marker segments */

    VmemStat stat;
} Vmem;
/* Initializes a vmem arena (no malloc) */
int vmem_init(Vmem *vmem, char *name, void *base, size_t size, size_t quantum, VmemAlloc *afunc, VmemFree *ffunc, Vmem *source, size_t qcache_max, int vmflag);

/* Destroys arena `vmp` */
void vmem_destroy(Vmem *vmp);

/* Allocates size bytes from vmp. Returns the allocated address on success, NULL on failure.
   vmem_alloc() fails only if vmflag specifies VM_NOSLEEP and no resources are currently available.
   vmflag may also specify an allocation policy (VM_BESTFIT, VM_INSTANTFIT, or VM_NEXTFIT).
   If no policy is specified, the default is VM_INSTANTFIT, which provides a good
   approximation to best-fit in guaranteed constant time. (cited from paper) */
void *vmem_alloc(Vmem *vmp, size_t size, int vmflag);

/* Frees `size` bytes at address `addr` in arena `vmp` */
void vmem_free(Vmem *vmp, void *addr, size_t size);

/*
   Allocates size bytes at offset phase from an align boundary such that the resulting segment
   [addr, addr + size) is a subset of [minaddr, maxaddr) that does not straddle a nocross-
   aligned boundary. vmflag is as above. One performance caveat: if either minaddr or maxaddr is
   non-NULL, vmem may not be able to satisfy the allocation in constant time. If allocations within a
   given [minaddr, maxaddr) range are common, it is more efficient to declare that range to be its own
   arena and use unconstrained allocations on the new arena. (cited from paper)
*/
void *vmem_xalloc(Vmem *vmp, size_t size, size_t align, size_t phase,
                  size_t nocross, void *minaddr, void *maxaddr, int vmflag);

/*
   Frees size bytes at addr, where addr was a constrained allocation. vmem_xfree() must be used if
   the original allocation was a vmem_xalloc(), because both routines bypass the quantum caches. (cited from paper)
*/
void vmem_xfree(Vmem *vmp, void *addr, size_t size);

/* Adds the span [addr, addr + size) to arena vmp. Returns addr on success, NULL on failure.
   vmem_add() will fail only if vmflag is VM_NOSLEEP and no resources are currently available. (cited from paper) */
void *vmem_add(Vmem *vmp, void *addr, size_t size, int vmflag);

/* Dumps the arena `vmp` using the `vmem_printf` function */
void vmem_dump(Vmem *vmp);

/* Seeds Vmem's global segment freelist; call once before the first vmem_init() */
void vmem_bootstrap(void);

void vmem_set_page_alloc(void *(*func)(size_t));

#endif
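
A minimal usage sketch (not part of the commit; names and values are illustrative). It exercises the hosted, non-kernel path of tinyvmem.c, where ASSERT maps to assert() and vmem_alloc_pages() to malloc(): seed the global segment freelist with vmem_bootstrap(), create an arena over an arbitrary integer range, then allocate and free from it.

    #include <stdio.h>
    #include "tinyvmem.h"

    int main(void)
    {
        static Vmem arena;

        /* Seed the global segment freelist with the static bootstrap segments */
        vmem_bootstrap();

        /* Manage the integer range [0x1000, 0x11000) in 4 KiB quanta; no source arena */
        vmem_init(&arena, "demo", (void *)0x1000, 0x10000, 0x1000,
                  NULL, NULL, NULL, 0, 0);

        /* Constant-time allocation of two quanta */
        void *a = vmem_alloc(&arena, 0x2000, VM_INSTANTFIT);
        printf("allocated [%p, %p)\n", a, (void *)((char *)a + 0x2000));

        vmem_free(&arena, a, 0x2000);
        vmem_dump(&arena);
        return 0;
    }

Because the arena manages plain integers, the "addresses" returned are never dereferenced here; the same pattern works for any resource space (PIDs, IRQ vectors, virtual address ranges).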