#define SLOTS_ALL_ZERO ((uint64_t) 0)
#define SLOTS_FIRST ((uint64_t) 1)
#define FIRST_FREE_SLOT(s) ((size_t) __builtin_ctzll(s))
#define FREE_SLOTS(s) ((size_t) __builtin_popcountll(s))

/* true iff exactly one slot is occupied: ~slots selects the used slots,
 * and x & (x - 1) clears the lowest set bit, so the result is zero only
 * when at most one slot is in use */
#define ONE_USED_SLOT(slots, empty_slotmask) \
    ( \
        ( \
            (~(slots) & (empty_slotmask)) & \
            ((~(slots) & (empty_slotmask)) - 1) \
        ) == SLOTS_ALL_ZERO \
    )
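/*
 * Worked example of the slot-mask helpers (illustrative values only,
 * assuming a hypothetical 4-item slab whose empty_slotmask is 0xF;
 * set bits mark FREE slots):
 *
 *   FIRST_FREE_SLOT(0x6) == 1    -- lowest set bit is bit 1
 *   FREE_SLOTS(0x6)      == 2    -- two slots still free
 *   ONE_USED_SLOT(0x7, 0xF)      -- ~0x7 & 0xF == 0x8, and
 *                                   0x8 & (0x8 - 1) == 0, so true
 */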
static int slab_is_valid(const struct slab_chain *const sch)
{
    assert(POWEROF2(PAGE_SIZE));
    assert(POWEROF2(sch->slabsize));
    assert(POWEROF2(sch->pages_per_alloc));

    assert(sch->itemcount >= 2 && sch->itemcount <= 64);
    assert(sch->itemsize >= 1 && sch->itemsize <= SIZE_MAX);
    assert(sch->pages_per_alloc >= PAGE_SIZE);
    assert(sch->pages_per_alloc >= sch->slabsize);

    assert(offsetof(struct slab_header, data) +
        sch->itemsize * sch->itemcount <= sch->slabsize);

    assert(sch->empty_slotmask == ~SLOTS_ALL_ZERO >> (64 - sch->itemcount));
    assert(sch->initial_slotmask == (sch->empty_slotmask ^ SLOTS_FIRST));
    assert(sch->alignment_mask == ~(sch->slabsize - 1));
    const struct slab_header *const heads[] =
        { sch->full, sch->empty, sch->partial };

    for (size_t head = 0; head < 3; ++head) {
        const struct slab_header *prev = NULL, *slab;

        for (slab = heads[head]; slab != NULL; slab = slab->next) {
            if (prev == NULL)
                assert(slab->prev == NULL);
            else
                assert(slab->prev == prev);

            switch (head) {
            case 0: /* full slabs: no free slots */
                assert(slab->slots == SLOTS_ALL_ZERO);
                break;
            case 1: /* empty slabs: all slots free */
                assert(slab->slots == sch->empty_slotmask);
                break;
            case 2: /* partial slabs: some, but not all, slots free */
                assert((slab->slots & ~sch->empty_slotmask) == SLOTS_ALL_ZERO);
                assert(FREE_SLOTS(slab->slots) >= 1);
                assert(FREE_SLOTS(slab->slots) < sch->itemcount);
                break;
            }

            if (slab->refcount == 0) {
                assert((uintptr_t) slab % sch->slabsize == 0);
                if (sch->slabsize >= PAGE_SIZE)
                    assert((uintptr_t) slab->page % sch->slabsize == 0);
                else
                    assert((uintptr_t) slab->page % PAGE_SIZE == 0);
            } else {
                if (sch->slabsize >= PAGE_SIZE)
                    assert((uintptr_t) slab % sch->slabsize == 0);
                else
                    assert((uintptr_t) slab % PAGE_SIZE == 0);
            }

            prev = slab;
        }
    }

    return 1;
}
struct slab_chain *slab_init(const size_t itemsize)
{
    assert(itemsize >= 1 && itemsize <= SIZE_MAX);
    assert(POWEROF2(PAGE_SIZE));

    /* the chain descriptor itself is heap-allocated and returned;
     * releasing it after slab_destroy() is the caller's job */
    struct slab_chain *const sch = malloc(sizeof(struct slab_chain));
    assert(sch != NULL);
    sch->itemsize = itemsize;

    const size_t data_offset = offsetof(struct slab_header, data);
    const size_t least_slabsize = data_offset + 64 * sch->itemsize;
    sch->slabsize = (size_t) 1 << (size_t) ceil(log2(least_slabsize));
    sch->itemcount = 64;

    /* if rounding up to a power of two left slack, try halving the slab
     * and fitting fewer items into it */
    if (sch->slabsize - least_slabsize != 0) {
        const size_t shrinked_slabsize = sch->slabsize >> 1;

        if (data_offset < shrinked_slabsize &&
            shrinked_slabsize - data_offset >= 2 * sch->itemsize) {
            sch->slabsize = shrinked_slabsize;
            sch->itemcount = (shrinked_slabsize - data_offset) / sch->itemsize;
        }
    }

    sch->pages_per_alloc = sch->slabsize > PAGE_SIZE ?
        sch->slabsize : PAGE_SIZE;

    sch->empty_slotmask = ~SLOTS_ALL_ZERO >> (64 - sch->itemcount);
    sch->initial_slotmask = sch->empty_slotmask ^ SLOTS_FIRST;
    sch->alignment_mask = ~(sch->slabsize - 1);
    sch->partial = sch->empty = sch->full = NULL;

    assert(slab_is_valid(sch));

    return sch;
}
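/*
 * Worked example of the sizing logic above (illustrative; assumes
 * offsetof(struct slab_header, data) == 48 on the target ABI): for
 * itemsize == 40, least_slabsize is 48 + 64 * 40 == 2608, which rounds
 * up to a slabsize of 4096. Since 4096 != 2608, the shrink test checks
 * 2048 - 48 >= 2 * 40; it holds, so slabsize drops to 2048 and
 * itemcount becomes (2048 - 48) / 40 == 50.
 */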
void *slab_alloc(struct slab_chain *const sch)
{
    void *ret;

    assert(slab_is_valid(sch));

    if (likely(sch->partial != NULL)) {
        /* found a partial slab, grab its first free slot */
        register const size_t slot = FIRST_FREE_SLOT(sch->partial->slots);
        sch->partial->slots ^= SLOTS_FIRST << slot;

        if (unlikely(sch->partial->slots == SLOTS_ALL_ZERO)) {
            /* slab has become full: move it to the full list */
            struct slab_header *const tmp = sch->partial;

            if (likely((sch->partial = sch->partial->next) != NULL))
                sch->partial->prev = NULL;

            if (likely((tmp->next = sch->full) != NULL))
                sch->full->prev = tmp;

            sch->full = tmp;
            ret = sch->full->data + slot * sch->itemsize;
        } else {
            ret = sch->partial->data + slot * sch->itemsize;
        }
    } else if (likely((sch->partial = sch->empty) != NULL)) {
        /* found an empty slab: move it to the partial list */
        if (likely((sch->empty = sch->empty->next) != NULL))
            sch->empty->prev = NULL;

        sch->partial->next = NULL;

        /* the slab is either a page header itself (refcount != 0)
         * or belongs to one */
        unlikely(sch->partial->refcount != 0) ?
            sch->partial->refcount++ : sch->partial->page->refcount++;

        sch->partial->slots = sch->initial_slotmask;
        ret = sch->partial->data;
    } else {
        /* no empty or partial slabs available: allocate a fresh page */
        if (sch->slabsize <= PAGE_SIZE) {
            sch->partial = mmap(NULL, sch->pages_per_alloc,
                PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (unlikely(sch->partial == MAP_FAILED)) {
                perror("mmap");
                ret = sch->partial = NULL;
                return ret;
            }
        } else {
            const int err = posix_memalign((void **) &sch->partial,
                sch->slabsize, sch->pages_per_alloc);

            if (unlikely(err != 0)) {
                fprintf(stderr, "posix_memalign(align=%zu, size=%zu): %d\n",
                    sch->slabsize, sch->pages_per_alloc, err);
                ret = sch->partial = NULL;
                return ret;
            }
        }

        struct slab_header *prev = NULL;

        const char *const page_end =
            (char *) sch->partial + sch->pages_per_alloc;

        union {
            const char *c;
            struct slab_header *const s;
        } curr = {
            .c = (const char *) sch->partial + sch->slabsize
        };

        __builtin_prefetch(sch->partial, 1);

        /* the first slab of the page doubles as the page header */
        sch->partial->prev = sch->partial->next = NULL;
        sch->partial->refcount = 1;
        sch->partial->slots = sch->initial_slotmask;

        /* chain the remaining slabs of the page onto the empty list */
        if (likely(curr.c != page_end)) {
            curr.s->prev = NULL;
            curr.s->refcount = 0;
            curr.s->page = sch->partial;
            curr.s->slots = sch->empty_slotmask;
            sch->empty = prev = curr.s;

            while (likely((curr.c += sch->slabsize) != page_end)) {
                prev->next = curr.s;
                curr.s->prev = prev;
                curr.s->refcount = 0;
                curr.s->page = sch->partial;
                curr.s->slots = sch->empty_slotmask;
                prev = curr.s;
            }

            prev->next = NULL;
        }

        ret = sch->partial->data;
    }

    return ret;
}
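/*
 * Illustrative sketch, not part of the allocator: because slab_alloc hands
 * out pointers into slabsize-aligned slabs, masking an item pointer with
 * alignment_mask recovers the owning slab_header. slab_free below relies
 * on exactly this property; slab_owner() is a hypothetical helper that
 * makes the mapping explicit.
 */
static inline struct slab_header *slab_owner(
    const struct slab_chain *const sch, const void *const addr)
{
    return (struct slab_header *) ((uintptr_t) addr & sch->alignment_mask);
}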
void slab_free(struct slab_chain *const sch, const void *const addr)
{
    assert(slab_is_valid(sch));
    assert(addr != NULL);

    /* slabs are slabsize-aligned, so masking the item's address
     * recovers its slab header */
    struct slab_header *const slab = (void *)
        ((uintptr_t) addr & sch->alignment_mask);

    register const int slot = ((char *) addr - (char *) slab -
        offsetof(struct slab_header, data)) / sch->itemsize;

    if (unlikely(slab->slots == SLOTS_ALL_ZERO)) {
        /* target slab was full: move it back to the partial list */
        slab->slots = SLOTS_FIRST << slot;

        if (likely(slab != sch->full)) {
            if (likely((slab->prev->next = slab->next) != NULL))
                slab->next->prev = slab->prev;

            slab->prev = NULL;
        } else if (likely((sch->full = sch->full->next) != NULL)) {
            sch->full->prev = NULL;
        }

        slab->next = sch->partial;

        if (likely(sch->partial != NULL))
            sch->partial->prev = slab;

        sch->partial = slab;
    } else if (unlikely(ONE_USED_SLOT(slab->slots, sch->empty_slotmask))) {
        /* freeing the last occupied slot of this slab */
        if (unlikely(slab->refcount == 1 || (slab->refcount == 0 &&
            slab->page->refcount == 1))) {
            /* it is also the last used slab of its page: release the page */
            if (likely(slab != sch->partial)) {
                if (likely((slab->prev->next = slab->next) != NULL))
                    slab->next->prev = slab->prev;
            } else if (likely((sch->partial = sch->partial->next) != NULL)) {
                sch->partial->prev = NULL;
            }

            void *const page =
                unlikely(slab->refcount != 0) ? slab : slab->page;
            const char *const page_end =
                (char *) page + sch->pages_per_alloc;
            char found_head = 0;

            union {
                const char *c;
                const struct slab_header *s;
            } s;

            /* unlink every other slab of this page from the empty list */
            for (s.c = page; s.c != page_end; s.c += sch->slabsize) {
                if (unlikely(s.s == sch->empty))
                    found_head = 1;
                else if (unlikely(s.s == slab))
                    continue;
                else if (likely((s.s->prev->next = s.s->next) != NULL))
                    s.s->next->prev = s.s->prev;
            }

            if (unlikely(found_head && (sch->empty = sch->empty->next) != NULL))
                sch->empty->prev = NULL;

            if (sch->slabsize <= PAGE_SIZE) {
                if (unlikely(munmap(page, sch->pages_per_alloc) == -1))
                    perror("munmap");
            } else {
                free(page);
            }
        } else {
            /* the page still hosts used slabs: move this one to the
             * empty list */
            slab->slots = sch->empty_slotmask;

            if (likely(slab != sch->partial)) {
                if (likely((slab->prev->next = slab->next) != NULL))
                    slab->next->prev = slab->prev;

                slab->prev = NULL;
            } else if (likely((sch->partial = sch->partial->next) != NULL)) {
                sch->partial->prev = NULL;
            }

            slab->next = sch->empty;

            if (likely(sch->empty != NULL))
                sch->empty->prev = slab;

            sch->empty = slab;

            unlikely(slab->refcount != 0) ?
                slab->refcount-- : slab->page->refcount--;
        }
    } else {
        /* slab stays partial: just mark the slot free again */
        slab->slots |= SLOTS_FIRST << slot;
    }
}
void slab_destroy(const struct slab_chain *const sch)
{
    assert(slab_is_valid(sch));

    struct slab_header *const heads[] = { sch->partial, sch->empty, sch->full };
    struct slab_header *pages_head = NULL, *pages_tail;

    /* collect the page-leading slabs (refcount != 0) from all three lists */
    for (size_t i = 0; i < 3; ++i) {
        struct slab_header *slab = heads[i];

        while (slab != NULL) {
            if (slab->refcount != 0) {
                struct slab_header *const page = slab;
                slab = slab->next;

                if (unlikely(pages_head == NULL))
                    pages_head = page;
                else
                    pages_tail->next = page;

                pages_tail = page;
            } else {
                slab = slab->next;
            }
        }
    }

    if (likely(pages_head != NULL)) {
        pages_tail->next = NULL;
        struct slab_header *page = pages_head;

        if (sch->slabsize <= PAGE_SIZE) {
            do {
                void *const target = page;
                page = page->next;

                if (unlikely(munmap(target, sch->pages_per_alloc) == -1))
                    perror("munmap");
            } while (page != NULL);
        } else {
            do {
                void *const target = page;
                page = page->next;
                free(target);
            } while (page != NULL);
        }
    }
}
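/*
 * Minimal usage sketch (illustrative; assumes the declarations above are
 * visible through slab.h and that PAGE_SIZE matches the system page size).
 * Compile with -DSLAB_EXAMPLE to build it.
 */
#ifdef SLAB_EXAMPLE
int main(void)
{
    struct slab_chain *const sch = slab_init(sizeof(double));

    double *const item = slab_alloc(sch);   /* grab one slot */
    *item = 42.0;

    slab_free(sch, item);                   /* return the slot */
    slab_destroy(sch);                      /* release all pages */
    free(sch);                              /* descriptor from slab_init */

    return 0;
}
#endif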