#define SLOTS_ALL_ZERO ((uint64_t) 0)
#define SLOTS_FIRST ((uint64_t) 1)
#define FIRST_FREE_SLOT(s) ((size_t) __builtin_ctzll(s))
#define FREE_SLOTS(s) ((size_t) __builtin_popcountll(s))

/* True iff exactly one slot is in use: the used-slot bits are ~slots masked
 * to the valid range, and x & (x - 1) clears the lowest set bit, so the
 * whole expression is zero when at most one bit was set. */
#define ONE_USED_SLOT(slots, empty_slotmask) \
    ( \
        ( \
            (~(slots) & (empty_slotmask)) & \
            ((~(slots) & (empty_slotmask)) - 1) \
        ) == SLOTS_ALL_ZERO \
    )

/* This listing starts mid-file: the headers it relies on (<assert.h>,
 * <stdint.h>, <stddef.h>, <stdio.h>, <stdlib.h>, <string.h>, <math.h>,
 * <sys/mman.h>), the struct slab_chain / struct slab_header definitions, and
 * the PAGE_SIZE, POWEROF2, likely() and unlikely() macros are defined
 * elsewhere in the project. */

static int slab_is_valid(const struct slab_chain *const sch)
{
    assert(POWEROF2(PAGE_SIZE));
    assert(POWEROF2(sch->slabsize));
    assert(POWEROF2(sch->pages_per_alloc));
    assert(sch->itemcount >= 2 && sch->itemcount <= 64);
    assert(sch->itemsize >= 1 && sch->itemsize <= SIZE_MAX);
    assert(sch->pages_per_alloc >= PAGE_SIZE);
    assert(sch->pages_per_alloc >= sch->slabsize);
    assert(offsetof(struct slab_header, data) +
        sch->itemsize * sch->itemcount <= sch->slabsize);
    assert(sch->empty_slotmask == ~SLOTS_ALL_ZERO >> (64 - sch->itemcount));
    assert(sch->initial_slotmask == (sch->empty_slotmask ^ SLOTS_FIRST));
    assert(sch->alignment_mask == ~(sch->slabsize - 1));

    const struct slab_header *const heads[] =
        { sch->full, sch->empty, sch->partial };

    for (size_t head = 0; head < 3; ++head) {
        const struct slab_header *prev = NULL, *slab;

        for (slab = heads[head]; slab != NULL; slab = slab->next) {
            /* the back pointer must mirror the forward traversal */
            if (prev == NULL)
                assert(slab->prev == NULL);
            else
                assert(slab->prev == prev);

            switch (head) {
            case 0: /* full list: no free slots */
                assert(slab->slots == SLOTS_ALL_ZERO);
                break;
            case 1: /* empty list: every slot free */
                assert(slab->slots == sch->empty_slotmask);
                break;
            case 2: /* partial list: some, but not all, slots free */
                assert((slab->slots & ~sch->empty_slotmask) == SLOTS_ALL_ZERO);
                assert(FREE_SLOTS(slab->slots) >= 1);
                assert(FREE_SLOTS(slab->slots) < sch->itemcount);
                break;
            }

            if (slab->refcount == 0) {
                assert((uintptr_t) slab % sch->slabsize == 0);

                if (sch->slabsize >= PAGE_SIZE)
                    assert((uintptr_t) slab->page % sch->slabsize == 0);
                else
                    assert((uintptr_t) slab->page % PAGE_SIZE == 0);
            } else {
                if (sch->slabsize >= PAGE_SIZE)
                    assert((uintptr_t) slab % sch->slabsize == 0);
                else
                    assert((uintptr_t) slab % PAGE_SIZE == 0);
            }

            prev = slab;
        }
    }

    return 1;
}
struct slab_chain *slab_init(const size_t itemsize)
{
    assert(itemsize >= 1 && itemsize <= SIZE_MAX);
    assert(POWEROF2(PAGE_SIZE));

    /* the allocation call for the chain descriptor is not visible in this
       listing; a plain malloc() is assumed here */
    struct slab_chain *const sch = malloc(sizeof(struct slab_chain));
    sch->itemsize = itemsize;

    /* start from the smallest power-of-two slab that can hold 64 items */
    const size_t data_offset = offsetof(struct slab_header, data);
    const size_t least_slabsize = data_offset + 64 * sch->itemsize;
    sch->slabsize = (size_t) 1 << (size_t) ceil(log2(least_slabsize));
    sch->itemcount = 64;

    if (sch->slabsize - least_slabsize != 0) {
        /* shrink to the previous power of two if it still fits >= 2 items */
        const size_t shrinked_slabsize = sch->slabsize >> 1;

        if (data_offset < shrinked_slabsize &&
            shrinked_slabsize - data_offset >= 2 * sch->itemsize) {
            sch->slabsize = shrinked_slabsize;
            sch->itemcount = (shrinked_slabsize - data_offset) / sch->itemsize;
        }
    }

    sch->pages_per_alloc = sch->slabsize > PAGE_SIZE ?
        sch->slabsize : PAGE_SIZE;

    sch->empty_slotmask = ~SLOTS_ALL_ZERO >> (64 - sch->itemcount);
    sch->initial_slotmask = sch->empty_slotmask ^ SLOTS_FIRST;
    sch->alignment_mask = ~(sch->slabsize - 1);
    sch->partial = sch->empty = sch->full = NULL;

    assert(slab_is_valid(sch));

    return sch;
}
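/*
 * Worked example of the sizing logic above (not from the original source; the
 * 48-byte data offset is an assumption for illustration, the real value is
 * offsetof(struct slab_header, data)). For itemsize = 100:
 *   least_slabsize  = 48 + 64 * 100 = 6448  ->  slabsize = 2^13 = 8192
 *   shrink check:     4096 - 48 = 4048 >= 2 * 100, so slabsize becomes 4096
 *   itemcount       = 4048 / 100 = 40 items per slab
 *   pages_per_alloc = max(4096, PAGE_SIZE)
 *   empty_slotmask  = low 40 bits set; initial_slotmask clears bit 0
 */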
void *slab_alloc(struct slab_chain *const sch)
{
    assert(sch != NULL);
    assert(slab_is_valid(sch));

    if (likely(sch->partial != NULL)) {
        /* a partially used slab exists: hand out its first free slot */
        register const size_t slot = FIRST_FREE_SLOT(sch->partial->slots);
        sch->partial->slots ^= SLOTS_FIRST << slot;

        if (unlikely(sch->partial->slots == SLOTS_ALL_ZERO)) {
            /* the slab just became full: move it to the full list */
            struct slab_header *const tmp = sch->partial;

            if (likely((sch->partial = sch->partial->next) != NULL))
                sch->partial->prev = NULL;

            if (likely((tmp->next = sch->full) != NULL))
                sch->full->prev = tmp;

            sch->full = tmp;
            return sch->full->data + slot * sch->itemsize;
        } else {
            return sch->partial->data + slot * sch->itemsize;
        }
    } else if (likely((sch->partial = sch->empty) != NULL)) {
        /* an empty slab exists: promote it to partial and use its first slot */
        if (likely((sch->empty = sch->empty->next) != NULL))
            sch->empty->prev = NULL;

        sch->partial->next = NULL;

        /* count one more live slab on the owning page; the page head
           (the slab with refcount != 0) keeps the count */
        unlikely(sch->partial->refcount != 0) ?
            sch->partial->refcount++ : sch->partial->page->refcount++;

        sch->partial->slots = sch->initial_slotmask;
        return sch->partial->data;
    } else {
        /* no free slab at all: grab a fresh page and carve it into slabs */
        if (sch->slabsize <= PAGE_SIZE) {
            sch->partial = mmap(NULL, sch->pages_per_alloc,
                PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (unlikely(sch->partial == MAP_FAILED))
                return sch->partial = NULL;
        } else {
            const int err = posix_memalign((void **) &sch->partial,
                sch->slabsize, sch->pages_per_alloc);

            if (unlikely(err != 0)) {
                char err_str[512];
                strerror_r(err, err_str, 512);
                fprintf(stderr, "posix_memalign(align=%zu, size=%zu): %s\n",
                    sch->slabsize, sch->pages_per_alloc, err_str);
                return sch->partial = NULL;
            }
        }

        struct slab_header *prev = NULL;
        const char *const page_end =
            (char *) sch->partial + sch->pages_per_alloc;

        union {
            const char *c;
            struct slab_header *const s;
        } curr = {
            .c = (const char *) sch->partial + sch->slabsize
        };

        __builtin_prefetch(sch->partial, 1);

        /* the first slab heads the page and is handed out immediately */
        sch->partial->prev = sch->partial->next = NULL;
        sch->partial->refcount = 1;
        sch->partial->slots = sch->initial_slotmask;

        if (likely(curr.c != page_end)) {
            /* the remaining slabs of the page go onto the empty list */
            curr.s->prev = NULL;
            curr.s->refcount = 0;
            curr.s->page = sch->partial;
            curr.s->slots = sch->empty_slotmask;
            sch->empty = prev = curr.s;

            while (likely((curr.c += sch->slabsize) != page_end)) {
                prev->next = curr.s;
                curr.s->prev = prev;
                curr.s->refcount = 0;
                curr.s->page = sch->partial;
                curr.s->slots = sch->empty_slotmask;
                prev = curr.s;
            }

            prev->next = NULL;
        }

        return sch->partial->data;
    }
}
void slab_free(struct slab_chain *const sch, const void *const addr)
{
    assert(sch != NULL);
    assert(slab_is_valid(sch));
    assert(addr != NULL);

    /* every slab starts on a slabsize-aligned boundary, so masking the item
       address recovers the owning slab header */
    struct slab_header *const slab = (void *)
        ((uintptr_t) addr & sch->alignment_mask);

    register const int slot = ((char *) addr - (char *) slab -
        offsetof(struct slab_header, data)) / sch->itemsize;

    if (unlikely(slab->slots == SLOTS_ALL_ZERO)) {
        /* target slab was full: move it back to the partial list */
        slab->slots = SLOTS_FIRST << slot;

        if (likely(slab != sch->full)) {
            if (likely((slab->prev->next = slab->next) != NULL))
                slab->next->prev = slab->prev;
            slab->prev = NULL;
        } else if (likely((sch->full = sch->full->next) != NULL)) {
            sch->full->prev = NULL;
        }

        slab->next = sch->partial;

        if (likely(sch->partial != NULL))
            sch->partial->prev = slab;

        sch->partial = slab;
    } else if (unlikely(ONE_USED_SLOT(slab->slots, sch->empty_slotmask))) {
        /* this free empties the slab entirely */
        if (unlikely(slab->refcount == 1 ||
            (slab->refcount == 0 && slab->page->refcount == 1))) {
            /* it was the last live slab of its page: release the whole page */
            if (likely(slab != sch->partial)) {
                if (likely((slab->prev->next = slab->next) != NULL))
                    slab->next->prev = slab->prev;
            } else if (likely((sch->partial = sch->partial->next) != NULL)) {
                sch->partial->prev = NULL;
            }

            void *const page =
                unlikely(slab->refcount != 0) ? slab : slab->page;
            const char *const page_end = (char *) page + sch->pages_per_alloc;
            char found_head = 0;

            union {
                const char *c;
                const struct slab_header *s;
            } s;

            /* unlink every remaining (empty) slab of the page */
            for (s.c = page; s.c != page_end; s.c += sch->slabsize) {
                if (unlikely(s.s == sch->empty))
                    found_head = 1;
                else if (unlikely(s.s == slab))
                    continue;
                else if (likely((s.s->prev->next = s.s->next) != NULL))
                    s.s->next->prev = s.s->prev;
            }

            if (unlikely(found_head && (sch->empty = sch->empty->next) != NULL))
                sch->empty->prev = NULL;

            if (sch->slabsize <= PAGE_SIZE) {
                if (unlikely(munmap(page, sch->pages_per_alloc) == -1))
                    perror("munmap");
            } else {
                free(page);
            }
        } else {
            /* other slabs of the page are still live: move this one to empty */
            slab->slots = sch->empty_slotmask;

            if (likely(slab != sch->partial)) {
                if (likely((slab->prev->next = slab->next) != NULL))
                    slab->next->prev = slab->prev;
                slab->prev = NULL;
            } else if (likely((sch->partial = sch->partial->next) != NULL)) {
                sch->partial->prev = NULL;
            }

            slab->next = sch->empty;

            if (likely(sch->empty != NULL))
                sch->empty->prev = slab;

            sch->empty = slab;

            unlikely(slab->refcount != 0) ?
                slab->refcount-- : slab->page->refcount--;
        }
    } else {
        /* target slab stays partial: just mark the slot as free again */
        slab->slots |= SLOTS_FIRST << slot;
    }
}
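/*
 * Note on the slot computation in slab_free() (illustrative numbers, not from
 * the original source): since every slab begins on a slabsize-aligned boundary,
 * masking the item address with alignment_mask yields the slab header. With
 * itemsize = 100 and an assumed 48-byte data offset, an item stored 348 bytes
 * into its slab maps to slot (348 - 48) / 100 = 3, and SLOTS_FIRST << 3 is the
 * bit that marks that slot free again.
 */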
void slab_destroy(const struct slab_chain *const sch)
{
    assert(sch != NULL);
    assert(slab_is_valid(sch));

    struct slab_header *const heads[] = { sch->partial, sch->empty, sch->full };
    struct slab_header *pages_head = NULL, *pages_tail;

    /* collect the page-leading slabs (refcount != 0) into a single list */
    for (size_t i = 0; i < 3; ++i) {
        struct slab_header *slab = heads[i];

        while (slab != NULL) {
            if (slab->refcount != 0) {
                struct slab_header *const page = slab;
                slab = slab->next;

                if (unlikely(pages_head == NULL))
                    pages_head = page;
                else
                    pages_tail->next = page;

                pages_tail = page;
            } else {
                slab = slab->next;
            }
        }
    }

    if (likely(pages_head != NULL)) {
        pages_tail->next = NULL;
        struct slab_header *page = pages_head;

        if (sch->slabsize <= PAGE_SIZE) {
            /* pages came from mmap(): unmap each one */
            do {
                void *const target = page;
                page = page->next;
                if (unlikely(munmap(target, sch->pages_per_alloc) == -1))
                    perror("munmap");
            } while (page != NULL);
        } else {
            /* pages came from posix_memalign(): free each one */
            do {
                void *const target = page;
                page = page->next;
                free(target);
            } while (page != NULL);
        }
    }
}
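#ifdef SLAB_USAGE_EXAMPLE
/* Usage sketch (not part of the original source): a minimal example of how
 * the four entry points above combine. SLAB_USAGE_EXAMPLE is a hypothetical
 * guard so the sketch never interferes with the allocator itself, and error
 * handling is reduced to asserts. Note that slab_destroy() releases the pages
 * but not the chain descriptor; freeing that is left to whoever allocated it. */
#include <assert.h>
#include <string.h>

int main(void)
{
    struct slab_chain *sch = slab_init(128);    /* items of 128 bytes */
    assert(sch != NULL);

    void *obj = slab_alloc(sch);                /* take one slot */
    assert(obj != NULL);
    memset(obj, 0, 128);                        /* the slot is ours until freed */

    slab_free(sch, obj);                        /* give the slot back */
    slab_destroy(sch);                          /* unmap/free every remaining page */
    return 0;
}
#endif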