alloc

Function parameters:
- context: *anyopaque
- len: usize
- alignment: mem.Alignment
- ra: usize

Values:
- vtable = .{ .alloc = alloc, .resize = resize, .remap = remap, .free = free }

Source — Implementation
/// Allocator-vtable `alloc` implementation: a size-class slab allocator with
/// per-thread free lists. Returns a pointer to storage for `len` bytes at the
/// requested alignment, or `null` on out-of-memory. `context` and `ra`
/// (return address) are unused here.
fn alloc(context: *anyopaque, len: usize, alignment: mem.Alignment, ra: usize) ?[*]u8 {
_ = context;
_ = ra;
// Map (len, alignment) to a size class; oversized requests bypass the slab
// machinery entirely and go straight to the page allocator.
const class = sizeClassIndex(len, alignment);
if (class >= size_class_count) {
@branchHint(.unlikely);
return PageAllocator.map(len, alignment);
}
const slot_size = slotSize(class);
// Slots of this class must tile a slab exactly; the `% slab_len` test below
// relies on this to detect the end of a slab.
assert(slab_len % slot_size == 0);
var search_count: u8 = 0;
// NOTE(review): Thread.lock() presumably acquires this thread's per-thread
// allocator state (cf. the cross-thread tryLock search below) — confirm its
// contract against the declaration, which is outside this view.
var t = Thread.lock();
outer: while (true) {
// Fast path 1: pop the head of this thread's intrusive free list for the
// class. A freed slot stores the address of the next free slot in its
// first word (0 terminates the list).
const top_free_ptr = t.frees[class];
if (top_free_ptr != 0) {
@branchHint(.likely);
defer t.unlock();
const node: *usize = @ptrFromInt(top_free_ptr);
t.frees[class] = node.*;
return @ptrFromInt(top_free_ptr);
}
// Fast path 2: bump-allocate the next slot of the current slab. Slabs are
// mapped with slab_len alignment (see below), so an address that is a
// multiple of slab_len means the previous slab is exhausted (or none has
// been mapped yet, if next_addrs starts at 0 — confirm initialization).
const next_addr = t.next_addrs[class];
if ((next_addr % slab_len) != 0) {
@branchHint(.likely);
defer t.unlock();
t.next_addrs[class] = next_addr + slot_size;
return @ptrFromInt(next_addr);
}
// After max_alloc_search failed steal attempts, stop searching other
// threads and map a fresh slab: return its first slot and point the bump
// cursor at the second.
if (search_count >= max_alloc_search) {
@branchHint(.likely);
defer t.unlock();
// slab alignment here ensures the % slab len earlier catches the end of slots.
const slab = PageAllocator.map(slab_len, .fromByteUnits(slab_len)) orelse return null;
t.next_addrs[class] = @intFromPtr(slab) + slot_size;
return slab;
}
// Slow path: release our state and try to take over another thread's
// state, round-robin over the CPU-indexed slots in global.threads until
// one mutex is acquired without blocking. tryLock never waits, so this
// spin cannot deadlock against a holder; it retries until some slot is
// free.
t.unlock();
const cpu_count = getCpuCount();
assert(cpu_count != 0);
// NOTE(review): thread_index is read/written without going through `t`,
// so it is presumably a threadlocal (or file-scope) cursor — confirm.
var index = thread_index;
while (true) {
index = (index + 1) % cpu_count;
t = &global.threads[index];
if (t.mutex.tryLock()) {
// Remember where we landed so subsequent calls start from here, count
// one search attempt, and retry the fast paths on the stolen state.
thread_index = index;
search_count += 1;
continue :outer;
}
}
}
}