
GeneralPurposeAllocator

Deprecated; to be removed after 0.14.0 is tagged.
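
For orientation, a minimal usage sketch. It assumes the `std.heap.DebugAllocator` API that this deprecated alias wraps; `init`, `allocator()`, and `deinit()` are documented on this page, while `alloc`/`free` come from the generic `std.mem.Allocator` interface.

const std = @import("std");

pub fn main() !void {
    // `.init` is the declared default value; see the Values section below.
    var gpa: std.heap.DebugAllocator(.{}) = .init;
    // `deinit` detects leaks on teardown and returns `std.heap.Check`.
    defer switch (gpa.deinit()) {
        .ok => {},
        .leak => @panic("memory leak detected"),
    };

    const allocator = gpa.allocator();
    const buf = try allocator.alloc(u8, 32);
    defer allocator.free(buf);
}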

Fields

backing_allocator: Allocator = std.heap.page_allocator
buckets: [small_bucket_count]?*BucketHeader = [1]?*BucketHeader{null} ** small_bucket_count
Tracks the active bucket, which is the one that has free slots in it.
large_allocations: LargeAllocTable = .empty
total_requested_bytes: @TypeOf(total_requested_bytes_init) = total_requested_bytes_init
requested_memory_limit: @TypeOf(requested_memory_limit_init) = requested_memory_limit_init
mutex: @TypeOf(mutex_init) = mutex_init
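
The `total_requested_bytes` and `requested_memory_limit` fields only carry `usize` values when memory-limit accounting is enabled in the `Config`; otherwise their type is `void`, as the `@TypeOf(...)` defaults above hint. A hedged sketch, assuming the `Config` flag is spelled `enable_memory_limit` as the implementation below suggests:

const std = @import("std");

pub fn main() !void {
    var gpa: std.heap.DebugAllocator(.{ .enable_memory_limit = true }) = .init;
    defer _ = gpa.deinit();
    const allocator = gpa.allocator();

    // Cap the total live requested bytes at 1 MiB; an allocation that would
    // exceed the cap fails with error.OutOfMemory.
    gpa.requested_memory_limit = 1024 * 1024;

    try std.testing.expectError(error.OutOfMemory, allocator.alloc(u8, 2 * 1024 * 1024));
}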

Functions

allocator
Returns an `Allocator` interface backed by this instance.
detectLeaks
Emits log messages for leaks and then returns whether there were any leaks.
deinit
Returns `std.heap.Check.leak` if there were leaks; `std.heap.Check.ok` otherwise.
flushRetainedMetadata
Frees and removes metadata retained for already-freed allocations (requires `config.retain_metadata`).
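
A hedged sketch of consuming both results; the `std.testing` helpers and the `std.heap.DebugAllocator` name are assumptions, while the return types follow the descriptions above:

const std = @import("std");

test "leak check sketch" {
    var gpa: std.heap.DebugAllocator(.{}) = .init;
    const allocator = gpa.allocator();

    const buf = try allocator.alloc(u8, 16);
    allocator.free(buf);

    // Nothing is outstanding, so no leaks are logged or reported.
    try std.testing.expect(!gpa.detectLeaks());
    try std.testing.expect(gpa.deinit() == .ok);
}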

Error Sets

Error = mem.Allocator.Error

Values

init = .{}

Source

pub fn DebugAllocator(comptime config: Config) type {
    return struct {
        backing_allocator: Allocator = std.heap.page_allocator,
        /// Tracks the active bucket, which is the one that has free slots in it.
        buckets: [small_bucket_count]?*BucketHeader = [1]?*BucketHeader{null} ** small_bucket_count,
        large_allocations: LargeAllocTable = .empty,
        total_requested_bytes: @TypeOf(total_requested_bytes_init) = total_requested_bytes_init,
        requested_memory_limit: @TypeOf(requested_memory_limit_init) = requested_memory_limit_init,
        mutex: @TypeOf(mutex_init) = mutex_init,

        const Self = @This();

        pub const init: Self = .{};

        /// These can be derived from size_class_index but the calculation is nontrivial.
        const slot_counts: [small_bucket_count]SlotIndex = init: {
            @setEvalBranchQuota(10000);
            var result: [small_bucket_count]SlotIndex = undefined;
            for (&result, 0..) |*elem, i| elem.* = calculateSlotCount(i);
            break :init result;
        };

        comptime {
            assert(math.isPowerOfTwo(page_size));
        }

        const page_size = config.page_size;
        const page_align: mem.Alignment = .fromByteUnits(page_size);
        /// Integer type for pointing to slots in a small allocation
        const SlotIndex = std.meta.Int(.unsigned, math.log2(page_size) + 1);

        const total_requested_bytes_init = if (config.enable_memory_limit) @as(usize, 0) else {};
        const requested_memory_limit_init = if (config.enable_memory_limit) @as(usize, math.maxInt(usize)) else {};

        const mutex_init = if (config.MutexType) |T|
            T{}
        else if (config.thread_safe)
            std.Thread.Mutex{}
        else
            DummyMutex{};

        const DummyMutex = struct {
            inline fn lock(_: DummyMutex) void {}
            inline fn unlock(_: DummyMutex) void {}
        };

        const stack_n = config.stack_trace_frames;
        const one_trace_size = @sizeOf(usize) * stack_n;
        const traces_per_slot = 2;

        pub const Error = mem.Allocator.Error;

        /// Avoids creating buckets that would only be able to store a small
        /// number of slots. Value of 1 means 2 is the minimum slot count.
        const minimum_slots_per_bucket_log2 = 1;
        const small_bucket_count = math.log2(page_size) - minimum_slots_per_bucket_log2;
        const largest_bucket_object_size = 1 << (small_bucket_count - 1);
        const LargestSizeClassInt = std.math.IntFittingRange(0, largest_bucket_object_size);

        const bucketCompare = struct {
            fn compare(a: *BucketHeader, b: *BucketHeader) std.math.Order {
                return std.math.order(@intFromPtr(a.page), @intFromPtr(b.page));
            }
        }.compare;

        const LargeAlloc = struct {
            bytes: []u8,
            requested_size: if (config.enable_memory_limit) usize else void,
            stack_addresses: [trace_n][stack_n]usize,
            freed: if (config.retain_metadata) bool else void,
            alignment: if (config.never_unmap and config.retain_metadata) mem.Alignment else void,

            const trace_n = if (config.retain_metadata) traces_per_slot else 1;

            fn dumpStackTrace(self: *LargeAlloc, trace_kind: TraceKind) void {
                std.debug.dumpStackTrace(self.getStackTrace(trace_kind));
            }

            fn getStackTrace(self: *LargeAlloc, trace_kind: TraceKind) std.builtin.StackTrace {
                assert(@intFromEnum(trace_kind) < trace_n);
                const stack_addresses = &self.stack_addresses[@intFromEnum(trace_kind)];
                var len: usize = 0;
                while (len < stack_n and stack_addresses[len] != 0) {
                    len += 1;
                }
                return .{
                    .instruction_addresses = stack_addresses,
                    .index = len,
                };
            }

            fn captureStackTrace(self: *LargeAlloc, ret_addr: usize, trace_kind: TraceKind) void {
                assert(@intFromEnum(trace_kind) < trace_n);
                const stack_addresses = &self.stack_addresses[@intFromEnum(trace_kind)];
                collectStackTrace(ret_addr, stack_addresses);
            }
        };
        const LargeAllocTable = std.AutoHashMapUnmanaged(usize, LargeAlloc);

        /// Bucket: In memory, in order:
        /// * BucketHeader
        /// * bucket_used_bits: [N]usize, // 1 bit for every slot
        /// -- below only exists when config.safety is true --
        /// * requested_sizes: [N]LargestSizeClassInt // 1 int for every slot
        /// * log2_ptr_aligns: [N]u8 // 1 byte for every slot
        /// -- above only exists when config.safety is true --
        /// * stack_trace_addresses: [N]usize, // traces_per_slot for every allocation
        const BucketHeader = struct {
            allocated_count: SlotIndex,
            freed_count: SlotIndex,
            prev: ?*BucketHeader,
            next: ?*BucketHeader,
            canary: usize = config.canary,

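            /// The slots occupy the start of the page and the bucket metadata
            /// occupies the tail, so the header address is found by stepping
            /// back `bucketSize(slot_count)` bytes from the end of the page
            /// and aligning down to `@alignOf(BucketHeader)`.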
            fn fromPage(page_addr: usize, slot_count: usize) *BucketHeader {
                const unaligned = page_addr + page_size - bucketSize(slot_count);
                return @ptrFromInt(unaligned & ~(@as(usize, @alignOf(BucketHeader)) - 1));
            }

            fn usedBits(bucket: *BucketHeader, index: usize) *usize {
                const ptr: [*]u8 = @ptrCast(bucket);
                const bits: [*]usize = @ptrCast(@alignCast(ptr + @sizeOf(BucketHeader)));
                return &bits[index];
            }

            fn requestedSizes(bucket: *BucketHeader, slot_count: usize) []LargestSizeClassInt {
                if (!config.safety) @compileError("requested size is only stored when safety is enabled");
                const start_ptr = @as([*]u8, @ptrCast(bucket)) + bucketRequestedSizesStart(slot_count);
                const sizes = @as([*]LargestSizeClassInt, @ptrCast(@alignCast(start_ptr)));
                return sizes[0..slot_count];
            }

            fn log2PtrAligns(bucket: *BucketHeader, slot_count: usize) []mem.Alignment {
                if (!config.safety) @compileError("requested size is only stored when safety is enabled");
                const aligns_ptr = @as([*]u8, @ptrCast(bucket)) + bucketAlignsStart(slot_count);
                return @ptrCast(aligns_ptr[0..slot_count]);
            }

            fn stackTracePtr(
                bucket: *BucketHeader,
                slot_count: usize,
                slot_index: SlotIndex,
                trace_kind: TraceKind,
            ) *[stack_n]usize {
                const start_ptr = @as([*]u8, @ptrCast(bucket)) + bucketStackFramesStart(slot_count);
                const addr = start_ptr + one_trace_size * traces_per_slot * slot_index +
                    @intFromEnum(trace_kind) * @as(usize, one_trace_size);
                return @ptrCast(@alignCast(addr));
            }

            fn captureStackTrace(
                bucket: *BucketHeader,
                ret_addr: usize,
                slot_count: usize,
                slot_index: SlotIndex,
                trace_kind: TraceKind,
            ) void {
                // Initialize them to 0. When determining the count we must look
                // for non zero addresses.
                const stack_addresses = bucket.stackTracePtr(slot_count, slot_index, trace_kind);
                collectStackTrace(ret_addr, stack_addresses);
            }
        };

        pub fn allocator(self: *Self) Allocator {
            return .{
                .ptr = self,
                .vtable = &.{
                    .alloc = alloc,
                    .resize = resize,
                    .remap = remap,
                    .free = free,
                },
            };
        }

        fn bucketStackTrace(
            bucket: *BucketHeader,
            slot_count: usize,
            slot_index: SlotIndex,
            trace_kind: TraceKind,
        ) StackTrace {
            const stack_addresses = bucket.stackTracePtr(slot_count, slot_index, trace_kind);
            var len: usize = 0;
            while (len < stack_n and stack_addresses[len] != 0) {
                len += 1;
            }
            return .{
                .instruction_addresses = stack_addresses,
                .index = len,
            };
        }

        fn bucketRequestedSizesStart(slot_count: usize) usize {
            if (!config.safety) @compileError("requested sizes are not stored unless safety is enabled");
            return mem.alignForward(
                usize,
                @sizeOf(BucketHeader) + usedBitsSize(slot_count),
                @alignOf(LargestSizeClassInt),
            );
        }

        fn bucketAlignsStart(slot_count: usize) usize {
            if (!config.safety) @compileError("requested sizes are not stored unless safety is enabled");
            return bucketRequestedSizesStart(slot_count) + (@sizeOf(LargestSizeClassInt) * slot_count);
        }

        fn bucketStackFramesStart(slot_count: usize) usize {
            const unaligned_start = if (config.safety)
                bucketAlignsStart(slot_count) + slot_count
            else
                @sizeOf(BucketHeader) + usedBitsSize(slot_count);
            return mem.alignForward(usize, unaligned_start, @alignOf(usize));
        }

        fn bucketSize(slot_count: usize) usize {
            return bucketStackFramesStart(slot_count) + one_trace_size * traces_per_slot * slot_count;
        }

        /// This is executed only at compile-time to prepopulate a lookup table.
        fn calculateSlotCount(size_class_index: usize) SlotIndex {
            const size_class = @as(usize, 1) << @as(Log2USize, @intCast(size_class_index));
            var lower: usize = 1 << minimum_slots_per_bucket_log2;
            var upper: usize = (page_size - bucketSize(lower)) / size_class;
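            // Binary-search for the largest slot count whose slots, followed
            // by the bucket metadata, still fit within a single page.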
            while (upper > lower) {
                const proposed: usize = lower + (upper - lower) / 2;
                if (proposed == lower) return lower;
                const slots_end = proposed * size_class;
                const header_begin = mem.alignForward(usize, slots_end, @alignOf(BucketHeader));
                const end = header_begin + bucketSize(proposed);
                if (end > page_size) {
                    upper = proposed - 1;
                } else {
                    lower = proposed;
                }
            }
            const slots_end = lower * size_class;
            const header_begin = mem.alignForward(usize, slots_end, @alignOf(BucketHeader));
            const end = header_begin + bucketSize(lower);
            assert(end <= page_size);
            return lower;
        }

        fn usedBitsCount(slot_count: usize) usize {
            return (slot_count + (@bitSizeOf(usize) - 1)) / @bitSizeOf(usize);
        }

        fn usedBitsSize(slot_count: usize) usize {
            return usedBitsCount(slot_count) * @sizeOf(usize);
        }

        fn detectLeaksInBucket(bucket: *BucketHeader, size_class_index: usize, used_bits_count: usize) bool {
            const size_class = @as(usize, 1) << @as(Log2USize, @intCast(size_class_index));
            const slot_count = slot_counts[size_class_index];
            var leaks = false;
            for (0..used_bits_count) |used_bits_byte| {
                const used_int = bucket.usedBits(used_bits_byte).*;
                if (used_int != 0) {
                    for (0..@bitSizeOf(usize)) |bit_index_usize| {
                        const bit_index: Log2USize = @intCast(bit_index_usize);
                        const is_used = @as(u1, @truncate(used_int >> bit_index)) != 0;
                        if (is_used) {
                            const slot_index: SlotIndex = @intCast(used_bits_byte * @bitSizeOf(usize) + bit_index);
                            const stack_trace = bucketStackTrace(bucket, slot_count, slot_index, .alloc);
                            const page_addr = @intFromPtr(bucket) & ~(page_size - 1);
                            const addr = page_addr + slot_index * size_class;
                            log.err("memory address 0x{x} leaked: {f}", .{ addr, stack_trace });
                            leaks = true;
                        }
                    }
                }
            }
            return leaks;
        }

        /// Emits log messages for leaks and then returns whether there were any leaks.
        pub fn detectLeaks(self: *Self) bool {
            var leaks = false;

            for (self.buckets, 0..) |init_optional_bucket, size_class_index| {
                var optional_bucket = init_optional_bucket;
                const slot_count = slot_counts[size_class_index];
                const used_bits_count = usedBitsCount(slot_count);
                while (optional_bucket) |bucket| {
                    leaks = detectLeaksInBucket(bucket, size_class_index, used_bits_count) or leaks;
                    optional_bucket = bucket.prev;
                }
            }

            var it = self.large_allocations.valueIterator();
            while (it.next()) |large_alloc| {
                if (config.retain_metadata and large_alloc.freed) continue;
                const stack_trace = large_alloc.getStackTrace(.alloc);
                log.err("memory address 0x{x} leaked: {f}", .{
                    @intFromPtr(large_alloc.bytes.ptr), stack_trace,
                });
                leaks = true;
            }
            return leaks;
        }

        fn freeRetainedMetadata(self: *Self) void {
            comptime assert(config.retain_metadata);
            if (config.never_unmap) {
                // free large allocations that were intentionally leaked by never_unmap
                var it = self.large_allocations.iterator();
                while (it.next()) |large| {
                    if (large.value_ptr.freed) {
                        self.backing_allocator.rawFree(large.value_ptr.bytes, large.value_ptr.alignment, @returnAddress());
                    }
                }
            }
        }

        pub fn flushRetainedMetadata(self: *Self) void {
            comptime assert(config.retain_metadata);
            self.freeRetainedMetadata();
            // also remove entries from large_allocations
            var it = self.large_allocations.iterator();
            while (it.next()) |large| {
                if (large.value_ptr.freed) {
                    _ = self.large_allocations.remove(@intFromPtr(large.value_ptr.bytes.ptr));
                }
            }
        }

        /// Returns `std.heap.Check.leak` if there were leaks; `std.heap.Check.ok` otherwise.
        pub fn deinit(self: *Self) std.heap.Check {
            const leaks = if (config.safety) self.detectLeaks() else false;
            if (config.retain_metadata) self.freeRetainedMetadata();
            self.large_allocations.deinit(self.backing_allocator);
            self.* = undefined;
            return if (leaks) .leak else .ok;
        }

        fn collectStackTrace(first_trace_addr: usize, addresses: *[stack_n]usize) void {
            if (stack_n == 0) return;
            @memset(addresses, 0);
            var stack_trace: StackTrace = .{
                .instruction_addresses = addresses,
                .index = 0,
            };
            std.debug.captureStackTrace(first_trace_addr, &stack_trace);
        }

        fn reportDoubleFree(ret_addr: usize, alloc_stack_trace: StackTrace, free_stack_trace: StackTrace) void {
            var addresses: [stack_n]usize = @splat(0);
            var second_free_stack_trace: StackTrace = .{
                .instruction_addresses = &addresses,
                .index = 0,
            };
            std.debug.captureStackTrace(ret_addr, &second_free_stack_trace);
            log.err("Double free detected. Allocation: {f} First free: {f} Second free: {f}", .{
                alloc_stack_trace, free_stack_trace, second_free_stack_trace,
            });
        }

        /// This function assumes the object is in the large object storage regardless
        /// of the parameters.
        fn resizeLarge(
            self: *Self,
            old_mem: []u8,
            alignment: mem.Alignment,
            new_size: usize,
            ret_addr: usize,
            may_move: bool,
        ) ?[*]u8 {
            if (config.retain_metadata and may_move) {
                // Before looking up the entry (since this could invalidate
                // it), we must reserve space for the new entry in case the
                // allocation is relocated.
                self.large_allocations.ensureUnusedCapacity(self.backing_allocator, 1) catch return null;
            }

            const entry = self.large_allocations.getEntry(@intFromPtr(old_mem.ptr)) orelse {
                if (config.safety) {
                    @panic("Invalid free");
                } else {
                    unreachable;
                }
            };

            if (config.retain_metadata and entry.value_ptr.freed) {
                if (config.safety) {
                    reportDoubleFree(ret_addr, entry.value_ptr.getStackTrace(.alloc), entry.value_ptr.getStackTrace(.free));
                    @panic("Unrecoverable double free");
                } else {
                    unreachable;
                }
            }

            if (config.safety and old_mem.len != entry.value_ptr.bytes.len) {
                var addresses: [stack_n]usize = [1]usize{0} ** stack_n;
                var free_stack_trace: StackTrace = .{
                    .instruction_addresses = &addresses,
                    .index = 0,
                };
                std.debug.captureStackTrace(ret_addr, &free_stack_trace);
                log.err("Allocation size {d} bytes does not match free size {d}. Allocation: {f} Free: {f}", .{
                    entry.value_ptr.bytes.len,
                    old_mem.len,
                    entry.value_ptr.getStackTrace(.alloc),
                    free_stack_trace,
                });
            }

            // If this would move the allocation into a small size class,
            // refuse the request, because it would require creating small
            // allocation metadata.
            const new_size_class_index: usize = @max(@bitSizeOf(usize) - @clz(new_size - 1), @intFromEnum(alignment));
            if (new_size_class_index < self.buckets.len) return null;

            // Do memory limit accounting with requested sizes rather than what
            // backing_allocator returns because if we want to return
            // error.OutOfMemory, we have to leave allocation untouched, and
            // that is impossible to guarantee after calling
            // backing_allocator.rawResize.
            const prev_req_bytes = self.total_requested_bytes;
            if (config.enable_memory_limit) {
                const new_req_bytes = prev_req_bytes + new_size - entry.value_ptr.requested_size;
                if (new_req_bytes > prev_req_bytes and new_req_bytes > self.requested_memory_limit) {
                    return null;
                }
                self.total_requested_bytes = new_req_bytes;
            }

            const opt_resized_ptr = if (may_move)
                self.backing_allocator.rawRemap(old_mem, alignment, new_size, ret_addr)
            else if (self.backing_allocator.rawResize(old_mem, alignment, new_size, ret_addr))
                old_mem.ptr
            else
                null;

            const resized_ptr = opt_resized_ptr orelse {
                if (config.enable_memory_limit) {
                    self.total_requested_bytes = prev_req_bytes;
                }
                return null;
            };

            if (config.enable_memory_limit) {
                entry.value_ptr.requested_size = new_size;
            }

            if (config.verbose_log) {
                log.info("large resize {d} bytes at {*} to {d} at {*}", .{
                    old_mem.len, old_mem.ptr, new_size, resized_ptr,
                });
            }
            entry.value_ptr.bytes = resized_ptr[0..new_size];
            if (config.resize_stack_traces)
                entry.value_ptr.captureStackTrace(ret_addr, .alloc);

            // Update the key of the hash map if the memory was relocated.
            if (resized_ptr != old_mem.ptr) {
                const large_alloc = entry.value_ptr.*;
                if (config.retain_metadata) {
                    entry.value_ptr.freed = true;
                    entry.value_ptr.captureStackTrace(ret_addr, .free);
                } else {
                    self.large_allocations.removeByPtr(entry.key_ptr);
                }

                const gop = self.large_allocations.getOrPutAssumeCapacity(@intFromPtr(resized_ptr));
                if (config.retain_metadata and !config.never_unmap) {
                    // Backing allocator may be reusing memory that we're retaining metadata for
                    assert(!gop.found_existing or gop.value_ptr.freed);
                } else {
                    assert(!gop.found_existing); // This would mean the kernel double-mapped pages.
                }
                gop.value_ptr.* = large_alloc;
            }

            return resized_ptr;
        }

        /// This function assumes the object is in the large object storage regardless
        /// of the parameters.
        fn freeLarge(
            self: *Self,
            old_mem: []u8,
            alignment: mem.Alignment,
            ret_addr: usize,
        ) void {
            const entry = self.large_allocations.getEntry(@intFromPtr(old_mem.ptr)) orelse {
                if (config.safety) {
                    @panic("Invalid free");
                } else {
                    unreachable;
                }
            };

            if (config.retain_metadata and entry.value_ptr.freed) {
                if (config.safety) {
                    reportDoubleFree(ret_addr, entry.value_ptr.getStackTrace(.alloc), entry.value_ptr.getStackTrace(.free));
                    return;
                } else {
                    unreachable;
                }
            }

            if (config.safety and old_mem.len != entry.value_ptr.bytes.len) {
                var addresses: [stack_n]usize = [1]usize{0} ** stack_n;
                var free_stack_trace = StackTrace{
                    .instruction_addresses = &addresses,
                    .index = 0,
                };
                std.debug.captureStackTrace(ret_addr, &free_stack_trace);
                log.err("Allocation size {d} bytes does not match free size {d}. Allocation: {f} Free: {f}", .{
                    entry.value_ptr.bytes.len,
                    old_mem.len,
                    entry.value_ptr.getStackTrace(.alloc),
                    free_stack_trace,
                });
            }

            if (!config.never_unmap) {
                self.backing_allocator.rawFree(old_mem, alignment, ret_addr);
            }

            if (config.enable_memory_limit) {
                self.total_requested_bytes -= entry.value_ptr.requested_size;
            }

            if (config.verbose_log) {
                log.info("large free {d} bytes at {*}", .{ old_mem.len, old_mem.ptr });
            }

            if (!config.retain_metadata) {
                assert(self.large_allocations.remove(@intFromPtr(old_mem.ptr)));
            } else {
                entry.value_ptr.freed = true;
                entry.value_ptr.captureStackTrace(ret_addr, .free);
            }
        }

        fn alloc(context: *anyopaque, len: usize, alignment: mem.Alignment, ret_addr: usize) ?[*]u8 {
            const self: *Self = @ptrCast(@alignCast(context));
            self.mutex.lock();
            defer self.mutex.unlock();

            if (config.enable_memory_limit) {
                const new_req_bytes = self.total_requested_bytes + len;
                if (new_req_bytes > self.requested_memory_limit) return null;
                self.total_requested_bytes = new_req_bytes;
            }

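            // The size class index is ceil(log2(len)), raised to at least the
            // requested alignment's log2 (mem.Alignment is stored as log2 bytes).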
            const size_class_index: usize = @max(@bitSizeOf(usize) - @clz(len - 1), @intFromEnum(alignment));
            if (size_class_index >= self.buckets.len) {
                @branchHint(.unlikely);
                self.large_allocations.ensureUnusedCapacity(self.backing_allocator, 1) catch return null;
                const ptr = self.backing_allocator.rawAlloc(len, alignment, ret_addr) orelse return null;
                const slice = ptr[0..len];

                const gop = self.large_allocations.getOrPutAssumeCapacity(@intFromPtr(slice.ptr));
                if (config.retain_metadata and !config.never_unmap) {
                    // Backing allocator may be reusing memory that we're retaining metadata for
                    assert(!gop.found_existing or gop.value_ptr.freed);
                } else {
                    assert(!gop.found_existing); // This would mean the kernel double-mapped pages.
                }
                gop.value_ptr.bytes = slice;
                if (config.enable_memory_limit)
                    gop.value_ptr.requested_size = len;
                gop.value_ptr.captureStackTrace(ret_addr, .alloc);
                if (config.retain_metadata) {
                    gop.value_ptr.freed = false;
                    if (config.never_unmap) {
                        gop.value_ptr.alignment = alignment;
                    }
                }

                if (config.verbose_log) {
                    log.info("large alloc {d} bytes at {*}", .{ slice.len, slice.ptr });
                }
                return slice.ptr;
            }

            const slot_count = slot_counts[size_class_index];

            if (self.buckets[size_class_index]) |bucket| {
                @branchHint(.likely);
                const slot_index = bucket.allocated_count;
                if (slot_index < slot_count) {
                    @branchHint(.likely);
                    bucket.allocated_count = slot_index + 1;
                    const used_bits_byte = bucket.usedBits(slot_index / @bitSizeOf(usize));
                    const used_bit_index: Log2USize = @intCast(slot_index % @bitSizeOf(usize));
                    used_bits_byte.* |= (@as(usize, 1) << used_bit_index);
                    const size_class = @as(usize, 1) << @as(Log2USize, @intCast(size_class_index));
                    if (config.stack_trace_frames > 0) {
                        bucket.captureStackTrace(ret_addr, slot_count, slot_index, .alloc);
                    }
                    if (config.safety) {
                        bucket.requestedSizes(slot_count)[slot_index] = @intCast(len);
                        bucket.log2PtrAligns(slot_count)[slot_index] = alignment;
                    }
                    const page_addr = @intFromPtr(bucket) & ~(page_size - 1);
                    const addr = page_addr + slot_index * size_class;
                    if (config.verbose_log) {
                        log.info("small alloc {d} bytes at 0x{x}", .{ len, addr });
                    }
                    return @ptrFromInt(addr);
                }
            }

            const page = self.backing_allocator.rawAlloc(page_size, page_align, @returnAddress()) orelse
                return null;
            const bucket: *BucketHeader = .fromPage(@intFromPtr(page), slot_count);
            bucket.* = .{
                .allocated_count = 1,
                .freed_count = 0,
                .prev = self.buckets[size_class_index],
                .next = null,
            };
            if (self.buckets[size_class_index]) |old_head| {
                old_head.next = bucket;
            }
            self.buckets[size_class_index] = bucket;

            if (!config.backing_allocator_zeroes) {
                @memset(@as([*]usize, @as(*[1]usize, bucket.usedBits(0)))[0..usedBitsCount(slot_count)], 0);
                if (config.safety) @memset(bucket.requestedSizes(slot_count), 0);
            }

            bucket.usedBits(0).* = 0b1;

            if (config.stack_trace_frames > 0) {
                bucket.captureStackTrace(ret_addr, slot_count, 0, .alloc);
            }

            if (config.safety) {
                bucket.requestedSizes(slot_count)[0] = @intCast(len);
                bucket.log2PtrAligns(slot_count)[0] = alignment;
            }

            if (config.verbose_log) {
                log.info("small alloc {d} bytes at 0x{x}", .{ len, @intFromPtr(page) });
            }

            return page;
        }

        fn resize(
            context: *anyopaque,
            memory: []u8,
            alignment: mem.Alignment,
            new_len: usize,
            return_address: usize,
        ) bool {
            const self: *Self = @ptrCast(@alignCast(context));
            self.mutex.lock();
            defer self.mutex.unlock();

            const size_class_index: usize = @max(@bitSizeOf(usize) - @clz(memory.len - 1), @intFromEnum(alignment));
            if (size_class_index >= self.buckets.len) {
                return self.resizeLarge(memory, alignment, new_len, return_address, false) != null;
            } else {
                return resizeSmall(self, memory, alignment, new_len, return_address, size_class_index);
            }
        }

        fn remap(
            context: *anyopaque,
            memory: []u8,
            alignment: mem.Alignment,
            new_len: usize,
            return_address: usize,
        ) ?[*]u8 {
            const self: *Self = @ptrCast(@alignCast(context));
            self.mutex.lock();
            defer self.mutex.unlock();

            const size_class_index: usize = @max(@bitSizeOf(usize) - @clz(memory.len - 1), @intFromEnum(alignment));
            if (size_class_index >= self.buckets.len) {
                return self.resizeLarge(memory, alignment, new_len, return_address, true);
            } else {
                return if (resizeSmall(self, memory, alignment, new_len, return_address, size_class_index)) memory.ptr else null;
            }
        }

        fn free(
            context: *anyopaque,
            old_memory: []u8,
            alignment: mem.Alignment,
            return_address: usize,
        ) void {
            const self: *Self = @ptrCast(@alignCast(context));
            self.mutex.lock();
            defer self.mutex.unlock();

            const size_class_index: usize = @max(@bitSizeOf(usize) - @clz(old_memory.len - 1), @intFromEnum(alignment));
            if (size_class_index >= self.buckets.len) {
                @branchHint(.unlikely);
                self.freeLarge(old_memory, alignment, return_address);
                return;
            }

            const slot_count = slot_counts[size_class_index];
            const freed_addr = @intFromPtr(old_memory.ptr);
            const page_addr = freed_addr & ~(page_size - 1);
            const bucket: *BucketHeader = .fromPage(page_addr, slot_count);
            if (bucket.canary != config.canary) @panic("Invalid free");
            const page_offset = freed_addr - page_addr;
            const size_class = @as(usize, 1) << @as(Log2USize, @intCast(size_class_index));
            const slot_index: SlotIndex = @intCast(page_offset / size_class);
            const used_byte_index = slot_index / @bitSizeOf(usize);
            const used_bit_index: Log2USize = @intCast(slot_index % @bitSizeOf(usize));
            const used_byte = bucket.usedBits(used_byte_index);
            const is_used = @as(u1, @truncate(used_byte.* >> used_bit_index)) != 0;
            if (!is_used) {
                if (config.safety) {
                    reportDoubleFree(
                        return_address,
                        bucketStackTrace(bucket, slot_count, slot_index, .alloc),
                        bucketStackTrace(bucket, slot_count, slot_index, .free),
                    );
                    // Recoverable since this is a free.
                    return;
                } else {
                    unreachable;
                }
            }

            // Definitely an in-use small alloc now.
            if (config.safety) {
                const requested_size = bucket.requestedSizes(slot_count)[slot_index];
                if (requested_size == 0) @panic("Invalid free");
                const slot_alignment = bucket.log2PtrAligns(slot_count)[slot_index];
                if (old_memory.len != requested_size or alignment != slot_alignment) {
                    var addresses: [stack_n]usize = [1]usize{0} ** stack_n;
                    var free_stack_trace: StackTrace = .{
                        .instruction_addresses = &addresses,
                        .index = 0,
                    };
                    std.debug.captureStackTrace(return_address, &free_stack_trace);
                    if (old_memory.len != requested_size) {
                        log.err("Allocation size {d} bytes does not match free size {d}. Allocation: {f} Free: {f}", .{
                            requested_size,
                            old_memory.len,
                            bucketStackTrace(bucket, slot_count, slot_index, .alloc),
                            free_stack_trace,
                        });
                    }
                    if (alignment != slot_alignment) {
                        log.err("Allocation alignment {d} does not match free alignment {d}. Allocation: {f} Free: {f}", .{
                            slot_alignment.toByteUnits(),
                            alignment.toByteUnits(),
                            bucketStackTrace(bucket, slot_count, slot_index, .alloc),
                            free_stack_trace,
                        });
                    }
                }
            }

            if (config.enable_memory_limit) {
                self.total_requested_bytes -= old_memory.len;
            }

            if (config.stack_trace_frames > 0) {
                // Capture stack trace to be the "first free", in case a double free happens.
                bucket.captureStackTrace(return_address, slot_count, slot_index, .free);
            }

            used_byte.* &= ~(@as(usize, 1) << used_bit_index);
            if (config.safety) {
                bucket.requestedSizes(slot_count)[slot_index] = 0;
            }
            bucket.freed_count += 1;
            if (bucket.freed_count == bucket.allocated_count) {
                if (bucket.prev) |prev| {
                    prev.next = bucket.next;
                }

                if (bucket.next) |next| {
                    assert(self.buckets[size_class_index] != bucket);
                    next.prev = bucket.prev;
                } else {
                    assert(self.buckets[size_class_index] == bucket);
                    self.buckets[size_class_index] = bucket.prev;
                }

                if (!config.never_unmap) {
                    const page: [*]align(page_size) u8 = @ptrFromInt(page_addr);
                    self.backing_allocator.rawFree(page[0..page_size], page_align, @returnAddress());
                }
            }
            if (config.verbose_log) {
                log.info("small free {d} bytes at {*}", .{ old_memory.len, old_memory.ptr });
            }
        }

        fn resizeSmall(
            self: *Self,
            memory: []u8,
            alignment: mem.Alignment,
            new_len: usize,
            return_address: usize,
            size_class_index: usize,
        ) bool {
            const new_size_class_index: usize = @max(@bitSizeOf(usize) - @clz(new_len - 1), @intFromEnum(alignment));
            if (!config.safety) return new_size_class_index == size_class_index;
            const slot_count = slot_counts[size_class_index];
            const memory_addr = @intFromPtr(memory.ptr);
            const page_addr = memory_addr & ~(page_size - 1);
            const bucket: *BucketHeader = .fromPage(page_addr, slot_count);
            if (bucket.canary != config.canary) @panic("Invalid free");
            const page_offset = memory_addr - page_addr;
            const size_class = @as(usize, 1) << @as(Log2USize, @intCast(size_class_index));
            const slot_index: SlotIndex = @intCast(page_offset / size_class);
            const used_byte_index = slot_index / @bitSizeOf(usize);
            const used_bit_index: Log2USize = @intCast(slot_index % @bitSizeOf(usize));
            const used_byte = bucket.usedBits(used_byte_index);
            const is_used = @as(u1, @truncate(used_byte.* >> used_bit_index)) != 0;
            if (!is_used) {
                reportDoubleFree(
                    return_address,
                    bucketStackTrace(bucket, slot_count, slot_index, .alloc),
                    bucketStackTrace(bucket, slot_count, slot_index, .free),
                );
                // Recoverable since this is a free.
                return false;
            }

            // Definitely an in-use small alloc now.
            const requested_size = bucket.requestedSizes(slot_count)[slot_index];
            if (requested_size == 0) @panic("Invalid free");
            const slot_alignment = bucket.log2PtrAligns(slot_count)[slot_index];
            if (memory.len != requested_size or alignment != slot_alignment) {
                var addresses: [stack_n]usize = [1]usize{0} ** stack_n;
                var free_stack_trace: StackTrace = .{
                    .instruction_addresses = &addresses,
                    .index = 0,
                };
                std.debug.captureStackTrace(return_address, &free_stack_trace);
                if (memory.len != requested_size) {
                    log.err("Allocation size {d} bytes does not match free size {d}. Allocation: {f} Free: {f}", .{
                        requested_size,
                        memory.len,
                        bucketStackTrace(bucket, slot_count, slot_index, .alloc),
                        free_stack_trace,
                    });
                }
                if (alignment != slot_alignment) {
                    log.err("Allocation alignment {d} does not match free alignment {d}. Allocation: {f} Free: {f}", .{
                        slot_alignment.toByteUnits(),
                        alignment.toByteUnits(),
                        bucketStackTrace(bucket, slot_count, slot_index, .alloc),
                        free_stack_trace,
                    });
                }
            }

            if (new_size_class_index != size_class_index) return false;

            const prev_req_bytes = self.total_requested_bytes;
            if (config.enable_memory_limit) {
                const new_req_bytes = prev_req_bytes - memory.len + new_len;
                if (new_req_bytes > prev_req_bytes and new_req_bytes > self.requested_memory_limit) {
                    return false;
                }
                self.total_requested_bytes = new_req_bytes;
            }

            if (memory.len > new_len) @memset(memory[new_len..], undefined);
            if (config.verbose_log)
                log.info("small resize {d} bytes at {*} to {d}", .{ memory.len, memory.ptr, new_len });

            if (config.safety)
                bucket.requestedSizes(slot_count)[slot_index] = @intCast(new_len);

            if (config.resize_stack_traces)
                bucket.captureStackTrace(return_address, slot_count, slot_index, .alloc);

            return true;
        }
    };
}