unwindFrameDwarf

Unwind a stack frame using DWARF unwinding info, updating the register context.

If .eh_frame_hdr is available and complete, it will be used to binary search for the FDE. Otherwise, a linear scan of .eh_frame and .debug_frame is done to find the FDE. The latter may require lazily loading the data in those sections.
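
In either case, the lookup reduces to checking whether the current pc falls inside an FDE's covered address range. A minimal sketch of that predicate, mirroring the comparator used in the source below (the function name here is illustrative only):

// Sketch only: an FDE covers the half-open range [pc_begin, pc_begin + pc_range).
fn fdeContainsPc(fde: Dwarf.FrameDescriptionEntry, pc: usize) bool {
    return pc >= fde.pc_begin and pc < fde.pc_begin + fde.pc_range;
}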

explicit_fde_offset is for cases where the FDE offset is known, such as when __unwind_info defers unwinding to DWARF. This is an offset into the .eh_frame section.
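
A minimal sketch of such a call site, assuming the caller has already set up `di`, `context`, and `ma`; `dwarf_fde_offset` is a hypothetical name for an FDE offset obtained from a compact unwind entry that defers to DWARF:

// Hypothetical call site; only the parameter list is taken from the declaration below.
const return_address = try unwindFrameDwarf(
    allocator,          // used if CIE/FDE tables need to be scanned or loaded
    di,                 // *Dwarf for the module containing context.pc
    base_address,       // load address of that module
    context,            // *UnwindContext holding the register state to update
    ma,                 // *std.debug.MemoryAccessor for validated memory reads
    dwarf_fde_offset,   // byte offset of the FDE within .eh_frame
);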

Parameters

allocator:Allocator
di:*Dwarf
base_address:usize
context:*UnwindContext
ma:*std.debug.MemoryAccessor
explicit_fde_offset:?usize

Types

WindowsModule
How is this different than `Module` when the host is Windows?
VirtualMachine
This is a virtual machine that runs DWARF call frame instructions.

Functions

readElfDebugInfo
Reads debug info from an ELF file, or the current binary if none is specified.
unwindFrameMachO
Unwind a frame using MachO compact unwind info (from __unwind_info).
stripInstructionPtrAuthCode
Some platforms use pointer authentication - the upper bits of instruction pointers contain a signature.
unwindFrameDwarf
Unwind a stack frame using DWARF unwinding info, updating the register context.
supportsUnwinding
Tells whether unwinding for this target is *implemented* here in the Zig standard library.

Error Sets

Values

supports_unwinding
Tells whether unwinding for the host is implemented.

Source

pub fn unwindFrameDwarf(
    allocator: Allocator,
    di: *Dwarf,
    base_address: usize,
    context: *UnwindContext,
    ma: *std.debug.MemoryAccessor,
    explicit_fde_offset: ?usize,
) !usize {
    if (!supports_unwinding) return error.UnsupportedCpuArchitecture;
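    // A pc of 0 marks the end of the stack; it is what gets stored below when a frame has no return address.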
    if (context.pc == 0) return 0;

    // Find the FDE and CIE
    const cie, const fde = if (explicit_fde_offset) |fde_offset| blk: {
        const dwarf_section: Dwarf.Section.Id = .eh_frame;
        const frame_section = di.section(dwarf_section) orelse return error.MissingFDE;
        if (fde_offset >= frame_section.len) return error.MissingFDE;

        var fbr: std.debug.FixedBufferReader = .{
            .buf = frame_section,
            .pos = fde_offset,
            .endian = di.endian,
        };

        const fde_entry_header = try Dwarf.EntryHeader.read(&fbr, null, dwarf_section);
        if (fde_entry_header.type != .fde) return error.MissingFDE;

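        // The FDE header records the offset of its parent CIE; seek there and parse it next.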
        const cie_offset = fde_entry_header.type.fde;
        try fbr.seekTo(cie_offset);

        fbr.endian = native_endian;
        const cie_entry_header = try Dwarf.EntryHeader.read(&fbr, null, dwarf_section);
        if (cie_entry_header.type != .cie) return Dwarf.bad();

        const cie = try Dwarf.CommonInformationEntry.parse(
            cie_entry_header.entry_bytes,
            0,
            true,
            cie_entry_header.format,
            dwarf_section,
            cie_entry_header.length_offset,
            @sizeOf(usize),
            native_endian,
        );
        const fde = try Dwarf.FrameDescriptionEntry.parse(
            fde_entry_header.entry_bytes,
            0,
            true,
            cie,
            @sizeOf(usize),
            native_endian,
        );

        break :blk .{ cie, fde };
    } else blk: {
        // `.eh_frame_hdr` may be incomplete. We'll try it first, but if the lookup fails, we fall
        // back to loading `.eh_frame`/`.debug_frame` and using those from that point on.

        if (di.eh_frame_hdr) |header| hdr: {
            const eh_frame_len = if (di.section(.eh_frame)) |eh_frame| eh_frame.len else null;

            var cie: Dwarf.CommonInformationEntry = undefined;
            var fde: Dwarf.FrameDescriptionEntry = undefined;

            header.findEntry(
                ma,
                eh_frame_len,
                @intFromPtr(di.section(.eh_frame_hdr).?.ptr),
                context.pc,
                &cie,
                &fde,
            ) catch |err| switch (err) {
                error.MissingDebugInfo => {
                    // `.eh_frame_hdr` appears to be incomplete, so go ahead and populate `cie_map`
                    // and `fde_list`, and fall back to the binary search logic below.
                    try di.scanCieFdeInfo(allocator, base_address);

                    // Since `.eh_frame_hdr` is incomplete, we're very likely to get more lookup
                    // failures using it, and we've just built a complete, sorted list of FDEs
                    // anyway, so just stop using `.eh_frame_hdr` altogether.
                    di.eh_frame_hdr = null;

                    break :hdr;
                },
                else => return err,
            };

            break :blk .{ cie, fde };
        }

        const index = std.sort.binarySearch(Dwarf.FrameDescriptionEntry, di.fde_list.items, context.pc, struct {
            pub fn compareFn(pc: usize, item: Dwarf.FrameDescriptionEntry) std.math.Order {
                if (pc < item.pc_begin) return .lt;

                const range_end = item.pc_begin + item.pc_range;
                if (pc < range_end) return .eq;

                return .gt;
            }
        }.compareFn);

        const fde = if (index) |i| di.fde_list.items[i] else return error.MissingFDE;
        const cie = di.cie_map.get(fde.cie_length_offset) orelse return error.MissingCIE;

        break :blk .{ cie, fde };
    };

    var expression_context: Dwarf.expression.Context = .{
        .format = cie.format,
        .memory_accessor = ma,
        .compile_unit = di.findCompileUnit(fde.pc_begin) catch null,
        .thread_context = context.thread_context,
        .reg_context = context.reg_context,
        .cfa = context.cfa,
    };

    context.vm.reset();
    context.reg_context.eh_frame = cie.version != 4;
    context.reg_context.is_macho = di.is_macho;

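    // Execute the CIE's initial instructions followed by the FDE's instructions up to the
    // current pc, yielding the register rule row that applies to this frame.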
    const row = try context.vm.runToNative(context.allocator, context.pc, cie, fde);
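    // Derive the canonical frame address (CFA) for this frame from the row's CFA rule.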
    context.cfa = switch (row.cfa.rule) {
        .val_offset => |offset| blk: {
            const register = row.cfa.register orelse return error.InvalidCFARule;
            const value = mem.readInt(usize, (try regBytes(context.thread_context, register, context.reg_context))[0..@sizeOf(usize)], native_endian);
            break :blk try applyOffset(value, offset);
        },
        .expression => |expr| blk: {
            context.stack_machine.reset();
            const value = try context.stack_machine.run(
                expr,
                context.allocator,
                expression_context,
                context.cfa,
            );

            if (value) |v| {
                if (v != .generic) return error.InvalidExpressionValue;
                break :blk v.generic;
            } else return error.NoExpressionValue;
        },
        else => return error.InvalidCFARule,
    };

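    // Reject a CFA that does not point at readable memory before anything is read relative to it.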
    if (ma.load(usize, context.cfa.?) == null) return error.InvalidCFA;
    expression_context.cfa = context.cfa;

    // Buffering the modifications is done because copying the thread context is not portable,
    // some implementations (ie. darwin) use internal pointers to the mcontext.
    var arena = std.heap.ArenaAllocator.init(context.allocator);
    defer arena.deinit();
    const update_allocator = arena.allocator();

    const RegisterUpdate = struct {
        // Backed by thread_context
        dest: []u8,
        // Backed by arena
        src: []const u8,
        prev: ?*@This(),
    };

    var update_tail: ?*RegisterUpdate = null;
    var has_return_address = true;
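    // Resolve every column's rule against the pre-unwind register values; the resulting
    // bytes are buffered and only copied into thread_context after the loop.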
    for (context.vm.rowColumns(row)) |column| {
        if (column.register) |register| {
            if (register == cie.return_address_register) {
                has_return_address = column.rule != .undefined;
            }

            const dest = try regBytes(context.thread_context, register, context.reg_context);
            const src = try update_allocator.alloc(u8, dest.len);

            const prev = update_tail;
            update_tail = try update_allocator.create(RegisterUpdate);
            update_tail.?.* = .{
                .dest = dest,
                .src = src,
                .prev = prev,
            };

            try column.resolveValue(
                context,
                expression_context,
                ma,
                src,
            );
        }
    }

    // On all implemented architectures, the CFA is defined as being the previous frame's SP
    (try regValueNative(context.thread_context, spRegNum(context.reg_context), context.reg_context)).* = context.cfa.?;

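    // Every rule has been resolved, so the buffered register updates can now be applied.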
    while (update_tail) |tail| {
        @memcpy(tail.dest, tail.src);
        update_tail = tail.prev;
    }

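    // The unwound pc is the (pointer-auth-stripped) return address, or 0 if the return
    // address rule was undefined, which signals the end of the stack.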
    if (has_return_address) {
        context.pc = stripInstructionPtrAuthCode(mem.readInt(usize, (try regBytes(
            context.thread_context,
            cie.return_address_register,
            context.reg_context,
        ))[0..@sizeOf(usize)], native_endian));
    } else {
        context.pc = 0;
    }

    (try regValueNative(context.thread_context, ip_reg_num, context.reg_context)).* = context.pc;

    // The call instruction will have pushed the address of the instruction that follows the call as the return address.
    // This next instruction may be past the end of the function if the caller was `noreturn` (ie. the last instruction in
    // the function was the call). If we were to look up an FDE entry using the return address directly, it could end up
    // either not finding an FDE at all, or using the next FDE in the program, producing incorrect results. To prevent this,
    // we subtract one so that the next lookup is guaranteed to land inside the function that made the call.
    //
    // The exception to this rule is signal frames, where execution would be returned to the instruction
    // that triggered the handler.
    const return_address = context.pc;
    if (context.pc > 0 and !cie.isSignalFrame()) context.pc -= 1;

    return return_address;
}