step
Reads an opcode and its operands from the stream, then executes it.
Parameters
Expressions can be evaluated in different contexts, each requiring its own set of inputs.
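Only the inputs an expression actually needs have to be provided; when a required one is missing, step fails with error.IncompleteExpressionContext. As a minimal sketch (not a documented recipe), a register-relative expression might be evaluated with a Context like the one below, assuming the remaining Context fields default to unset and that thread_context, reg_context, and cfa come from the caller's unwinding state:

const context: Context = .{
    // Used by the OP.breg0...OP.breg31 / OP.bregx and OP.regval_type handlers.
    .thread_context = thread_context,
    .reg_context = reg_context,
    // Only needed if the expression uses OP.call_frame_cfa.
    .cfa = cfa,
};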
Types
- StackMachine - A stack machine that can decode and run DWARF expressions.
Error Sets
Source
pub fn step(
self: *Self,
stream: *std.io.FixedBufferStream([]const u8),
allocator: std.mem.Allocator,
context: Context,
) Error!bool {
if (@sizeOf(usize) != @sizeOf(addr_type) or options.endian != native_endian)
@compileError("Execution of non-native address sizes / endianness is not supported");
const opcode = try stream.reader().readByte();
if (options.call_frame_context and !isOpcodeValidInCFA(opcode)) return error.InvalidCFAOpcode;
const operand = try readOperand(stream, opcode, context);
switch (opcode) {
// 2.5.1.1: Literal Encodings
OP.lit0...OP.lit31,
OP.addr,
OP.const1u,
OP.const2u,
OP.const4u,
OP.const8u,
OP.const1s,
OP.const2s,
OP.const4s,
OP.const8s,
OP.constu,
OP.consts,
=> try self.stack.append(allocator, .{ .generic = operand.?.generic }),
OP.const_type => {
const const_type = operand.?.const_type;
try self.stack.append(allocator, .{ .const_type = .{
.type_offset = const_type.type_offset,
.value_bytes = const_type.value_bytes,
} });
},
OP.addrx,
OP.constx,
=> {
if (context.compile_unit == null) return error.IncompleteExpressionContext;
if (context.debug_addr == null) return error.IncompleteExpressionContext;
const debug_addr_index = operand.?.generic;
const offset = context.compile_unit.?.addr_base + debug_addr_index;
if (offset >= context.debug_addr.?.len) return error.InvalidExpression;
const value = mem.readInt(usize, context.debug_addr.?[offset..][0..@sizeOf(usize)], native_endian);
try self.stack.append(allocator, .{ .generic = value });
},
// 2.5.1.2: Register Values
OP.fbreg => {
if (context.compile_unit == null) return error.IncompleteExpressionContext;
if (context.compile_unit.?.frame_base == null) return error.IncompleteExpressionContext;
const offset: i64 = @intCast(operand.?.generic);
_ = offset;
switch (context.compile_unit.?.frame_base.?.*) {
.exprloc => {
// TODO: Run this expression in a nested stack machine
return error.UnimplementedOpcode;
},
.loclistx => {
// TODO: Read value from .debug_loclists
return error.UnimplementedOpcode;
},
.sec_offset => {
// TODO: Read value from .debug_loclists
return error.UnimplementedOpcode;
},
else => return error.InvalidFrameBase,
}
},
OP.breg0...OP.breg31,
OP.bregx,
=> {
if (context.thread_context == null) return error.IncompleteExpressionContext;
const base_register = operand.?.base_register;
var value: i64 = @intCast(mem.readInt(usize, (try abi.regBytes(
context.thread_context.?,
base_register.base_register,
context.reg_context,
))[0..@sizeOf(usize)], native_endian));
value += base_register.offset;
try self.stack.append(allocator, .{ .generic = @intCast(value) });
},
OP.regval_type => {
const register_type = operand.?.register_type;
const value = mem.readInt(usize, (try abi.regBytes(
context.thread_context.?,
register_type.register,
context.reg_context,
))[0..@sizeOf(usize)], native_endian);
try self.stack.append(allocator, .{
.regval_type = .{
.type_offset = register_type.type_offset,
.type_size = @sizeOf(addr_type),
.value = value,
},
});
},
// 2.5.1.3: Stack Operations
OP.dup => {
if (self.stack.items.len == 0) return error.InvalidExpression;
try self.stack.append(allocator, self.stack.items[self.stack.items.len - 1]);
},
OP.drop => {
_ = self.stack.pop();
},
OP.pick, OP.over => {
const stack_index = if (opcode == OP.over) 1 else operand.?.generic;
if (stack_index >= self.stack.items.len) return error.InvalidExpression;
try self.stack.append(allocator, self.stack.items[self.stack.items.len - 1 - stack_index]);
},
OP.swap => {
if (self.stack.items.len < 2) return error.InvalidExpression;
mem.swap(Value, &self.stack.items[self.stack.items.len - 1], &self.stack.items[self.stack.items.len - 2]);
},
OP.rot => {
if (self.stack.items.len < 3) return error.InvalidExpression;
const first = self.stack.items[self.stack.items.len - 1];
self.stack.items[self.stack.items.len - 1] = self.stack.items[self.stack.items.len - 2];
self.stack.items[self.stack.items.len - 2] = self.stack.items[self.stack.items.len - 3];
self.stack.items[self.stack.items.len - 3] = first;
},
OP.deref,
OP.xderef,
OP.deref_size,
OP.xderef_size,
OP.deref_type,
OP.xderef_type,
=> {
if (self.stack.items.len == 0) return error.InvalidExpression;
const addr = try self.stack.items[self.stack.items.len - 1].asIntegral();
const addr_space_identifier: ?usize = switch (opcode) {
OP.xderef,
OP.xderef_size,
OP.xderef_type,
=> blk: {
_ = self.stack.pop();
if (self.stack.items.len == 0) return error.InvalidExpression;
break :blk try self.stack.items[self.stack.items.len - 1].asIntegral();
},
else => null,
};
// Usage of addr_space_identifier in the address calculation is implementation defined.
// This code will need to be updated to handle any architectures that utilize this.
_ = addr_space_identifier;
const size = switch (opcode) {
OP.deref,
OP.xderef,
=> @sizeOf(addr_type),
OP.deref_size,
OP.xderef_size,
=> operand.?.type_size,
OP.deref_type,
OP.xderef_type,
=> operand.?.deref_type.size,
else => unreachable,
};
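// When a memory accessor is available, verify the address is readable before
// the raw pointer dereference below.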
if (context.memory_accessor) |memory_accessor| {
if (!switch (size) {
1 => memory_accessor.load(u8, addr) != null,
2 => memory_accessor.load(u16, addr) != null,
4 => memory_accessor.load(u32, addr) != null,
8 => memory_accessor.load(u64, addr) != null,
else => return error.InvalidExpression,
}) return error.InvalidExpression;
}
const value: addr_type = std.math.cast(addr_type, @as(u64, switch (size) {
1 => @as(*const u8, @ptrFromInt(addr)).*,
2 => @as(*const u16, @ptrFromInt(addr)).*,
4 => @as(*const u32, @ptrFromInt(addr)).*,
8 => @as(*const u64, @ptrFromInt(addr)).*,
else => return error.InvalidExpression,
})) orelse return error.InvalidExpression;
switch (opcode) {
OP.deref_type,
OP.xderef_type,
=> {
self.stack.items[self.stack.items.len - 1] = .{
.regval_type = .{
.type_offset = operand.?.deref_type.type_offset,
.type_size = operand.?.deref_type.size,
.value = value,
},
};
},
else => {
self.stack.items[self.stack.items.len - 1] = .{ .generic = value };
},
}
},
OP.push_object_address => {
// In sub-expressions, `push_object_address` is not meaningful (as per the
// spec), so treat it like a nop
if (!context.entry_value_context) {
if (context.object_address == null) return error.IncompleteExpressionContext;
try self.stack.append(allocator, .{ .generic = @intFromPtr(context.object_address.?) });
}
},
OP.form_tls_address => {
return error.UnimplementedOpcode;
},
OP.call_frame_cfa => {
if (context.cfa) |cfa| {
try self.stack.append(allocator, .{ .generic = cfa });
} else return error.IncompleteExpressionContext;
},
// 2.5.1.4: Arithmetic and Logical Operations
OP.abs => {
if (self.stack.items.len == 0) return error.InvalidExpression;
const value: isize = @bitCast(try self.stack.items[self.stack.items.len - 1].asIntegral());
self.stack.items[self.stack.items.len - 1] = .{
.generic = @abs(value),
};
},
OP.@"and" => {
if (self.stack.items.len < 2) return error.InvalidExpression;
const a = try self.stack.pop().?.asIntegral();
self.stack.items[self.stack.items.len - 1] = .{
.generic = a & try self.stack.items[self.stack.items.len - 1].asIntegral(),
};
},
OP.div => {
if (self.stack.items.len < 2) return error.InvalidExpression;
const a: isize = @bitCast(try self.stack.pop().?.asIntegral());
const b: isize = @bitCast(try self.stack.items[self.stack.items.len - 1].asIntegral());
self.stack.items[self.stack.items.len - 1] = .{
.generic = @bitCast(try std.math.divTrunc(isize, b, a)),
};
},
OP.minus => {
if (self.stack.items.len < 2) return error.InvalidExpression;
const b = try self.stack.pop().?.asIntegral();
self.stack.items[self.stack.items.len - 1] = .{
.generic = try std.math.sub(addr_type, try self.stack.items[self.stack.items.len - 1].asIntegral(), b),
};
},
OP.mod => {
if (self.stack.items.len < 2) return error.InvalidExpression;
const a: isize = @bitCast(try self.stack.pop().?.asIntegral());
const b: isize = @bitCast(try self.stack.items[self.stack.items.len - 1].asIntegral());
self.stack.items[self.stack.items.len - 1] = .{
.generic = @bitCast(@mod(b, a)),
};
},
OP.mul => {
if (self.stack.items.len < 2) return error.InvalidExpression;
const a: isize = @bitCast(try self.stack.pop().?.asIntegral());
const b: isize = @bitCast(try self.stack.items[self.stack.items.len - 1].asIntegral());
self.stack.items[self.stack.items.len - 1] = .{
.generic = @bitCast(@mulWithOverflow(a, b)[0]),
};
},
OP.neg => {
if (self.stack.items.len == 0) return error.InvalidExpression;
self.stack.items[self.stack.items.len - 1] = .{
.generic = @bitCast(
try std.math.negate(
@as(isize, @bitCast(try self.stack.items[self.stack.items.len - 1].asIntegral())),
),
),
};
},
OP.not => {
if (self.stack.items.len == 0) return error.InvalidExpression;
self.stack.items[self.stack.items.len - 1] = .{
.generic = ~try self.stack.items[self.stack.items.len - 1].asIntegral(),
};
},
OP.@"or" => {
if (self.stack.items.len < 2) return error.InvalidExpression;
const a = try self.stack.pop().?.asIntegral();
self.stack.items[self.stack.items.len - 1] = .{
.generic = a | try self.stack.items[self.stack.items.len - 1].asIntegral(),
};
},
OP.plus => {
if (self.stack.items.len < 2) return error.InvalidExpression;
const b = try self.stack.pop().?.asIntegral();
self.stack.items[self.stack.items.len - 1] = .{
.generic = try std.math.add(addr_type, try self.stack.items[self.stack.items.len - 1].asIntegral(), b),
};
},
OP.plus_uconst => {
if (self.stack.items.len == 0) return error.InvalidExpression;
const constant = operand.?.generic;
self.stack.items[self.stack.items.len - 1] = .{
.generic = try std.math.add(addr_type, try self.stack.items[self.stack.items.len - 1].asIntegral(), constant),
};
},
OP.shl => {
if (self.stack.items.len < 2) return error.InvalidExpression;
const a = try self.stack.pop().?.asIntegral();
const b = try self.stack.items[self.stack.items.len - 1].asIntegral();
self.stack.items[self.stack.items.len - 1] = .{
.generic = std.math.shl(usize, b, a),
};
},
OP.shr => {
if (self.stack.items.len < 2) return error.InvalidExpression;
const a = try self.stack.pop().?.asIntegral();
const b = try self.stack.items[self.stack.items.len - 1].asIntegral();
self.stack.items[self.stack.items.len - 1] = .{
.generic = std.math.shr(usize, b, a),
};
},
OP.shra => {
if (self.stack.items.len < 2) return error.InvalidExpression;
const a = try self.stack.pop().?.asIntegral();
const b: isize = @bitCast(try self.stack.items[self.stack.items.len - 1].asIntegral());
self.stack.items[self.stack.items.len - 1] = .{
.generic = @bitCast(std.math.shr(isize, b, a)),
};
},
OP.xor => {
if (self.stack.items.len < 2) return error.InvalidExpression;
const a = try self.stack.pop().?.asIntegral();
self.stack.items[self.stack.items.len - 1] = .{
.generic = a ^ try self.stack.items[self.stack.items.len - 1].asIntegral(),
};
},
// 2.5.1.5: Control Flow Operations
OP.le,
OP.ge,
OP.eq,
OP.lt,
OP.gt,
OP.ne,
=> {
if (self.stack.items.len < 2) return error.InvalidExpression;
const a = self.stack.pop().?;
const b = self.stack.items[self.stack.items.len - 1];
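// Untyped (generic) values are compared as signed integers.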
if (a == .generic and b == .generic) {
const a_int: isize = @bitCast(a.asIntegral() catch unreachable);
const b_int: isize = @bitCast(b.asIntegral() catch unreachable);
const result = @intFromBool(switch (opcode) {
OP.le => b_int <= a_int,
OP.ge => b_int >= a_int,
OP.eq => b_int == a_int,
OP.lt => b_int < a_int,
OP.gt => b_int > a_int,
OP.ne => b_int != a_int,
else => unreachable,
});
self.stack.items[self.stack.items.len - 1] = .{ .generic = result };
} else {
// TODO: Load the types referenced by these values, find their comparison operator, and run it
return error.UnimplementedTypedComparison;
}
},
OP.skip, OP.bra => {
const branch_offset = operand.?.branch_offset;
const condition = if (opcode == OP.bra) blk: {
if (self.stack.items.len == 0) return error.InvalidExpression;
break :blk try self.stack.pop().?.asIntegral() != 0;
} else true;
if (condition) {
const new_pos = std.math.cast(
usize,
try std.math.add(isize, @as(isize, @intCast(stream.pos)), branch_offset),
) orelse return error.InvalidExpression;
if (new_pos < 0 or new_pos > stream.buffer.len) return error.InvalidExpression;
stream.pos = new_pos;
}
},
OP.call2,
OP.call4,
OP.call_ref,
=> {
const debug_info_offset = operand.?.generic;
_ = debug_info_offset;
// TODO: Load a DIE entry at debug_info_offset in a .debug_info section (the spec says that it
// can be in a separate exe / shared object from the one containing this expression).
// Transfer control to the DW_AT_location attribute, with the current stack as input.
return error.UnimplementedExpressionCall;
},
// 2.5.1.6: Type Conversions
OP.convert => {
if (self.stack.items.len == 0) return error.InvalidExpression;
const type_offset = operand.?.generic;
// TODO: Load the DW_TAG_base_type entries in context.compile_unit and verify both types are the same size
const value = self.stack.items[self.stack.items.len - 1];
if (type_offset == 0) {
self.stack.items[self.stack.items.len - 1] = .{ .generic = try value.asIntegral() };
} else {
// TODO: Load the DW_TAG_base_type entry in context.compile_unit, find a conversion operator
// from the old type to the new type, run it.
return error.UnimplementedTypeConversion;
}
},
OP.reinterpret => {
if (self.stack.items.len == 0) return error.InvalidExpression;
const type_offset = operand.?.generic;
// TODO: Load the DW_TAG_base_type entries in context.compile_unit and verify both types are the same size
const value = self.stack.items[self.stack.items.len - 1];
if (type_offset == 0) {
self.stack.items[self.stack.items.len - 1] = .{ .generic = try value.asIntegral() };
} else {
self.stack.items[self.stack.items.len - 1] = switch (value) {
.generic => |v| .{
.regval_type = .{
.type_offset = type_offset,
.type_size = @sizeOf(addr_type),
.value = v,
},
},
.regval_type => |r| .{
.regval_type = .{
.type_offset = type_offset,
.type_size = r.type_size,
.value = r.value,
},
},
.const_type => |c| .{
.const_type = .{
.type_offset = type_offset,
.value_bytes = c.value_bytes,
},
},
};
}
},
// 2.5.1.7: Special Operations
OP.nop => {},
OP.entry_value => {
const block = operand.?.block;
if (block.len == 0) return error.InvalidSubExpression;
// TODO: The spec states that this sub-expression needs to observe the state (i.e. registers)
// as it was upon entering the current subprogram. If this isn't being called at the
// end of a frame unwind operation, an additional ThreadContext with this state will be needed.
if (isOpcodeRegisterLocation(block[0])) {
if (context.thread_context == null) return error.IncompleteExpressionContext;
var block_stream = std.io.fixedBufferStream(block);
const register = (try readOperand(&block_stream, block[0], context)).?.register;
const value = mem.readInt(usize, (try abi.regBytes(context.thread_context.?, register, context.reg_context))[0..@sizeOf(usize)], native_endian);
try self.stack.append(allocator, .{ .generic = value });
} else {
var stack_machine: Self = .{};
defer stack_machine.deinit(allocator);
var sub_context = context;
sub_context.entry_value_context = true;
const result = try stack_machine.run(block, allocator, sub_context, null);
try self.stack.append(allocator, result orelse return error.InvalidSubExpression);
}
},
// These have already been handled by readOperand
OP.lo_user...OP.hi_user => unreachable,
else => {
//std.debug.print("Unknown DWARF expression opcode: {x}\n", .{opcode});
return error.UnknownExpressionOpcode;
},
}
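// Report whether any opcodes remain in the expression stream.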
return stream.pos < stream.buffer.len;
}
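For reference, a minimal sketch of how a caller might drive this function until the expression has been fully executed. Machine stands for the StackMachine instantiation in scope, and expression_bytes, context, and allocator are assumed to be supplied by the caller; the names are illustrative, not part of the documented API:

var machine: Machine = .{};
defer machine.deinit(allocator);

// `expression_bytes` is the encoded DWARF expression ([]const u8).
var stream = std.io.fixedBufferStream(expression_bytes);

// Execute one opcode per call; `step` returns false once the stream is exhausted.
while (try machine.step(&stream, allocator, context)) {}

// The expression's result, if any, is the value left on top of the stack.
const result = if (machine.stack.items.len > 0)
    machine.stack.items[machine.stack.items.len - 1]
else
    null;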