ptrCast
Handle one or more nested pointer cast builtins:
- @ptrCast
- @alignCast
- @addrSpaceCast
- @constCast
- @volatileCast
Any sequence of such builtins is treated as a single operation. This allows
sequences like `@ptrCast(@alignCast(ptr))` to work correctly despite the
intermediate result type being unknown.
Function parameters
Parameters
- gz: *GenZir
- scope: *Scope
- ri: ResultInfo
- root_node: Ast.Node.Index
Functions in this namespace
Functions
Source
Implementation
/// Handle one or more nested pointer cast builtins:
/// * `@ptrCast`
/// * `@alignCast`
/// * `@addrSpaceCast`
/// * `@constCast`
/// * `@volatileCast`
/// Any sequence of such builtins is treated as a single operation. This allows
/// sequences like `@ptrCast(@alignCast(ptr))` to work correctly despite the
/// intermediate result type being unknown.
/// Emits either a `type_cast`-style instruction, `ptr_cast_no_dest`, or
/// `ptr_cast_full` depending on which flags were accumulated, and applies
/// `rvalue` with the caller's result info. `@fieldParentPtr` is also handled
/// here so a cast chain wrapping it is folded into its flags.
fn ptrCast(
    gz: *GenZir,
    scope: *Scope,
    ri: ResultInfo,
    root_node: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
    const astgen = gz.astgen;
    const tree = astgen.tree;
    const FlagsInt = @typeInfo(Zir.Inst.FullPtrCastFlags).@"struct".backing_integer.?;
    // Accumulates which cast builtins appear in the chain; one bit per builtin.
    var flags: Zir.Inst.FullPtrCastFlags = .{};
    // Note that all pointer cast builtins have one parameter, so we only need
    // to handle `builtin_call_two`.
    var node = root_node;
    while (true) {
        switch (tree.nodeTag(node)) {
            .builtin_call_two, .builtin_call_two_comma => {},
            .grouped_expression => {
                // Handle the chaining even with redundant parentheses
                node = tree.nodeData(node).node_and_token[0];
                continue;
            },
            else => break,
        }
        var buf: [2]Ast.Node.Index = undefined;
        const args = tree.builtinCallParams(&buf, node).?;
        std.debug.assert(args.len <= 2);
        if (args.len == 0) break; // 0 args
        const builtin_token = tree.nodeMainToken(node);
        const builtin_name = tree.tokenSlice(builtin_token);
        // Unknown builtin name: stop chaining and treat `node` as the operand.
        const info = BuiltinFn.list.get(builtin_name) orelse break;
        if (args.len == 1) {
            if (info.param_count != 1) break;
            switch (info.tag) {
                else => break,
                inline .ptr_cast,
                .align_cast,
                .addrspace_cast,
                .const_cast,
                .volatile_cast,
                => |tag| {
                    // The same cast appearing twice in one chain is an error,
                    // not a no-op fold.
                    if (@field(flags, @tagName(tag))) {
                        return astgen.failNode(node, "redundant {s}", .{builtin_name});
                    }
                    @field(flags, @tagName(tag)) = true;
                },
            }
            // Descend into the builtin's single argument and keep chaining.
            node = args[0];
        } else {
            std.debug.assert(args.len == 2);
            if (info.param_count != 2) break;
            switch (info.tag) {
                else => break,
                .field_parent_ptr => {
                    // `@ptrCast` cannot be folded into `@fieldParentPtr`; let
                    // the generic path diagnose it.
                    if (flags.ptr_cast) break;
                    const flags_int: FlagsInt = @bitCast(flags);
                    const cursor = maybeAdvanceSourceCursorToMainToken(gz, root_node);
                    // The builtin lowered in this branch is `@fieldParentPtr`,
                    // so the "must have a known result type" diagnostic must
                    // name it (previously this incorrectly said "@alignCast").
                    const parent_ptr_type = try ri.rl.resultTypeForCast(gz, root_node, "@fieldParentPtr");
                    const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, args[0], .field_name);
                    const field_ptr = try expr(gz, scope, .{ .rl = .none }, args[1]);
                    try emitDbgStmt(gz, cursor);
                    const result = try gz.addExtendedPayloadSmall(.field_parent_ptr, flags_int, Zir.Inst.FieldParentPtr{
                        .src_node = gz.nodeIndexToRelative(node),
                        .parent_ptr_type = parent_ptr_type,
                        .field_name = field_name,
                        .field_ptr = field_ptr,
                    });
                    return rvalue(gz, ri, result, root_node);
                },
            }
        }
    }
    const flags_int: FlagsInt = @bitCast(flags);
    // The loop body only exits to here after setting at least one flag.
    assert(flags_int != 0);
    const ptr_only: Zir.Inst.FullPtrCastFlags = .{ .ptr_cast = true };
    if (flags_int == @as(FlagsInt, @bitCast(ptr_only))) {
        // Special case: simpler representation
        return typeCast(gz, scope, ri, root_node, node, .ptr_cast, "@ptrCast");
    }
    const no_result_ty_flags: Zir.Inst.FullPtrCastFlags = .{
        .const_cast = true,
        .volatile_cast = true,
    };
    if ((flags_int & ~@as(FlagsInt, @bitCast(no_result_ty_flags))) == 0) {
        // Result type not needed: `@constCast`/`@volatileCast` only strip
        // qualifiers, so the operand's own type suffices.
        const cursor = maybeAdvanceSourceCursorToMainToken(gz, root_node);
        const operand = try expr(gz, scope, .{ .rl = .none }, node);
        try emitDbgStmt(gz, cursor);
        const result = try gz.addExtendedPayloadSmall(.ptr_cast_no_dest, flags_int, Zir.Inst.UnNode{
            .node = gz.nodeIndexToRelative(root_node),
            .operand = operand,
        });
        return rvalue(gz, ri, result, root_node);
    }
    // Full cast including result type
    const cursor = maybeAdvanceSourceCursorToMainToken(gz, root_node);
    const result_type = try ri.rl.resultTypeForCast(gz, root_node, flags.needResultTypeBuiltinName());
    const operand = try expr(gz, scope, .{ .rl = .none }, node);
    try emitDbgStmt(gz, cursor);
    const result = try gz.addExtendedPayloadSmall(.ptr_cast_full, flags_int, Zir.Inst.BinNode{
        .node = gz.nodeIndexToRelative(root_node),
        .lhs = result_type,
        .rhs = operand,
    });
    return rvalue(gz, ri, result, root_node);
}