stage2: improved handling of store_to_block_ptr
* AstGen: remove the setBlockBodyEliding function. This is no longer
  needed after 63788b2a51.

* Sema: the store_to_block_ptr instruction is now handled as
  store_to_inferred_ptr or store, as necessary.
parent 5d30e8016d
commit adb746a701
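For illustration only (not part of this commit), a minimal Zig snippet of the shape that exercises this path, assuming AstGen routes the block result through a result-location pointer here — whether it actually emits store_to_block_ptr depends on its result-location strategy:

const std = @import("std");

test "labeled block result stored to an inferred local" {
    // `x` is declared with an inferred type, so its alloc is an inferred
    // alloc; the block result reaches it through a result pointer, which
    // is exactly the store Sema must now classify.
    var x = blk: {
        var y: u32 = 40;
        y += 2;
        break :blk y;
    };
    x += 0; // keep `x` a var so the inferred-alloc path applies
    try std.testing.expect(x == 42);
}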
src/AstGen.zig

@@ -1964,11 +1964,7 @@ fn labeledBlockExpr(
         },
         .break_operand => {
             // All break operands are values that did not use the result location pointer.
-            if (strat.elide_store_to_block_ptr_instructions) {
-                try block_scope.setBlockBodyEliding(block_inst);
-            } else {
-                try block_scope.setBlockBody(block_inst);
-            }
+            try block_scope.setBlockBody(block_inst);
             const block_ref = indexToRef(block_inst);
             switch (rl) {
                 .ref => return block_ref,
@@ -9734,32 +9730,6 @@ const GenZir = struct {
         gz.unstack();
     }
 
-    /// Same as `setBlockBody` except we don't copy instructions which are
-    /// `store_to_block_ptr` instructions with lhs set to .none.
-    /// Assumes nothing stacked on `gz`. Unstacks `gz`.
-    fn setBlockBodyEliding(gz: *GenZir, inst: Zir.Inst.Index) !void {
-        const gpa = gz.astgen.gpa;
-        const body = gz.instructionsSlice();
-        try gz.astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.Block).Struct.fields.len + body.len);
-        const zir_datas = gz.astgen.instructions.items(.data);
-        const zir_tags = gz.astgen.instructions.items(.tag);
-        const block_pl_index = gz.astgen.addExtraAssumeCapacity(Zir.Inst.Block{
-            .body_len = @intCast(u32, body.len),
-        });
-        zir_datas[inst].pl_node.payload_index = block_pl_index;
-        for (body) |sub_inst| {
-            if (zir_tags[sub_inst] == .store_to_block_ptr and
-                zir_datas[sub_inst].bin.lhs == .none)
-            {
-                // Decrement `body_len`.
-                gz.astgen.extra.items[block_pl_index] -= 1;
-                continue;
-            }
-            gz.astgen.extra.appendAssumeCapacity(sub_inst);
-        }
-        gz.unstack();
-    }
-
     /// Supports `body_gz` stacked on `ret_gz` stacked on `gz`. Unstacks `body_gz` and `ret_gz`.
     fn addFunc(gz: *GenZir, args: struct {
         src_node: Ast.Node.Index,
src/Sema.zig
@@ -3247,22 +3247,31 @@ fn zirStoreToBlockPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
     defer tracy.end();
 
     const bin_inst = sema.code.instructions.items(.data)[inst].bin;
-    const ptr = sema.inst_map.get(@enumToInt(bin_inst.lhs) - @as(u32, Zir.Inst.Ref.typed_value_map.len)) orelse {
+    const ptr = sema.inst_map.get(Zir.refToIndex(bin_inst.lhs).?) orelse {
         // This is an elided instruction, but AstGen was unable to omit it.
         return;
     };
-    const value = sema.resolveInst(bin_inst.rhs);
-    const ptr_ty = try Type.ptr(sema.arena, .{
-        .pointee_type = sema.typeOf(value),
-        // TODO figure out which address space is appropriate here
-        .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local),
-    });
-    // TODO detect when this store should be done at compile-time. For example,
-    // if expressions should force it when the condition is compile-time known.
-    const src: LazySrcLoc = .unneeded;
-    try sema.requireRuntimeBlock(block, src);
-    const bitcasted_ptr = try block.addBitCast(ptr_ty, ptr);
-    return sema.storePtr(block, src, bitcasted_ptr, value);
+    const operand = sema.resolveInst(bin_inst.rhs);
+    const src: LazySrcLoc = sema.src;
+    blk: {
+        const ptr_inst = Air.refToIndex(ptr) orelse break :blk;
+        if (sema.air_instructions.items(.tag)[ptr_inst] != .constant) break :blk;
+        const air_datas = sema.air_instructions.items(.data);
+        const ptr_val = sema.air_values.items[air_datas[ptr_inst].ty_pl.payload];
+        switch (ptr_val.tag()) {
+            .inferred_alloc_comptime => {
+                const iac = ptr_val.castTag(.inferred_alloc_comptime).?;
+                return sema.storeToInferredAllocComptime(block, src, operand, iac);
+            },
+            .inferred_alloc => {
+                const inferred_alloc = ptr_val.castTag(.inferred_alloc).?;
+                return sema.storeToInferredAlloc(block, src, ptr, operand, inferred_alloc);
+            },
+            else => break :blk,
+        }
+    }
+
+    return sema.storePtr(block, src, ptr, operand);
 }
 
 fn zirStoreToInferredPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
@@ -3273,46 +3282,71 @@ fn zirStoreToInferredPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
     const bin_inst = sema.code.instructions.items(.data)[inst].bin;
     const ptr = sema.resolveInst(bin_inst.lhs);
    const operand = sema.resolveInst(bin_inst.rhs);
-    const operand_ty = sema.typeOf(operand);
     const ptr_inst = Air.refToIndex(ptr).?;
     assert(sema.air_instructions.items(.tag)[ptr_inst] == .constant);
     const air_datas = sema.air_instructions.items(.data);
     const ptr_val = sema.air_values.items[air_datas[ptr_inst].ty_pl.payload];
 
-    if (ptr_val.castTag(.inferred_alloc_comptime)) |iac| {
-        // There will be only one store_to_inferred_ptr because we are running at comptime.
-        // The alloc will turn into a Decl.
-        if (try sema.resolveMaybeUndefValAllowVariables(block, src, operand)) |operand_val| {
-            if (operand_val.tag() == .variable) {
-                return sema.failWithNeededComptime(block, src);
-            }
-            var anon_decl = try block.startAnonDecl(src);
-            defer anon_decl.deinit();
-            iac.data.decl = try anon_decl.finish(
-                try operand_ty.copy(anon_decl.arena()),
-                try operand_val.copy(anon_decl.arena()),
-            );
-            // TODO set the alignment on the decl
-            return;
-        } else {
+    switch (ptr_val.tag()) {
+        .inferred_alloc_comptime => {
+            const iac = ptr_val.castTag(.inferred_alloc_comptime).?;
+            return sema.storeToInferredAllocComptime(block, src, operand, iac);
+        },
+        .inferred_alloc => {
+            const inferred_alloc = ptr_val.castTag(.inferred_alloc).?;
+            return sema.storeToInferredAlloc(block, src, ptr, operand, inferred_alloc);
+        },
+        else => unreachable,
+    }
+}
+
+fn storeToInferredAlloc(
+    sema: *Sema,
+    block: *Block,
+    src: LazySrcLoc,
+    ptr: Air.Inst.Ref,
+    operand: Air.Inst.Ref,
+    inferred_alloc: *Value.Payload.InferredAlloc,
+) CompileError!void {
+    const operand_ty = sema.typeOf(operand);
+    // Add the stored instruction to the set we will use to resolve peer types
+    // for the inferred allocation.
+    try inferred_alloc.data.stored_inst_list.append(sema.arena, operand);
+    // Create a runtime bitcast instruction with exactly the type the pointer wants.
+    const ptr_ty = try Type.ptr(sema.arena, .{
+        .pointee_type = operand_ty,
+        .@"align" = inferred_alloc.data.alignment,
+        .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local),
+    });
+    const bitcasted_ptr = try block.addBitCast(ptr_ty, ptr);
+    return sema.storePtr(block, src, bitcasted_ptr, operand);
+}
+
+fn storeToInferredAllocComptime(
+    sema: *Sema,
+    block: *Block,
+    src: LazySrcLoc,
+    operand: Air.Inst.Ref,
+    iac: *Value.Payload.InferredAllocComptime,
+) CompileError!void {
+    const operand_ty = sema.typeOf(operand);
+    // There will be only one store_to_inferred_ptr because we are running at comptime.
+    // The alloc will turn into a Decl.
+    if (try sema.resolveMaybeUndefValAllowVariables(block, src, operand)) |operand_val| {
+        if (operand_val.tag() == .variable) {
+            return sema.failWithNeededComptime(block, src);
+        }
+        var anon_decl = try block.startAnonDecl(src);
+        defer anon_decl.deinit();
+        iac.data.decl = try anon_decl.finish(
+            try operand_ty.copy(anon_decl.arena()),
+            try operand_val.copy(anon_decl.arena()),
+        );
+        // TODO set the alignment on the decl
+        return;
+    } else {
+        return sema.failWithNeededComptime(block, src);
+    }
-            return sema.failWithNeededComptime(block, src);
-        }
-    }
-
-    if (ptr_val.castTag(.inferred_alloc)) |inferred_alloc| {
-        // Add the stored instruction to the set we will use to resolve peer types
-        // for the inferred allocation.
-        try inferred_alloc.data.stored_inst_list.append(sema.arena, operand);
-        // Create a runtime bitcast instruction with exactly the type the pointer wants.
-        const ptr_ty = try Type.ptr(sema.arena, .{
-            .pointee_type = operand_ty,
-            .@"align" = inferred_alloc.data.alignment,
-            .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local),
-        });
-        const bitcasted_ptr = try block.addBitCast(ptr_ty, ptr);
-        return sema.storePtr(block, src, bitcasted_ptr, operand);
-    }
-    unreachable;
 }
 
 fn zirSetEvalBranchQuota(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
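For contrast, a minimal sketch (not from the commit) of source that would plausibly take the storeToInferredAllocComptime path above: the stored value is comptime-known, so the inferred allocation can become an anonymous Decl.

const std = @import("std");

test "comptime store through an inferred allocation" {
    // The block is evaluated at comptime, so the value stored into the
    // inferred result location is comptime-known and can turn into a Decl.
    const x = comptime blk: {
        var y: u32 = 6;
        y *= 7;
        break :blk y;
    };
    try std.testing.expect(x == 42);
}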
src/Zir.zig
@@ -518,11 +518,14 @@ pub const Inst = struct {
         /// Same as `store` except provides a source location.
         /// Uses the `pl_node` union field. Payload is `Bin`.
         store_node,
-        /// Same as `store` but the type of the value being stored will be used to infer
-        /// the block type. The LHS is the pointer to store to.
-        /// Uses the `bin` union field.
-        /// If the pointer is none, it means this instruction has been elided in
-        /// AstGen, but AstGen was unable to actually omit it from the ZIR code.
+        /// This instruction is not really supposed to be emitted from AstGen; nevertheless it
+        /// is sometimes emitted due to deficiencies in AstGen. When Sema sees this instruction,
+        /// it must clean up after AstGen's mess by looking at various context clues and
+        /// then treating it as one of the following:
+        ///  * no-op
+        ///  * store_to_inferred_ptr
+        ///  * store
+        /// Uses the `bin` union field with LHS as the pointer to store to.
         store_to_block_ptr,
         /// Same as `store` but the type of the value being stored will be used to infer
         /// the pointer type.
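And a hedged sketch (again not part of the commit) of the plain `store` outcome listed above: here the destination type is explicit, so no inference should be needed when Sema classifies the block-result store.

const std = @import("std");

test "labeled block result stored through a typed local" {
    // `s` has an explicit type, so the result pointer the block writes
    // through is a concrete pointer and the store is an ordinary `store`.
    var s: u32 = blk: {
        var t: u32 = 1;
        t += 1;
        break :blk t;
    };
    s += 0;
    try std.testing.expect(s == 2);
}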
test/behavior.zig

@@ -124,6 +124,7 @@ test {
     _ = @import("behavior/bugs/421.zig");
     _ = @import("behavior/bugs/726.zig");
     _ = @import("behavior/bugs/1421.zig");
+    _ = @import("behavior/bugs/1442.zig");
     _ = @import("behavior/bugs/2114.zig");
     _ = @import("behavior/bugs/3742.zig");
     _ = @import("behavior/struct_contains_null_ptr_itself.zig");

@@ -144,7 +145,6 @@ test {
     _ = @import("behavior/bugs/828.zig");
     _ = @import("behavior/bugs/920.zig");
     _ = @import("behavior/bugs/1120.zig");
-    _ = @import("behavior/bugs/1442.zig");
     _ = @import("behavior/bugs/1607.zig");
     _ = @import("behavior/bugs/1851.zig");
     _ = @import("behavior/bugs/3384.zig");
test/behavior/bugs/1442.zig

@@ -7,8 +7,6 @@ const Union = union(enum) {
 };
 
 test "const error union field alignment" {
-    if (builtin.zig_backend != .stage1) return error.SkipZigTest;
-
     var union_or_err: anyerror!Union = Union{ .Color = 1234 };
     try std.testing.expect((union_or_err catch unreachable).Color == 1234);
 }
test/behavior/eval.zig

@@ -495,7 +495,7 @@ test "@tagName of @typeInfo" {
 }
 
 test "static eval list init" {
-    if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
 
     try expect(static_vec3.data[2] == 1.0);
     try expect(vec3(0.0, 0.0, 3.0).data[2] == 3.0);

@@ -511,8 +511,6 @@ pub fn vec3(x: f32, y: f32, z: f32) Vec3 {
 }
 
 test "inlined loop has array literal with elided runtime scope on first iteration but not second iteration" {
-    if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
-
     var runtime = [1]i32{3};
     comptime var i: usize = 0;
     inline while (i < 2) : (i += 1) {