stage2: use indexes for Decl objects

Rather than allocating Decl objects individually with a general-purpose
Allocator, we instead store them in a SegmentedList. This provides four advantages:
 * Stable memory so that one thread can access a Decl object while another
   thread allocates additional Decl objects from this list.
 * It allows us to use u32 indexes to reference Decl objects rather than
   pointers, saving memory in Type, Value, and dependency sets.
 * Using integers to reference Decl objects rather than pointers makes
   serialization trivial.
 * It provides a unique integer to be used for anonymous symbol names,
   avoiding multi-threaded contention on an atomic counter.
Andrew Kelley · 2022-04-19 21:51:08 -07:00
parent 4f527e5d36 · commit f7596ae942
31 changed files with 2584 additions and 2181 deletions
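
The heart of the change is one level of indirection: each Decl lives at a stable slot inside a SegmentedList owned by the Module, and everything else stores a u32-backed index that is resolved through a lookup method. Below is a minimal sketch of the pattern; the exact field names, whether Index is a bare u32 or a non-exhaustive enum, and the handling of freed slots are simplifications, not the commit's literal API:

const std = @import("std");

pub const Decl = struct {
    name: [*:0]const u8,
    alive: bool = false,

    /// 32-bit handle stored in Type, Value, dependency sets, and Jobs
    /// wherever a *Decl pointer used to be. (Sketch; see the diffs below.)
    pub const Index = enum(u32) { _ };
};

pub const Module = struct {
    /// A SegmentedList never moves existing elements when it grows, so one
    /// thread can hold a *Decl while another thread appends new Decls.
    allocated_decls: std.SegmentedList(Decl, 0) = .{},

    /// Resolve an index back to a stable pointer.
    pub fn declPtr(mod: *Module, index: Decl.Index) *Decl {
        return mod.allocated_decls.at(@enumToInt(index));
    }
};

Because the index is a plain integer, it doubles as a serialization-friendly identifier and as the unique number for anonymous symbol names mentioned above.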


@ -148,6 +148,24 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
return result;
}
/// Reduce length to `new_len`.
/// Invalidates pointers for the elements at index new_len and beyond.
pub fn shrinkRetainingCapacity(self: *Self, new_len: usize) void {
assert(new_len <= self.len);
self.len = new_len;
}
/// Invalidates all element pointers.
pub fn clearRetainingCapacity(self: *Self) void {
self.len = 0;
}
/// Invalidates all element pointers.
pub fn clearAndFree(self: *Self, allocator: Allocator) void {
self.setCapacity(allocator, 0) catch unreachable;
self.len = 0;
}
/// Grows or shrinks capacity to match usage.
/// TODO update this and related methods to match the conventions set by ArrayList
pub fn setCapacity(self: *Self, allocator: Allocator, new_capacity: usize) Allocator.Error!void {
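
The three methods added above mirror their ArrayList counterparts, per the TODO in the hunk. A quick usage sketch, assuming the era's unmanaged SegmentedList API where mutating methods take the allocator explicitly:

const std = @import("std");

test "SegmentedList shrink and clear" {
    const gpa = std.testing.allocator;
    var list: std.SegmentedList(u32, 0) = .{};
    defer list.deinit(gpa);

    try list.append(gpa, 1);
    try list.append(gpa, 2);
    try list.append(gpa, 3);

    // Drop the tail; pointers below new_len stay valid, capacity is kept.
    list.shrinkRetainingCapacity(1);

    // Empty the list but keep the allocated segments for reuse.
    list.clearRetainingCapacity();

    // Empty the list and free the segments.
    list.clearAndFree(gpa);
}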


@ -191,22 +191,22 @@ pub const CSourceFile = struct {
const Job = union(enum) {
/// Write the constant value for a Decl to the output file.
codegen_decl: *Module.Decl,
codegen_decl: Module.Decl.Index,
/// Write the machine code for a function to the output file.
codegen_func: *Module.Fn,
/// Render the .h file snippet for the Decl.
emit_h_decl: *Module.Decl,
emit_h_decl: Module.Decl.Index,
/// The Decl needs to be analyzed and possibly export itself.
/// It may have already been analyzed, or it may have been determined
/// to be outdated; in this case perform semantic analysis again.
analyze_decl: *Module.Decl,
analyze_decl: Module.Decl.Index,
/// The file that was loaded with `@embedFile` has changed on disk
/// and has been re-loaded into memory. All Decls that depend on it
/// need to be re-analyzed.
update_embed_file: *Module.EmbedFile,
/// The source file containing the Decl has been updated, and so the
/// Decl may need its line number information updated in the debug info.
update_line_number: *Module.Decl,
update_line_number: Module.Decl.Index,
/// The main source file for the package needs to be analyzed.
analyze_pkg: *Package,
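
With the fields above switched from pointers to Module.Decl.Index, a queued job carries a 4-byte handle rather than a pointer. A hypothetical enqueue site (work_queue follows Compilation's existing naming; decl_index is assumed to be in scope):

// Previously: try comp.work_queue.writeItem(.{ .analyze_decl = decl });
// Now the job stores only the u32-backed index; the worker resolves it
// back to a *Decl via module.declPtr(decl_index) when the job runs.
try comp.work_queue.writeItem(.{ .analyze_decl = decl_index });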
@ -2105,17 +2105,18 @@ pub fn update(comp: *Compilation) !void {
// deletion set may grow as we call `clearDecl` within this loop,
// and more unreferenced Decls are revealed.
while (module.deletion_set.count() != 0) {
const decl = module.deletion_set.keys()[0];
const decl_index = module.deletion_set.keys()[0];
const decl = module.declPtr(decl_index);
assert(decl.deletion_flag);
assert(decl.dependants.count() == 0);
const is_anon = if (decl.zir_decl_index == 0) blk: {
break :blk decl.src_namespace.anon_decls.swapRemove(decl);
break :blk decl.src_namespace.anon_decls.swapRemove(decl_index);
} else false;
try module.clearDecl(decl, null);
try module.clearDecl(decl_index, null);
if (is_anon) {
decl.destroy(module);
module.destroyDecl(decl_index);
}
}
@ -2444,13 +2445,15 @@ pub fn totalErrorCount(self: *Compilation) usize {
// the previous parse success, including compile errors, but we cannot
// emit them until the file succeeds parsing.
for (module.failed_decls.keys()) |key| {
if (key.getFileScope().okToReportErrors()) {
const decl = module.declPtr(key);
if (decl.getFileScope().okToReportErrors()) {
total += 1;
}
}
if (module.emit_h) |emit_h| {
for (emit_h.failed_decls.keys()) |key| {
if (key.getFileScope().okToReportErrors()) {
const decl = module.declPtr(key);
if (decl.getFileScope().okToReportErrors()) {
total += 1;
}
}
@ -2529,9 +2532,10 @@ pub fn getAllErrorsAlloc(self: *Compilation) !AllErrors {
{
var it = module.failed_decls.iterator();
while (it.next()) |entry| {
const decl = module.declPtr(entry.key_ptr.*);
// Skip errors for Decls within files that had a parse failure.
// We'll try again once parsing succeeds.
if (entry.key_ptr.*.getFileScope().okToReportErrors()) {
if (decl.getFileScope().okToReportErrors()) {
try AllErrors.add(module, &arena, &errors, entry.value_ptr.*.*);
}
}
@ -2539,9 +2543,10 @@ pub fn getAllErrorsAlloc(self: *Compilation) !AllErrors {
if (module.emit_h) |emit_h| {
var it = emit_h.failed_decls.iterator();
while (it.next()) |entry| {
const decl = module.declPtr(entry.key_ptr.*);
// Skip errors for Decls within files that had a parse failure.
// We'll try again once parsing succeeds.
if (entry.key_ptr.*.getFileScope().okToReportErrors()) {
if (decl.getFileScope().okToReportErrors()) {
try AllErrors.add(module, &arena, &errors, entry.value_ptr.*.*);
}
}
@ -2564,7 +2569,8 @@ pub fn getAllErrorsAlloc(self: *Compilation) !AllErrors {
const keys = module.compile_log_decls.keys();
const values = module.compile_log_decls.values();
// First one will be the error; subsequent ones will be notes.
const src_loc = keys[0].nodeOffsetSrcLoc(values[0]);
const err_decl = module.declPtr(keys[0]);
const src_loc = err_decl.nodeOffsetSrcLoc(values[0]);
const err_msg = Module.ErrorMsg{
.src_loc = src_loc,
.msg = "found compile log statement",
@ -2573,8 +2579,9 @@ pub fn getAllErrorsAlloc(self: *Compilation) !AllErrors {
defer self.gpa.free(err_msg.notes);
for (keys[1..]) |key, i| {
const note_decl = module.declPtr(key);
err_msg.notes[i] = .{
.src_loc = key.nodeOffsetSrcLoc(values[i + 1]),
.src_loc = note_decl.nodeOffsetSrcLoc(values[i + 1]),
.msg = "also here",
};
}
@ -2708,38 +2715,42 @@ pub fn performAllTheWork(
fn processOneJob(comp: *Compilation, job: Job) !void {
switch (job) {
.codegen_decl => |decl| switch (decl.analysis) {
.unreferenced => unreachable,
.in_progress => unreachable,
.outdated => unreachable,
.codegen_decl => |decl_index| {
if (build_options.omit_stage2)
@panic("sadly stage2 is omitted from this build to save memory on the CI server");
.file_failure,
.sema_failure,
.codegen_failure,
.dependency_failure,
.sema_failure_retryable,
=> return,
const module = comp.bin_file.options.module.?;
const decl = module.declPtr(decl_index);
.complete, .codegen_failure_retryable => {
if (build_options.omit_stage2)
@panic("sadly stage2 is omitted from this build to save memory on the CI server");
switch (decl.analysis) {
.unreferenced => unreachable,
.in_progress => unreachable,
.outdated => unreachable,
const named_frame = tracy.namedFrame("codegen_decl");
defer named_frame.end();
.file_failure,
.sema_failure,
.codegen_failure,
.dependency_failure,
.sema_failure_retryable,
=> return,
const module = comp.bin_file.options.module.?;
assert(decl.has_tv);
.complete, .codegen_failure_retryable => {
const named_frame = tracy.namedFrame("codegen_decl");
defer named_frame.end();
if (decl.alive) {
try module.linkerUpdateDecl(decl);
assert(decl.has_tv);
if (decl.alive) {
try module.linkerUpdateDecl(decl_index);
return;
}
// Instead of sending this decl to the linker, we actually will delete it
// because we found out that it in fact was never referenced.
module.deleteUnusedDecl(decl_index);
return;
}
// Instead of sending this decl to the linker, we actually will delete it
// because we found out that it in fact was never referenced.
module.deleteUnusedDecl(decl);
return;
},
},
}
},
.codegen_func => |func| {
if (build_options.omit_stage2)
@ -2754,68 +2765,73 @@ fn processOneJob(comp: *Compilation, job: Job) !void {
error.AnalysisFail => return,
};
},
.emit_h_decl => |decl| switch (decl.analysis) {
.unreferenced => unreachable,
.in_progress => unreachable,
.outdated => unreachable,
.file_failure,
.sema_failure,
.dependency_failure,
.sema_failure_retryable,
=> return,
// emit-h only requires semantic analysis of the Decl to be complete,
// it does not depend on machine code generation to succeed.
.codegen_failure, .codegen_failure_retryable, .complete => {
if (build_options.omit_stage2)
@panic("sadly stage2 is omitted from this build to save memory on the CI server");
const named_frame = tracy.namedFrame("emit_h_decl");
defer named_frame.end();
const gpa = comp.gpa;
const module = comp.bin_file.options.module.?;
const emit_h = module.emit_h.?;
_ = try emit_h.decl_table.getOrPut(gpa, decl);
const decl_emit_h = decl.getEmitH(module);
const fwd_decl = &decl_emit_h.fwd_decl;
fwd_decl.shrinkRetainingCapacity(0);
var typedefs_arena = std.heap.ArenaAllocator.init(gpa);
defer typedefs_arena.deinit();
var dg: c_codegen.DeclGen = .{
.gpa = gpa,
.module = module,
.error_msg = null,
.decl = decl,
.fwd_decl = fwd_decl.toManaged(gpa),
.typedefs = c_codegen.TypedefMap.initContext(gpa, .{
.target = comp.getTarget(),
}),
.typedefs_arena = typedefs_arena.allocator(),
};
defer dg.fwd_decl.deinit();
defer dg.typedefs.deinit();
c_codegen.genHeader(&dg) catch |err| switch (err) {
error.AnalysisFail => {
try emit_h.failed_decls.put(gpa, decl, dg.error_msg.?);
return;
},
else => |e| return e,
};
fwd_decl.* = dg.fwd_decl.moveToUnmanaged();
fwd_decl.shrinkAndFree(gpa, fwd_decl.items.len);
},
},
.analyze_decl => |decl| {
.emit_h_decl => |decl_index| {
if (build_options.omit_stage2)
@panic("sadly stage2 is omitted from this build to save memory on the CI server");
const module = comp.bin_file.options.module.?;
module.ensureDeclAnalyzed(decl) catch |err| switch (err) {
const decl = module.declPtr(decl_index);
switch (decl.analysis) {
.unreferenced => unreachable,
.in_progress => unreachable,
.outdated => unreachable,
.file_failure,
.sema_failure,
.dependency_failure,
.sema_failure_retryable,
=> return,
// emit-h only requires semantic analysis of the Decl to be complete,
// it does not depend on machine code generation to succeed.
.codegen_failure, .codegen_failure_retryable, .complete => {
const named_frame = tracy.namedFrame("emit_h_decl");
defer named_frame.end();
const gpa = comp.gpa;
const emit_h = module.emit_h.?;
_ = try emit_h.decl_table.getOrPut(gpa, decl_index);
const decl_emit_h = emit_h.declPtr(decl_index);
const fwd_decl = &decl_emit_h.fwd_decl;
fwd_decl.shrinkRetainingCapacity(0);
var typedefs_arena = std.heap.ArenaAllocator.init(gpa);
defer typedefs_arena.deinit();
var dg: c_codegen.DeclGen = .{
.gpa = gpa,
.module = module,
.error_msg = null,
.decl_index = decl_index,
.decl = decl,
.fwd_decl = fwd_decl.toManaged(gpa),
.typedefs = c_codegen.TypedefMap.initContext(gpa, .{
.mod = module,
}),
.typedefs_arena = typedefs_arena.allocator(),
};
defer dg.fwd_decl.deinit();
defer dg.typedefs.deinit();
c_codegen.genHeader(&dg) catch |err| switch (err) {
error.AnalysisFail => {
try emit_h.failed_decls.put(gpa, decl_index, dg.error_msg.?);
return;
},
else => |e| return e,
};
fwd_decl.* = dg.fwd_decl.moveToUnmanaged();
fwd_decl.shrinkAndFree(gpa, fwd_decl.items.len);
},
}
},
.analyze_decl => |decl_index| {
if (build_options.omit_stage2)
@panic("sadly stage2 is omitted from this build to save memory on the CI server");
const module = comp.bin_file.options.module.?;
module.ensureDeclAnalyzed(decl_index) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.AnalysisFail => return,
};
@ -2833,7 +2849,7 @@ fn processOneJob(comp: *Compilation, job: Job) !void {
error.AnalysisFail => return,
};
},
.update_line_number => |decl| {
.update_line_number => |decl_index| {
if (build_options.omit_stage2)
@panic("sadly stage2 is omitted from this build to save memory on the CI server");
@ -2842,9 +2858,10 @@ fn processOneJob(comp: *Compilation, job: Job) !void {
const gpa = comp.gpa;
const module = comp.bin_file.options.module.?;
const decl = module.declPtr(decl_index);
comp.bin_file.updateDeclLineNumber(module, decl) catch |err| {
try module.failed_decls.ensureUnusedCapacity(gpa, 1);
module.failed_decls.putAssumeCapacityNoClobber(decl, try Module.ErrorMsg.create(
module.failed_decls.putAssumeCapacityNoClobber(decl_index, try Module.ErrorMsg.create(
gpa,
decl.srcLoc(),
"unable to update line number: {s}",
@ -3472,7 +3489,7 @@ fn reportRetryableEmbedFileError(
const mod = comp.bin_file.options.module.?;
const gpa = mod.gpa;
const src_loc: Module.SrcLoc = embed_file.owner_decl.srcLoc();
const src_loc: Module.SrcLoc = mod.declPtr(embed_file.owner_decl).srcLoc();
const err_msg = if (embed_file.pkg.root_src_directory.path) |dir_path|
try Module.ErrorMsg.create(

(File diff suppressed because it is too large.)


@ -1,12 +1,14 @@
const std = @import("std");
const Order = std.math.Order;
const RangeSet = @This();
const Module = @import("Module.zig");
const SwitchProngSrc = @import("Module.zig").SwitchProngSrc;
const Type = @import("type.zig").Type;
const Value = @import("value.zig").Value;
const RangeSet = @This();
const SwitchProngSrc = @import("Module.zig").SwitchProngSrc;
ranges: std.ArrayList(Range),
target: std.Target,
module: *Module,
pub const Range = struct {
first: Value,
@ -14,10 +16,10 @@ pub const Range = struct {
src: SwitchProngSrc,
};
pub fn init(allocator: std.mem.Allocator, target: std.Target) RangeSet {
pub fn init(allocator: std.mem.Allocator, module: *Module) RangeSet {
return .{
.ranges = std.ArrayList(Range).init(allocator),
.target = target,
.module = module,
};
}
@ -32,11 +34,9 @@ pub fn add(
ty: Type,
src: SwitchProngSrc,
) !?SwitchProngSrc {
const target = self.target;
for (self.ranges.items) |range| {
if (last.compare(.gte, range.first, ty, target) and
first.compare(.lte, range.last, ty, target))
if (last.compare(.gte, range.first, ty, self.module) and
first.compare(.lte, range.last, ty, self.module))
{
return range.src; // They overlap.
}
@ -49,26 +49,24 @@ pub fn add(
return null;
}
const LessThanContext = struct { ty: Type, target: std.Target };
const LessThanContext = struct { ty: Type, module: *Module };
/// Assumes a and b do not overlap
fn lessThan(ctx: LessThanContext, a: Range, b: Range) bool {
return a.first.compare(.lt, b.first, ctx.ty, ctx.target);
return a.first.compare(.lt, b.first, ctx.ty, ctx.module);
}
pub fn spans(self: *RangeSet, first: Value, last: Value, ty: Type) !bool {
if (self.ranges.items.len == 0)
return false;
const target = self.target;
std.sort.sort(Range, self.ranges.items, LessThanContext{
.ty = ty,
.target = target,
.module = self.module,
}, lessThan);
if (!self.ranges.items[0].first.eql(first, ty, target) or
!self.ranges.items[self.ranges.items.len - 1].last.eql(last, ty, target))
if (!self.ranges.items[0].first.eql(first, ty, self.module) or
!self.ranges.items[self.ranges.items.len - 1].last.eql(last, ty, self.module))
{
return false;
}
@ -78,6 +76,8 @@ pub fn spans(self: *RangeSet, first: Value, last: Value, ty: Type) !bool {
var counter = try std.math.big.int.Managed.init(self.ranges.allocator);
defer counter.deinit();
const target = self.module.getTarget();
// look for gaps
for (self.ranges.items[1..]) |cur, i| {
// i starts counting from the second item.
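
For callers, the visible change is that RangeSet.init now takes the *Module instead of a std.Target, since Value.compare and Value.eql need more than the target. A sketch of a migrated call site (the sema.mod spelling is an assumption based on surrounding conventions):

// Before: var range_set = RangeSet.init(gpa, sema.mod.getTarget());
var range_set = RangeSet.init(gpa, sema.mod);
defer range_set.deinit();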

(File diff suppressed because it is too large.)


@ -1,6 +1,7 @@
const std = @import("std");
const Type = @import("type.zig").Type;
const Value = @import("value.zig").Value;
const Module = @import("Module.zig");
const Allocator = std.mem.Allocator;
const TypedValue = @This();
const Target = std.Target;
@ -31,13 +32,13 @@ pub fn copy(self: TypedValue, arena: Allocator) error{OutOfMemory}!TypedValue {
};
}
pub fn eql(a: TypedValue, b: TypedValue, target: std.Target) bool {
if (!a.ty.eql(b.ty, target)) return false;
return a.val.eql(b.val, a.ty, target);
pub fn eql(a: TypedValue, b: TypedValue, mod: *Module) bool {
if (!a.ty.eql(b.ty, mod)) return false;
return a.val.eql(b.val, a.ty, mod);
}
pub fn hash(tv: TypedValue, hasher: *std.hash.Wyhash, target: std.Target) void {
return tv.val.hash(tv.ty, hasher, target);
pub fn hash(tv: TypedValue, hasher: *std.hash.Wyhash, mod: *Module) void {
return tv.val.hash(tv.ty, hasher, mod);
}
pub fn enumToInt(tv: TypedValue, buffer: *Value.Payload.U64) Value {
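
Comparing or hashing a TypedValue likewise threads the *Module through; the target, where still needed, is recovered inside via mod.getTarget(). A migrated caller might look like this (variable names are illustrative):

// Before: if (a_tv.eql(b_tv, target)) { ... }
const mod = sema.mod;
if (a_tv.eql(b_tv, mod)) {
    // the types and values compare equal
}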
@ -48,7 +49,7 @@ const max_aggregate_items = 100;
const FormatContext = struct {
tv: TypedValue,
target: Target,
mod: *Module,
};
pub fn format(
@ -59,7 +60,7 @@ pub fn format(
) !void {
_ = options;
comptime std.debug.assert(fmt.len == 0);
return ctx.tv.print(writer, 3, ctx.target);
return ctx.tv.print(writer, 3, ctx.mod);
}
/// Prints the Value according to the Type, not according to the Value Tag.
@ -67,8 +68,9 @@ pub fn print(
tv: TypedValue,
writer: anytype,
level: u8,
target: std.Target,
mod: *Module,
) @TypeOf(writer).Error!void {
const target = mod.getTarget();
var val = tv.val;
var ty = tv.ty;
while (true) switch (val.tag()) {
@ -156,7 +158,7 @@ pub fn print(
try print(.{
.ty = fields[i].ty,
.val = vals[i],
}, writer, level - 1, target);
}, writer, level - 1, mod);
}
return writer.writeAll(" }");
} else {
@ -170,7 +172,7 @@ pub fn print(
try print(.{
.ty = elem_ty,
.val = vals[i],
}, writer, level - 1, target);
}, writer, level - 1, mod);
}
return writer.writeAll(" }");
}
@ -185,12 +187,12 @@ pub fn print(
try print(.{
.ty = ty.unionTagType().?,
.val = union_val.tag,
}, writer, level - 1, target);
}, writer, level - 1, mod);
try writer.writeAll(" = ");
try print(.{
.ty = ty.unionFieldType(union_val.tag, target),
.ty = ty.unionFieldType(union_val.tag, mod),
.val = union_val.val,
}, writer, level - 1, target);
}, writer, level - 1, mod);
return writer.writeAll(" }");
},
@ -205,7 +207,7 @@ pub fn print(
},
.bool_true => return writer.writeAll("true"),
.bool_false => return writer.writeAll("false"),
.ty => return val.castTag(.ty).?.data.print(writer, target),
.ty => return val.castTag(.ty).?.data.print(writer, mod),
.int_type => {
const int_type = val.castTag(.int_type).?.data;
return writer.print("{s}{d}", .{
@ -222,28 +224,32 @@ pub fn print(
const x = sub_ty.abiAlignment(target);
return writer.print("{d}", .{x});
},
.function => return writer.print("(function '{s}')", .{val.castTag(.function).?.data.owner_decl.name}),
.function => return writer.print("(function '{s}')", .{
mod.declPtr(val.castTag(.function).?.data.owner_decl).name,
}),
.extern_fn => return writer.writeAll("(extern function)"),
.variable => return writer.writeAll("(variable)"),
.decl_ref_mut => {
const decl = val.castTag(.decl_ref_mut).?.data.decl;
const decl_index = val.castTag(.decl_ref_mut).?.data.decl_index;
const decl = mod.declPtr(decl_index);
if (level == 0) {
return writer.print("(decl ref mut '{s}')", .{decl.name});
}
return print(.{
.ty = decl.ty,
.val = decl.val,
}, writer, level - 1, target);
}, writer, level - 1, mod);
},
.decl_ref => {
const decl = val.castTag(.decl_ref).?.data;
const decl_index = val.castTag(.decl_ref).?.data;
const decl = mod.declPtr(decl_index);
if (level == 0) {
return writer.print("(decl ref '{s}')", .{decl.name});
}
return print(.{
.ty = decl.ty,
.val = decl.val,
}, writer, level - 1, target);
}, writer, level - 1, mod);
},
.elem_ptr => {
const elem_ptr = val.castTag(.elem_ptr).?.data;
@ -251,7 +257,7 @@ pub fn print(
try print(.{
.ty = elem_ptr.elem_ty,
.val = elem_ptr.array_ptr,
}, writer, level - 1, target);
}, writer, level - 1, mod);
return writer.print("[{}]", .{elem_ptr.index});
},
.field_ptr => {
@ -260,7 +266,7 @@ pub fn print(
try print(.{
.ty = field_ptr.container_ty,
.val = field_ptr.container_ptr,
}, writer, level - 1, target);
}, writer, level - 1, mod);
if (field_ptr.container_ty.zigTypeTag() == .Struct) {
const field_name = field_ptr.container_ty.structFields().keys()[field_ptr.field_index];
@ -288,7 +294,7 @@ pub fn print(
};
while (i < max_aggregate_items) : (i += 1) {
if (i != 0) try writer.writeAll(", ");
try print(elem_tv, writer, level - 1, target);
try print(elem_tv, writer, level - 1, mod);
}
return writer.writeAll(" }");
},
@ -300,7 +306,7 @@ pub fn print(
try print(.{
.ty = ty.elemType2(),
.val = ty.sentinel().?,
}, writer, level - 1, target);
}, writer, level - 1, mod);
return writer.writeAll(" }");
},
.slice => return writer.writeAll("(slice)"),


@ -237,8 +237,10 @@ pub fn generate(
@panic("Attempted to compile for architecture that was disabled by build configuration");
}
assert(module_fn.owner_decl.has_tv);
const fn_type = module_fn.owner_decl.ty;
const mod = bin_file.options.module.?;
const fn_owner_decl = mod.declPtr(module_fn.owner_decl);
assert(fn_owner_decl.has_tv);
const fn_type = fn_owner_decl.ty;
var branch_stack = std.ArrayList(Branch).init(bin_file.allocator);
defer {
@ -819,9 +821,9 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
return @as(u32, 0);
}
const target = self.target.*;
const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) catch {
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(target)});
const mod = self.bin_file.options.module.?;
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
};
// TODO swap this for inst.ty.ptrAlign
const abi_align = elem_ty.abiAlignment(self.target.*);
@ -830,9 +832,9 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
const elem_ty = self.air.typeOfIndex(inst);
const target = self.target.*;
const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) catch {
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(target)});
const mod = self.bin_file.options.module.?;
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
};
const abi_align = elem_ty.abiAlignment(self.target.*);
if (abi_align > self.stack_align)
@ -1422,7 +1424,7 @@ fn binOp(
lhs_ty: Type,
rhs_ty: Type,
) InnerError!MCValue {
const target = self.target.*;
const mod = self.bin_file.options.module.?;
switch (tag) {
.add,
.sub,
@ -1432,7 +1434,7 @@ fn binOp(
.Float => return self.fail("TODO binary operations on floats", .{}),
.Vector => return self.fail("TODO binary operations on vectors", .{}),
.Int => {
assert(lhs_ty.eql(rhs_ty, target));
assert(lhs_ty.eql(rhs_ty, mod));
const int_info = lhs_ty.intInfo(self.target.*);
if (int_info.bits <= 64) {
// Only say yes if the operation is
@ -1483,7 +1485,7 @@ fn binOp(
switch (lhs_ty.zigTypeTag()) {
.Vector => return self.fail("TODO binary operations on vectors", .{}),
.Int => {
assert(lhs_ty.eql(rhs_ty, target));
assert(lhs_ty.eql(rhs_ty, mod));
const int_info = lhs_ty.intInfo(self.target.*);
if (int_info.bits <= 64) {
// TODO add optimisations for multiplication
@ -1534,7 +1536,7 @@ fn binOp(
switch (lhs_ty.zigTypeTag()) {
.Vector => return self.fail("TODO binary operations on vectors", .{}),
.Int => {
assert(lhs_ty.eql(rhs_ty, target));
assert(lhs_ty.eql(rhs_ty, mod));
const int_info = lhs_ty.intInfo(self.target.*);
if (int_info.bits <= 64) {
// TODO implement bitwise operations with immediates
@ -2425,12 +2427,12 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
const ty = self.air.typeOfIndex(inst);
const result = self.args[arg_index];
const target = self.target.*;
const mcv = switch (result) {
// Copy registers to the stack
.register => |reg| blk: {
const mod = self.bin_file.options.module.?;
const abi_size = math.cast(u32, ty.abiSize(self.target.*)) catch {
return self.fail("type '{}' too big to fit into stack frame", .{ty.fmt(target)});
return self.fail("type '{}' too big to fit into stack frame", .{ty.fmt(mod)});
};
const abi_align = ty.abiAlignment(self.target.*);
const stack_offset = try self.allocMem(inst, abi_size, abi_align);
@ -2537,17 +2539,19 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
// Due to incremental compilation, how function calls are generated depends
// on linking.
const mod = self.bin_file.options.module.?;
if (self.air.value(callee)) |func_value| {
if (self.bin_file.tag == link.File.Elf.base_tag or self.bin_file.tag == link.File.Coff.base_tag) {
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
const fn_owner_decl = mod.declPtr(func.owner_decl);
const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
break :blk @intCast(u32, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * ptr_bytes);
break :blk @intCast(u32, got.p_vaddr + fn_owner_decl.link.elf.offset_table_index * ptr_bytes);
} else if (self.bin_file.cast(link.File.Coff)) |coff_file|
coff_file.offset_table_virtual_address + func.owner_decl.link.coff.offset_table_index * ptr_bytes
coff_file.offset_table_virtual_address + fn_owner_decl.link.coff.offset_table_index * ptr_bytes
else
unreachable;
@ -2565,8 +2569,9 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
const fn_owner_decl = mod.declPtr(func.owner_decl);
try self.genSetReg(Type.initTag(.u64), .x30, .{
.got_load = func.owner_decl.link.macho.local_sym_index,
.got_load = fn_owner_decl.link.macho.local_sym_index,
});
// blr x30
_ = try self.addInst(.{
@ -2575,7 +2580,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
});
} else if (func_value.castTag(.extern_fn)) |func_payload| {
const extern_fn = func_payload.data;
const decl_name = extern_fn.owner_decl.name;
const decl_name = mod.declPtr(extern_fn.owner_decl).name;
if (extern_fn.lib_name) |lib_name| {
log.debug("TODO enforce that '{s}' is expected in '{s}' library", .{
decl_name,
@ -2588,7 +2593,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
.tag = .call_extern,
.data = .{
.extern_fn = .{
.atom_index = self.mod_fn.owner_decl.link.macho.local_sym_index,
.atom_index = mod.declPtr(self.mod_fn.owner_decl).link.macho.local_sym_index,
.sym_name = n_strx,
},
},
@ -2602,7 +2607,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
const got_addr = p9.bases.data;
const got_index = func_payload.data.owner_decl.link.plan9.got_index.?;
const got_index = mod.declPtr(func_payload.data.owner_decl).link.plan9.got_index.?;
const fn_got_addr = got_addr + got_index * ptr_bytes;
try self.genSetReg(Type.initTag(.usize), .x30, .{ .memory = fn_got_addr });
@ -3478,12 +3483,13 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
.direct_load => .load_memory_ptr_direct,
else => unreachable,
};
const mod = self.bin_file.options.module.?;
_ = try self.addInst(.{
.tag = tag,
.data = .{
.payload = try self.addExtra(Mir.LoadMemoryPie{
.register = @enumToInt(src_reg),
.atom_index = self.mod_fn.owner_decl.link.macho.local_sym_index,
.atom_index = mod.declPtr(self.mod_fn.owner_decl).link.macho.local_sym_index,
.sym_index = sym_index,
}),
},
@ -3597,12 +3603,13 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
.direct_load => .load_memory_direct,
else => unreachable,
};
const mod = self.bin_file.options.module.?;
_ = try self.addInst(.{
.tag = tag,
.data = .{
.payload = try self.addExtra(Mir.LoadMemoryPie{
.register = @enumToInt(reg),
.atom_index = self.mod_fn.owner_decl.link.macho.local_sym_index,
.atom_index = mod.declPtr(self.mod_fn.owner_decl).link.macho.local_sym_index,
.sym_index = sym_index,
}),
},
@ -3860,7 +3867,7 @@ fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue {
}
}
fn lowerDeclRef(self: *Self, tv: TypedValue, decl: *Module.Decl) InnerError!MCValue {
fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) InnerError!MCValue {
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
@ -3872,7 +3879,10 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl: *Module.Decl) InnerError!MCVa
}
}
decl.alive = true;
const mod = self.bin_file.options.module.?;
const decl = mod.declPtr(decl_index);
mod.markDeclAlive(decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes;
@ -3886,7 +3896,7 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl: *Module.Decl) InnerError!MCVa
const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
try p9.seeDecl(decl);
try p9.seeDecl(decl_index);
const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;
return MCValue{ .memory = got_addr };
} else {
@ -3922,7 +3932,7 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
return self.lowerDeclRef(typed_value, payload.data);
}
if (typed_value.val.castTag(.decl_ref_mut)) |payload| {
return self.lowerDeclRef(typed_value, payload.data.decl);
return self.lowerDeclRef(typed_value, payload.data.decl_index);
}
const target = self.target.*;
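
A substitution that recurs across all the backends: the direct decl.alive = true; write becomes mod.markDeclAlive(decl). A hypothetical sketch of such a helper, assuming it merely centralizes the flag (the real method may additionally mark Decls referenced by decl.val so the linker keeps them):

pub fn markDeclAlive(mod: *Module, decl: *Decl) void {
    _ = mod; // reserved for marking referenced Decls, per the assumption above
    if (decl.alive) return;
    decl.alive = true;
}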


@ -271,8 +271,10 @@ pub fn generate(
@panic("Attempted to compile for architecture that was disabled by build configuration");
}
assert(module_fn.owner_decl.has_tv);
const fn_type = module_fn.owner_decl.ty;
const mod = bin_file.options.module.?;
const fn_owner_decl = mod.declPtr(module_fn.owner_decl);
assert(fn_owner_decl.has_tv);
const fn_type = fn_owner_decl.ty;
var branch_stack = std.ArrayList(Branch).init(bin_file.allocator);
defer {
@ -838,9 +840,9 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
return @as(u32, 0);
}
const target = self.target.*;
const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) catch {
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(target)});
const mod = self.bin_file.options.module.?;
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
};
// TODO swap this for inst.ty.ptrAlign
const abi_align = elem_ty.abiAlignment(self.target.*);
@ -849,9 +851,9 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
const elem_ty = self.air.typeOfIndex(inst);
const target = self.target.*;
const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) catch {
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(target)});
const mod = self.bin_file.options.module.?;
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
};
const abi_align = elem_ty.abiAlignment(self.target.*);
if (abi_align > self.stack_align)
@ -1204,7 +1206,8 @@ fn minMax(
.Float => return self.fail("TODO ARM min/max on floats", .{}),
.Vector => return self.fail("TODO ARM min/max on vectors", .{}),
.Int => {
assert(lhs_ty.eql(rhs_ty, self.target.*));
const mod = self.bin_file.options.module.?;
assert(lhs_ty.eql(rhs_ty, mod));
const int_info = lhs_ty.intInfo(self.target.*);
if (int_info.bits <= 32) {
const lhs_is_register = lhs == .register;
@ -1372,7 +1375,8 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
switch (lhs_ty.zigTypeTag()) {
.Vector => return self.fail("TODO implement add_with_overflow/sub_with_overflow for vectors", .{}),
.Int => {
assert(lhs_ty.eql(rhs_ty, self.target.*));
const mod = self.bin_file.options.module.?;
assert(lhs_ty.eql(rhs_ty, mod));
const int_info = lhs_ty.intInfo(self.target.*);
if (int_info.bits < 32) {
const stack_offset = try self.allocMem(inst, tuple_size, tuple_align);
@ -1472,7 +1476,8 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
switch (lhs_ty.zigTypeTag()) {
.Vector => return self.fail("TODO implement mul_with_overflow for vectors", .{}),
.Int => {
assert(lhs_ty.eql(rhs_ty, self.target.*));
const mod = self.bin_file.options.module.?;
assert(lhs_ty.eql(rhs_ty, mod));
const int_info = lhs_ty.intInfo(self.target.*);
if (int_info.bits <= 16) {
const stack_offset = try self.allocMem(inst, tuple_size, tuple_align);
@ -2682,7 +2687,6 @@ fn binOp(
lhs_ty: Type,
rhs_ty: Type,
) InnerError!MCValue {
const target = self.target.*;
switch (tag) {
.add,
.sub,
@ -2692,7 +2696,8 @@ fn binOp(
.Float => return self.fail("TODO ARM binary operations on floats", .{}),
.Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
.Int => {
assert(lhs_ty.eql(rhs_ty, target));
const mod = self.bin_file.options.module.?;
assert(lhs_ty.eql(rhs_ty, mod));
const int_info = lhs_ty.intInfo(self.target.*);
if (int_info.bits <= 32) {
// Only say yes if the operation is
@ -2740,7 +2745,8 @@ fn binOp(
.Float => return self.fail("TODO ARM binary operations on floats", .{}),
.Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
.Int => {
assert(lhs_ty.eql(rhs_ty, target));
const mod = self.bin_file.options.module.?;
assert(lhs_ty.eql(rhs_ty, mod));
const int_info = lhs_ty.intInfo(self.target.*);
if (int_info.bits <= 32) {
// TODO add optimisations for multiplication
@ -2794,7 +2800,8 @@ fn binOp(
switch (lhs_ty.zigTypeTag()) {
.Vector => return self.fail("TODO ARM binary operations on vectors", .{}),
.Int => {
assert(lhs_ty.eql(rhs_ty, target));
const mod = self.bin_file.options.module.?;
assert(lhs_ty.eql(rhs_ty, mod));
const int_info = lhs_ty.intInfo(self.target.*);
if (int_info.bits <= 32) {
const lhs_immediate_ok = lhs == .immediate and Instruction.Operand.fromU32(lhs.immediate) != null;
@ -3100,8 +3107,9 @@ fn addDbgInfoTypeReloc(self: *Self, ty: Type) error{OutOfMemory}!void {
const dbg_info = &dw.dbg_info;
const index = dbg_info.items.len;
try dbg_info.resize(index + 4); // DW.AT.type, DW.FORM.ref4
const mod = self.bin_file.options.module.?;
const atom = switch (self.bin_file.tag) {
.elf => &self.mod_fn.owner_decl.link.elf.dbg_info_atom,
.elf => &mod.declPtr(self.mod_fn.owner_decl).link.elf.dbg_info_atom,
.macho => unreachable,
else => unreachable,
};
@ -3318,11 +3326,13 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
const func = func_payload.data;
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
const mod = self.bin_file.options.module.?;
const fn_owner_decl = mod.declPtr(func.owner_decl);
const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
break :blk @intCast(u32, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * ptr_bytes);
break :blk @intCast(u32, got.p_vaddr + fn_owner_decl.link.elf.offset_table_index * ptr_bytes);
} else if (self.bin_file.cast(link.File.Coff)) |coff_file|
coff_file.offset_table_virtual_address + func.owner_decl.link.coff.offset_table_index * ptr_bytes
coff_file.offset_table_virtual_address + fn_owner_decl.link.coff.offset_table_index * ptr_bytes
else
unreachable;
@ -4924,11 +4934,14 @@ fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue {
}
}
fn lowerDeclRef(self: *Self, tv: TypedValue, decl: *Module.Decl) InnerError!MCValue {
fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) InnerError!MCValue {
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
decl.alive = true;
const mod = self.bin_file.options.module.?;
const decl = mod.declPtr(decl_index);
mod.markDeclAlive(decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes;
@ -4939,7 +4952,7 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl: *Module.Decl) InnerError!MCVa
const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
try p9.seeDecl(decl);
try p9.seeDecl(decl_index);
const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;
return MCValue{ .memory = got_addr };
} else {
@ -4976,7 +4989,7 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
return self.lowerDeclRef(typed_value, payload.data);
}
if (typed_value.val.castTag(.decl_ref_mut)) |payload| {
return self.lowerDeclRef(typed_value, payload.data.decl);
return self.lowerDeclRef(typed_value, payload.data.decl_index);
}
const target = self.target.*;


@ -229,8 +229,10 @@ pub fn generate(
@panic("Attempted to compile for architecture that was disabled by build configuration");
}
assert(module_fn.owner_decl.has_tv);
const fn_type = module_fn.owner_decl.ty;
const mod = bin_file.options.module.?;
const fn_owner_decl = mod.declPtr(module_fn.owner_decl);
assert(fn_owner_decl.has_tv);
const fn_type = fn_owner_decl.ty;
var branch_stack = std.ArrayList(Branch).init(bin_file.allocator);
defer {
@ -738,8 +740,9 @@ fn addDbgInfoTypeReloc(self: *Self, ty: Type) !void {
const dbg_info = &dw.dbg_info;
const index = dbg_info.items.len;
try dbg_info.resize(index + 4); // DW.AT.type, DW.FORM.ref4
const mod = self.bin_file.options.module.?;
const atom = switch (self.bin_file.tag) {
.elf => &self.mod_fn.owner_decl.link.elf.dbg_info_atom,
.elf => &mod.declPtr(self.mod_fn.owner_decl).link.elf.dbg_info_atom,
.macho => unreachable,
else => unreachable,
};
@ -768,9 +771,9 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u
/// Use a pointer instruction as the basis for allocating stack memory.
fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
const elem_ty = self.air.typeOfIndex(inst).elemType();
const target = self.target.*;
const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) catch {
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(target)});
const mod = self.bin_file.options.module.?;
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
};
// TODO swap this for inst.ty.ptrAlign
const abi_align = elem_ty.abiAlignment(self.target.*);
@ -779,9 +782,9 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
const elem_ty = self.air.typeOfIndex(inst);
const target = self.target.*;
const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) catch {
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(target)});
const mod = self.bin_file.options.module.?;
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
};
const abi_align = elem_ty.abiAlignment(self.target.*);
if (abi_align > self.stack_align)
@ -1037,7 +1040,8 @@ fn binOp(
.Float => return self.fail("TODO binary operations on floats", .{}),
.Vector => return self.fail("TODO binary operations on vectors", .{}),
.Int => {
assert(lhs_ty.eql(rhs_ty, self.target.*));
const mod = self.bin_file.options.module.?;
assert(lhs_ty.eql(rhs_ty, mod));
const int_info = lhs_ty.intInfo(self.target.*);
if (int_info.bits <= 64) {
// TODO immediate operands
@ -1679,11 +1683,13 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
const mod = self.bin_file.options.module.?;
const fn_owner_decl = mod.declPtr(func.owner_decl);
const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
break :blk @intCast(u32, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * ptr_bytes);
break :blk @intCast(u32, got.p_vaddr + fn_owner_decl.link.elf.offset_table_index * ptr_bytes);
} else if (self.bin_file.cast(link.File.Coff)) |coff_file|
coff_file.offset_table_virtual_address + func.owner_decl.link.coff.offset_table_index * ptr_bytes
coff_file.offset_table_virtual_address + fn_owner_decl.link.coff.offset_table_index * ptr_bytes
else
unreachable;
@ -1768,7 +1774,8 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
if (self.liveness.isUnused(inst))
return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
const ty = self.air.typeOf(bin_op.lhs);
assert(ty.eql(self.air.typeOf(bin_op.rhs), self.target.*));
const mod = self.bin_file.options.module.?;
assert(ty.eql(self.air.typeOf(bin_op.rhs), mod));
if (ty.zigTypeTag() == .ErrorSet)
return self.fail("TODO implement cmp for errors", .{});
@ -2501,10 +2508,12 @@ fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue {
}
}
fn lowerDeclRef(self: *Self, tv: TypedValue, decl: *Module.Decl) InnerError!MCValue {
fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) InnerError!MCValue {
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
decl.alive = true;
const mod = self.bin_file.options.module.?;
const decl = mod.declPtr(decl_index);
mod.markDeclAlive(decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes;
@ -2517,7 +2526,7 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl: *Module.Decl) InnerError!MCVa
const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
try p9.seeDecl(decl);
try p9.seeDecl(decl_index);
const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;
return MCValue{ .memory = got_addr };
} else {
@ -2534,7 +2543,7 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
return self.lowerDeclRef(typed_value, payload.data);
}
if (typed_value.val.castTag(.decl_ref_mut)) |payload| {
return self.lowerDeclRef(typed_value, payload.data.decl);
return self.lowerDeclRef(typed_value, payload.data.decl_index);
}
const target = self.target.*;
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
@ -2544,7 +2553,8 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
var buf: Type.SlicePtrFieldTypeBuffer = undefined;
const ptr_type = typed_value.ty.slicePtrFieldType(&buf);
const ptr_mcv = try self.genTypedValue(.{ .ty = ptr_type, .val = typed_value.val });
const slice_len = typed_value.val.sliceLen(target);
const mod = self.bin_file.options.module.?;
const slice_len = typed_value.val.sliceLen(mod);
// Codegen can't handle some kinds of indirection. If the wrong union field is accessed here it may mean
// the Sema code needs to use anonymous Decls or alloca instructions to store data.
const ptr_imm = ptr_mcv.memory;


@ -243,8 +243,10 @@ pub fn generate(
@panic("Attempted to compile for architecture that was disabled by build configuration");
}
assert(module_fn.owner_decl.has_tv);
const fn_type = module_fn.owner_decl.ty;
const mod = bin_file.options.module.?;
const fn_owner_decl = mod.declPtr(module_fn.owner_decl);
assert(fn_owner_decl.has_tv);
const fn_type = fn_owner_decl.ty;
var branch_stack = std.ArrayList(Branch).init(bin_file.allocator);
defer {
@ -871,7 +873,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
break :blk @intCast(u32, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * ptr_bytes);
const mod = self.bin_file.options.module.?;
break :blk @intCast(u32, got.p_vaddr + mod.declPtr(func.owner_decl).link.elf.offset_table_index * ptr_bytes);
} else unreachable;
try self.genSetReg(Type.initTag(.usize), .o7, .{ .memory = got_addr });
@ -1026,9 +1029,9 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
return @as(u32, 0);
}
const target = self.target.*;
const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) catch {
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(target)});
const mod = self.bin_file.options.module.?;
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
};
// TODO swap this for inst.ty.ptrAlign
const abi_align = elem_ty.abiAlignment(self.target.*);
@ -1037,9 +1040,9 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
const elem_ty = self.air.typeOfIndex(inst);
const target = self.target.*;
const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) catch {
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(target)});
const mod = self.bin_file.options.module.?;
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
};
const abi_align = elem_ty.abiAlignment(self.target.*);
if (abi_align > self.stack_align)
@ -1372,7 +1375,7 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
return self.lowerDeclRef(typed_value, payload.data);
}
if (typed_value.val.castTag(.decl_ref_mut)) |payload| {
return self.lowerDeclRef(typed_value, payload.data.decl);
return self.lowerDeclRef(typed_value, payload.data.decl_index);
}
const target = self.target.*;
@ -1422,7 +1425,7 @@ fn iterateBigTomb(self: *Self, inst: Air.Inst.Index, operand_count: usize) !BigT
};
}
fn lowerDeclRef(self: *Self, tv: TypedValue, decl: *Module.Decl) InnerError!MCValue {
fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) InnerError!MCValue {
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
@ -1434,7 +1437,10 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl: *Module.Decl) InnerError!MCVa
}
}
decl.alive = true;
const mod = self.bin_file.options.module.?;
const decl = mod.declPtr(decl_index);
mod.markDeclAlive(decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes;


@ -538,6 +538,10 @@ const Self = @This();
/// Reference to the function declaration the code
/// section belongs to
decl: *Decl,
decl_index: Decl.Index,
/// Current block depth. Used to calculate the relative difference between a break
/// and block
block_depth: u32 = 0,
air: Air,
liveness: Liveness,
gpa: mem.Allocator,
@ -559,9 +563,6 @@ local_index: u32 = 0,
arg_index: u32 = 0,
/// If codegen fails, an error message will be allocated and saved in `err_msg`
err_msg: *Module.ErrorMsg,
/// Current block depth. Used to calculate the relative difference between a break
/// and block
block_depth: u32 = 0,
/// List of all locals' types generated throughout this declaration
/// used to emit locals count at start of 'code' section.
locals: std.ArrayListUnmanaged(u8),
@ -644,7 +645,7 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!WValue {
// In the other cases, we will simply lower the constant to a value that fits
// into a single local (such as a pointer, integer, bool, etc).
const result = if (isByRef(ty, self.target)) blk: {
const sym_index = try self.bin_file.lowerUnnamedConst(self.decl, .{ .ty = ty, .val = val });
const sym_index = try self.bin_file.lowerUnnamedConst(.{ .ty = ty, .val = val }, self.decl_index);
break :blk WValue{ .memory = sym_index };
} else try self.lowerConstant(val, ty);
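
Note the argument-order flip in lowerUnnamedConst in the hunk above: it now takes the TypedValue first and the owning Decl.Index second, using the decl_index field added to this CodeGen struct. A sketch of a migrated call, mirroring the resolveInst change just shown:

// Before: try self.bin_file.lowerUnnamedConst(self.decl, .{ .ty = ty, .val = val });
const sym_index = try self.bin_file.lowerUnnamedConst(.{ .ty = ty, .val = val }, self.decl_index);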
@ -838,7 +839,8 @@ pub fn generate(
.liveness = liveness,
.values = .{},
.code = code,
.decl = func.owner_decl,
.decl_index = func.owner_decl,
.decl = bin_file.options.module.?.declPtr(func.owner_decl),
.err_msg = undefined,
.locals = .{},
.target = bin_file.options.target,
@ -1022,8 +1024,9 @@ fn allocStack(self: *Self, ty: Type) !WValue {
}
const abi_size = std.math.cast(u32, ty.abiSize(self.target)) catch {
const module = self.bin_file.base.options.module.?;
return self.fail("Type {} with ABI size of {d} exceeds stack frame size", .{
ty.fmt(self.target), ty.abiSize(self.target),
ty.fmt(module), ty.abiSize(self.target),
});
};
const abi_align = ty.abiAlignment(self.target);
@ -1056,8 +1059,9 @@ fn allocStackPtr(self: *Self, inst: Air.Inst.Index) !WValue {
const abi_alignment = ptr_ty.ptrAlignment(self.target);
const abi_size = std.math.cast(u32, pointee_ty.abiSize(self.target)) catch {
const module = self.bin_file.base.options.module.?;
return self.fail("Type {} with ABI size of {d} exceeds stack frame size", .{
pointee_ty.fmt(self.target), pointee_ty.abiSize(self.target),
pointee_ty.fmt(module), pointee_ty.abiSize(self.target),
});
};
if (abi_alignment > self.stack_alignment) {
@ -1542,20 +1546,21 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
const ret_ty = fn_ty.fnReturnType();
const first_param_sret = isByRef(ret_ty, self.target);
const target: ?*Decl = blk: {
const callee: ?*Decl = blk: {
const func_val = self.air.value(pl_op.operand) orelse break :blk null;
const module = self.bin_file.base.options.module.?;
if (func_val.castTag(.function)) |func| {
break :blk func.data.owner_decl;
break :blk module.declPtr(func.data.owner_decl);
} else if (func_val.castTag(.extern_fn)) |extern_fn| {
const ext_decl = extern_fn.data.owner_decl;
const ext_decl = module.declPtr(extern_fn.data.owner_decl);
var func_type = try genFunctype(self.gpa, ext_decl.ty, self.target);
defer func_type.deinit(self.gpa);
ext_decl.fn_link.wasm.type_index = try self.bin_file.putOrGetFuncType(func_type);
try self.bin_file.addOrUpdateImport(ext_decl);
break :blk ext_decl;
} else if (func_val.castTag(.decl_ref)) |decl_ref| {
break :blk decl_ref.data;
break :blk module.declPtr(decl_ref.data);
}
return self.fail("Expected a function, but instead found type '{s}'", .{func_val.tag()});
};
@ -1580,7 +1585,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
}
}
if (target) |direct| {
if (callee) |direct| {
try self.addLabel(.call, direct.link.wasm.sym_index);
} else {
// in this case we call a function pointer
@ -1837,16 +1842,16 @@ fn wrapBinOp(self: *Self, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError
fn lowerParentPtr(self: *Self, ptr_val: Value, ptr_child_ty: Type) InnerError!WValue {
switch (ptr_val.tag()) {
.decl_ref_mut => {
const decl = ptr_val.castTag(.decl_ref_mut).?.data.decl;
return self.lowerParentPtrDecl(ptr_val, decl);
const decl_index = ptr_val.castTag(.decl_ref_mut).?.data.decl_index;
return self.lowerParentPtrDecl(ptr_val, decl_index);
},
.decl_ref => {
const decl = ptr_val.castTag(.decl_ref).?.data;
return self.lowerParentPtrDecl(ptr_val, decl);
const decl_index = ptr_val.castTag(.decl_ref).?.data;
return self.lowerParentPtrDecl(ptr_val, decl_index);
},
.variable => {
const decl = ptr_val.castTag(.variable).?.data.owner_decl;
return self.lowerParentPtrDecl(ptr_val, decl);
const decl_index = ptr_val.castTag(.variable).?.data.owner_decl;
return self.lowerParentPtrDecl(ptr_val, decl_index);
},
.field_ptr => {
const field_ptr = ptr_val.castTag(.field_ptr).?.data;
@ -1918,24 +1923,31 @@ fn lowerParentPtr(self: *Self, ptr_val: Value, ptr_child_ty: Type) InnerError!WV
}
}
fn lowerParentPtrDecl(self: *Self, ptr_val: Value, decl: *Module.Decl) InnerError!WValue {
decl.markAlive();
fn lowerParentPtrDecl(self: *Self, ptr_val: Value, decl_index: Module.Decl.Index) InnerError!WValue {
const module = self.bin_file.base.options.module.?;
const decl = module.declPtr(decl_index);
module.markDeclAlive(decl);
var ptr_ty_payload: Type.Payload.ElemType = .{
.base = .{ .tag = .single_mut_pointer },
.data = decl.ty,
};
const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
return self.lowerDeclRefValue(.{ .ty = ptr_ty, .val = ptr_val }, decl);
return self.lowerDeclRefValue(.{ .ty = ptr_ty, .val = ptr_val }, decl_index);
}
fn lowerDeclRefValue(self: *Self, tv: TypedValue, decl: *Module.Decl) InnerError!WValue {
fn lowerDeclRefValue(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) InnerError!WValue {
if (tv.ty.isSlice()) {
return WValue{ .memory = try self.bin_file.lowerUnnamedConst(decl, tv) };
} else if (decl.ty.zigTypeTag() != .Fn and !decl.ty.hasRuntimeBitsIgnoreComptime()) {
return WValue{ .memory = try self.bin_file.lowerUnnamedConst(tv, decl_index) };
}
const module = self.bin_file.base.options.module.?;
const decl = module.declPtr(decl_index);
if (decl.ty.zigTypeTag() != .Fn and !decl.ty.hasRuntimeBitsIgnoreComptime()) {
return WValue{ .imm32 = 0xaaaaaaaa };
}
decl.markAlive();
module.markDeclAlive(decl);
const target_sym_index = decl.link.wasm.sym_index;
if (decl.ty.zigTypeTag() == .Fn) {
try self.bin_file.addTableFunction(target_sym_index);
@ -1946,12 +1958,12 @@ fn lowerDeclRefValue(self: *Self, tv: TypedValue, decl: *Module.Decl) InnerError
fn lowerConstant(self: *Self, val: Value, ty: Type) InnerError!WValue {
if (val.isUndefDeep()) return self.emitUndefined(ty);
if (val.castTag(.decl_ref)) |decl_ref| {
const decl = decl_ref.data;
return self.lowerDeclRefValue(.{ .ty = ty, .val = val }, decl);
const decl_index = decl_ref.data;
return self.lowerDeclRefValue(.{ .ty = ty, .val = val }, decl_index);
}
if (val.castTag(.decl_ref_mut)) |decl_ref| {
const decl = decl_ref.data.decl;
return self.lowerDeclRefValue(.{ .ty = ty, .val = val }, decl);
if (val.castTag(.decl_ref_mut)) |decl_ref_mut| {
const decl_index = decl_ref_mut.data.decl_index;
return self.lowerDeclRefValue(.{ .ty = ty, .val = val }, decl_index);
}
const target = self.target;
@ -2347,8 +2359,9 @@ fn airStructFieldPtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const struct_ptr = try self.resolveInst(extra.data.struct_operand);
const struct_ty = self.air.typeOf(extra.data.struct_operand).childType();
const offset = std.math.cast(u32, struct_ty.structFieldOffset(extra.data.field_index, self.target)) catch {
const module = self.bin_file.base.options.module.?;
return self.fail("Field type '{}' too big to fit into stack frame", .{
struct_ty.structFieldType(extra.data.field_index).fmt(self.target),
struct_ty.structFieldType(extra.data.field_index).fmt(module),
});
};
return self.structFieldPtr(struct_ptr, offset);
@ -2360,8 +2373,9 @@ fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u32) InnerEr
const struct_ty = self.air.typeOf(ty_op.operand).childType();
const field_ty = struct_ty.structFieldType(index);
const offset = std.math.cast(u32, struct_ty.structFieldOffset(index, self.target)) catch {
const module = self.bin_file.base.options.module.?;
return self.fail("Field type '{}' too big to fit into stack frame", .{
field_ty.fmt(self.target),
field_ty.fmt(module),
});
};
return self.structFieldPtr(struct_ptr, offset);
@ -2387,7 +2401,8 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const field_ty = struct_ty.structFieldType(field_index);
if (!field_ty.hasRuntimeBitsIgnoreComptime()) return WValue{ .none = {} };
const offset = std.math.cast(u32, struct_ty.structFieldOffset(field_index, self.target)) catch {
return self.fail("Field type '{}' too big to fit into stack frame", .{field_ty.fmt(self.target)});
const module = self.bin_file.base.options.module.?;
return self.fail("Field type '{}' too big to fit into stack frame", .{field_ty.fmt(module)});
};
if (isByRef(field_ty, self.target)) {
@ -2782,7 +2797,8 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) InnerError!WValue
}
const offset = std.math.cast(u32, opt_ty.abiSize(self.target) - payload_ty.abiSize(self.target)) catch {
return self.fail("Optional type {} too big to fit into stack frame", .{opt_ty.fmt(self.target)});
const module = self.bin_file.base.options.module.?;
return self.fail("Optional type {} too big to fit into stack frame", .{opt_ty.fmt(module)});
};
try self.emitWValue(operand);
@ -2811,7 +2827,8 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
return operand;
}
const offset = std.math.cast(u32, op_ty.abiSize(self.target) - payload_ty.abiSize(self.target)) catch {
return self.fail("Optional type {} too big to fit into stack frame", .{op_ty.fmt(self.target)});
const module = self.bin_file.base.options.module.?;
return self.fail("Optional type {} too big to fit into stack frame", .{op_ty.fmt(module)});
};
// Create optional type, set the non-null bit, and store the operand inside the optional type


@ -309,8 +309,10 @@ pub fn generate(
@panic("Attempted to compile for architecture that was disabled by build configuration");
}
assert(module_fn.owner_decl.has_tv);
const fn_type = module_fn.owner_decl.ty;
const mod = bin_file.options.module.?;
const fn_owner_decl = mod.declPtr(module_fn.owner_decl);
assert(fn_owner_decl.has_tv);
const fn_type = fn_owner_decl.ty;
var branch_stack = std.ArrayList(Branch).init(bin_file.allocator);
defer {
@ -396,14 +398,14 @@ pub fn generate(
if (builtin.mode == .Debug and bin_file.options.module.?.comp.verbose_mir) {
const w = std.io.getStdErr().writer();
w.print("# Begin Function MIR: {s}:\n", .{module_fn.owner_decl.name}) catch {};
w.print("# Begin Function MIR: {s}:\n", .{fn_owner_decl.name}) catch {};
const PrintMir = @import("PrintMir.zig");
const print = PrintMir{
.mir = mir,
.bin_file = bin_file,
};
print.printMir(w, function.mir_to_air_map, air) catch {}; // we don't care if the debug printing fails
w.print("# End Function MIR: {s}\n\n", .{module_fn.owner_decl.name}) catch {};
w.print("# End Function MIR: {s}\n\n", .{fn_owner_decl.name}) catch {};
}
if (function.err_msg) |em| {
@ -915,9 +917,9 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
return self.allocMem(inst, @sizeOf(usize), @alignOf(usize));
}
const target = self.target.*;
const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) catch {
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(target)});
const mod = self.bin_file.options.module.?;
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
};
// TODO swap this for inst.ty.ptrAlign
const abi_align = ptr_ty.ptrAlignment(self.target.*);
@ -926,9 +928,9 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
const elem_ty = self.air.typeOfIndex(inst);
const target = self.target.*;
const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) catch {
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(target)});
const mod = self.bin_file.options.module.?;
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
};
const abi_align = elem_ty.abiAlignment(self.target.*);
if (abi_align > self.stack_align)
@ -2650,6 +2652,8 @@ fn loadMemPtrIntoRegister(self: *Self, reg: Register, ptr_ty: Type, ptr: MCValue
.direct_load => 0b01,
else => unreachable,
};
const mod = self.bin_file.options.module.?;
const fn_owner_decl = mod.declPtr(self.mod_fn.owner_decl);
_ = try self.addInst(.{
.tag = .lea_pie,
.ops = (Mir.Ops{
@ -2658,7 +2662,7 @@ fn loadMemPtrIntoRegister(self: *Self, reg: Register, ptr_ty: Type, ptr: MCValue
}).encode(),
.data = .{
.load_reloc = .{
.atom_index = self.mod_fn.owner_decl.link.macho.local_sym_index,
.atom_index = fn_owner_decl.link.macho.local_sym_index,
.sym_index = sym_index,
},
},
@ -3583,17 +3587,19 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
// Due to incremental compilation, how function calls are generated depends
// on linking.
const mod = self.bin_file.options.module.?;
if (self.bin_file.tag == link.File.Elf.base_tag or self.bin_file.tag == link.File.Coff.base_tag) {
if (self.air.value(callee)) |func_value| {
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
const fn_owner_decl = mod.declPtr(func.owner_decl);
const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
break :blk @intCast(u32, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * ptr_bytes);
break :blk @intCast(u32, got.p_vaddr + fn_owner_decl.link.elf.offset_table_index * ptr_bytes);
} else if (self.bin_file.cast(link.File.Coff)) |coff_file|
@intCast(u32, coff_file.offset_table_virtual_address + func.owner_decl.link.coff.offset_table_index * ptr_bytes)
@intCast(u32, coff_file.offset_table_virtual_address + fn_owner_decl.link.coff.offset_table_index * ptr_bytes)
else
unreachable;
_ = try self.addInst(.{
@ -3625,8 +3631,9 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
if (self.air.value(callee)) |func_value| {
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
const fn_owner_decl = mod.declPtr(func.owner_decl);
try self.genSetReg(Type.initTag(.usize), .rax, .{
.got_load = func.owner_decl.link.macho.local_sym_index,
.got_load = fn_owner_decl.link.macho.local_sym_index,
});
// callq *%rax
_ = try self.addInst(.{
@ -3639,7 +3646,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
});
} else if (func_value.castTag(.extern_fn)) |func_payload| {
const extern_fn = func_payload.data;
const decl_name = extern_fn.owner_decl.name;
const decl_name = mod.declPtr(extern_fn.owner_decl).name;
if (extern_fn.lib_name) |lib_name| {
log.debug("TODO enforce that '{s}' is expected in '{s}' library", .{
decl_name,
@ -3652,7 +3659,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
.ops = undefined,
.data = .{
.extern_fn = .{
.atom_index = self.mod_fn.owner_decl.link.macho.local_sym_index,
.atom_index = mod.declPtr(self.mod_fn.owner_decl).link.macho.local_sym_index,
.sym_name = n_strx,
},
},
@ -3680,7 +3687,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
const got_addr = p9.bases.data;
const got_index = func_payload.data.owner_decl.link.plan9.got_index.?;
const got_index = mod.declPtr(func_payload.data.owner_decl).link.plan9.got_index.?;
const fn_got_addr = got_addr + got_index * ptr_bytes;
_ = try self.addInst(.{
.tag = .call,
@ -4012,9 +4019,11 @@ fn addDbgInfoTypeReloc(self: *Self, ty: Type) !void {
const dbg_info = &dw.dbg_info;
const index = dbg_info.items.len;
try dbg_info.resize(index + 4); // DW.AT.type, DW.FORM.ref4
const mod = self.bin_file.options.module.?;
const fn_owner_decl = mod.declPtr(self.mod_fn.owner_decl);
const atom = switch (self.bin_file.tag) {
.elf => &self.mod_fn.owner_decl.link.elf.dbg_info_atom,
.macho => &self.mod_fn.owner_decl.link.macho.dbg_info_atom,
.elf => &fn_owner_decl.link.elf.dbg_info_atom,
.macho => &fn_owner_decl.link.macho.dbg_info_atom,
else => unreachable,
};
try dw.addTypeReloc(atom, ty, @intCast(u32, index), null);
@ -6124,7 +6133,7 @@ fn limitImmediateType(self: *Self, operand: Air.Inst.Ref, comptime T: type) !MCV
return mcv;
}
fn lowerDeclRef(self: *Self, tv: TypedValue, decl: *Module.Decl) InnerError!MCValue {
fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) InnerError!MCValue {
log.debug("lowerDeclRef: ty = {}, val = {}", .{ tv.ty.fmtDebug(), tv.val.fmtDebug() });
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
@ -6137,7 +6146,9 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl: *Module.Decl) InnerError!MCVa
}
}
decl.markAlive();
const module = self.bin_file.options.module.?;
const decl = module.declPtr(decl_index);
module.markDeclAlive(decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
@ -6152,7 +6163,7 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl: *Module.Decl) InnerError!MCVa
const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
try p9.seeDecl(decl);
try p9.seeDecl(decl_index);
const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;
return MCValue{ .memory = got_addr };
} else {
@ -6189,7 +6200,7 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
return self.lowerDeclRef(typed_value, payload.data);
}
if (typed_value.val.castTag(.decl_ref_mut)) |payload| {
return self.lowerDeclRef(typed_value, payload.data.decl);
return self.lowerDeclRef(typed_value, payload.data.decl_index);
}
const target = self.target.*;
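
Every hunk in this backend applies the same mechanical rewrite: a `*Module.Decl` parameter becomes a `Module.Decl.Index`, the pointer is recovered on demand with `declPtr`, and liveness marking moves from the Decl onto the Module. A runnable sketch of that shape, using simplified, illustrative types rather than the compiler's own:

    const std = @import("std");

    const Decl = struct { alive: bool = false, got_index: u32 = 0 };

    const Module = struct {
        // SegmentedList keeps element pointers stable across appends,
        // which is what makes handing out u32 indexes safe.
        allocated_decls: std.SegmentedList(Decl, 0) = .{},

        fn declPtr(mod: *Module, index: u32) *Decl {
            return mod.allocated_decls.at(index);
        }

        fn markDeclAlive(mod: *Module, decl: *Decl) void {
            _ = mod; // room for dependency bookkeeping
            decl.alive = true;
        }
    };

    // After the rewrite: accept an index, resolve late, mark via the module.
    fn lowerDeclRef(mod: *Module, decl_index: u32) u32 {
        const decl = mod.declPtr(decl_index);
        mod.markDeclAlive(decl);
        return decl.got_index;
    }

    test "indexes resolve through the module" {
        var mod = Module{};
        defer mod.allocated_decls.deinit(std.testing.allocator);
        try mod.allocated_decls.append(std.testing.allocator, .{ .got_index = 7 });
        try std.testing.expectEqual(@as(u32, 7), lowerDeclRef(&mod, 0));
    }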

View File

@ -347,7 +347,9 @@ pub fn generateSymbol(
switch (container_ptr.tag()) {
.decl_ref => {
const decl = container_ptr.castTag(.decl_ref).?.data;
const decl_index = container_ptr.castTag(.decl_ref).?.data;
const mod = bin_file.options.module.?;
const decl = mod.declPtr(decl_index);
const addend = blk: {
switch (decl.ty.tag()) {
.@"struct" => {
@ -364,7 +366,7 @@ pub fn generateSymbol(
},
}
};
return lowerDeclRef(bin_file, src_loc, typed_value, decl, code, debug_output, .{
return lowerDeclRef(bin_file, src_loc, typed_value, decl_index, code, debug_output, .{
.parent_atom_index = reloc_info.parent_atom_index,
.addend = (reloc_info.addend orelse 0) + addend,
});
@ -400,8 +402,8 @@ pub fn generateSymbol(
switch (array_ptr.tag()) {
.decl_ref => {
const decl = array_ptr.castTag(.decl_ref).?.data;
return lowerDeclRef(bin_file, src_loc, typed_value, decl, code, debug_output, .{
const decl_index = array_ptr.castTag(.decl_ref).?.data;
return lowerDeclRef(bin_file, src_loc, typed_value, decl_index, code, debug_output, .{
.parent_atom_index = reloc_info.parent_atom_index,
.addend = (reloc_info.addend orelse 0) + addend,
});
@ -589,7 +591,8 @@ pub fn generateSymbol(
}
const union_ty = typed_value.ty.cast(Type.Payload.Union).?.data;
const field_index = union_ty.tag_ty.enumTagFieldIndex(union_obj.tag, target).?;
const mod = bin_file.options.module.?;
const field_index = union_ty.tag_ty.enumTagFieldIndex(union_obj.tag, mod).?;
assert(union_ty.haveFieldTypes());
const field_ty = union_ty.fields.values()[field_index].ty;
if (!field_ty.hasRuntimeBits()) {
@ -772,12 +775,13 @@ fn lowerDeclRef(
bin_file: *link.File,
src_loc: Module.SrcLoc,
typed_value: TypedValue,
decl: *Module.Decl,
decl_index: Module.Decl.Index,
code: *std.ArrayList(u8),
debug_output: DebugInfoOutput,
reloc_info: RelocInfo,
) GenerateSymbolError!Result {
const target = bin_file.options.target;
const module = bin_file.options.module.?;
if (typed_value.ty.isSlice()) {
// generate ptr
var buf: Type.SlicePtrFieldTypeBuffer = undefined;
@ -796,7 +800,7 @@ fn lowerDeclRef(
// generate length
var slice_len: Value.Payload.U64 = .{
.base = .{ .tag = .int_u64 },
.data = typed_value.val.sliceLen(target),
.data = typed_value.val.sliceLen(module),
};
switch (try generateSymbol(bin_file, src_loc, .{
.ty = Type.usize,
@ -813,14 +817,16 @@ fn lowerDeclRef(
}
const ptr_width = target.cpu.arch.ptrBitWidth();
const decl = module.declPtr(decl_index);
const is_fn_body = decl.ty.zigTypeTag() == .Fn;
if (!is_fn_body and !decl.ty.hasRuntimeBits()) {
try code.writer().writeByteNTimes(0xaa, @divExact(ptr_width, 8));
return Result{ .appended = {} };
}
decl.markAlive();
const vaddr = try bin_file.getDeclVAddr(decl, .{
module.markDeclAlive(decl);
const vaddr = try bin_file.getDeclVAddr(decl_index, .{
.parent_atom_index = reloc_info.parent_atom_index,
.offset = code.items.len,
.addend = reloc_info.addend orelse 0,
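
The relocation path above now records a `Decl.Index`, which makes the serialization advantage from the commit message concrete: fixed-width integers round-trip to disk as-is, whereas pointers would need a relocation table on load. A hedged sketch (the helper name is hypothetical, not a compiler API):

    const std = @import("std");

    // Write a dependency set of u32 Decl indexes with explicit endianness.
    fn serializeDeps(deps: []const u32, writer: anytype) !void {
        for (deps) |index| {
            try writer.writeIntLittle(u32, index);
        }
    }

    test "indexes round-trip as raw little-endian bytes" {
        var buf: [8]u8 = undefined;
        var fbs = std.io.fixedBufferStream(&buf);
        try serializeDeps(&.{ 1, 2 }, fbs.writer());
        try std.testing.expectEqualSlices(
            u8,
            &.{ 1, 0, 0, 0, 2, 0, 0, 0 },
            fbs.getWritten(),
        );
    }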

View File

@ -32,8 +32,8 @@ pub const CValue = union(enum) {
/// Index into the parameters
arg: usize,
/// By-value
decl: *Decl,
decl_ref: *Decl,
decl: Decl.Index,
decl_ref: Decl.Index,
/// An undefined (void *) pointer (cannot be dereferenced)
undefined_ptr: void,
/// Render the slice as an identifier (using fmtIdent)
@ -58,7 +58,7 @@ pub const TypedefMap = std.ArrayHashMap(
const FormatTypeAsCIdentContext = struct {
ty: Type,
target: std.Target,
mod: *Module,
};
/// TODO make this not cut off at 128 bytes
@ -71,14 +71,14 @@ fn formatTypeAsCIdentifier(
_ = fmt;
_ = options;
var buffer = [1]u8{0} ** 128;
var buf = std.fmt.bufPrint(&buffer, "{}", .{data.ty.fmt(data.target)}) catch &buffer;
var buf = std.fmt.bufPrint(&buffer, "{}", .{data.ty.fmt(data.mod)}) catch &buffer;
return formatIdent(buf, "", .{}, writer);
}
pub fn typeToCIdentifier(ty: Type, target: std.Target) std.fmt.Formatter(formatTypeAsCIdentifier) {
pub fn typeToCIdentifier(ty: Type, mod: *Module) std.fmt.Formatter(formatTypeAsCIdentifier) {
return .{ .data = .{
.ty = ty,
.target = target,
.mod = mod,
} };
}
@ -349,6 +349,7 @@ pub const DeclGen = struct {
gpa: std.mem.Allocator,
module: *Module,
decl: *Decl,
decl_index: Decl.Index,
fwd_decl: std.ArrayList(u8),
error_msg: ?*Module.ErrorMsg,
/// The key of this map is Type which has references to typedefs_arena.
@ -376,10 +377,8 @@ pub const DeclGen = struct {
writer: anytype,
ty: Type,
val: Value,
decl: *Decl,
decl_index: Decl.Index,
) error{ OutOfMemory, AnalysisFail }!void {
const target = dg.module.getTarget();
if (ty.isSlice()) {
try writer.writeByte('(');
try dg.renderTypecast(writer, ty);
@ -387,11 +386,12 @@ pub const DeclGen = struct {
var buf: Type.SlicePtrFieldTypeBuffer = undefined;
try dg.renderValue(writer, ty.slicePtrFieldType(&buf), val.slicePtr());
try writer.writeAll(", ");
try writer.print("{d}", .{val.sliceLen(target)});
try writer.print("{d}", .{val.sliceLen(dg.module)});
try writer.writeAll("}");
return;
}
const decl = dg.module.declPtr(decl_index);
assert(decl.has_tv);
// We shouldn't cast C function pointers as this is UB (when you call
// them). The analysis until now should ensure that the C function
@ -399,21 +399,21 @@ pub const DeclGen = struct {
// somewhere and we should let the C compiler tell us about it.
if (ty.castPtrToFn() == null) {
// Determine if we must pointer cast.
if (ty.eql(decl.ty, target)) {
if (ty.eql(decl.ty, dg.module)) {
try writer.writeByte('&');
try dg.renderDeclName(writer, decl);
try dg.renderDeclName(writer, decl_index);
return;
}
try writer.writeAll("((");
try dg.renderTypecast(writer, ty);
try writer.writeAll(")&");
try dg.renderDeclName(writer, decl);
try dg.renderDeclName(writer, decl_index);
try writer.writeByte(')');
return;
}
try dg.renderDeclName(writer, decl);
try dg.renderDeclName(writer, decl_index);
}
fn renderInt128(
@ -471,13 +471,13 @@ pub const DeclGen = struct {
try writer.writeByte(')');
switch (ptr_val.tag()) {
.decl_ref_mut, .decl_ref, .variable => {
const decl = switch (ptr_val.tag()) {
const decl_index = switch (ptr_val.tag()) {
.decl_ref => ptr_val.castTag(.decl_ref).?.data,
.decl_ref_mut => ptr_val.castTag(.decl_ref_mut).?.data.decl,
.decl_ref_mut => ptr_val.castTag(.decl_ref_mut).?.data.decl_index,
.variable => ptr_val.castTag(.variable).?.data.owner_decl,
else => unreachable,
};
try dg.renderDeclValue(writer, ptr_ty, ptr_val, decl);
try dg.renderDeclValue(writer, ptr_ty, ptr_val, decl_index);
},
.field_ptr => {
const field_ptr = ptr_val.castTag(.field_ptr).?.data;
@ -685,7 +685,7 @@ pub const DeclGen = struct {
var index: usize = 0;
while (index < ai.len) : (index += 1) {
if (index != 0) try writer.writeAll(",");
const elem_val = try val.elemValue(arena_allocator, index);
const elem_val = try val.elemValue(dg.module, arena_allocator, index);
try dg.renderValue(writer, ai.elem_type, elem_val);
}
if (ai.sentinel) |s| {
@ -837,7 +837,7 @@ pub const DeclGen = struct {
try writer.writeAll(".payload = {");
}
const index = union_ty.tag_ty.enumTagFieldIndex(union_obj.tag, target).?;
const index = union_ty.tag_ty.enumTagFieldIndex(union_obj.tag, dg.module).?;
const field_ty = ty.unionFields().values()[index].ty;
const field_name = ty.unionFields().keys()[index];
if (field_ty.hasRuntimeBits()) {
@ -889,7 +889,7 @@ pub const DeclGen = struct {
try w.writeAll("void");
}
try w.writeAll(" ");
try dg.renderDeclName(w, dg.decl);
try dg.renderDeclName(w, dg.decl_index);
try w.writeAll("(");
const param_len = dg.decl.ty.fnParamLen();
@ -927,8 +927,7 @@ pub const DeclGen = struct {
try bw.writeAll(" (*");
const name_start = buffer.items.len;
const target = dg.module.getTarget();
try bw.print("zig_F_{s})(", .{typeToCIdentifier(t, target)});
try bw.print("zig_F_{s})(", .{typeToCIdentifier(t, dg.module)});
const name_end = buffer.items.len - 2;
const param_len = fn_info.param_types.len;
@ -982,11 +981,10 @@ pub const DeclGen = struct {
try bw.writeAll("; size_t len; } ");
const name_index = buffer.items.len;
const target = dg.module.getTarget();
if (t.isConstPtr()) {
try bw.print("zig_L_{s}", .{typeToCIdentifier(child_type, target)});
try bw.print("zig_L_{s}", .{typeToCIdentifier(child_type, dg.module)});
} else {
try bw.print("zig_M_{s}", .{typeToCIdentifier(child_type, target)});
try bw.print("zig_M_{s}", .{typeToCIdentifier(child_type, dg.module)});
}
if (ptr_sentinel) |s| {
try bw.writeAll("_s_");
@ -1009,7 +1007,7 @@ pub const DeclGen = struct {
fn renderStructTypedef(dg: *DeclGen, t: Type) error{ OutOfMemory, AnalysisFail }![]const u8 {
const struct_obj = t.castTag(.@"struct").?.data; // Handle 0 bit types elsewhere.
const fqn = try struct_obj.getFullyQualifiedName(dg.typedefs.allocator);
const fqn = try struct_obj.getFullyQualifiedName(dg.module);
defer dg.typedefs.allocator.free(fqn);
var buffer = std.ArrayList(u8).init(dg.typedefs.allocator);
@ -1072,8 +1070,7 @@ pub const DeclGen = struct {
try buffer.appendSlice("} ");
const name_start = buffer.items.len;
const target = dg.module.getTarget();
try writer.print("zig_T_{};\n", .{typeToCIdentifier(t, target)});
try writer.print("zig_T_{};\n", .{typeToCIdentifier(t, dg.module)});
const rendered = buffer.toOwnedSlice();
errdefer dg.typedefs.allocator.free(rendered);
@ -1090,7 +1087,7 @@ pub const DeclGen = struct {
fn renderUnionTypedef(dg: *DeclGen, t: Type) error{ OutOfMemory, AnalysisFail }![]const u8 {
const union_ty = t.cast(Type.Payload.Union).?.data;
const fqn = try union_ty.getFullyQualifiedName(dg.typedefs.allocator);
const fqn = try union_ty.getFullyQualifiedName(dg.module);
defer dg.typedefs.allocator.free(fqn);
const target = dg.module.getTarget();
@ -1157,7 +1154,6 @@ pub const DeclGen = struct {
try dg.renderTypeAndName(bw, child_type, payload_name, .Mut, 0);
try bw.writeAll("; uint16_t error; } ");
const name_index = buffer.items.len;
const target = dg.module.getTarget();
if (err_set_type.castTag(.error_set_inferred)) |inf_err_set_payload| {
const func = inf_err_set_payload.data.func;
try bw.writeAll("zig_E_");
@ -1165,7 +1161,7 @@ pub const DeclGen = struct {
try bw.writeAll(";\n");
} else {
try bw.print("zig_E_{s}_{s};\n", .{
typeToCIdentifier(err_set_type, target), typeToCIdentifier(child_type, target),
typeToCIdentifier(err_set_type, dg.module), typeToCIdentifier(child_type, dg.module),
});
}
@ -1195,8 +1191,7 @@ pub const DeclGen = struct {
try dg.renderType(bw, elem_type);
const name_start = buffer.items.len + 1;
const target = dg.module.getTarget();
try bw.print(" zig_A_{s}_{d}", .{ typeToCIdentifier(elem_type, target), c_len });
try bw.print(" zig_A_{s}_{d}", .{ typeToCIdentifier(elem_type, dg.module), c_len });
const name_end = buffer.items.len;
try bw.print("[{d}];\n", .{c_len});
@ -1224,8 +1219,7 @@ pub const DeclGen = struct {
try dg.renderTypeAndName(bw, child_type, payload_name, .Mut, 0);
try bw.writeAll("; bool is_null; } ");
const name_index = buffer.items.len;
const target = dg.module.getTarget();
try bw.print("zig_Q_{s};\n", .{typeToCIdentifier(child_type, target)});
try bw.print("zig_Q_{s};\n", .{typeToCIdentifier(child_type, dg.module)});
const rendered = buffer.toOwnedSlice();
errdefer dg.typedefs.allocator.free(rendered);
@ -1535,16 +1529,17 @@ pub const DeclGen = struct {
}
}
fn renderDeclName(dg: DeclGen, writer: anytype, decl: *Decl) !void {
decl.markAlive();
fn renderDeclName(dg: DeclGen, writer: anytype, decl_index: Decl.Index) !void {
const decl = dg.module.declPtr(decl_index);
dg.module.markDeclAlive(decl);
if (dg.module.decl_exports.get(decl)) |exports| {
if (dg.module.decl_exports.get(decl_index)) |exports| {
return writer.writeAll(exports[0].options.name);
} else if (decl.val.tag() == .extern_fn) {
return writer.writeAll(mem.sliceTo(decl.name, 0));
} else {
const gpa = dg.module.gpa;
const name = try decl.getFullyQualifiedName(gpa);
const name = try decl.getFullyQualifiedName(dg.module);
defer gpa.free(name);
return writer.print("{ }", .{fmtIdent(name)});
}
@ -1616,7 +1611,11 @@ pub fn genDecl(o: *Object) !void {
try fwd_decl_writer.writeAll("zig_threadlocal ");
}
const decl_c_value: CValue = if (is_global) .{ .bytes = mem.span(o.dg.decl.name) } else .{ .decl = o.dg.decl };
const decl_c_value: CValue = if (is_global) .{
.bytes = mem.span(o.dg.decl.name),
} else .{
.decl = o.dg.decl_index,
};
try o.dg.renderTypeAndName(fwd_decl_writer, o.dg.decl.ty, decl_c_value, .Mut, o.dg.decl.@"align");
try fwd_decl_writer.writeAll(";\n");
@ -1641,7 +1640,7 @@ pub fn genDecl(o: *Object) !void {
// TODO ask the Decl if it is const
// https://github.com/ziglang/zig/issues/7582
const decl_c_value: CValue = .{ .decl = o.dg.decl };
const decl_c_value: CValue = .{ .decl = o.dg.decl_index };
try o.dg.renderTypeAndName(writer, tv.ty, decl_c_value, .Mut, o.dg.decl.@"align");
try writer.writeAll(" = ");
@ -2234,13 +2233,12 @@ fn airStore(f: *Function, inst: Air.Inst.Index) !CValue {
if (src_val_is_undefined)
return try airStoreUndefined(f, dest_ptr);
const target = f.object.dg.module.getTarget();
const writer = f.object.writer();
if (lhs_child_type.zigTypeTag() == .Array) {
// For this memcpy to safely work we need the rhs to have the same
// underlying type as the lhs (i.e. they must both be arrays of the same underlying type).
const rhs_type = f.air.typeOf(bin_op.rhs);
assert(rhs_type.eql(lhs_child_type, target));
assert(rhs_type.eql(lhs_child_type, f.object.dg.module));
// If the source is a constant, writeCValue will emit a brace initialization
// so work around this by initializing into new local.
@ -2780,7 +2778,8 @@ fn airDbgInline(f: *Function, inst: Air.Inst.Index) !CValue {
const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
const writer = f.object.writer();
const function = f.air.values[ty_pl.payload].castTag(.function).?.data;
try writer.print("/* dbg func:{s} */\n", .{function.owner_decl.name});
const mod = f.object.dg.module;
try writer.print("/* dbg func:{s} */\n", .{mod.declPtr(function.owner_decl).name});
return CValue.none;
}
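
The `CValue` change at the top of this file is also where the memory saving from the commit message shows up in a backend: an index payload is 4 bytes where a pointer is 8 on 64-bit hosts. A standalone sketch with the variants trimmed down; the savings matter most in payloads where Decl references dominate, per the commit message:

    const std = @import("std");

    const Decl = struct { name: []const u8 };

    const CValueBefore = union(enum) {
        none,
        arg: usize,
        decl: *Decl, // pointer-width payload
    };

    const CValueAfter = union(enum) {
        none,
        arg: usize,
        decl: u32, // index payload
    };

    test "index payloads are never wider than pointer payloads" {
        try std.testing.expect(@sizeOf(CValueAfter) <= @sizeOf(CValueBefore));
    }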

View File

@ -161,6 +161,7 @@ pub fn targetTriple(allocator: Allocator, target: std.Target) ![:0]u8 {
pub const Object = struct {
gpa: Allocator,
module: *Module,
llvm_module: *const llvm.Module,
di_builder: ?*llvm.DIBuilder,
/// One of these mappings:
@ -181,7 +182,7 @@ pub const Object = struct {
/// version of the name and incorrectly get function not found in the llvm module.
/// * it works for functions, not all globals.
/// Therefore, this table keeps track of the mapping.
decl_map: std.AutoHashMapUnmanaged(*const Module.Decl, *const llvm.Value),
decl_map: std.AutoHashMapUnmanaged(Module.Decl.Index, *const llvm.Value),
/// Maps Zig types to LLVM types. The table memory itself is backed by the GPA of
/// the compiler, but the Type/Value memory here is backed by `type_map_arena`.
/// TODO we need to remove entries from this map in response to incremental compilation
@ -340,6 +341,7 @@ pub const Object = struct {
return Object{
.gpa = gpa,
.module = options.module.?,
.llvm_module = llvm_module,
.di_map = .{},
.di_builder = opt_di_builder,
@ -568,18 +570,20 @@ pub const Object = struct {
air: Air,
liveness: Liveness,
) !void {
const decl = func.owner_decl;
const decl_index = func.owner_decl;
const decl = module.declPtr(decl_index);
var dg: DeclGen = .{
.context = o.context,
.object = o,
.module = module,
.decl_index = decl_index,
.decl = decl,
.err_msg = null,
.gpa = module.gpa,
};
const llvm_func = try dg.resolveLlvmFunction(decl);
const llvm_func = try dg.resolveLlvmFunction(decl_index);
if (module.align_stack_fns.get(func)) |align_info| {
dg.addFnAttrInt(llvm_func, "alignstack", align_info.alignment);
@ -632,7 +636,7 @@ pub const Object = struct {
const line_number = decl.src_line + 1;
const is_internal_linkage = decl.val.tag() != .extern_fn and
!dg.module.decl_exports.contains(decl);
!dg.module.decl_exports.contains(decl_index);
const noret_bit: c_uint = if (fn_info.return_type.isNoReturn())
llvm.DIFlags.NoReturn
else
@ -684,48 +688,51 @@ pub const Object = struct {
fg.genBody(air.getMainBody()) catch |err| switch (err) {
error.CodegenFail => {
decl.analysis = .codegen_failure;
try module.failed_decls.put(module.gpa, decl, dg.err_msg.?);
try module.failed_decls.put(module.gpa, decl_index, dg.err_msg.?);
dg.err_msg = null;
return;
},
else => |e| return e,
};
const decl_exports = module.decl_exports.get(decl) orelse &[0]*Module.Export{};
try o.updateDeclExports(module, decl, decl_exports);
const decl_exports = module.decl_exports.get(decl_index) orelse &[0]*Module.Export{};
try o.updateDeclExports(module, decl_index, decl_exports);
}
pub fn updateDecl(self: *Object, module: *Module, decl: *Module.Decl) !void {
pub fn updateDecl(self: *Object, module: *Module, decl_index: Module.Decl.Index) !void {
const decl = module.declPtr(decl_index);
var dg: DeclGen = .{
.context = self.context,
.object = self,
.module = module,
.decl = decl,
.decl_index = decl_index,
.err_msg = null,
.gpa = module.gpa,
};
dg.genDecl() catch |err| switch (err) {
error.CodegenFail => {
decl.analysis = .codegen_failure;
try module.failed_decls.put(module.gpa, decl, dg.err_msg.?);
try module.failed_decls.put(module.gpa, decl_index, dg.err_msg.?);
dg.err_msg = null;
return;
},
else => |e| return e,
};
const decl_exports = module.decl_exports.get(decl) orelse &[0]*Module.Export{};
try self.updateDeclExports(module, decl, decl_exports);
const decl_exports = module.decl_exports.get(decl_index) orelse &[0]*Module.Export{};
try self.updateDeclExports(module, decl_index, decl_exports);
}
pub fn updateDeclExports(
self: *Object,
module: *const Module,
decl: *const Module.Decl,
module: *Module,
decl_index: Module.Decl.Index,
exports: []const *Module.Export,
) !void {
// If the module does not already have the function, we ignore this function call
// because we call `updateDeclExports` at the end of `updateFunc` and `updateDecl`.
const llvm_global = self.decl_map.get(decl) orelse return;
const llvm_global = self.decl_map.get(decl_index) orelse return;
const decl = module.declPtr(decl_index);
if (decl.isExtern()) {
llvm_global.setValueName(decl.name);
llvm_global.setUnnamedAddr(.False);
@ -798,7 +805,7 @@ pub const Object = struct {
}
}
} else {
const fqn = try decl.getFullyQualifiedName(module.gpa);
const fqn = try decl.getFullyQualifiedName(module);
defer module.gpa.free(fqn);
llvm_global.setValueName2(fqn.ptr, fqn.len);
llvm_global.setLinkage(.Internal);
@ -814,8 +821,8 @@ pub const Object = struct {
}
}
pub fn freeDecl(self: *Object, decl: *Module.Decl) void {
const llvm_value = self.decl_map.get(decl) orelse return;
pub fn freeDecl(self: *Object, decl_index: Module.Decl.Index) void {
const llvm_value = self.decl_map.get(decl_index) orelse return;
llvm_value.deleteGlobal();
}
@ -847,7 +854,7 @@ pub const Object = struct {
const gpa = o.gpa;
// Be careful not to reference this `gop` variable after any recursive calls
// to `lowerDebugType`.
const gop = try o.di_type_map.getOrPutContext(gpa, ty, .{ .target = o.target });
const gop = try o.di_type_map.getOrPutContext(gpa, ty, .{ .mod = o.module });
if (gop.found_existing) {
const annotated = gop.value_ptr.*;
const di_type = annotated.toDIType();
@ -860,7 +867,7 @@ pub const Object = struct {
};
return o.lowerDebugTypeImpl(entry, resolve, di_type);
}
errdefer assert(o.di_type_map.orderedRemoveContext(ty, .{ .target = o.target }));
errdefer assert(o.di_type_map.orderedRemoveContext(ty, .{ .mod = o.module }));
// The Type memory is ephemeral; since we want to store a longer-lived
// reference, we need to copy it here.
gop.key_ptr.* = try ty.copy(o.type_map_arena.allocator());
@ -891,7 +898,7 @@ pub const Object = struct {
.Int => {
const info = ty.intInfo(target);
assert(info.bits != 0);
const name = try ty.nameAlloc(gpa, target);
const name = try ty.nameAlloc(gpa, o.module);
defer gpa.free(name);
const dwarf_encoding: c_uint = switch (info.signedness) {
.signed => DW.ATE.signed,
@ -902,13 +909,14 @@ pub const Object = struct {
return di_type;
},
.Enum => {
const owner_decl = ty.getOwnerDecl();
const owner_decl_index = ty.getOwnerDecl();
const owner_decl = o.module.declPtr(owner_decl_index);
if (!ty.hasRuntimeBitsIgnoreComptime()) {
const enum_di_ty = try o.makeEmptyNamespaceDIType(owner_decl);
const enum_di_ty = try o.makeEmptyNamespaceDIType(owner_decl_index);
// The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType`
// means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(enum_di_ty), .{ .target = o.target });
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(enum_di_ty), .{ .mod = o.module });
return enum_di_ty;
}
@ -938,7 +946,7 @@ pub const Object = struct {
const di_file = try o.getDIFile(gpa, owner_decl.src_namespace.file_scope);
const di_scope = try o.namespaceToDebugScope(owner_decl.src_namespace);
const name = try ty.nameAlloc(gpa, target);
const name = try ty.nameAlloc(gpa, o.module);
defer gpa.free(name);
var buffer: Type.Payload.Bits = undefined;
const int_ty = ty.intTagType(&buffer);
@ -956,12 +964,12 @@ pub const Object = struct {
"",
);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(enum_di_ty), .{ .target = o.target });
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(enum_di_ty), .{ .mod = o.module });
return enum_di_ty;
},
.Float => {
const bits = ty.floatBits(target);
const name = try ty.nameAlloc(gpa, target);
const name = try ty.nameAlloc(gpa, o.module);
defer gpa.free(name);
const di_type = dib.createBasicType(name, bits, DW.ATE.float);
gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_type);
@ -1009,7 +1017,7 @@ pub const Object = struct {
const bland_ptr_ty = Type.initPayload(&payload.base);
const ptr_di_ty = try o.lowerDebugType(bland_ptr_ty, resolve);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.init(ptr_di_ty, resolve), .{ .target = o.target });
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.init(ptr_di_ty, resolve), .{ .mod = o.module });
return ptr_di_ty;
}
@ -1018,7 +1026,7 @@ pub const Object = struct {
const ptr_ty = ty.slicePtrFieldType(&buf);
const len_ty = Type.usize;
const name = try ty.nameAlloc(gpa, target);
const name = try ty.nameAlloc(gpa, o.module);
defer gpa.free(name);
const di_file: ?*llvm.DIFile = null;
const line = 0;
@ -1089,12 +1097,12 @@ pub const Object = struct {
);
dib.replaceTemporary(fwd_decl, full_di_ty);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .target = o.target });
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module });
return full_di_ty;
}
const elem_di_ty = try o.lowerDebugType(ptr_info.pointee_type, .fwd);
const name = try ty.nameAlloc(gpa, target);
const name = try ty.nameAlloc(gpa, o.module);
defer gpa.free(name);
const ptr_di_ty = dib.createPointerType(
elem_di_ty,
@ -1103,7 +1111,7 @@ pub const Object = struct {
name,
);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(ptr_di_ty), .{ .target = o.target });
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(ptr_di_ty), .{ .mod = o.module });
return ptr_di_ty;
},
.Opaque => {
@ -1112,9 +1120,10 @@ pub const Object = struct {
gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_ty);
return di_ty;
}
const name = try ty.nameAlloc(gpa, target);
const name = try ty.nameAlloc(gpa, o.module);
defer gpa.free(name);
const owner_decl = ty.getOwnerDecl();
const owner_decl_index = ty.getOwnerDecl();
const owner_decl = o.module.declPtr(owner_decl_index);
const opaque_di_ty = dib.createForwardDeclType(
DW.TAG.structure_type,
name,
@ -1124,7 +1133,7 @@ pub const Object = struct {
);
// The recursive call to `lowerDebugType` via `namespaceToDebugScope`
// means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(opaque_di_ty), .{ .target = o.target });
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(opaque_di_ty), .{ .mod = o.module });
return opaque_di_ty;
},
.Array => {
@ -1135,7 +1144,7 @@ pub const Object = struct {
@intCast(c_int, ty.arrayLen()),
);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(array_di_ty), .{ .target = o.target });
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(array_di_ty), .{ .mod = o.module });
return array_di_ty;
},
.Vector => {
@ -1146,11 +1155,11 @@ pub const Object = struct {
ty.vectorLen(),
);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(vector_di_ty), .{ .target = o.target });
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(vector_di_ty), .{ .mod = o.module });
return vector_di_ty;
},
.Optional => {
const name = try ty.nameAlloc(gpa, target);
const name = try ty.nameAlloc(gpa, o.module);
defer gpa.free(name);
var buf: Type.Payload.ElemType = undefined;
const child_ty = ty.optionalChild(&buf);
@ -1162,7 +1171,7 @@ pub const Object = struct {
if (ty.isPtrLikeOptional()) {
const ptr_di_ty = try o.lowerDebugType(child_ty, resolve);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(ptr_di_ty), .{ .target = o.target });
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(ptr_di_ty), .{ .mod = o.module });
return ptr_di_ty;
}
@ -1235,7 +1244,7 @@ pub const Object = struct {
);
dib.replaceTemporary(fwd_decl, full_di_ty);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .target = o.target });
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module });
return full_di_ty;
},
.ErrorUnion => {
@ -1244,10 +1253,10 @@ pub const Object = struct {
if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
const err_set_di_ty = try o.lowerDebugType(err_set_ty, .full);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(err_set_di_ty), .{ .target = o.target });
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(err_set_di_ty), .{ .mod = o.module });
return err_set_di_ty;
}
const name = try ty.nameAlloc(gpa, target);
const name = try ty.nameAlloc(gpa, o.module);
defer gpa.free(name);
const di_file: ?*llvm.DIFile = null;
const line = 0;
@ -1332,7 +1341,7 @@ pub const Object = struct {
);
dib.replaceTemporary(fwd_decl, full_di_ty);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .target = o.target });
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module });
return full_di_ty;
},
.ErrorSet => {
@ -1344,7 +1353,7 @@ pub const Object = struct {
},
.Struct => {
const compile_unit_scope = o.di_compile_unit.?.toScope();
const name = try ty.nameAlloc(gpa, target);
const name = try ty.nameAlloc(gpa, o.module);
defer gpa.free(name);
if (ty.castTag(.@"struct")) |payload| {
@ -1431,7 +1440,7 @@ pub const Object = struct {
);
dib.replaceTemporary(fwd_decl, full_di_ty);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .target = o.target });
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module });
return full_di_ty;
}
@ -1445,23 +1454,23 @@ pub const Object = struct {
// into. Therefore we can satisfy this by making an empty namespace,
// rather than changing the frontend to unnecessarily resolve the
// struct field types.
const owner_decl = ty.getOwnerDecl();
const struct_di_ty = try o.makeEmptyNamespaceDIType(owner_decl);
const owner_decl_index = ty.getOwnerDecl();
const struct_di_ty = try o.makeEmptyNamespaceDIType(owner_decl_index);
dib.replaceTemporary(fwd_decl, struct_di_ty);
// The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType`
// means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(struct_di_ty), .{ .target = o.target });
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(struct_di_ty), .{ .mod = o.module });
return struct_di_ty;
}
}
if (!ty.hasRuntimeBitsIgnoreComptime()) {
const owner_decl = ty.getOwnerDecl();
const struct_di_ty = try o.makeEmptyNamespaceDIType(owner_decl);
const owner_decl_index = ty.getOwnerDecl();
const struct_di_ty = try o.makeEmptyNamespaceDIType(owner_decl_index);
dib.replaceTemporary(fwd_decl, struct_di_ty);
// The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType`
// means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(struct_di_ty), .{ .target = o.target });
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(struct_di_ty), .{ .mod = o.module });
return struct_di_ty;
}
@ -1516,14 +1525,14 @@ pub const Object = struct {
);
dib.replaceTemporary(fwd_decl, full_di_ty);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .target = o.target });
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module });
return full_di_ty;
},
.Union => {
const compile_unit_scope = o.di_compile_unit.?.toScope();
const owner_decl = ty.getOwnerDecl();
const owner_decl_index = ty.getOwnerDecl();
const name = try ty.nameAlloc(gpa, target);
const name = try ty.nameAlloc(gpa, o.module);
defer gpa.free(name);
const fwd_decl = opt_fwd_decl orelse blk: {
@ -1540,11 +1549,11 @@ pub const Object = struct {
};
if (!ty.hasRuntimeBitsIgnoreComptime()) {
const union_di_ty = try o.makeEmptyNamespaceDIType(owner_decl);
const union_di_ty = try o.makeEmptyNamespaceDIType(owner_decl_index);
dib.replaceTemporary(fwd_decl, union_di_ty);
// The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType`
// means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(union_di_ty), .{ .target = o.target });
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(union_di_ty), .{ .mod = o.module });
return union_di_ty;
}
@ -1572,7 +1581,7 @@ pub const Object = struct {
dib.replaceTemporary(fwd_decl, full_di_ty);
// The recursive call to `lowerDebugType` via `makeEmptyNamespaceDIType`
// means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .target = o.target });
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module });
return full_di_ty;
}
@ -1626,7 +1635,7 @@ pub const Object = struct {
if (layout.tag_size == 0) {
dib.replaceTemporary(fwd_decl, union_di_ty);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(union_di_ty), .{ .target = o.target });
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(union_di_ty), .{ .mod = o.module });
return union_di_ty;
}
@ -1685,7 +1694,7 @@ pub const Object = struct {
);
dib.replaceTemporary(fwd_decl, full_di_ty);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .target = o.target });
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(full_di_ty), .{ .mod = o.module });
return full_di_ty;
},
.Fn => {
@ -1733,7 +1742,7 @@ pub const Object = struct {
0,
);
// The recursive call to `lowerDebugType` means we can't use `gop` anymore.
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(fn_di_ty), .{ .target = o.target });
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(fn_di_ty), .{ .mod = o.module });
return fn_di_ty;
},
.ComptimeInt => unreachable,
@ -1762,7 +1771,8 @@ pub const Object = struct {
/// This is to be used instead of void for debug info types, to avoid tripping
/// Assertion `!isa<DIType>(Scope) && "shouldn't make a namespace scope for a type"'
/// when targeting CodeView (Windows).
fn makeEmptyNamespaceDIType(o: *Object, decl: *const Module.Decl) !*llvm.DIType {
fn makeEmptyNamespaceDIType(o: *Object, decl_index: Module.Decl.Index) !*llvm.DIType {
const decl = o.module.declPtr(decl_index);
const fields: [0]*llvm.DIType = .{};
return o.di_builder.?.createStructType(
try o.namespaceToDebugScope(decl.src_namespace),
@ -1787,6 +1797,7 @@ pub const DeclGen = struct {
object: *Object,
module: *Module,
decl: *Module.Decl,
decl_index: Module.Decl.Index,
gpa: Allocator,
err_msg: ?*Module.ErrorMsg,
@ -1804,6 +1815,7 @@ pub const DeclGen = struct {
fn genDecl(dg: *DeclGen) !void {
const decl = dg.decl;
const decl_index = dg.decl_index;
assert(decl.has_tv);
log.debug("gen: {s} type: {}, value: {}", .{
@ -1817,7 +1829,7 @@ pub const DeclGen = struct {
_ = try dg.resolveLlvmFunction(extern_fn.data.owner_decl);
} else {
const target = dg.module.getTarget();
var global = try dg.resolveGlobalDecl(decl);
var global = try dg.resolveGlobalDecl(decl_index);
global.setAlignment(decl.getAlignment(target));
assert(decl.has_tv);
const init_val = if (decl.val.castTag(.variable)) |payload| init_val: {
@ -1858,7 +1870,7 @@ pub const DeclGen = struct {
// old uses.
const new_global_ptr = new_global.constBitCast(global.typeOf());
global.replaceAllUsesWith(new_global_ptr);
dg.object.decl_map.putAssumeCapacity(decl, new_global);
dg.object.decl_map.putAssumeCapacity(decl_index, new_global);
new_global.takeName(global);
global.deleteGlobal();
global = new_global;
@ -1869,7 +1881,7 @@ pub const DeclGen = struct {
const di_file = try dg.object.getDIFile(dg.gpa, decl.src_namespace.file_scope);
const line_number = decl.src_line + 1;
const is_internal_linkage = !dg.module.decl_exports.contains(decl);
const is_internal_linkage = !dg.module.decl_exports.contains(decl_index);
const di_global = dib.createGlobalVariable(
di_file.toScope(),
decl.name,
@ -1888,12 +1900,10 @@ pub const DeclGen = struct {
/// If the llvm function does not exist, create it.
/// Note that this can be called before the function's semantic analysis has
/// completed, so if any attributes rely on that, they must be done in updateFunc, not here.
fn resolveLlvmFunction(dg: *DeclGen, decl: *Module.Decl) !*const llvm.Value {
return dg.resolveLlvmFunctionExtra(decl, decl.ty);
}
fn resolveLlvmFunctionExtra(dg: *DeclGen, decl: *Module.Decl, zig_fn_type: Type) !*const llvm.Value {
const gop = try dg.object.decl_map.getOrPut(dg.gpa, decl);
fn resolveLlvmFunction(dg: *DeclGen, decl_index: Module.Decl.Index) !*const llvm.Value {
const decl = dg.module.declPtr(decl_index);
const zig_fn_type = decl.ty;
const gop = try dg.object.decl_map.getOrPut(dg.gpa, decl_index);
if (gop.found_existing) return gop.value_ptr.*;
assert(decl.has_tv);
@ -1903,7 +1913,7 @@ pub const DeclGen = struct {
const fn_type = try dg.llvmType(zig_fn_type);
const fqn = try decl.getFullyQualifiedName(dg.gpa);
const fqn = try decl.getFullyQualifiedName(dg.module);
defer dg.gpa.free(fqn);
const llvm_addrspace = dg.llvmAddressSpace(decl.@"addrspace");
@ -1996,12 +2006,13 @@ pub const DeclGen = struct {
// TODO add target-cpu and target-features fn attributes
}
fn resolveGlobalDecl(dg: *DeclGen, decl: *Module.Decl) Error!*const llvm.Value {
const gop = try dg.object.decl_map.getOrPut(dg.gpa, decl);
fn resolveGlobalDecl(dg: *DeclGen, decl_index: Module.Decl.Index) Error!*const llvm.Value {
const gop = try dg.object.decl_map.getOrPut(dg.gpa, decl_index);
if (gop.found_existing) return gop.value_ptr.*;
errdefer assert(dg.object.decl_map.remove(decl));
errdefer assert(dg.object.decl_map.remove(decl_index));
const fqn = try decl.getFullyQualifiedName(dg.gpa);
const decl = dg.module.declPtr(decl_index);
const fqn = try decl.getFullyQualifiedName(dg.module);
defer dg.gpa.free(fqn);
const llvm_type = try dg.llvmType(decl.ty);
@ -2122,7 +2133,7 @@ pub const DeclGen = struct {
},
.Opaque => switch (t.tag()) {
.@"opaque" => {
const gop = try dg.object.type_map.getOrPutContext(gpa, t, .{ .target = target });
const gop = try dg.object.type_map.getOrPutContext(gpa, t, .{ .mod = dg.module });
if (gop.found_existing) return gop.value_ptr.*;
// The Type memory is ephemeral; since we want to store a longer-lived
@ -2130,7 +2141,7 @@ pub const DeclGen = struct {
gop.key_ptr.* = try t.copy(dg.object.type_map_arena.allocator());
const opaque_obj = t.castTag(.@"opaque").?.data;
const name = try opaque_obj.getFullyQualifiedName(gpa);
const name = try opaque_obj.getFullyQualifiedName(dg.module);
defer gpa.free(name);
const llvm_struct_ty = dg.context.structCreateNamed(name);
@ -2191,7 +2202,7 @@ pub const DeclGen = struct {
return dg.context.intType(16);
},
.Struct => {
const gop = try dg.object.type_map.getOrPutContext(gpa, t, .{ .target = target });
const gop = try dg.object.type_map.getOrPutContext(gpa, t, .{ .mod = dg.module });
if (gop.found_existing) return gop.value_ptr.*;
// The Type memory is ephemeral; since we want to store a longer-lived
@ -2260,7 +2271,7 @@ pub const DeclGen = struct {
return int_llvm_ty;
}
const name = try struct_obj.getFullyQualifiedName(gpa);
const name = try struct_obj.getFullyQualifiedName(dg.module);
defer gpa.free(name);
const llvm_struct_ty = dg.context.structCreateNamed(name);
@ -2314,7 +2325,7 @@ pub const DeclGen = struct {
return llvm_struct_ty;
},
.Union => {
const gop = try dg.object.type_map.getOrPutContext(gpa, t, .{ .target = target });
const gop = try dg.object.type_map.getOrPutContext(gpa, t, .{ .mod = dg.module });
if (gop.found_existing) return gop.value_ptr.*;
// The Type memory is ephemeral; since we want to store a longer-lived
@ -2330,7 +2341,7 @@ pub const DeclGen = struct {
return enum_tag_llvm_ty;
}
const name = try union_obj.getFullyQualifiedName(gpa);
const name = try union_obj.getFullyQualifiedName(dg.module);
defer gpa.free(name);
const llvm_union_ty = dg.context.structCreateNamed(name);
@ -2439,7 +2450,7 @@ pub const DeclGen = struct {
// TODO this duplicates code with Pointer but they should share the handling
// of the tv.val.tag() and then Int should do extra constPtrToInt on top
.Int => switch (tv.val.tag()) {
.decl_ref_mut => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref_mut).?.data.decl),
.decl_ref_mut => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref_mut).?.data.decl_index),
.decl_ref => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref).?.data),
else => {
var bigint_space: Value.BigIntSpace = undefined;
@ -2524,12 +2535,13 @@ pub const DeclGen = struct {
}
},
.Pointer => switch (tv.val.tag()) {
.decl_ref_mut => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref_mut).?.data.decl),
.decl_ref_mut => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref_mut).?.data.decl_index),
.decl_ref => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref).?.data),
.variable => {
const decl = tv.val.castTag(.variable).?.data.owner_decl;
decl.markAlive();
const val = try dg.resolveGlobalDecl(decl);
const decl_index = tv.val.castTag(.variable).?.data.owner_decl;
const decl = dg.module.declPtr(decl_index);
dg.module.markDeclAlive(decl);
const val = try dg.resolveGlobalDecl(decl_index);
const llvm_var_type = try dg.llvmType(tv.ty);
const llvm_addrspace = dg.llvmAddressSpace(decl.@"addrspace");
const llvm_type = llvm_var_type.pointerType(llvm_addrspace);
@ -2683,13 +2695,14 @@ pub const DeclGen = struct {
return dg.context.constStruct(&fields, fields.len, .False);
},
.Fn => {
const fn_decl = switch (tv.val.tag()) {
const fn_decl_index = switch (tv.val.tag()) {
.extern_fn => tv.val.castTag(.extern_fn).?.data.owner_decl,
.function => tv.val.castTag(.function).?.data.owner_decl,
else => unreachable,
};
fn_decl.markAlive();
return dg.resolveLlvmFunction(fn_decl);
const fn_decl = dg.module.declPtr(fn_decl_index);
dg.module.markDeclAlive(fn_decl);
return dg.resolveLlvmFunction(fn_decl_index);
},
.ErrorSet => {
const llvm_ty = try dg.llvmType(tv.ty);
@ -2911,7 +2924,7 @@ pub const DeclGen = struct {
});
}
const union_obj = tv.ty.cast(Type.Payload.Union).?.data;
const field_index = union_obj.tag_ty.enumTagFieldIndex(tag_and_val.tag, target).?;
const field_index = union_obj.tag_ty.enumTagFieldIndex(tag_and_val.tag, dg.module).?;
assert(union_obj.haveFieldTypes());
const field_ty = union_obj.fields.values()[field_index].ty;
const payload = p: {
@ -3049,17 +3062,22 @@ pub const DeclGen = struct {
llvm_ptr: *const llvm.Value,
};
fn lowerParentPtrDecl(dg: *DeclGen, ptr_val: Value, decl: *Module.Decl, ptr_child_ty: Type) Error!*const llvm.Value {
decl.markAlive();
fn lowerParentPtrDecl(
dg: *DeclGen,
ptr_val: Value,
decl_index: Module.Decl.Index,
ptr_child_ty: Type,
) Error!*const llvm.Value {
const decl = dg.module.declPtr(decl_index);
dg.module.markDeclAlive(decl);
var ptr_ty_payload: Type.Payload.ElemType = .{
.base = .{ .tag = .single_mut_pointer },
.data = decl.ty,
};
const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
const llvm_ptr = try dg.lowerDeclRefValue(.{ .ty = ptr_ty, .val = ptr_val }, decl);
const llvm_ptr = try dg.lowerDeclRefValue(.{ .ty = ptr_ty, .val = ptr_val }, decl_index);
const target = dg.module.getTarget();
if (ptr_child_ty.eql(decl.ty, target)) {
if (ptr_child_ty.eql(decl.ty, dg.module)) {
return llvm_ptr;
} else {
return llvm_ptr.constBitCast((try dg.llvmType(ptr_child_ty)).pointerType(0));
@ -3071,7 +3089,7 @@ pub const DeclGen = struct {
var bitcast_needed: bool = undefined;
const llvm_ptr = switch (ptr_val.tag()) {
.decl_ref_mut => {
const decl = ptr_val.castTag(.decl_ref_mut).?.data.decl;
const decl = ptr_val.castTag(.decl_ref_mut).?.data.decl_index;
return dg.lowerParentPtrDecl(ptr_val, decl, ptr_child_ty);
},
.decl_ref => {
@ -3123,7 +3141,7 @@ pub const DeclGen = struct {
},
.Struct => {
const field_ty = parent_ty.structFieldType(field_index);
bitcast_needed = !field_ty.eql(ptr_child_ty, target);
bitcast_needed = !field_ty.eql(ptr_child_ty, dg.module);
var ty_buf: Type.Payload.Pointer = undefined;
const llvm_field_index = llvmFieldIndex(parent_ty, field_index, target, &ty_buf).?;
@ -3139,7 +3157,7 @@ pub const DeclGen = struct {
.elem_ptr => blk: {
const elem_ptr = ptr_val.castTag(.elem_ptr).?.data;
const parent_llvm_ptr = try dg.lowerParentPtr(elem_ptr.array_ptr, elem_ptr.elem_ty);
bitcast_needed = !elem_ptr.elem_ty.eql(ptr_child_ty, target);
bitcast_needed = !elem_ptr.elem_ty.eql(ptr_child_ty, dg.module);
const llvm_usize = try dg.llvmType(Type.usize);
const indices: [1]*const llvm.Value = .{
@ -3153,7 +3171,7 @@ pub const DeclGen = struct {
var buf: Type.Payload.ElemType = undefined;
const payload_ty = opt_payload_ptr.container_ty.optionalChild(&buf);
bitcast_needed = !payload_ty.eql(ptr_child_ty, target);
bitcast_needed = !payload_ty.eql(ptr_child_ty, dg.module);
if (!payload_ty.hasRuntimeBitsIgnoreComptime() or payload_ty.isPtrLikeOptional()) {
// In this case, we represent pointer to optional the same as pointer
@ -3173,7 +3191,7 @@ pub const DeclGen = struct {
const parent_llvm_ptr = try dg.lowerParentPtr(eu_payload_ptr.container_ptr, eu_payload_ptr.container_ty);
const payload_ty = eu_payload_ptr.container_ty.errorUnionPayload();
bitcast_needed = !payload_ty.eql(ptr_child_ty, target);
bitcast_needed = !payload_ty.eql(ptr_child_ty, dg.module);
if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
// In this case, we represent pointer to error union the same as pointer
@ -3201,15 +3219,14 @@ pub const DeclGen = struct {
fn lowerDeclRefValue(
self: *DeclGen,
tv: TypedValue,
decl: *Module.Decl,
decl_index: Module.Decl.Index,
) Error!*const llvm.Value {
const target = self.module.getTarget();
if (tv.ty.isSlice()) {
var buf: Type.SlicePtrFieldTypeBuffer = undefined;
const ptr_ty = tv.ty.slicePtrFieldType(&buf);
var slice_len: Value.Payload.U64 = .{
.base = .{ .tag = .int_u64 },
.data = tv.val.sliceLen(target),
.data = tv.val.sliceLen(self.module),
};
const fields: [2]*const llvm.Value = .{
try self.genTypedValue(.{
@ -3229,8 +3246,9 @@ pub const DeclGen = struct {
// const bar = foo;
// ... &bar;
// `bar` is just an alias and we actually want to lower a reference to `foo`.
const decl = self.module.declPtr(decl_index);
if (decl.val.castTag(.function)) |func| {
if (func.data.owner_decl != decl) {
if (func.data.owner_decl != decl_index) {
return self.lowerDeclRefValue(tv, func.data.owner_decl);
}
}
@ -3240,12 +3258,12 @@ pub const DeclGen = struct {
return self.lowerPtrToVoid(tv.ty);
}
decl.markAlive();
self.module.markDeclAlive(decl);
const llvm_val = if (is_fn_body)
try self.resolveLlvmFunction(decl)
try self.resolveLlvmFunction(decl_index)
else
try self.resolveGlobalDecl(decl);
try self.resolveGlobalDecl(decl_index);
const llvm_type = try self.llvmType(tv.ty);
if (tv.ty.zigTypeTag() == .Int) {
@ -4405,7 +4423,8 @@ pub const FuncGen = struct {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const func = self.air.values[ty_pl.payload].castTag(.function).?.data;
const decl = func.owner_decl;
const decl_index = func.owner_decl;
const decl = self.dg.module.declPtr(decl_index);
const di_file = try self.dg.object.getDIFile(self.gpa, decl.src_namespace.file_scope);
self.di_file = di_file;
const line_number = decl.src_line + 1;
@ -4417,10 +4436,10 @@ pub const FuncGen = struct {
.base_line = self.base_line,
});
const fqn = try decl.getFullyQualifiedName(self.gpa);
const fqn = try decl.getFullyQualifiedName(self.dg.module);
defer self.gpa.free(fqn);
const is_internal_linkage = !self.dg.module.decl_exports.contains(decl);
const is_internal_linkage = !self.dg.module.decl_exports.contains(decl_index);
const subprogram = dib.createFunction(
di_file.toScope(),
decl.name,
@ -4447,7 +4466,8 @@ pub const FuncGen = struct {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const func = self.air.values[ty_pl.payload].castTag(.function).?.data;
const decl = func.owner_decl;
const mod = self.dg.module;
const decl = mod.declPtr(func.owner_decl);
const di_file = try self.dg.object.getDIFile(self.gpa, decl.src_namespace.file_scope);
self.di_file = di_file;
const old = self.dbg_inlined.pop();
@ -5887,7 +5907,7 @@ pub const FuncGen = struct {
if (self.dg.object.di_builder) |dib| {
const src_index = self.getSrcArgIndex(self.arg_index - 1);
const func = self.dg.decl.getFunction().?;
const lbrace_line = func.owner_decl.src_line + func.lbrace_line + 1;
const lbrace_line = self.dg.module.declPtr(func.owner_decl).src_line + func.lbrace_line + 1;
const lbrace_col = func.lbrace_column + 1;
const di_local_var = dib.createParameterVariable(
self.di_scope.?,
@ -6430,8 +6450,9 @@ pub const FuncGen = struct {
const operand = try self.resolveInst(un_op);
const enum_ty = self.air.typeOf(un_op);
const mod = self.dg.module;
const llvm_fn_name = try std.fmt.allocPrintZ(arena, "__zig_tag_name_{s}", .{
try enum_ty.getOwnerDecl().getFullyQualifiedName(arena),
try mod.declPtr(enum_ty.getOwnerDecl()).getFullyQualifiedName(mod),
});
const llvm_fn = try self.getEnumTagNameFunction(enum_ty, llvm_fn_name);
@ -6617,7 +6638,7 @@ pub const FuncGen = struct {
for (values) |*val, i| {
var buf: Value.ElemValueBuffer = undefined;
const elem = mask.elemValueBuffer(i, &buf);
const elem = mask.elemValueBuffer(self.dg.module, i, &buf);
if (elem.isUndef()) {
val.* = llvm_i32.getUndef();
} else {
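
The `decl_map` re-keying above is representative of every table in this commit that used pointer identity: an `AutoHashMapUnmanaged` over a `u32` key hashes the integer directly, and the unique integer doubles as an anonymous symbol name with no shared atomic counter. A minimal sketch (value type simplified to a string; the real map stores `*const llvm.Value`, and the symbol-name format here is illustrative):

    const std = @import("std");

    test "maps keyed by Decl.Index instead of *Decl" {
        const gpa = std.testing.allocator;
        var decl_map: std.AutoHashMapUnmanaged(u32, []const u8) = .{};
        defer decl_map.deinit(gpa);

        // The index is stable for the lifetime of the Decl, so it can
        // also name the symbol without multi-threaded contention.
        try decl_map.put(gpa, 42, "__anon_42");
        try std.testing.expectEqualStrings("__anon_42", decl_map.get(42).?);
    }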

View File

@ -633,7 +633,13 @@ pub const DeclGen = struct {
return result_id.toRef();
}
fn airArithOp(self: *DeclGen, inst: Air.Inst.Index, comptime fop: Opcode, comptime sop: Opcode, comptime uop: Opcode) !IdRef {
fn airArithOp(
self: *DeclGen,
inst: Air.Inst.Index,
comptime fop: Opcode,
comptime sop: Opcode,
comptime uop: Opcode,
) !IdRef {
// LHS and RHS are guaranteed to have the same type, and AIR guarantees
// the result to be the same as the LHS and RHS, which matches SPIR-V.
const ty = self.air.typeOfIndex(inst);
@ -644,10 +650,8 @@ pub const DeclGen = struct {
const result_id = self.spv.allocId();
const result_type_id = try self.resolveTypeId(ty);
const target = self.getTarget();
assert(self.air.typeOf(bin_op.lhs).eql(ty, target));
assert(self.air.typeOf(bin_op.rhs).eql(ty, target));
assert(self.air.typeOf(bin_op.lhs).eql(ty, self.module));
assert(self.air.typeOf(bin_op.rhs).eql(ty, self.module));
// Binary operations are generally applicable to both scalar and vector operations
// in SPIR-V, but int and float versions of operations require different opcodes.
@ -694,7 +698,7 @@ pub const DeclGen = struct {
const result_id = self.spv.allocId();
const result_type_id = try self.resolveTypeId(Type.initTag(.bool));
const op_ty = self.air.typeOf(bin_op.lhs);
assert(op_ty.eql(self.air.typeOf(bin_op.rhs), self.getTarget()));
assert(op_ty.eql(self.air.typeOf(bin_op.rhs), self.module));
// Comparisons are generally applicable to both scalar and vector operations in SPIR-V,
// but int and float versions of operations require different opcodes.
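
Beyond the `eql` plumbing, this is a good place to make the first bullet of the commit message concrete: SegmentedList never relocates elements, so a pointer obtained via `declPtr` on one thread stays valid while another thread appends more Decls. A runnable sketch of that invariant:

    const std = @import("std");

    test "SegmentedList pointers stay stable across growth" {
        const gpa = std.testing.allocator;
        var decls: std.SegmentedList(u64, 0) = .{};
        defer decls.deinit(gpa);

        try decls.append(gpa, 123);
        const first = decls.at(0); // imagine another thread holding this

        // Unlike ArrayList, growing never moves existing elements.
        var i: u64 = 0;
        while (i < 10_000) : (i += 1) try decls.append(gpa, i);

        try std.testing.expectEqual(@as(u64, 123), first.*);
    }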

View File

@ -90,9 +90,11 @@ fn dumpStatusReport() !void {
const stderr = io.getStdErr().writer();
const block: *Sema.Block = anal.block;
const mod = anal.sema.mod;
const block_src_decl = mod.declPtr(block.src_decl);
try stderr.writeAll("Analyzing ");
try writeFullyQualifiedDeclWithFile(block.src_decl, stderr);
try writeFullyQualifiedDeclWithFile(mod, block_src_decl, stderr);
try stderr.writeAll("\n");
print_zir.renderInstructionContext(
@ -100,7 +102,7 @@ fn dumpStatusReport() !void {
anal.body,
anal.body_index,
block.namespace.file_scope,
block.src_decl.src_node,
block_src_decl.src_node,
6, // indent
stderr,
) catch |err| switch (err) {
@ -115,13 +117,14 @@ fn dumpStatusReport() !void {
while (parent) |curr| {
fba.reset();
try stderr.writeAll(" in ");
try writeFullyQualifiedDeclWithFile(curr.block.src_decl, stderr);
const curr_block_src_decl = mod.declPtr(curr.block.src_decl);
try writeFullyQualifiedDeclWithFile(mod, curr_block_src_decl, stderr);
try stderr.writeAll("\n > ");
print_zir.renderSingleInstruction(
allocator,
curr.body[curr.body_index],
curr.block.namespace.file_scope,
curr.block.src_decl.src_node,
curr_block_src_decl.src_node,
6, // indent
stderr,
) catch |err| switch (err) {
@ -146,10 +149,10 @@ fn writeFilePath(file: *Module.File, stream: anytype) !void {
try stream.writeAll(file.sub_file_path);
}
fn writeFullyQualifiedDeclWithFile(decl: *Decl, stream: anytype) !void {
fn writeFullyQualifiedDeclWithFile(mod: *Module, decl: *Decl, stream: anytype) !void {
try writeFilePath(decl.getFileScope(), stream);
try stream.writeAll(": ");
try decl.renderFullyQualifiedDebugName(stream);
try decl.renderFullyQualifiedDebugName(mod, stream);
}
pub fn compilerPanic(msg: []const u8, error_return_trace: ?*std.builtin.StackTrace) noreturn {
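
Put together, these helpers yield crash-report output along these lines (the path, decl names, and instructions are purely illustrative):

Analyzing src/main.zig: main.main
      %24 = call(%19, ...)
 in src/main.zig: main.Server.accept
 > %7 = decl_val("Server")
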


@ -417,17 +417,18 @@ pub const File = struct {
/// Called from within the CodeGen to lower a local variable instantiation as an unnamed
/// constant. Returns the symbol index of the lowered constant in the read-only section
/// of the final binary.
pub fn lowerUnnamedConst(base: *File, tv: TypedValue, decl: *Module.Decl) UpdateDeclError!u32 {
pub fn lowerUnnamedConst(base: *File, tv: TypedValue, decl_index: Module.Decl.Index) UpdateDeclError!u32 {
const decl = base.options.module.?.declPtr(decl_index);
log.debug("lowerUnnamedConst {*} ({s})", .{ decl, decl.name });
switch (base.tag) {
// zig fmt: off
.coff => return @fieldParentPtr(Coff, "base", base).lowerUnnamedConst(tv, decl),
.elf => return @fieldParentPtr(Elf, "base", base).lowerUnnamedConst(tv, decl),
.macho => return @fieldParentPtr(MachO, "base", base).lowerUnnamedConst(tv, decl),
.plan9 => return @fieldParentPtr(Plan9, "base", base).lowerUnnamedConst(tv, decl),
.coff => return @fieldParentPtr(Coff, "base", base).lowerUnnamedConst(tv, decl_index),
.elf => return @fieldParentPtr(Elf, "base", base).lowerUnnamedConst(tv, decl_index),
.macho => return @fieldParentPtr(MachO, "base", base).lowerUnnamedConst(tv, decl_index),
.plan9 => return @fieldParentPtr(Plan9, "base", base).lowerUnnamedConst(tv, decl_index),
.spirv => unreachable,
.c => unreachable,
.wasm => unreachable,
.wasm => return @fieldParentPtr(Wasm, "base", base).lowerUnnamedConst(tv, decl_index),
.nvptx => unreachable,
// zig fmt: on
}
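
A hedged sketch of a call site, from a backend lowering an anonymous constant (the `TypedValue` literal and the surrounding names are illustrative):

const sym_index = try self.bin_file.lowerUnnamedConst(
    .{ .ty = elem_ty, .val = elem_val },
    self.decl_index, // the Decl whose body needed the constant
);
// sym_index names the constant in the read-only section; the backend can
// now emit a relocation against it.
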
@ -435,19 +436,20 @@ pub const File = struct {
/// May be called before or after updateDeclExports but must be called
/// after allocateDeclIndexes for any given Decl.
pub fn updateDecl(base: *File, module: *Module, decl: *Module.Decl) UpdateDeclError!void {
pub fn updateDecl(base: *File, module: *Module, decl_index: Module.Decl.Index) UpdateDeclError!void {
const decl = module.declPtr(decl_index);
log.debug("updateDecl {*} ({s}), type={}", .{ decl, decl.name, decl.ty.fmtDebug() });
assert(decl.has_tv);
switch (base.tag) {
// zig fmt: off
.coff => return @fieldParentPtr(Coff, "base", base).updateDecl(module, decl),
.elf => return @fieldParentPtr(Elf, "base", base).updateDecl(module, decl),
.macho => return @fieldParentPtr(MachO, "base", base).updateDecl(module, decl),
.c => return @fieldParentPtr(C, "base", base).updateDecl(module, decl),
.wasm => return @fieldParentPtr(Wasm, "base", base).updateDecl(module, decl),
.spirv => return @fieldParentPtr(SpirV, "base", base).updateDecl(module, decl),
.plan9 => return @fieldParentPtr(Plan9, "base", base).updateDecl(module, decl),
.nvptx => return @fieldParentPtr(NvPtx, "base", base).updateDecl(module, decl),
.coff => return @fieldParentPtr(Coff, "base", base).updateDecl(module, decl_index),
.elf => return @fieldParentPtr(Elf, "base", base).updateDecl(module, decl_index),
.macho => return @fieldParentPtr(MachO, "base", base).updateDecl(module, decl_index),
.c => return @fieldParentPtr(C, "base", base).updateDecl(module, decl_index),
.wasm => return @fieldParentPtr(Wasm, "base", base).updateDecl(module, decl_index),
.spirv => return @fieldParentPtr(SpirV, "base", base).updateDecl(module, decl_index),
.plan9 => return @fieldParentPtr(Plan9, "base", base).updateDecl(module, decl_index),
.nvptx => return @fieldParentPtr(NvPtx, "base", base).updateDecl(module, decl_index),
// zig fmt: on
}
}
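
The contract spelled out in the doc comments, as a hypothetical driver sequence:

try bin_file.allocateDeclIndexes(decl_index); // always first for a given Decl
try bin_file.updateDecl(module, decl_index); // before or after updateDeclExports
try bin_file.updateDeclExports(module, decl_index, exports);
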
@ -455,8 +457,9 @@ pub const File = struct {
/// May be called before or after updateDeclExports but must be called
/// after allocateDeclIndexes for any given Decl.
pub fn updateFunc(base: *File, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) UpdateDeclError!void {
const owner_decl = module.declPtr(func.owner_decl);
log.debug("updateFunc {*} ({s}), type={}", .{
func.owner_decl, func.owner_decl.name, func.owner_decl.ty.fmtDebug(),
owner_decl, owner_decl.name, owner_decl.ty.fmtDebug(),
});
switch (base.tag) {
// zig fmt: off
@ -492,19 +495,20 @@ pub const File = struct {
/// TODO we're transitioning to deleting this function and instead having
/// each linker backend notice the first time updateDecl or updateFunc is called, or
/// a callee referenced from AIR.
pub fn allocateDeclIndexes(base: *File, decl: *Module.Decl) error{OutOfMemory}!void {
pub fn allocateDeclIndexes(base: *File, decl_index: Module.Decl.Index) error{OutOfMemory}!void {
const decl = base.options.module.?.declPtr(decl_index);
log.debug("allocateDeclIndexes {*} ({s})", .{ decl, decl.name });
switch (base.tag) {
.coff => return @fieldParentPtr(Coff, "base", base).allocateDeclIndexes(decl),
.elf => return @fieldParentPtr(Elf, "base", base).allocateDeclIndexes(decl),
.macho => return @fieldParentPtr(MachO, "base", base).allocateDeclIndexes(decl) catch |err| switch (err) {
.coff => return @fieldParentPtr(Coff, "base", base).allocateDeclIndexes(decl_index),
.elf => return @fieldParentPtr(Elf, "base", base).allocateDeclIndexes(decl_index),
.macho => return @fieldParentPtr(MachO, "base", base).allocateDeclIndexes(decl_index) catch |err| switch (err) {
// remap this error code because we are transitioning away from
// `allocateDeclIndexes`.
error.Overflow => return error.OutOfMemory,
error.OutOfMemory => return error.OutOfMemory,
},
.wasm => return @fieldParentPtr(Wasm, "base", base).allocateDeclIndexes(decl),
.plan9 => return @fieldParentPtr(Plan9, "base", base).allocateDeclIndexes(decl),
.wasm => return @fieldParentPtr(Wasm, "base", base).allocateDeclIndexes(decl_index),
.plan9 => return @fieldParentPtr(Plan9, "base", base).allocateDeclIndexes(decl_index),
.c, .spirv, .nvptx => {},
}
}
@ -621,17 +625,16 @@ pub const File = struct {
}
/// Called when a Decl is deleted from the Module.
pub fn freeDecl(base: *File, decl: *Module.Decl) void {
log.debug("freeDecl {*} ({s})", .{ decl, decl.name });
pub fn freeDecl(base: *File, decl_index: Module.Decl.Index) void {
switch (base.tag) {
.coff => @fieldParentPtr(Coff, "base", base).freeDecl(decl),
.elf => @fieldParentPtr(Elf, "base", base).freeDecl(decl),
.macho => @fieldParentPtr(MachO, "base", base).freeDecl(decl),
.c => @fieldParentPtr(C, "base", base).freeDecl(decl),
.wasm => @fieldParentPtr(Wasm, "base", base).freeDecl(decl),
.spirv => @fieldParentPtr(SpirV, "base", base).freeDecl(decl),
.plan9 => @fieldParentPtr(Plan9, "base", base).freeDecl(decl),
.nvptx => @fieldParentPtr(NvPtx, "base", base).freeDecl(decl),
.coff => @fieldParentPtr(Coff, "base", base).freeDecl(decl_index),
.elf => @fieldParentPtr(Elf, "base", base).freeDecl(decl_index),
.macho => @fieldParentPtr(MachO, "base", base).freeDecl(decl_index),
.c => @fieldParentPtr(C, "base", base).freeDecl(decl_index),
.wasm => @fieldParentPtr(Wasm, "base", base).freeDecl(decl_index),
.spirv => @fieldParentPtr(SpirV, "base", base).freeDecl(decl_index),
.plan9 => @fieldParentPtr(Plan9, "base", base).freeDecl(decl_index),
.nvptx => @fieldParentPtr(NvPtx, "base", base).freeDecl(decl_index),
}
}
@ -656,20 +659,21 @@ pub const File = struct {
pub fn updateDeclExports(
base: *File,
module: *Module,
decl: *Module.Decl,
decl_index: Module.Decl.Index,
exports: []const *Module.Export,
) UpdateDeclExportsError!void {
const decl = module.declPtr(decl_index);
log.debug("updateDeclExports {*} ({s})", .{ decl, decl.name });
assert(decl.has_tv);
switch (base.tag) {
.coff => return @fieldParentPtr(Coff, "base", base).updateDeclExports(module, decl, exports),
.elf => return @fieldParentPtr(Elf, "base", base).updateDeclExports(module, decl, exports),
.macho => return @fieldParentPtr(MachO, "base", base).updateDeclExports(module, decl, exports),
.c => return @fieldParentPtr(C, "base", base).updateDeclExports(module, decl, exports),
.wasm => return @fieldParentPtr(Wasm, "base", base).updateDeclExports(module, decl, exports),
.spirv => return @fieldParentPtr(SpirV, "base", base).updateDeclExports(module, decl, exports),
.plan9 => return @fieldParentPtr(Plan9, "base", base).updateDeclExports(module, decl, exports),
.nvptx => return @fieldParentPtr(NvPtx, "base", base).updateDeclExports(module, decl, exports),
.coff => return @fieldParentPtr(Coff, "base", base).updateDeclExports(module, decl_index, exports),
.elf => return @fieldParentPtr(Elf, "base", base).updateDeclExports(module, decl_index, exports),
.macho => return @fieldParentPtr(MachO, "base", base).updateDeclExports(module, decl_index, exports),
.c => return @fieldParentPtr(C, "base", base).updateDeclExports(module, decl_index, exports),
.wasm => return @fieldParentPtr(Wasm, "base", base).updateDeclExports(module, decl_index, exports),
.spirv => return @fieldParentPtr(SpirV, "base", base).updateDeclExports(module, decl_index, exports),
.plan9 => return @fieldParentPtr(Plan9, "base", base).updateDeclExports(module, decl_index, exports),
.nvptx => return @fieldParentPtr(NvPtx, "base", base).updateDeclExports(module, decl_index, exports),
}
}
@ -683,14 +687,14 @@ pub const File = struct {
/// The linker is passed information about the containing atom, `parent_atom_index`, and the offset within its
/// memory buffer, `offset`, so that it can make a note of potential relocation sites, in case the
/// `Decl`'s address is not yet resolved, or the containing atom gets moved in virtual memory.
pub fn getDeclVAddr(base: *File, decl: *const Module.Decl, reloc_info: RelocInfo) !u64 {
pub fn getDeclVAddr(base: *File, decl_index: Module.Decl.Index, reloc_info: RelocInfo) !u64 {
switch (base.tag) {
.coff => return @fieldParentPtr(Coff, "base", base).getDeclVAddr(decl, reloc_info),
.elf => return @fieldParentPtr(Elf, "base", base).getDeclVAddr(decl, reloc_info),
.macho => return @fieldParentPtr(MachO, "base", base).getDeclVAddr(decl, reloc_info),
.plan9 => return @fieldParentPtr(Plan9, "base", base).getDeclVAddr(decl, reloc_info),
.coff => return @fieldParentPtr(Coff, "base", base).getDeclVAddr(decl_index, reloc_info),
.elf => return @fieldParentPtr(Elf, "base", base).getDeclVAddr(decl_index, reloc_info),
.macho => return @fieldParentPtr(MachO, "base", base).getDeclVAddr(decl_index, reloc_info),
.plan9 => return @fieldParentPtr(Plan9, "base", base).getDeclVAddr(decl_index, reloc_info),
.c => unreachable,
.wasm => return @fieldParentPtr(Wasm, "base", base).getDeclVAddr(decl, reloc_info),
.wasm => return @fieldParentPtr(Wasm, "base", base).getDeclVAddr(decl_index, reloc_info),
.spirv => unreachable,
.nvptx => unreachable,
}
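
A sketch of the call from a backend that needs another Decl's address (the `RelocInfo` field names are assumed from the doc comment above; an `addend` field is also assumed):

const vaddr = try self.bin_file.getDeclVAddr(target_decl_index, .{
    .parent_atom_index = atom.local_sym_index,
    .offset = code.items.len, // where the address will be written
    .addend = 0,
});
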


@ -21,7 +21,7 @@ base: link.File,
/// This linker backend does not try to incrementally link output C source code.
/// Instead, it tracks all declarations in this table, and iterates over it
/// in the flush function, stitching pre-rendered pieces of C code together.
decl_table: std.AutoArrayHashMapUnmanaged(*const Module.Decl, DeclBlock) = .{},
decl_table: std.AutoArrayHashMapUnmanaged(Module.Decl.Index, DeclBlock) = .{},
/// Stores Type/Value data for `typedefs` to reference.
/// Accumulates allocations and then there is a periodic garbage collection after flush().
arena: std.heap.ArenaAllocator,
@ -87,9 +87,9 @@ pub fn deinit(self: *C) void {
self.arena.deinit();
}
pub fn freeDecl(self: *C, decl: *Module.Decl) void {
pub fn freeDecl(self: *C, decl_index: Module.Decl.Index) void {
const gpa = self.base.allocator;
if (self.decl_table.fetchSwapRemove(decl)) |kv| {
if (self.decl_table.fetchSwapRemove(decl_index)) |kv| {
var decl_block = kv.value;
decl_block.deinit(gpa);
}
@ -99,8 +99,8 @@ pub fn updateFunc(self: *C, module: *Module, func: *Module.Fn, air: Air, livenes
const tracy = trace(@src());
defer tracy.end();
const decl = func.owner_decl;
const gop = try self.decl_table.getOrPut(self.base.allocator, decl);
const decl_index = func.owner_decl;
const gop = try self.decl_table.getOrPut(self.base.allocator, decl_index);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
@ -126,9 +126,10 @@ pub fn updateFunc(self: *C, module: *Module, func: *Module.Fn, air: Air, livenes
.gpa = module.gpa,
.module = module,
.error_msg = null,
.decl = decl,
.decl_index = decl_index,
.decl = module.declPtr(decl_index),
.fwd_decl = fwd_decl.toManaged(module.gpa),
.typedefs = typedefs.promoteContext(module.gpa, .{ .target = module.getTarget() }),
.typedefs = typedefs.promoteContext(module.gpa, .{ .mod = module }),
.typedefs_arena = self.arena.allocator(),
},
.code = code.toManaged(module.gpa),
@ -150,7 +151,7 @@ pub fn updateFunc(self: *C, module: *Module, func: *Module.Fn, air: Air, livenes
codegen.genFunc(&function) catch |err| switch (err) {
error.AnalysisFail => {
try module.failed_decls.put(module.gpa, decl, function.object.dg.error_msg.?);
try module.failed_decls.put(module.gpa, decl_index, function.object.dg.error_msg.?);
return;
},
else => |e| return e,
@ -166,11 +167,11 @@ pub fn updateFunc(self: *C, module: *Module, func: *Module.Fn, air: Air, livenes
code.shrinkAndFree(module.gpa, code.items.len);
}
pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void {
pub fn updateDecl(self: *C, module: *Module, decl_index: Module.Decl.Index) !void {
const tracy = trace(@src());
defer tracy.end();
const gop = try self.decl_table.getOrPut(self.base.allocator, decl);
const gop = try self.decl_table.getOrPut(self.base.allocator, decl_index);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
@ -186,14 +187,17 @@ pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void {
typedefs.clearRetainingCapacity();
code.shrinkRetainingCapacity(0);
const decl = module.declPtr(decl_index);
var object: codegen.Object = .{
.dg = .{
.gpa = module.gpa,
.module = module,
.error_msg = null,
.decl_index = decl_index,
.decl = decl,
.fwd_decl = fwd_decl.toManaged(module.gpa),
.typedefs = typedefs.promoteContext(module.gpa, .{ .target = module.getTarget() }),
.typedefs = typedefs.promoteContext(module.gpa, .{ .mod = module }),
.typedefs_arena = self.arena.allocator(),
},
.code = code.toManaged(module.gpa),
@ -211,7 +215,7 @@ pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void {
codegen.genDecl(&object) catch |err| switch (err) {
error.AnalysisFail => {
try module.failed_decls.put(module.gpa, decl, object.dg.error_msg.?);
try module.failed_decls.put(module.gpa, decl_index, object.dg.error_msg.?);
return;
},
else => |e| return e,
@ -287,14 +291,14 @@ pub fn flushModule(self: *C, comp: *Compilation, prog_node: *std.Progress.Node)
const decl_keys = self.decl_table.keys();
const decl_values = self.decl_table.values();
for (decl_keys) |decl| {
assert(decl.has_tv);
f.remaining_decls.putAssumeCapacityNoClobber(decl, {});
for (decl_keys) |decl_index| {
assert(module.declPtr(decl_index).has_tv);
f.remaining_decls.putAssumeCapacityNoClobber(decl_index, {});
}
while (f.remaining_decls.popOrNull()) |kv| {
const decl = kv.key;
try flushDecl(self, &f, decl);
const decl_index = kv.key;
try flushDecl(self, &f, decl_index);
}
f.all_buffers.items[err_typedef_index] = .{
@ -305,7 +309,8 @@ pub fn flushModule(self: *C, comp: *Compilation, prog_node: *std.Progress.Node)
// Now the function bodies.
try f.all_buffers.ensureUnusedCapacity(gpa, f.fn_count);
for (decl_keys) |decl, i| {
for (decl_keys) |decl_index, i| {
const decl = module.declPtr(decl_index);
if (decl.getFunction() != null) {
const decl_block = &decl_values[i];
const buf = decl_block.code.items;
@ -325,7 +330,7 @@ pub fn flushModule(self: *C, comp: *Compilation, prog_node: *std.Progress.Node)
}
const Flush = struct {
remaining_decls: std.AutoArrayHashMapUnmanaged(*const Module.Decl, void) = .{},
remaining_decls: std.AutoArrayHashMapUnmanaged(Module.Decl.Index, void) = .{},
typedefs: Typedefs = .{},
err_typedef_buf: std.ArrayListUnmanaged(u8) = .{},
/// We collect a list of buffers to write, and write them all at once with pwritev 😎
@ -354,7 +359,9 @@ const FlushDeclError = error{
};
/// Assumes `decl` was in the `remaining_decls` set, and has already been removed.
fn flushDecl(self: *C, f: *Flush, decl: *const Module.Decl) FlushDeclError!void {
fn flushDecl(self: *C, f: *Flush, decl_index: Module.Decl.Index) FlushDeclError!void {
const module = self.base.options.module.?;
const decl = module.declPtr(decl_index);
// Before flushing any particular Decl we must ensure its
// dependencies are already flushed, so that the order in the .c
// file comes out correctly.
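
The dependency walk itself is collapsed out of this hunk; it amounts to something like this sketch, using the index-keyed set introduced above:

for (decl.dependencies.keys()) |dep_index| {
    if (f.remaining_decls.swapRemove(dep_index)) {
        // Recurse so the dependency's C code lands earlier in the file.
        try flushDecl(self, f, dep_index);
    }
}
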
@ -364,15 +371,17 @@ fn flushDecl(self: *C, f: *Flush, decl: *const Module.Decl) FlushDeclError!void
}
}
const decl_block = self.decl_table.getPtr(decl).?;
const decl_block = self.decl_table.getPtr(decl_index).?;
const gpa = self.base.allocator;
if (decl_block.typedefs.count() != 0) {
try f.typedefs.ensureUnusedCapacity(gpa, @intCast(u32, decl_block.typedefs.count()));
try f.typedefs.ensureUnusedCapacityContext(gpa, @intCast(u32, decl_block.typedefs.count()), .{
.mod = module,
});
var it = decl_block.typedefs.iterator();
while (it.next()) |new| {
const gop = f.typedefs.getOrPutAssumeCapacityContext(new.key_ptr.*, .{
.target = self.base.options.target,
.mod = module,
});
if (!gop.found_existing) {
try f.err_typedef_buf.appendSlice(gpa, new.value_ptr.rendered);
@ -417,8 +426,8 @@ pub fn flushEmitH(module: *Module) !void {
.iov_len = zig_h.len,
});
for (emit_h.decl_table.keys()) |decl| {
const decl_emit_h = decl.getEmitH(module);
for (emit_h.decl_table.keys()) |decl_index| {
const decl_emit_h = emit_h.declPtr(decl_index);
const buf = decl_emit_h.fwd_decl.items;
all_buffers.appendAssumeCapacity(.{
.iov_base = buf.ptr,
@ -442,11 +451,11 @@ pub fn flushEmitH(module: *Module) !void {
pub fn updateDeclExports(
self: *C,
module: *Module,
decl: *Module.Decl,
decl_index: Module.Decl.Index,
exports: []const *Module.Export,
) !void {
_ = exports;
_ = decl;
_ = decl_index;
_ = module;
_ = self;
}


@ -418,11 +418,12 @@ pub fn createEmpty(gpa: Allocator, options: link.Options) !*Coff {
return self;
}
pub fn allocateDeclIndexes(self: *Coff, decl: *Module.Decl) !void {
pub fn allocateDeclIndexes(self: *Coff, decl_index: Module.Decl.Index) !void {
if (self.llvm_object) |_| return;
try self.offset_table.ensureUnusedCapacity(self.base.allocator, 1);
const decl = self.base.options.module.?.declPtr(decl_index);
if (self.offset_table_free_list.popOrNull()) |i| {
decl.link.coff.offset_table_index = i;
} else {
@ -674,7 +675,8 @@ pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, live
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
const decl = func.owner_decl;
const decl_index = func.owner_decl;
const decl = module.declPtr(decl_index);
const res = try codegen.generateFunction(
&self.base,
decl.srcLoc(),
@ -688,7 +690,7 @@ pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, live
.appended => code_buffer.items,
.fail => |em| {
decl.analysis = .codegen_failure;
try module.failed_decls.put(module.gpa, decl, em);
try module.failed_decls.put(module.gpa, decl_index, em);
return;
},
};
@ -696,24 +698,26 @@ pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, live
return self.finishUpdateDecl(module, func.owner_decl, code);
}
pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl: *Module.Decl) !u32 {
pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.Index) !u32 {
_ = self;
_ = tv;
_ = decl;
_ = decl_index;
log.debug("TODO lowerUnnamedConst for Coff", .{});
return error.AnalysisFail;
}
pub fn updateDecl(self: *Coff, module: *Module, decl: *Module.Decl) !void {
pub fn updateDecl(self: *Coff, module: *Module, decl_index: Module.Decl.Index) !void {
if (build_options.skip_non_native and builtin.object_format != .coff) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
if (build_options.have_llvm) {
if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl);
if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl_index);
}
const tracy = trace(@src());
defer tracy.end();
const decl = module.declPtr(decl_index);
if (decl.val.tag() == .extern_fn) {
return; // TODO Should we do more when front-end analyzed extern decl?
}
@ -735,15 +739,16 @@ pub fn updateDecl(self: *Coff, module: *Module, decl: *Module.Decl) !void {
.appended => code_buffer.items,
.fail => |em| {
decl.analysis = .codegen_failure;
try module.failed_decls.put(module.gpa, decl, em);
try module.failed_decls.put(module.gpa, decl_index, em);
return;
},
};
return self.finishUpdateDecl(module, decl, code);
return self.finishUpdateDecl(module, decl_index, code);
}
fn finishUpdateDecl(self: *Coff, module: *Module, decl: *Module.Decl, code: []const u8) !void {
fn finishUpdateDecl(self: *Coff, module: *Module, decl_index: Module.Decl.Index, code: []const u8) !void {
const decl = module.declPtr(decl_index);
const required_alignment = decl.ty.abiAlignment(self.base.options.target);
const curr_size = decl.link.coff.size;
if (curr_size != 0) {
@ -778,15 +783,18 @@ fn finishUpdateDecl(self: *Coff, module: *Module, decl: *Module.Decl, code: []co
try self.base.file.?.pwriteAll(code, self.section_data_offset + self.offset_table_size + decl.link.coff.text_offset);
// Since we updated the vaddr and the size, each corresponding export symbol also needs to be updated.
const decl_exports = module.decl_exports.get(decl) orelse &[0]*Module.Export{};
return self.updateDeclExports(module, decl, decl_exports);
const decl_exports = module.decl_exports.get(decl_index) orelse &[0]*Module.Export{};
return self.updateDeclExports(module, decl_index, decl_exports);
}
pub fn freeDecl(self: *Coff, decl: *Module.Decl) void {
pub fn freeDecl(self: *Coff, decl_index: Module.Decl.Index) void {
if (build_options.have_llvm) {
if (self.llvm_object) |llvm_object| return llvm_object.freeDecl(decl);
if (self.llvm_object) |llvm_object| return llvm_object.freeDecl(decl_index);
}
const mod = self.base.options.module.?;
const decl = mod.declPtr(decl_index);
// Appending to free lists is allowed to fail because the free lists are heuristics based anyway.
self.freeTextBlock(&decl.link.coff);
self.offset_table_free_list.append(self.base.allocator, decl.link.coff.offset_table_index) catch {};
@ -795,16 +803,17 @@ pub fn freeDecl(self: *Coff, decl: *Module.Decl) void {
pub fn updateDeclExports(
self: *Coff,
module: *Module,
decl: *Module.Decl,
decl_index: Module.Decl.Index,
exports: []const *Module.Export,
) !void {
if (build_options.skip_non_native and builtin.object_format != .coff) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
if (build_options.have_llvm) {
if (self.llvm_object) |llvm_object| return llvm_object.updateDeclExports(module, decl, exports);
if (self.llvm_object) |llvm_object| return llvm_object.updateDeclExports(module, decl_index, exports);
}
const decl = module.declPtr(decl_index);
for (exports) |exp| {
if (exp.options.section) |section_name| {
if (!mem.eql(u8, section_name, ".text")) {
@ -1474,8 +1483,14 @@ fn findLib(self: *Coff, arena: Allocator, name: []const u8) !?[]const u8 {
return null;
}
pub fn getDeclVAddr(self: *Coff, decl: *const Module.Decl, reloc_info: link.File.RelocInfo) !u64 {
pub fn getDeclVAddr(
self: *Coff,
decl_index: Module.Decl.Index,
reloc_info: link.File.RelocInfo,
) !u64 {
_ = reloc_info;
const mod = self.base.options.module.?;
const decl = mod.declPtr(decl_index);
assert(self.llvm_object == null);
return self.text_section_virtual_address + decl.link.coff.text_offset;
}


@ -67,7 +67,7 @@ pub const Atom = struct {
/// Decl's inner Atom is assigned an offset within the DWARF section.
pub const DeclState = struct {
gpa: Allocator,
target: std.Target,
mod: *Module,
dbg_line: std.ArrayList(u8),
dbg_info: std.ArrayList(u8),
abbrev_type_arena: std.heap.ArenaAllocator,
@ -81,10 +81,10 @@ pub const DeclState = struct {
abbrev_relocs: std.ArrayListUnmanaged(AbbrevRelocation) = .{},
exprloc_relocs: std.ArrayListUnmanaged(ExprlocRelocation) = .{},
fn init(gpa: Allocator, target: std.Target) DeclState {
fn init(gpa: Allocator, mod: *Module) DeclState {
return .{
.gpa = gpa,
.target = target,
.mod = mod,
.dbg_line = std.ArrayList(u8).init(gpa),
.dbg_info = std.ArrayList(u8).init(gpa),
.abbrev_type_arena = std.heap.ArenaAllocator.init(gpa),
@ -118,7 +118,7 @@ pub const DeclState = struct {
addend: ?u32,
) !void {
const resolv = self.abbrev_resolver.getContext(ty, .{
.target = self.target,
.mod = self.mod,
}) orelse blk: {
const sym_index = @intCast(u32, self.abbrev_table.items.len);
try self.abbrev_table.append(self.gpa, .{
@ -128,10 +128,10 @@ pub const DeclState = struct {
});
log.debug("@{d}: {}", .{ sym_index, ty.fmtDebug() });
try self.abbrev_resolver.putNoClobberContext(self.gpa, ty, sym_index, .{
.target = self.target,
.mod = self.mod,
});
break :blk self.abbrev_resolver.getContext(ty, .{
.target = self.target,
.mod = self.mod,
}).?;
};
const add: u32 = addend orelse 0;
@ -153,8 +153,8 @@ pub const DeclState = struct {
) error{OutOfMemory}!void {
const arena = self.abbrev_type_arena.allocator();
const dbg_info_buffer = &self.dbg_info;
const target = self.target;
const target_endian = self.target.cpu.arch.endian();
const target = module.getTarget();
const target_endian = target.cpu.arch.endian();
switch (ty.zigTypeTag()) {
.NoReturn => unreachable,
@ -181,7 +181,7 @@ pub const DeclState = struct {
// DW.AT.byte_size, DW.FORM.data1
dbg_info_buffer.appendAssumeCapacity(@intCast(u8, ty.abiSize(target)));
// DW.AT.name, DW.FORM.string
try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(target)});
try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)});
},
.Optional => {
if (ty.isPtrLikeOptional()) {
@ -192,7 +192,7 @@ pub const DeclState = struct {
// DW.AT.byte_size, DW.FORM.data1
dbg_info_buffer.appendAssumeCapacity(@intCast(u8, ty.abiSize(target)));
// DW.AT.name, DW.FORM.string
try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(target)});
try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)});
} else {
// Non-pointer optionals are structs: struct { .maybe = *, .val = * }
var buf = try arena.create(Type.Payload.ElemType);
@ -203,7 +203,7 @@ pub const DeclState = struct {
const abi_size = ty.abiSize(target);
try leb128.writeULEB128(dbg_info_buffer.writer(), abi_size);
// DW.AT.name, DW.FORM.string
try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(target)});
try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)});
// DW.AT.member
try dbg_info_buffer.ensureUnusedCapacity(7);
dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.struct_member));
@ -242,7 +242,7 @@ pub const DeclState = struct {
// DW.AT.byte_size, DW.FORM.sdata
dbg_info_buffer.appendAssumeCapacity(@sizeOf(usize) * 2);
// DW.AT.name, DW.FORM.string
try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(target)});
try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)});
// DW.AT.member
try dbg_info_buffer.ensureUnusedCapacity(5);
dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.struct_member));
@ -285,7 +285,7 @@ pub const DeclState = struct {
// DW.AT.array_type
try dbg_info_buffer.append(@enumToInt(AbbrevKind.array_type));
// DW.AT.name, DW.FORM.string
try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(target)});
try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)});
// DW.AT.type, DW.FORM.ref4
var index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
@ -312,7 +312,7 @@ pub const DeclState = struct {
switch (ty.tag()) {
.tuple, .anon_struct => {
// DW.AT.name, DW.FORM.string
try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(target)});
try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)});
const fields = ty.tupleFields();
for (fields.types) |field, field_index| {
@ -331,7 +331,7 @@ pub const DeclState = struct {
},
else => {
// DW.AT.name, DW.FORM.string
const struct_name = try ty.nameAllocArena(arena, target);
const struct_name = try ty.nameAllocArena(arena, module);
try dbg_info_buffer.ensureUnusedCapacity(struct_name.len + 1);
dbg_info_buffer.appendSliceAssumeCapacity(struct_name);
dbg_info_buffer.appendAssumeCapacity(0);
@ -372,7 +372,7 @@ pub const DeclState = struct {
const abi_size = ty.abiSize(target);
try leb128.writeULEB128(dbg_info_buffer.writer(), abi_size);
// DW.AT.name, DW.FORM.string
const enum_name = try ty.nameAllocArena(arena, target);
const enum_name = try ty.nameAllocArena(arena, module);
try dbg_info_buffer.ensureUnusedCapacity(enum_name.len + 1);
dbg_info_buffer.appendSliceAssumeCapacity(enum_name);
dbg_info_buffer.appendAssumeCapacity(0);
@ -410,7 +410,7 @@ pub const DeclState = struct {
const payload_offset = if (layout.tag_align >= layout.payload_align) layout.tag_size else 0;
const tag_offset = if (layout.tag_align >= layout.payload_align) 0 else layout.payload_size;
const is_tagged = layout.tag_size > 0;
const union_name = try ty.nameAllocArena(arena, target);
const union_name = try ty.nameAllocArena(arena, module);
// TODO this is temporary to match current state of unions in Zig - we don't yet have
// safety checks implemented meaning the implicit tag is not yet stored and generated
@ -491,7 +491,7 @@ pub const DeclState = struct {
self.abbrev_type_arena.allocator(),
module,
ty,
self.target,
target,
&self.dbg_info,
);
},
@ -507,7 +507,7 @@ pub const DeclState = struct {
// DW.AT.byte_size, DW.FORM.sdata
try leb128.writeULEB128(dbg_info_buffer.writer(), abi_size);
// DW.AT.name, DW.FORM.string
const name = try ty.nameAllocArena(arena, target);
const name = try ty.nameAllocArena(arena, module);
try dbg_info_buffer.writer().print("{s}\x00", .{name});
// DW.AT.member
@ -654,17 +654,17 @@ pub fn deinit(self: *Dwarf) void {
/// Initializes Decl's state and its matching output buffers.
/// Call this before `commitDeclState`.
pub fn initDeclState(self: *Dwarf, decl: *Module.Decl) !DeclState {
pub fn initDeclState(self: *Dwarf, mod: *Module, decl: *Module.Decl) !DeclState {
const tracy = trace(@src());
defer tracy.end();
const decl_name = try decl.getFullyQualifiedName(self.allocator);
const decl_name = try decl.getFullyQualifiedName(mod);
defer self.allocator.free(decl_name);
log.debug("initDeclState {s}{*}", .{ decl_name, decl });
const gpa = self.allocator;
var decl_state = DeclState.init(gpa, self.target);
var decl_state = DeclState.init(gpa, mod);
errdefer decl_state.deinit();
const dbg_line_buffer = &decl_state.dbg_line;
const dbg_info_buffer = &decl_state.dbg_info;
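
Usage follows the doc comment: init, emit, commit. A sketch from a linker backend's perspective, with `dw` obtained from `if (self.dwarf) |*dw|` as in the hunks below (the full `commitDeclState` argument list is longer than shown in this diff):

var decl_state = try dw.initDeclState(mod, decl);
defer decl_state.deinit();
// ...fill decl_state.dbg_line and decl_state.dbg_info for this Decl...
// then: try dw.commitDeclState(&self.base, ...); — sketch only.
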
@ -2133,7 +2133,7 @@ fn addDbgInfoErrorSet(
const abi_size = ty.abiSize(target);
try leb128.writeULEB128(dbg_info_buffer.writer(), abi_size);
// DW.AT.name, DW.FORM.string
const name = try ty.nameAllocArena(arena, target);
const name = try ty.nameAllocArena(arena, module);
try dbg_info_buffer.writer().print("{s}\x00", .{name});
// DW.AT.enumerator


@ -134,7 +134,7 @@ atom_free_lists: std.AutoHashMapUnmanaged(u16, std.ArrayListUnmanaged(*TextBlock
/// We store them here so that we can properly dispose of any allocated
/// memory within the atom in the incremental linker.
/// TODO consolidate this.
decls: std.AutoHashMapUnmanaged(*Module.Decl, ?u16) = .{},
decls: std.AutoHashMapUnmanaged(Module.Decl.Index, ?u16) = .{},
/// List of atoms that are owned directly by the linker.
/// Currently these are only atoms that are the result of linking
@ -178,7 +178,7 @@ const Reloc = struct {
};
const RelocTable = std.AutoHashMapUnmanaged(*TextBlock, std.ArrayListUnmanaged(Reloc));
const UnnamedConstTable = std.AutoHashMapUnmanaged(*Module.Decl, std.ArrayListUnmanaged(*TextBlock));
const UnnamedConstTable = std.AutoHashMapUnmanaged(Module.Decl.Index, std.ArrayListUnmanaged(*TextBlock));
/// When allocating, the ideal_capacity is calculated by
/// actual_capacity + (actual_capacity / ideal_factor)
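
With an assumed `ideal_factor` of 3 (the constant itself is not shown in this diff), the padding rule works out to:

fn idealCapacity(actual_capacity: u64) u64 {
    const ideal_factor = 3; // assumed value; see the constant in the linker source
    return actual_capacity + actual_capacity / ideal_factor;
}
// e.g. idealCapacity(0x3000) == 0x4000: one third of slack for in-place growth.
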
@ -389,7 +389,10 @@ pub fn deinit(self: *Elf) void {
}
}
pub fn getDeclVAddr(self: *Elf, decl: *const Module.Decl, reloc_info: File.RelocInfo) !u64 {
pub fn getDeclVAddr(self: *Elf, decl_index: Module.Decl.Index, reloc_info: File.RelocInfo) !u64 {
const mod = self.base.options.module.?;
const decl = mod.declPtr(decl_index);
assert(self.llvm_object == null);
assert(decl.link.elf.local_sym_index != 0);
@ -2189,15 +2192,17 @@ fn allocateLocalSymbol(self: *Elf) !u32 {
return index;
}
pub fn allocateDeclIndexes(self: *Elf, decl: *Module.Decl) !void {
pub fn allocateDeclIndexes(self: *Elf, decl_index: Module.Decl.Index) !void {
if (self.llvm_object) |_| return;
const mod = self.base.options.module.?;
const decl = mod.declPtr(decl_index);
if (decl.link.elf.local_sym_index != 0) return;
try self.offset_table.ensureUnusedCapacity(self.base.allocator, 1);
try self.decls.putNoClobber(self.base.allocator, decl, null);
try self.decls.putNoClobber(self.base.allocator, decl_index, null);
const decl_name = try decl.getFullyQualifiedName(self.base.allocator);
const decl_name = try decl.getFullyQualifiedName(mod);
defer self.base.allocator.free(decl_name);
log.debug("allocating symbol indexes for {s}", .{decl_name});
@ -2214,8 +2219,8 @@ pub fn allocateDeclIndexes(self: *Elf, decl: *Module.Decl) !void {
self.offset_table.items[decl.link.elf.offset_table_index] = 0;
}
fn freeUnnamedConsts(self: *Elf, decl: *Module.Decl) void {
const unnamed_consts = self.unnamed_const_atoms.getPtr(decl) orelse return;
fn freeUnnamedConsts(self: *Elf, decl_index: Module.Decl.Index) void {
const unnamed_consts = self.unnamed_const_atoms.getPtr(decl_index) orelse return;
for (unnamed_consts.items) |atom| {
self.freeTextBlock(atom, self.phdr_load_ro_index.?);
self.local_symbol_free_list.append(self.base.allocator, atom.local_sym_index) catch {};
@ -2225,15 +2230,18 @@ fn freeUnnamedConsts(self: *Elf, decl: *Module.Decl) void {
unnamed_consts.clearAndFree(self.base.allocator);
}
pub fn freeDecl(self: *Elf, decl: *Module.Decl) void {
pub fn freeDecl(self: *Elf, decl_index: Module.Decl.Index) void {
if (build_options.have_llvm) {
if (self.llvm_object) |llvm_object| return llvm_object.freeDecl(decl);
if (self.llvm_object) |llvm_object| return llvm_object.freeDecl(decl_index);
}
const kv = self.decls.fetchRemove(decl);
const mod = self.base.options.module.?;
const decl = mod.declPtr(decl_index);
const kv = self.decls.fetchRemove(decl_index);
if (kv.?.value) |index| {
self.freeTextBlock(&decl.link.elf, index);
self.freeUnnamedConsts(decl);
self.freeUnnamedConsts(decl_index);
}
// Appending to free lists is allowed to fail because the free lists are heuristics based anyway.
@ -2274,14 +2282,17 @@ fn getDeclPhdrIndex(self: *Elf, decl: *Module.Decl) !u16 {
return phdr_index;
}
fn updateDeclCode(self: *Elf, decl: *Module.Decl, code: []const u8, stt_bits: u8) !*elf.Elf64_Sym {
const decl_name = try decl.getFullyQualifiedName(self.base.allocator);
fn updateDeclCode(self: *Elf, decl_index: Module.Decl.Index, code: []const u8, stt_bits: u8) !*elf.Elf64_Sym {
const mod = self.base.options.module.?;
const decl = mod.declPtr(decl_index);
const decl_name = try decl.getFullyQualifiedName(mod);
defer self.base.allocator.free(decl_name);
log.debug("updateDeclCode {s}{*}", .{ decl_name, decl });
const required_alignment = decl.ty.abiAlignment(self.base.options.target);
const decl_ptr = self.decls.getPtr(decl).?;
const decl_ptr = self.decls.getPtr(decl_index).?;
if (decl_ptr.* == null) {
decl_ptr.* = try self.getDeclPhdrIndex(decl);
}
@ -2355,10 +2366,11 @@ pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liven
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
const decl = func.owner_decl;
self.freeUnnamedConsts(decl);
const decl_index = func.owner_decl;
const decl = module.declPtr(decl_index);
self.freeUnnamedConsts(decl_index);
var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(decl) else null;
var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(module, decl) else null;
defer if (decl_state) |*ds| ds.deinit();
const res = if (decl_state) |*ds|
@ -2372,11 +2384,11 @@ pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liven
.appended => code_buffer.items,
.fail => |em| {
decl.analysis = .codegen_failure;
try module.failed_decls.put(module.gpa, decl, em);
try module.failed_decls.put(module.gpa, decl_index, em);
return;
},
};
const local_sym = try self.updateDeclCode(decl, code, elf.STT_FUNC);
const local_sym = try self.updateDeclCode(decl_index, code, elf.STT_FUNC);
if (decl_state) |*ds| {
try self.dwarf.?.commitDeclState(
&self.base,
@ -2389,21 +2401,23 @@ pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liven
}
// Since we updated the vaddr and the size, each corresponding export symbol also needs to be updated.
const decl_exports = module.decl_exports.get(decl) orelse &[0]*Module.Export{};
return self.updateDeclExports(module, decl, decl_exports);
const decl_exports = module.decl_exports.get(decl_index) orelse &[0]*Module.Export{};
return self.updateDeclExports(module, decl_index, decl_exports);
}
pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void {
pub fn updateDecl(self: *Elf, module: *Module, decl_index: Module.Decl.Index) !void {
if (build_options.skip_non_native and builtin.object_format != .elf) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
if (build_options.have_llvm) {
if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl);
if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl_index);
}
const tracy = trace(@src());
defer tracy.end();
const decl = module.declPtr(decl_index);
if (decl.val.tag() == .extern_fn) {
return; // TODO Should we do more when front-end analyzed extern decl?
}
@ -2414,12 +2428,12 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void {
}
}
assert(!self.unnamed_const_atoms.contains(decl));
assert(!self.unnamed_const_atoms.contains(decl_index));
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(decl) else null;
var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(module, decl) else null;
defer if (decl_state) |*ds| ds.deinit();
// TODO implement .debug_info for global variables
@ -2446,12 +2460,12 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void {
.appended => code_buffer.items,
.fail => |em| {
decl.analysis = .codegen_failure;
try module.failed_decls.put(module.gpa, decl, em);
try module.failed_decls.put(module.gpa, decl_index, em);
return;
},
};
const local_sym = try self.updateDeclCode(decl, code, elf.STT_OBJECT);
const local_sym = try self.updateDeclCode(decl_index, code, elf.STT_OBJECT);
if (decl_state) |*ds| {
try self.dwarf.?.commitDeclState(
&self.base,
@ -2464,16 +2478,18 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void {
}
// Since we updated the vaddr and the size, each corresponding export symbol also needs to be updated.
const decl_exports = module.decl_exports.get(decl) orelse &[0]*Module.Export{};
return self.updateDeclExports(module, decl, decl_exports);
const decl_exports = module.decl_exports.get(decl_index) orelse &[0]*Module.Export{};
return self.updateDeclExports(module, decl_index, decl_exports);
}
pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl: *Module.Decl) !u32 {
pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl_index: Module.Decl.Index) !u32 {
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
const module = self.base.options.module.?;
const gop = try self.unnamed_const_atoms.getOrPut(self.base.allocator, decl);
const mod = self.base.options.module.?;
const decl = mod.declPtr(decl_index);
const gop = try self.unnamed_const_atoms.getOrPut(self.base.allocator, decl_index);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
@ -2485,7 +2501,7 @@ pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl: *Module.Decl
try self.managed_atoms.append(self.base.allocator, atom);
const name_str_index = blk: {
const decl_name = try decl.getFullyQualifiedName(self.base.allocator);
const decl_name = try decl.getFullyQualifiedName(mod);
defer self.base.allocator.free(decl_name);
const index = unnamed_consts.items.len;
@ -2510,7 +2526,7 @@ pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl: *Module.Decl
.appended => code_buffer.items,
.fail => |em| {
decl.analysis = .codegen_failure;
try module.failed_decls.put(module.gpa, decl, em);
try mod.failed_decls.put(mod.gpa, decl_index, em);
log.err("{s}", .{em.msg});
return error.AnalysisFail;
},
@ -2547,24 +2563,25 @@ pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl: *Module.Decl
pub fn updateDeclExports(
self: *Elf,
module: *Module,
decl: *Module.Decl,
decl_index: Module.Decl.Index,
exports: []const *Module.Export,
) !void {
if (build_options.skip_non_native and builtin.object_format != .elf) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
if (build_options.have_llvm) {
if (self.llvm_object) |llvm_object| return llvm_object.updateDeclExports(module, decl, exports);
if (self.llvm_object) |llvm_object| return llvm_object.updateDeclExports(module, decl_index, exports);
}
const tracy = trace(@src());
defer tracy.end();
try self.global_symbols.ensureUnusedCapacity(self.base.allocator, exports.len);
const decl = module.declPtr(decl_index);
if (decl.link.elf.local_sym_index == 0) return;
const decl_sym = self.local_symbols.items[decl.link.elf.local_sym_index];
const decl_ptr = self.decls.getPtr(decl).?;
const decl_ptr = self.decls.getPtr(decl_index).?;
if (decl_ptr.* == null) {
decl_ptr.* = try self.getDeclPhdrIndex(decl);
}
@ -2633,12 +2650,11 @@ pub fn updateDeclExports(
}
/// Must be called only after a successful call to `updateDecl`.
pub fn updateDeclLineNumber(self: *Elf, module: *Module, decl: *const Module.Decl) !void {
_ = module;
pub fn updateDeclLineNumber(self: *Elf, mod: *Module, decl: *const Module.Decl) !void {
const tracy = trace(@src());
defer tracy.end();
const decl_name = try decl.getFullyQualifiedName(self.base.allocator);
const decl_name = try decl.getFullyQualifiedName(mod);
defer self.base.allocator.free(decl_name);
log.debug("updateDeclLineNumber {s}{*}", .{ decl_name, decl });


@ -247,14 +247,14 @@ unnamed_const_atoms: UnnamedConstTable = .{},
/// We store them here so that we can properly dispose of any allocated
/// memory within the atom in the incremental linker.
/// TODO consolidate this.
decls: std.AutoArrayHashMapUnmanaged(*Module.Decl, ?MatchingSection) = .{},
decls: std.AutoArrayHashMapUnmanaged(Module.Decl.Index, ?MatchingSection) = .{},
const Entry = struct {
target: Atom.Relocation.Target,
atom: *Atom,
};
const UnnamedConstTable = std.AutoHashMapUnmanaged(*Module.Decl, std.ArrayListUnmanaged(*Atom));
const UnnamedConstTable = std.AutoHashMapUnmanaged(Module.Decl.Index, std.ArrayListUnmanaged(*Atom));
const PendingUpdate = union(enum) {
resolve_undef: u32,
@ -3451,10 +3451,15 @@ pub fn deinit(self: *MachO) void {
}
self.atom_free_lists.deinit(self.base.allocator);
}
for (self.decls.keys()) |decl| {
decl.link.macho.deinit(self.base.allocator);
if (self.base.options.module) |mod| {
for (self.decls.keys()) |decl_index| {
const decl = mod.declPtr(decl_index);
decl.link.macho.deinit(self.base.allocator);
}
self.decls.deinit(self.base.allocator);
} else {
assert(self.decls.count() == 0);
}
self.decls.deinit(self.base.allocator);
{
var it = self.unnamed_const_atoms.valueIterator();
@ -3652,13 +3657,14 @@ pub fn allocateTlvPtrEntry(self: *MachO, target: Atom.Relocation.Target) !u32 {
return index;
}
pub fn allocateDeclIndexes(self: *MachO, decl: *Module.Decl) !void {
pub fn allocateDeclIndexes(self: *MachO, decl_index: Module.Decl.Index) !void {
if (self.llvm_object) |_| return;
const decl = self.base.options.module.?.declPtr(decl_index);
if (decl.link.macho.local_sym_index != 0) return;
decl.link.macho.local_sym_index = try self.allocateLocalSymbol();
try self.atom_by_index_table.putNoClobber(self.base.allocator, decl.link.macho.local_sym_index, &decl.link.macho);
try self.decls.putNoClobber(self.base.allocator, decl, null);
try self.decls.putNoClobber(self.base.allocator, decl_index, null);
const got_target = .{ .local = decl.link.macho.local_sym_index };
const got_index = try self.allocateGotEntry(got_target);
@ -3676,8 +3682,9 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv
const tracy = trace(@src());
defer tracy.end();
const decl = func.owner_decl;
self.freeUnnamedConsts(decl);
const decl_index = func.owner_decl;
const decl = module.declPtr(decl_index);
self.freeUnnamedConsts(decl_index);
// TODO clearing the code and relocs buffer should probably be orchestrated
// in a different, smarter, more automatic way somewhere else, in a more centralised
@ -3690,7 +3697,7 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv
defer code_buffer.deinit();
var decl_state = if (self.d_sym) |*d_sym|
try d_sym.dwarf.initDeclState(decl)
try d_sym.dwarf.initDeclState(module, decl)
else
null;
defer if (decl_state) |*ds| ds.deinit();
@ -3708,12 +3715,12 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv
},
.fail => |em| {
decl.analysis = .codegen_failure;
try module.failed_decls.put(module.gpa, decl, em);
try module.failed_decls.put(module.gpa, decl_index, em);
return;
},
}
const symbol = try self.placeDecl(decl, decl.link.macho.code.items.len);
const symbol = try self.placeDecl(decl_index, decl.link.macho.code.items.len);
if (decl_state) |*ds| {
try self.d_sym.?.dwarf.commitDeclState(
@ -3728,22 +3735,23 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv
// Since we updated the vaddr and the size, each corresponding export symbol also
// needs to be updated.
const decl_exports = module.decl_exports.get(decl) orelse &[0]*Module.Export{};
try self.updateDeclExports(module, decl, decl_exports);
const decl_exports = module.decl_exports.get(decl_index) orelse &[0]*Module.Export{};
try self.updateDeclExports(module, decl_index, decl_exports);
}
pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl: *Module.Decl) !u32 {
pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl_index: Module.Decl.Index) !u32 {
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
const module = self.base.options.module.?;
const gop = try self.unnamed_const_atoms.getOrPut(self.base.allocator, decl);
const gop = try self.unnamed_const_atoms.getOrPut(self.base.allocator, decl_index);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
const unnamed_consts = gop.value_ptr;
const decl_name = try decl.getFullyQualifiedName(self.base.allocator);
const decl = module.declPtr(decl_index);
const decl_name = try decl.getFullyQualifiedName(module);
defer self.base.allocator.free(decl_name);
const name_str_index = blk: {
@ -3769,7 +3777,7 @@ pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl: *Module.De
.appended => code_buffer.items,
.fail => |em| {
decl.analysis = .codegen_failure;
try module.failed_decls.put(module.gpa, decl, em);
try module.failed_decls.put(module.gpa, decl_index, em);
log.err("{s}", .{em.msg});
return error.AnalysisFail;
},
@ -3800,16 +3808,18 @@ pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl: *Module.De
return atom.local_sym_index;
}
pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void {
pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index) !void {
if (build_options.skip_non_native and builtin.object_format != .macho) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
if (build_options.have_llvm) {
if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl);
if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl_index);
}
const tracy = trace(@src());
defer tracy.end();
const decl = module.declPtr(decl_index);
if (decl.val.tag() == .extern_fn) {
return; // TODO Should we do more when front-end analyzed extern decl?
}
@ -3824,7 +3834,7 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void {
defer code_buffer.deinit();
var decl_state: ?Dwarf.DeclState = if (self.d_sym) |*d_sym|
try d_sym.dwarf.initDeclState(decl)
try d_sym.dwarf.initDeclState(module, decl)
else
null;
defer if (decl_state) |*ds| ds.deinit();
@ -3862,12 +3872,12 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void {
},
.fail => |em| {
decl.analysis = .codegen_failure;
try module.failed_decls.put(module.gpa, decl, em);
try module.failed_decls.put(module.gpa, decl_index, em);
return;
},
}
};
const symbol = try self.placeDecl(decl, code.len);
const symbol = try self.placeDecl(decl_index, code.len);
if (decl_state) |*ds| {
try self.d_sym.?.dwarf.commitDeclState(
@ -3882,13 +3892,13 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void {
// Since we updated the vaddr and the size, each corresponding export symbol also
// needs to be updated.
const decl_exports = module.decl_exports.get(decl) orelse &[0]*Module.Export{};
try self.updateDeclExports(module, decl, decl_exports);
const decl_exports = module.decl_exports.get(decl_index) orelse &[0]*Module.Export{};
try self.updateDeclExports(module, decl_index, decl_exports);
}
/// Checks if the value, or any of its embedded values stores a pointer, and thus requires
/// a rebase opcode for the dynamic linker.
fn needsPointerRebase(ty: Type, val: Value, target: std.Target) bool {
fn needsPointerRebase(ty: Type, val: Value, mod: *Module) bool {
if (ty.zigTypeTag() == .Fn) {
return false;
}
@ -3903,8 +3913,8 @@ fn needsPointerRebase(ty: Type, val: Value, target: std.Target) bool {
if (ty.arrayLen() == 0) return false;
const elem_ty = ty.childType();
var elem_value_buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(0, &elem_value_buf);
return needsPointerRebase(elem_ty, elem_val, target);
const elem_val = val.elemValueBuffer(mod, 0, &elem_value_buf);
return needsPointerRebase(elem_ty, elem_val, mod);
},
.Struct => {
const fields = ty.structFields().values();
@ -3912,7 +3922,7 @@ fn needsPointerRebase(ty: Type, val: Value, target: std.Target) bool {
if (val.castTag(.aggregate)) |payload| {
const field_values = payload.data;
for (field_values) |field_val, i| {
if (needsPointerRebase(fields[i].ty, field_val, target)) return true;
if (needsPointerRebase(fields[i].ty, field_val, mod)) return true;
} else return false;
} else return false;
},
@ -3921,18 +3931,18 @@ fn needsPointerRebase(ty: Type, val: Value, target: std.Target) bool {
const sub_val = payload.data;
var buffer: Type.Payload.ElemType = undefined;
const sub_ty = ty.optionalChild(&buffer);
return needsPointerRebase(sub_ty, sub_val, target);
return needsPointerRebase(sub_ty, sub_val, mod);
} else return false;
},
.Union => {
const union_obj = val.cast(Value.Payload.Union).?.data;
const active_field_ty = ty.unionFieldType(union_obj.tag, target);
return needsPointerRebase(active_field_ty, union_obj.val, target);
const active_field_ty = ty.unionFieldType(union_obj.tag, mod);
return needsPointerRebase(active_field_ty, union_obj.val, mod);
},
.ErrorUnion => {
if (val.castTag(.eu_payload)) |payload| {
const payload_ty = ty.errorUnionPayload();
return needsPointerRebase(payload_ty, payload.data, target);
return needsPointerRebase(payload_ty, payload.data, mod);
} else return false;
},
else => return false,
@ -3942,6 +3952,7 @@ fn needsPointerRebase(ty: Type, val: Value, target: std.Target) bool {
fn getMatchingSectionAtom(self: *MachO, atom: *Atom, name: []const u8, ty: Type, val: Value) !MatchingSection {
const code = atom.code.items;
const target = self.base.options.target;
const mod = self.base.options.module.?;
const alignment = ty.abiAlignment(target);
const align_log_2 = math.log2(alignment);
const zig_ty = ty.zigTypeTag();
@ -3969,7 +3980,7 @@ fn getMatchingSectionAtom(self: *MachO, atom: *Atom, name: []const u8, ty: Type,
};
}
if (needsPointerRebase(ty, val, target)) {
if (needsPointerRebase(ty, val, mod)) {
break :blk (try self.getMatchingSection(.{
.segname = makeStaticString("__DATA_CONST"),
.sectname = makeStaticString("__const"),
@ -4025,15 +4036,17 @@ fn getMatchingSectionAtom(self: *MachO, atom: *Atom, name: []const u8, ty: Type,
return match;
}
fn placeDecl(self: *MachO, decl: *Module.Decl, code_len: usize) !*macho.nlist_64 {
fn placeDecl(self: *MachO, decl_index: Module.Decl.Index, code_len: usize) !*macho.nlist_64 {
const module = self.base.options.module.?;
const decl = module.declPtr(decl_index);
const required_alignment = decl.ty.abiAlignment(self.base.options.target);
assert(decl.link.macho.local_sym_index != 0); // Caller forgot to call allocateDeclIndexes()
const symbol = &self.locals.items[decl.link.macho.local_sym_index];
const sym_name = try decl.getFullyQualifiedName(self.base.allocator);
const sym_name = try decl.getFullyQualifiedName(module);
defer self.base.allocator.free(sym_name);
const decl_ptr = self.decls.getPtr(decl).?;
const decl_ptr = self.decls.getPtr(decl_index).?;
if (decl_ptr.* == null) {
decl_ptr.* = try self.getMatchingSectionAtom(&decl.link.macho, sym_name, decl.ty, decl.val);
}
@ -4101,19 +4114,20 @@ pub fn updateDeclLineNumber(self: *MachO, module: *Module, decl: *const Module.D
pub fn updateDeclExports(
self: *MachO,
module: *Module,
decl: *Module.Decl,
decl_index: Module.Decl.Index,
exports: []const *Module.Export,
) !void {
if (build_options.skip_non_native and builtin.object_format != .macho) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
if (build_options.have_llvm) {
if (self.llvm_object) |llvm_object| return llvm_object.updateDeclExports(module, decl, exports);
if (self.llvm_object) |llvm_object| return llvm_object.updateDeclExports(module, decl_index, exports);
}
const tracy = trace(@src());
defer tracy.end();
try self.globals.ensureUnusedCapacity(self.base.allocator, exports.len);
const decl = module.declPtr(decl_index);
if (decl.link.macho.local_sym_index == 0) return;
const decl_sym = &self.locals.items[decl.link.macho.local_sym_index];
@ -4250,9 +4264,8 @@ pub fn deleteExport(self: *MachO, exp: Export) void {
global.n_value = 0;
}
fn freeUnnamedConsts(self: *MachO, decl: *Module.Decl) void {
log.debug("freeUnnamedConsts for decl {*}", .{decl});
const unnamed_consts = self.unnamed_const_atoms.getPtr(decl) orelse return;
fn freeUnnamedConsts(self: *MachO, decl_index: Module.Decl.Index) void {
const unnamed_consts = self.unnamed_const_atoms.getPtr(decl_index) orelse return;
for (unnamed_consts.items) |atom| {
self.freeAtom(atom, .{
.seg = self.text_segment_cmd_index.?,
@ -4267,15 +4280,17 @@ fn freeUnnamedConsts(self: *MachO, decl: *Module.Decl) void {
unnamed_consts.clearAndFree(self.base.allocator);
}
pub fn freeDecl(self: *MachO, decl: *Module.Decl) void {
pub fn freeDecl(self: *MachO, decl_index: Module.Decl.Index) void {
if (build_options.have_llvm) {
if (self.llvm_object) |llvm_object| return llvm_object.freeDecl(decl);
if (self.llvm_object) |llvm_object| return llvm_object.freeDecl(decl_index);
}
const mod = self.base.options.module.?;
const decl = mod.declPtr(decl_index);
log.debug("freeDecl {*}", .{decl});
const kv = self.decls.fetchSwapRemove(decl);
const kv = self.decls.fetchSwapRemove(decl_index);
if (kv.?.value) |match| {
self.freeAtom(&decl.link.macho, match, false);
self.freeUnnamedConsts(decl);
self.freeUnnamedConsts(decl_index);
}
// Appending to free lists is allowed to fail because the free lists are heuristics-based anyway.
if (decl.link.macho.local_sym_index != 0) {
@ -4307,7 +4322,10 @@ pub fn freeDecl(self: *MachO, decl: *Module.Decl) void {
}
}
pub fn getDeclVAddr(self: *MachO, decl: *const Module.Decl, reloc_info: File.RelocInfo) !u64 {
pub fn getDeclVAddr(self: *MachO, decl_index: Module.Decl.Index, reloc_info: File.RelocInfo) !u64 {
const mod = self.base.options.module.?;
const decl = mod.declPtr(decl_index);
assert(self.llvm_object == null);
assert(decl.link.macho.local_sym_index != 0);
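Nearly every backend hunk in this commit opens the same way: fetch the module from the link options, then resolve the stable Decl.Index to a pointer with declPtr. A minimal sketch of the machinery this relies on, assuming Decl.Index is a u32-backed enum into stable storage (these reduced definitions are illustrative, not the real ones from Module.zig):

const std = @import("std");

const Decl = struct {
    name: [:0]const u8,

    pub const Index = enum(u32) { _ };
};

const Module = struct {
    // A SegmentedList never moves existing elements when it grows, so a
    // pointer returned by declPtr remains valid while more decls are
    // appended elsewhere.
    allocated_decls: std.SegmentedList(Decl, 0) = .{},

    pub fn declPtr(mod: *Module, index: Decl.Index) *Decl {
        return mod.allocated_decls.at(@enumToInt(index));
    }
};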

View File

@ -74,27 +74,27 @@ pub fn updateFunc(self: *NvPtx, module: *Module, func: *Module.Fn, air: Air, liv
try self.llvm_object.updateFunc(module, func, air, liveness);
}
pub fn updateDecl(self: *NvPtx, module: *Module, decl: *Module.Decl) !void {
pub fn updateDecl(self: *NvPtx, module: *Module, decl_index: Module.Decl.Index) !void {
if (!build_options.have_llvm) return;
return self.llvm_object.updateDecl(module, decl);
return self.llvm_object.updateDecl(module, decl_index);
}
pub fn updateDeclExports(
self: *NvPtx,
module: *Module,
decl: *const Module.Decl,
decl_index: Module.Decl.Index,
exports: []const *Module.Export,
) !void {
if (!build_options.have_llvm) return;
if (build_options.skip_non_native and builtin.object_format != .nvptx) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
return self.llvm_object.updateDeclExports(module, decl, exports);
return self.llvm_object.updateDeclExports(module, decl_index, exports);
}
pub fn freeDecl(self: *NvPtx, decl: *Module.Decl) void {
pub fn freeDecl(self: *NvPtx, decl_index: Module.Decl.Index) void {
if (!build_options.have_llvm) return;
return self.llvm_object.freeDecl(decl);
return self.llvm_object.freeDecl(decl_index);
}
pub fn flush(self: *NvPtx, comp: *Compilation, prog_node: *std.Progress.Node) !void {

View File

@ -59,9 +59,9 @@ path_arena: std.heap.ArenaAllocator,
/// If we group the decls by file, it makes it really easy to do this (put the symbol in the correct place)
fn_decl_table: std.AutoArrayHashMapUnmanaged(
*Module.File,
struct { sym_index: u32, functions: std.AutoArrayHashMapUnmanaged(*Module.Decl, FnDeclOutput) = .{} },
struct { sym_index: u32, functions: std.AutoArrayHashMapUnmanaged(Module.Decl.Index, FnDeclOutput) = .{} },
) = .{},
data_decl_table: std.AutoArrayHashMapUnmanaged(*Module.Decl, []const u8) = .{},
data_decl_table: std.AutoArrayHashMapUnmanaged(Module.Decl.Index, []const u8) = .{},
hdr: aout.ExecHdr = undefined,
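The key-type swap from *Module.Decl to Module.Decl.Index is transparent to these auto hash maps: auto-hashing already understands integer-backed enums, and a 4-byte key replaces an 8-byte pointer. A self-contained sketch (DeclIndex is a hypothetical stand-in for Module.Decl.Index):

const std = @import("std");

const DeclIndex = enum(u32) { _ };

test "an index keys an auto hash map just like a pointer did" {
    const gpa = std.testing.allocator;
    var table: std.AutoArrayHashMapUnmanaged(DeclIndex, []const u8) = .{};
    defer table.deinit(gpa);

    try table.put(gpa, @intToEnum(DeclIndex, 7), "code for decl 7");
    // No pointer is stored, so the entry stays valid even if the Decl
    // storage reallocates while this table is alive.
    try std.testing.expectEqualStrings("code for decl 7", table.get(@intToEnum(DeclIndex, 7)).?);
}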
@ -162,11 +162,13 @@ pub fn createEmpty(gpa: Allocator, options: link.Options) !*Plan9 {
return self;
}
fn putFn(self: *Plan9, decl: *Module.Decl, out: FnDeclOutput) !void {
fn putFn(self: *Plan9, decl_index: Module.Decl.Index, out: FnDeclOutput) !void {
const gpa = self.base.allocator;
const mod = self.base.options.module.?;
const decl = mod.declPtr(decl_index);
const fn_map_res = try self.fn_decl_table.getOrPut(gpa, decl.getFileScope());
if (fn_map_res.found_existing) {
try fn_map_res.value_ptr.functions.put(gpa, decl, out);
try fn_map_res.value_ptr.functions.put(gpa, decl_index, out);
} else {
const file = decl.getFileScope();
const arena = self.path_arena.allocator();
@ -178,7 +180,7 @@ fn putFn(self: *Plan9, decl: *Module.Decl, out: FnDeclOutput) !void {
break :blk @intCast(u32, self.syms.items.len - 1);
},
};
try fn_map_res.value_ptr.functions.put(gpa, decl, out);
try fn_map_res.value_ptr.functions.put(gpa, decl_index, out);
var a = std.ArrayList(u8).init(arena);
errdefer a.deinit();
@ -229,9 +231,10 @@ pub fn updateFunc(self: *Plan9, module: *Module, func: *Module.Fn, air: Air, liv
@panic("Attempted to compile for object format that was disabled by build configuration");
}
const decl = func.owner_decl;
const decl_index = func.owner_decl;
const decl = module.declPtr(decl_index);
try self.seeDecl(decl);
try self.seeDecl(decl_index);
log.debug("codegen decl {*} ({s})", .{ decl, decl.name });
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
@ -262,7 +265,7 @@ pub fn updateFunc(self: *Plan9, module: *Module, func: *Module.Fn, air: Air, liv
.appended => code_buffer.toOwnedSlice(),
.fail => |em| {
decl.analysis = .codegen_failure;
try module.failed_decls.put(module.gpa, decl, em);
try module.failed_decls.put(module.gpa, decl_index, em);
return;
},
};
@ -272,19 +275,21 @@ pub fn updateFunc(self: *Plan9, module: *Module, func: *Module.Fn, air: Air, liv
.start_line = start_line.?,
.end_line = end_line,
};
try self.putFn(decl, out);
try self.putFn(decl_index, out);
return self.updateFinish(decl);
}
pub fn lowerUnnamedConst(self: *Plan9, tv: TypedValue, decl: *Module.Decl) !u32 {
pub fn lowerUnnamedConst(self: *Plan9, tv: TypedValue, decl_index: Module.Decl.Index) !u32 {
_ = self;
_ = tv;
_ = decl;
_ = decl_index;
log.debug("TODO lowerUnnamedConst for Plan9", .{});
return error.AnalysisFail;
}
pub fn updateDecl(self: *Plan9, module: *Module, decl: *Module.Decl) !void {
pub fn updateDecl(self: *Plan9, module: *Module, decl_index: Module.Decl.Index) !void {
const decl = module.declPtr(decl_index);
if (decl.val.tag() == .extern_fn) {
return; // TODO Should we do more when front-end analyzed extern decl?
}
@ -295,7 +300,7 @@ pub fn updateDecl(self: *Plan9, module: *Module, decl: *Module.Decl) !void {
}
}
try self.seeDecl(decl);
try self.seeDecl(decl_index);
log.debug("codegen decl {*} ({s})", .{ decl, decl.name });
@ -315,13 +320,13 @@ pub fn updateDecl(self: *Plan9, module: *Module, decl: *Module.Decl) !void {
.appended => code_buffer.items,
.fail => |em| {
decl.analysis = .codegen_failure;
try module.failed_decls.put(module.gpa, decl, em);
try module.failed_decls.put(module.gpa, decl_index, em);
return;
},
};
var duped_code = try self.base.allocator.dupe(u8, code);
errdefer self.base.allocator.free(duped_code);
try self.data_decl_table.put(self.base.allocator, decl, duped_code);
try self.data_decl_table.put(self.base.allocator, decl_index, duped_code);
return self.updateFinish(decl);
}
/// called at the end of update{Decl,Func}
@ -435,7 +440,8 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
while (it_file.next()) |fentry| {
var it = fentry.value_ptr.functions.iterator();
while (it.next()) |entry| {
const decl = entry.key_ptr.*;
const decl_index = entry.key_ptr.*;
const decl = mod.declPtr(decl_index);
const out = entry.value_ptr.*;
log.debug("write text decl {*} ({s}), lines {d} to {d}", .{ decl, decl.name, out.start_line + 1, out.end_line });
{
@ -462,7 +468,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
mem.writeInt(u64, got_table[decl.link.plan9.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian());
}
self.syms.items[decl.link.plan9.sym_index.?].value = off;
if (mod.decl_exports.get(decl)) |exports| {
if (mod.decl_exports.get(decl_index)) |exports| {
try self.addDeclExports(mod, decl, exports);
}
}
@ -482,7 +488,8 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
{
var it = self.data_decl_table.iterator();
while (it.next()) |entry| {
const decl = entry.key_ptr.*;
const decl_index = entry.key_ptr.*;
const decl = mod.declPtr(decl_index);
const code = entry.value_ptr.*;
log.debug("write data decl {*} ({s})", .{ decl, decl.name });
@ -498,7 +505,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
mem.writeInt(u64, got_table[decl.link.plan9.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian());
}
self.syms.items[decl.link.plan9.sym_index.?].value = off;
if (mod.decl_exports.get(decl)) |exports| {
if (mod.decl_exports.get(decl_index)) |exports| {
try self.addDeclExports(mod, decl, exports);
}
}
@ -564,24 +571,25 @@ fn addDeclExports(
}
}
pub fn freeDecl(self: *Plan9, decl: *Module.Decl) void {
pub fn freeDecl(self: *Plan9, decl_index: Module.Decl.Index) void {
// TODO audit the lifetimes of decls table entries. It's possible to get
// allocateDeclIndexes and then freeDecl without any updateDecl in between.
// However, that is planned to change; see the TODO comment in Module.zig
// in the deleteUnusedDecl function.
const mod = self.base.options.module.?;
const decl = mod.declPtr(decl_index);
const is_fn = (decl.val.tag() == .function);
if (is_fn) {
var symidx_and_submap =
self.fn_decl_table.get(decl.getFileScope()).?;
var symidx_and_submap = self.fn_decl_table.get(decl.getFileScope()).?;
var submap = symidx_and_submap.functions;
_ = submap.swapRemove(decl);
_ = submap.swapRemove(decl_index);
if (submap.count() == 0) {
self.syms.items[symidx_and_submap.sym_index] = aout.Sym.undefined_symbol;
self.syms_index_free_list.append(self.base.allocator, symidx_and_submap.sym_index) catch {};
submap.deinit(self.base.allocator);
}
} else {
_ = self.data_decl_table.swapRemove(decl);
_ = self.data_decl_table.swapRemove(decl_index);
}
if (decl.link.plan9.got_index) |i| {
// TODO: if this catch {} is triggered, an assertion in flushModule will be triggered, because got_index_free_list will have the wrong length
@ -593,7 +601,9 @@ pub fn freeDecl(self: *Plan9, decl: *Module.Decl) void {
}
}
pub fn seeDecl(self: *Plan9, decl: *Module.Decl) !void {
pub fn seeDecl(self: *Plan9, decl_index: Module.Decl.Index) !void {
const mod = self.base.options.module.?;
const decl = mod.declPtr(decl_index);
if (decl.link.plan9.got_index == null) {
if (self.got_index_free_list.popOrNull()) |i| {
decl.link.plan9.got_index = i;
@ -607,14 +617,13 @@ pub fn seeDecl(self: *Plan9, decl: *Module.Decl) !void {
pub fn updateDeclExports(
self: *Plan9,
module: *Module,
decl: *Module.Decl,
decl_index: Module.Decl.Index,
exports: []const *Module.Export,
) !void {
try self.seeDecl(decl);
try self.seeDecl(decl_index);
// we do all the things in flush
_ = self;
_ = module;
_ = decl;
_ = exports;
}
pub fn deinit(self: *Plan9) void {
@ -709,14 +718,18 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void {
});
}
}
const mod = self.base.options.module.?;
// write the data symbols
{
var it = self.data_decl_table.iterator();
while (it.next()) |entry| {
const decl = entry.key_ptr.*;
const decl_index = entry.key_ptr.*;
const decl = mod.declPtr(decl_index);
const sym = self.syms.items[decl.link.plan9.sym_index.?];
try self.writeSym(writer, sym);
if (self.base.options.module.?.decl_exports.get(decl)) |exports| {
if (self.base.options.module.?.decl_exports.get(decl_index)) |exports| {
for (exports) |e| {
try self.writeSym(writer, self.syms.items[e.link.plan9.?]);
}
@ -737,10 +750,11 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void {
// write all the decls that come from the file of the z symbol
var submap_it = symidx_and_submap.functions.iterator();
while (submap_it.next()) |entry| {
const decl = entry.key_ptr.*;
const decl_index = entry.key_ptr.*;
const decl = mod.declPtr(decl_index);
const sym = self.syms.items[decl.link.plan9.sym_index.?];
try self.writeSym(writer, sym);
if (self.base.options.module.?.decl_exports.get(decl)) |exports| {
if (self.base.options.module.?.decl_exports.get(decl_index)) |exports| {
for (exports) |e| {
const s = self.syms.items[e.link.plan9.?];
if (mem.eql(u8, s.name, "_start"))
@ -754,12 +768,18 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void {
}
/// This will be removed and moved to updateFinish.
pub fn allocateDeclIndexes(self: *Plan9, decl: *Module.Decl) !void {
pub fn allocateDeclIndexes(self: *Plan9, decl_index: Module.Decl.Index) !void {
_ = self;
_ = decl;
_ = decl_index;
}
pub fn getDeclVAddr(self: *Plan9, decl: *const Module.Decl, reloc_info: link.File.RelocInfo) !u64 {
pub fn getDeclVAddr(
self: *Plan9,
decl_index: Module.Decl.Index,
reloc_info: link.File.RelocInfo,
) !u64 {
_ = reloc_info;
const mod = self.base.options.module.?;
const decl = mod.declPtr(decl_index);
if (decl.ty.zigTypeTag() == .Fn) {
var start = self.bases.text;
var it_file = self.fn_decl_table.iterator();
@ -767,7 +787,7 @@ pub fn getDeclVAddr(self: *Plan9, decl: *const Module.Decl, reloc_info: link.Fil
var symidx_and_submap = fentry.value_ptr;
var submap_it = symidx_and_submap.functions.iterator();
while (submap_it.next()) |entry| {
if (entry.key_ptr.* == decl) return start;
if (entry.key_ptr.* == decl_index) return start;
start += entry.value_ptr.code.len;
}
}
@ -776,7 +796,7 @@ pub fn getDeclVAddr(self: *Plan9, decl: *const Module.Decl, reloc_info: link.Fil
var start = self.bases.data + self.got_len * if (!self.sixtyfour_bit) @as(u32, 4) else 8;
var it = self.data_decl_table.iterator();
while (it.next()) |kv| {
if (decl == kv.key_ptr.*) return start;
if (decl_index == kv.key_ptr.*) return start;
start += kv.value_ptr.len;
}
unreachable;
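getDeclVAddr derives an address from iteration order rather than a stored offset: it walks the same tables flushModule writes, accumulating lengths from the section base (bases.text for functions, bases.data plus the GOT for data). The core of that walk, reduced to a sketch with hypothetical inputs:

// Addresses are implied by position in the table, not stored anywhere.
fn vaddrOf(code_lens: []const usize, base: u64, wanted: usize) u64 {
    var start = base;
    for (code_lens) |len, i| {
        if (i == wanted) return start;
        start += len;
    }
    unreachable; // caller guarantees `wanted` is in the table
}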

View File

@ -54,7 +54,7 @@ base: link.File,
/// This linker backend does not try to incrementally link output SPIR-V code.
/// Instead, it tracks all declarations in this table, and iterates over it
/// in the flush function.
decl_table: std.AutoArrayHashMapUnmanaged(*Module.Decl, DeclGenContext) = .{},
decl_table: std.AutoArrayHashMapUnmanaged(Module.Decl.Index, DeclGenContext) = .{},
const DeclGenContext = struct {
air: Air,
@ -145,29 +145,31 @@ pub fn updateFunc(self: *SpirV, module: *Module, func: *Module.Fn, air: Air, liv
};
}
pub fn updateDecl(self: *SpirV, module: *Module, decl: *Module.Decl) !void {
pub fn updateDecl(self: *SpirV, module: *Module, decl_index: Module.Decl.Index) !void {
if (build_options.skip_non_native) {
@panic("Attempted to compile for architecture that was disabled by build configuration");
}
_ = module;
// Keep track of all decls so we can iterate over them on flush().
_ = try self.decl_table.getOrPut(self.base.allocator, decl);
_ = try self.decl_table.getOrPut(self.base.allocator, decl_index);
}
pub fn updateDeclExports(
self: *SpirV,
module: *Module,
decl: *const Module.Decl,
decl_index: Module.Decl.Index,
exports: []const *Module.Export,
) !void {
_ = self;
_ = module;
_ = decl;
_ = decl_index;
_ = exports;
}
pub fn freeDecl(self: *SpirV, decl: *Module.Decl) void {
const index = self.decl_table.getIndex(decl).?;
pub fn freeDecl(self: *SpirV, decl_index: Module.Decl.Index) void {
const index = self.decl_table.getIndex(decl_index).?;
const module = self.base.options.module.?;
const decl = module.declPtr(decl_index);
if (decl.val.tag() == .function) {
self.decl_table.values()[index].deinit(self.base.allocator);
}
@ -208,7 +210,8 @@ pub fn flushModule(self: *SpirV, comp: *Compilation, prog_node: *std.Progress.No
// TODO: We're allocating an ID unconditionally now, are there
// declarations which don't generate a result?
// TODO: fn_link is used here, but that's probably not the right field. It will work anyway though.
for (self.decl_table.keys()) |decl| {
for (self.decl_table.keys()) |decl_index| {
const decl = module.declPtr(decl_index);
if (decl.has_tv) {
decl.fn_link.spirv.id = spv.allocId();
}
@ -220,7 +223,8 @@ pub fn flushModule(self: *SpirV, comp: *Compilation, prog_node: *std.Progress.No
var it = self.decl_table.iterator();
while (it.next()) |entry| {
const decl = entry.key_ptr.*;
const decl_index = entry.key_ptr.*;
const decl = module.declPtr(decl_index);
if (!decl.has_tv) continue;
const air = entry.value_ptr.air;
@ -228,7 +232,7 @@ pub fn flushModule(self: *SpirV, comp: *Compilation, prog_node: *std.Progress.No
// Note, if `decl` is not a function, air/liveness may be undefined.
if (try decl_gen.gen(decl, air, liveness)) |msg| {
try module.failed_decls.put(module.gpa, decl, msg);
try module.failed_decls.put(module.gpa, decl_index, msg);
return; // TODO: Attempt to generate more decls?
}
}

View File

@ -48,7 +48,7 @@ host_name: []const u8 = "env",
/// List of all `Decl` that are currently alive.
/// This is meant for bookkeeping so we can safely clean up all codegen memory
/// when calling `deinit`
decls: std.AutoHashMapUnmanaged(*Module.Decl, void) = .{},
decls: std.AutoHashMapUnmanaged(Module.Decl.Index, void) = .{},
/// List of all symbols generated by Zig code.
symbols: std.ArrayListUnmanaged(Symbol) = .{},
/// List of symbol indexes which are free to be used.
@ -429,9 +429,11 @@ pub fn deinit(self: *Wasm) void {
if (self.llvm_object) |llvm_object| llvm_object.destroy(gpa);
}
const mod = self.base.options.module.?;
var decl_it = self.decls.keyIterator();
while (decl_it.next()) |decl_ptr| {
decl_ptr.*.link.wasm.deinit(gpa);
while (decl_it.next()) |decl_index_ptr| {
const decl = mod.declPtr(decl_index_ptr.*);
decl.link.wasm.deinit(gpa);
}
for (self.func_types.items) |*func_type| {
@ -476,12 +478,13 @@ pub fn deinit(self: *Wasm) void {
self.string_table.deinit(gpa);
}
pub fn allocateDeclIndexes(self: *Wasm, decl: *Module.Decl) !void {
pub fn allocateDeclIndexes(self: *Wasm, decl_index: Module.Decl.Index) !void {
if (self.llvm_object) |_| return;
const decl = self.base.options.module.?.declPtr(decl_index);
if (decl.link.wasm.sym_index != 0) return;
try self.symbols.ensureUnusedCapacity(self.base.allocator, 1);
try self.decls.putNoClobber(self.base.allocator, decl, {});
try self.decls.putNoClobber(self.base.allocator, decl_index, {});
const atom = &decl.link.wasm;
@ -502,14 +505,15 @@ pub fn allocateDeclIndexes(self: *Wasm, decl: *Module.Decl) !void {
try self.symbol_atom.putNoClobber(self.base.allocator, atom.symbolLoc(), atom);
}
pub fn updateFunc(self: *Wasm, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
pub fn updateFunc(self: *Wasm, mod: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
if (build_options.skip_non_native and builtin.object_format != .wasm) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
if (build_options.have_llvm) {
if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(module, func, air, liveness);
if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(mod, func, air, liveness);
}
const decl = func.owner_decl;
const decl_index = func.owner_decl;
const decl = mod.declPtr(decl_index);
assert(decl.link.wasm.sym_index != 0); // Must call allocateDeclIndexes()
decl.link.wasm.clear();
@ -530,7 +534,7 @@ pub fn updateFunc(self: *Wasm, module: *Module, func: *Module.Fn, air: Air, live
.appended => code_writer.items,
.fail => |em| {
decl.analysis = .codegen_failure;
try module.failed_decls.put(module.gpa, decl, em);
try mod.failed_decls.put(mod.gpa, decl_index, em);
return;
},
};
@ -540,14 +544,15 @@ pub fn updateFunc(self: *Wasm, module: *Module, func: *Module.Fn, air: Air, live
// Generate code for the Decl, storing it in memory to be later written to
// the file on flush().
pub fn updateDecl(self: *Wasm, module: *Module, decl: *Module.Decl) !void {
pub fn updateDecl(self: *Wasm, mod: *Module, decl_index: Module.Decl.Index) !void {
if (build_options.skip_non_native and builtin.object_format != .wasm) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
if (build_options.have_llvm) {
if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl);
if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(mod, decl_index);
}
const decl = mod.declPtr(decl_index);
assert(decl.link.wasm.sym_index != 0); // Must call allocateDeclIndexes()
decl.link.wasm.clear();
@ -580,7 +585,7 @@ pub fn updateDecl(self: *Wasm, module: *Module, decl: *Module.Decl) !void {
.appended => code_writer.items,
.fail => |em| {
decl.analysis = .codegen_failure;
try module.failed_decls.put(module.gpa, decl, em);
try mod.failed_decls.put(mod.gpa, decl_index, em);
return;
},
};
@ -590,12 +595,13 @@ pub fn updateDecl(self: *Wasm, module: *Module, decl: *Module.Decl) !void {
fn finishUpdateDecl(self: *Wasm, decl: *Module.Decl, code: []const u8) !void {
if (code.len == 0) return;
const mod = self.base.options.module.?;
const atom: *Atom = &decl.link.wasm;
atom.size = @intCast(u32, code.len);
atom.alignment = decl.ty.abiAlignment(self.base.options.target);
const symbol = &self.symbols.items[atom.sym_index];
const full_name = try decl.getFullyQualifiedName(self.base.allocator);
const full_name = try decl.getFullyQualifiedName(mod);
defer self.base.allocator.free(full_name);
symbol.name = try self.string_table.put(self.base.allocator, full_name);
try atom.code.appendSlice(self.base.allocator, code);
@ -606,12 +612,15 @@ fn finishUpdateDecl(self: *Wasm, decl: *Module.Decl, code: []const u8) !void {
/// Lowers a constant typed value to a local symbol and atom.
/// Returns the symbol index of the local symbol.
/// The given `decl` is the parent decl that owns the constant.
pub fn lowerUnnamedConst(self: *Wasm, decl: *Module.Decl, tv: TypedValue) !u32 {
pub fn lowerUnnamedConst(self: *Wasm, tv: TypedValue, decl_index: Module.Decl.Index) !u32 {
assert(tv.ty.zigTypeTag() != .Fn); // cannot create local symbols for functions
const mod = self.base.options.module.?;
const decl = mod.declPtr(decl_index);
// Create and initialize a new local symbol and atom
const local_index = decl.link.wasm.locals.items.len;
const fqdn = try decl.getFullyQualifiedName(self.base.allocator);
const fqdn = try decl.getFullyQualifiedName(mod);
defer self.base.allocator.free(fqdn);
const name = try std.fmt.allocPrintZ(self.base.allocator, "__unnamed_{s}_{d}", .{ fqdn, local_index });
defer self.base.allocator.free(name);
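The unnamed constant's symbol name combines the owning decl's fully qualified name with a per-decl counter, so no shared state is consulted when naming it. With illustrative values (the fqdn and index here are made up):

// fqdn = "foo.bar", local_index = 2  =>  "__unnamed_foo.bar_2"
const name = try std.fmt.allocPrintZ(gpa, "__unnamed_{s}_{d}", .{ fqdn, local_index });
defer gpa.free(name);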
@ -641,7 +650,6 @@ pub fn lowerUnnamedConst(self: *Wasm, decl: *Module.Decl, tv: TypedValue) !u32 {
var value_bytes = std.ArrayList(u8).init(self.base.allocator);
defer value_bytes.deinit();
const module = self.base.options.module.?;
const result = try codegen.generateSymbol(
&self.base,
decl.srcLoc(),
@ -658,7 +666,7 @@ pub fn lowerUnnamedConst(self: *Wasm, decl: *Module.Decl, tv: TypedValue) !u32 {
.appended => value_bytes.items,
.fail => |em| {
decl.analysis = .codegen_failure;
try module.failed_decls.put(module.gpa, decl, em);
try mod.failed_decls.put(mod.gpa, decl_index, em);
return error.AnalysisFail;
},
};
@ -672,9 +680,11 @@ pub fn lowerUnnamedConst(self: *Wasm, decl: *Module.Decl, tv: TypedValue) !u32 {
/// Returns the given pointer address
pub fn getDeclVAddr(
self: *Wasm,
decl: *const Module.Decl,
decl_index: Module.Decl.Index,
reloc_info: link.File.RelocInfo,
) !u64 {
const mod = self.base.options.module.?;
const decl = mod.declPtr(decl_index);
const target_symbol_index = decl.link.wasm.sym_index;
assert(target_symbol_index != 0);
assert(reloc_info.parent_atom_index != 0);
@ -722,21 +732,23 @@ pub fn deleteExport(self: *Wasm, exp: Export) void {
pub fn updateDeclExports(
self: *Wasm,
module: *Module,
decl: *const Module.Decl,
mod: *Module,
decl_index: Module.Decl.Index,
exports: []const *Module.Export,
) !void {
if (build_options.skip_non_native and builtin.object_format != .wasm) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
if (build_options.have_llvm) {
if (self.llvm_object) |llvm_object| return llvm_object.updateDeclExports(module, decl, exports);
if (self.llvm_object) |llvm_object| return llvm_object.updateDeclExports(mod, decl_index, exports);
}
const decl = mod.declPtr(decl_index);
for (exports) |exp| {
if (exp.options.section) |section| {
try module.failed_exports.putNoClobber(module.gpa, exp, try Module.ErrorMsg.create(
module.gpa,
try mod.failed_exports.putNoClobber(mod.gpa, exp, try Module.ErrorMsg.create(
mod.gpa,
decl.srcLoc(),
"Unimplemented: ExportOptions.section '{s}'",
.{section},
@ -754,8 +766,8 @@ pub fn updateDeclExports(
// are strong symbols, we have a linker error.
// In the other case we replace one with the other.
if (!exp_is_weak and !existing_sym.isWeak()) {
try module.failed_exports.put(module.gpa, exp, try Module.ErrorMsg.create(
module.gpa,
try mod.failed_exports.put(mod.gpa, exp, try Module.ErrorMsg.create(
mod.gpa,
decl.srcLoc(),
\\LinkError: symbol '{s}' defined multiple times
\\ first definition in '{s}'
@ -773,8 +785,9 @@ pub fn updateDeclExports(
}
}
const sym_index = exp.exported_decl.link.wasm.sym_index;
const sym_loc = exp.exported_decl.link.wasm.symbolLoc();
const exported_decl = mod.declPtr(exp.exported_decl);
const sym_index = exported_decl.link.wasm.sym_index;
const sym_loc = exported_decl.link.wasm.symbolLoc();
const symbol = sym_loc.getSymbol(self);
switch (exp.options.linkage) {
.Internal => {
@ -786,8 +799,8 @@ pub fn updateDeclExports(
},
.Strong => {}, // symbols are strong by default
.LinkOnce => {
try module.failed_exports.putNoClobber(module.gpa, exp, try Module.ErrorMsg.create(
module.gpa,
try mod.failed_exports.putNoClobber(mod.gpa, exp, try Module.ErrorMsg.create(
mod.gpa,
decl.srcLoc(),
"Unimplemented: LinkOnce",
.{},
@ -813,13 +826,15 @@ pub fn updateDeclExports(
}
}
pub fn freeDecl(self: *Wasm, decl: *Module.Decl) void {
pub fn freeDecl(self: *Wasm, decl_index: Module.Decl.Index) void {
if (build_options.have_llvm) {
if (self.llvm_object) |llvm_object| return llvm_object.freeDecl(decl);
if (self.llvm_object) |llvm_object| return llvm_object.freeDecl(decl_index);
}
const mod = self.base.options.module.?;
const decl = mod.declPtr(decl_index);
const atom = &decl.link.wasm;
self.symbols_free_list.append(self.base.allocator, atom.sym_index) catch {};
_ = self.decls.remove(decl);
_ = self.decls.remove(decl_index);
self.symbols.items[atom.sym_index].tag = .dead;
for (atom.locals.items) |local_atom| {
const local_symbol = &self.symbols.items[local_atom.sym_index];
@ -1414,8 +1429,8 @@ fn populateErrorNameTable(self: *Wasm) !void {
// Addend for each relocation to the table
var addend: u32 = 0;
const module = self.base.options.module.?;
for (module.error_name_list.items) |error_name| {
const mod = self.base.options.module.?;
for (mod.error_name_list.items) |error_name| {
const len = @intCast(u32, error_name.len + 1); // names are 0-terminated
const slice_ty = Type.initTag(.const_slice_u8_sentinel_0);
@ -1456,9 +1471,11 @@ fn resetState(self: *Wasm) void {
for (self.segment_info.items) |*segment_info| {
self.base.allocator.free(segment_info.name);
}
const mod = self.base.options.module.?;
var decl_it = self.decls.keyIterator();
while (decl_it.next()) |decl| {
const atom = &decl.*.link.wasm;
while (decl_it.next()) |decl_index_ptr| {
const decl = mod.declPtr(decl_index_ptr.*);
const atom = &decl.link.wasm;
atom.next = null;
atom.prev = null;
@ -1546,12 +1563,14 @@ pub fn flushModule(self: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod
defer self.resetState();
try self.setupStart();
try self.setupImports();
const mod = self.base.options.module.?;
var decl_it = self.decls.keyIterator();
while (decl_it.next()) |decl| {
if (decl.*.isExtern()) continue;
while (decl_it.next()) |decl_index_ptr| {
const decl = mod.declPtr(decl_index_ptr.*);
if (decl.isExtern()) continue;
const atom = &decl.*.link.wasm;
if (decl.*.ty.zigTypeTag() == .Fn) {
try self.parseAtom(atom, .{ .function = decl.*.fn_link.wasm });
if (decl.ty.zigTypeTag() == .Fn) {
try self.parseAtom(atom, .{ .function = decl.fn_link.wasm });
} else {
try self.parseAtom(atom, .data);
}
@ -2045,7 +2064,7 @@ fn linkWithLLD(self: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) !
// If there is no Zig code to compile, then we should skip flushing the output file because it
// will not be part of the linker line anyway.
const module_obj_path: ?[]const u8 = if (self.base.options.module) |module| blk: {
const module_obj_path: ?[]const u8 = if (self.base.options.module) |mod| blk: {
const use_stage1 = build_options.is_stage1 and self.base.options.use_stage1;
if (use_stage1) {
const obj_basename = try std.zig.binNameAlloc(arena, .{
@ -2054,7 +2073,7 @@ fn linkWithLLD(self: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) !
.output_mode = .Obj,
});
switch (self.base.options.cache_mode) {
.incremental => break :blk try module.zig_cache_artifact_directory.join(
.incremental => break :blk try mod.zig_cache_artifact_directory.join(
arena,
&[_][]const u8{obj_basename},
),
@ -2253,7 +2272,7 @@ fn linkWithLLD(self: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) !
}
if (auto_export_symbols) {
if (self.base.options.module) |module| {
if (self.base.options.module) |mod| {
// when we use stage1, we use the exports that stage1 provided us.
// For stage2, we can directly retrieve them from the module.
const use_stage1 = build_options.is_stage1 and self.base.options.use_stage1;
@ -2264,14 +2283,15 @@ fn linkWithLLD(self: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) !
} else {
const skip_export_non_fn = target.os.tag == .wasi and
self.base.options.wasi_exec_model == .command;
for (module.decl_exports.values()) |exports| {
for (mod.decl_exports.values()) |exports| {
for (exports) |exprt| {
if (skip_export_non_fn and exprt.exported_decl.ty.zigTypeTag() != .Fn) {
const exported_decl = mod.declPtr(exprt.exported_decl);
if (skip_export_non_fn and exported_decl.ty.zigTypeTag() != .Fn) {
// skip exporting symbols when we're building a WASI command
// and the symbol is not a function
continue;
}
const symbol_name = exprt.exported_decl.name;
const symbol_name = exported_decl.name;
const arg = try std.fmt.allocPrint(arena, "--export={s}", .{symbol_name});
try argv.append(arg);
}

View File

@ -3892,7 +3892,7 @@ pub fn cmdFmt(gpa: Allocator, arena: Allocator, args: []const []const u8) !void
.tree_loaded = true,
.zir = undefined,
.pkg = undefined,
.root_decl = null,
.root_decl = .none,
};
file.pkg = try Package.create(gpa, null, file.sub_file_path);
@ -4098,7 +4098,7 @@ fn fmtPathFile(
.tree_loaded = true,
.zir = undefined,
.pkg = undefined,
.root_decl = null,
.root_decl = .none,
};
file.pkg = try Package.create(fmt.gpa, null, file.sub_file_path);
@ -4757,7 +4757,7 @@ pub fn cmdAstCheck(
.tree = undefined,
.zir = undefined,
.pkg = undefined,
.root_decl = null,
.root_decl = .none,
};
if (zig_source_file) |file_name| {
var f = fs.cwd().openFile(file_name, .{}) catch |err| {
@ -4910,7 +4910,7 @@ pub fn cmdChangelist(
.tree = undefined,
.zir = undefined,
.pkg = undefined,
.root_decl = null,
.root_decl = .none,
};
file.pkg = try Package.create(gpa, null, file.sub_file_path);
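root_decl moves from a nullable value to an optional index, with .none as an in-band sentinel. One plausible shape for such a type (the real definition lives in Module.zig; this reduction is an assumption for illustration):

const std = @import("std");

const Decl = struct {
    pub const Index = enum(u32) { _ };

    // Reserving one bit pattern for "no decl" keeps the field at 4 bytes,
    // instead of widening it the way ?Index or an optional pointer would.
    pub const OptionalIndex = enum(u32) {
        none = std.math.maxInt(u32),
        _,

        pub fn unwrap(oi: OptionalIndex) ?Index {
            if (oi == .none) return null;
            return @intToEnum(Index, @enumToInt(oi));
        }
    };
};

test ".none behaves like null" {
    const root_decl: Decl.OptionalIndex = .none;
    try std.testing.expect(root_decl.unwrap() == null);
}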

View File

@ -7,7 +7,7 @@ const Value = @import("value.zig").Value;
const Air = @import("Air.zig");
const Liveness = @import("Liveness.zig");
pub fn dump(gpa: Allocator, air: Air, liveness: Liveness) void {
pub fn dump(module: *Module, air: Air, liveness: Liveness) void {
const instruction_bytes = air.instructions.len *
// Here we don't use @sizeOf(Air.Inst.Data) because it would include
// the debug safety tag, but we want to measure release size.
@ -41,11 +41,12 @@ pub fn dump(gpa: Allocator, air: Air, liveness: Liveness) void {
liveness.special.count(), fmtIntSizeBin(liveness_special_bytes),
});
// zig fmt: on
var arena = std.heap.ArenaAllocator.init(gpa);
var arena = std.heap.ArenaAllocator.init(module.gpa);
defer arena.deinit();
var writer: Writer = .{
.gpa = gpa,
.module = module,
.gpa = module.gpa,
.arena = arena.allocator(),
.air = air,
.liveness = liveness,
@ -58,6 +59,7 @@ pub fn dump(gpa: Allocator, air: Air, liveness: Liveness) void {
}
const Writer = struct {
module: *Module,
gpa: Allocator,
arena: Allocator,
air: Air,
@ -591,7 +593,8 @@ const Writer = struct {
fn writeDbgInline(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
const ty_pl = w.air.instructions.items(.data)[inst].ty_pl;
const function = w.air.values[ty_pl.payload].castTag(.function).?.data;
try s.print("{s}", .{function.owner_decl.name});
const owner_decl = w.module.declPtr(function.owner_decl);
try s.print("{s}", .{owner_decl.name});
}
fn writeDbgVar(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {

View File

@ -521,7 +521,7 @@ pub const Type = extern union {
}
}
pub fn eql(a: Type, b: Type, target: Target) bool {
pub fn eql(a: Type, b: Type, mod: *Module) bool {
// As a shortcut, if the small tags / addresses match, we're done.
if (a.tag_if_small_enough == b.tag_if_small_enough) return true;
@ -637,7 +637,7 @@ pub const Type = extern union {
const a_info = a.fnInfo();
const b_info = b.fnInfo();
if (!eql(a_info.return_type, b_info.return_type, target))
if (!eql(a_info.return_type, b_info.return_type, mod))
return false;
if (a_info.cc != b_info.cc)
@ -663,7 +663,7 @@ pub const Type = extern union {
if (a_param_ty.tag() == .generic_poison) continue;
if (b_param_ty.tag() == .generic_poison) continue;
if (!eql(a_param_ty, b_param_ty, target))
if (!eql(a_param_ty, b_param_ty, mod))
return false;
}
@ -681,13 +681,13 @@ pub const Type = extern union {
if (a.arrayLen() != b.arrayLen())
return false;
const elem_ty = a.elemType();
if (!elem_ty.eql(b.elemType(), target))
if (!elem_ty.eql(b.elemType(), mod))
return false;
const sentinel_a = a.sentinel();
const sentinel_b = b.sentinel();
if (sentinel_a) |sa| {
if (sentinel_b) |sb| {
return sa.eql(sb, elem_ty, target);
return sa.eql(sb, elem_ty, mod);
} else {
return false;
}
@ -718,7 +718,7 @@ pub const Type = extern union {
const info_a = a.ptrInfo().data;
const info_b = b.ptrInfo().data;
if (!info_a.pointee_type.eql(info_b.pointee_type, target))
if (!info_a.pointee_type.eql(info_b.pointee_type, mod))
return false;
if (info_a.@"align" != info_b.@"align")
return false;
@ -741,7 +741,7 @@ pub const Type = extern union {
const sentinel_b = info_b.sentinel;
if (sentinel_a) |sa| {
if (sentinel_b) |sb| {
if (!sa.eql(sb, info_a.pointee_type, target))
if (!sa.eql(sb, info_a.pointee_type, mod))
return false;
} else {
return false;
@ -762,7 +762,7 @@ pub const Type = extern union {
var buf_a: Payload.ElemType = undefined;
var buf_b: Payload.ElemType = undefined;
return a.optionalChild(&buf_a).eql(b.optionalChild(&buf_b), target);
return a.optionalChild(&buf_a).eql(b.optionalChild(&buf_b), mod);
},
.anyerror_void_error_union, .error_union => {
@ -770,18 +770,18 @@ pub const Type = extern union {
const a_set = a.errorUnionSet();
const b_set = b.errorUnionSet();
if (!a_set.eql(b_set, target)) return false;
if (!a_set.eql(b_set, mod)) return false;
const a_payload = a.errorUnionPayload();
const b_payload = b.errorUnionPayload();
if (!a_payload.eql(b_payload, target)) return false;
if (!a_payload.eql(b_payload, mod)) return false;
return true;
},
.anyframe_T => {
if (b.zigTypeTag() != .AnyFrame) return false;
return a.childType().eql(b.childType(), target);
return a.childType().eql(b.childType(), mod);
},
.empty_struct => {
@ -804,7 +804,7 @@ pub const Type = extern union {
for (a_tuple.types) |a_ty, i| {
const b_ty = b_tuple.types[i];
if (!eql(a_ty, b_ty, target)) return false;
if (!eql(a_ty, b_ty, mod)) return false;
}
for (a_tuple.values) |a_val, i| {
@ -820,7 +820,7 @@ pub const Type = extern union {
if (b_val.tag() == .unreachable_value) {
return false;
} else {
if (!Value.eql(a_val, b_val, ty, target)) return false;
if (!Value.eql(a_val, b_val, ty, mod)) return false;
}
}
}
@ -840,7 +840,7 @@ pub const Type = extern union {
for (a_struct_obj.types) |a_ty, i| {
const b_ty = b_struct_obj.types[i];
if (!eql(a_ty, b_ty, target)) return false;
if (!eql(a_ty, b_ty, mod)) return false;
}
for (a_struct_obj.values) |a_val, i| {
@ -856,7 +856,7 @@ pub const Type = extern union {
if (b_val.tag() == .unreachable_value) {
return false;
} else {
if (!Value.eql(a_val, b_val, ty, target)) return false;
if (!Value.eql(a_val, b_val, ty, mod)) return false;
}
}
}
@ -911,13 +911,13 @@ pub const Type = extern union {
}
}
pub fn hash(self: Type, target: Target) u64 {
pub fn hash(self: Type, mod: *Module) u64 {
var hasher = std.hash.Wyhash.init(0);
self.hashWithHasher(&hasher, target);
self.hashWithHasher(&hasher, mod);
return hasher.final();
}
pub fn hashWithHasher(ty: Type, hasher: *std.hash.Wyhash, target: Target) void {
pub fn hashWithHasher(ty: Type, hasher: *std.hash.Wyhash, mod: *Module) void {
switch (ty.tag()) {
.generic_poison => unreachable,
@ -1036,7 +1036,7 @@ pub const Type = extern union {
std.hash.autoHash(hasher, std.builtin.TypeId.Fn);
const fn_info = ty.fnInfo();
hashWithHasher(fn_info.return_type, hasher, target);
hashWithHasher(fn_info.return_type, hasher, mod);
std.hash.autoHash(hasher, fn_info.alignment);
std.hash.autoHash(hasher, fn_info.cc);
std.hash.autoHash(hasher, fn_info.is_var_args);
@ -1046,7 +1046,7 @@ pub const Type = extern union {
for (fn_info.param_types) |param_ty, i| {
std.hash.autoHash(hasher, fn_info.paramIsComptime(i));
if (param_ty.tag() == .generic_poison) continue;
hashWithHasher(param_ty, hasher, target);
hashWithHasher(param_ty, hasher, mod);
}
},
@ -1059,8 +1059,8 @@ pub const Type = extern union {
const elem_ty = ty.elemType();
std.hash.autoHash(hasher, ty.arrayLen());
hashWithHasher(elem_ty, hasher, target);
hashSentinel(ty.sentinel(), elem_ty, hasher, target);
hashWithHasher(elem_ty, hasher, mod);
hashSentinel(ty.sentinel(), elem_ty, hasher, mod);
},
.vector => {
@ -1068,7 +1068,7 @@ pub const Type = extern union {
const elem_ty = ty.elemType();
std.hash.autoHash(hasher, ty.vectorLen());
hashWithHasher(elem_ty, hasher, target);
hashWithHasher(elem_ty, hasher, mod);
},
.single_const_pointer_to_comptime_int,
@ -1092,8 +1092,8 @@ pub const Type = extern union {
std.hash.autoHash(hasher, std.builtin.TypeId.Pointer);
const info = ty.ptrInfo().data;
hashWithHasher(info.pointee_type, hasher, target);
hashSentinel(info.sentinel, info.pointee_type, hasher, target);
hashWithHasher(info.pointee_type, hasher, mod);
hashSentinel(info.sentinel, info.pointee_type, hasher, mod);
std.hash.autoHash(hasher, info.@"align");
std.hash.autoHash(hasher, info.@"addrspace");
std.hash.autoHash(hasher, info.bit_offset);
@ -1111,22 +1111,22 @@ pub const Type = extern union {
std.hash.autoHash(hasher, std.builtin.TypeId.Optional);
var buf: Payload.ElemType = undefined;
hashWithHasher(ty.optionalChild(&buf), hasher, target);
hashWithHasher(ty.optionalChild(&buf), hasher, mod);
},
.anyerror_void_error_union, .error_union => {
std.hash.autoHash(hasher, std.builtin.TypeId.ErrorUnion);
const set_ty = ty.errorUnionSet();
hashWithHasher(set_ty, hasher, target);
hashWithHasher(set_ty, hasher, mod);
const payload_ty = ty.errorUnionPayload();
hashWithHasher(payload_ty, hasher, target);
hashWithHasher(payload_ty, hasher, mod);
},
.anyframe_T => {
std.hash.autoHash(hasher, std.builtin.TypeId.AnyFrame);
hashWithHasher(ty.childType(), hasher, target);
hashWithHasher(ty.childType(), hasher, mod);
},
.empty_struct => {
@ -1145,10 +1145,10 @@ pub const Type = extern union {
std.hash.autoHash(hasher, tuple.types.len);
for (tuple.types) |field_ty, i| {
hashWithHasher(field_ty, hasher, target);
hashWithHasher(field_ty, hasher, mod);
const field_val = tuple.values[i];
if (field_val.tag() == .unreachable_value) continue;
field_val.hash(field_ty, hasher, target);
field_val.hash(field_ty, hasher, mod);
}
},
.anon_struct => {
@ -1160,9 +1160,9 @@ pub const Type = extern union {
const field_name = struct_obj.names[i];
const field_val = struct_obj.values[i];
hasher.update(field_name);
hashWithHasher(field_ty, hasher, target);
hashWithHasher(field_ty, hasher, mod);
if (field_val.tag() == .unreachable_value) continue;
field_val.hash(field_ty, hasher, target);
field_val.hash(field_ty, hasher, mod);
}
},
@ -1210,35 +1210,35 @@ pub const Type = extern union {
}
}
fn hashSentinel(opt_val: ?Value, ty: Type, hasher: *std.hash.Wyhash, target: Target) void {
fn hashSentinel(opt_val: ?Value, ty: Type, hasher: *std.hash.Wyhash, mod: *Module) void {
if (opt_val) |s| {
std.hash.autoHash(hasher, true);
s.hash(ty, hasher, target);
s.hash(ty, hasher, mod);
} else {
std.hash.autoHash(hasher, false);
}
}
pub const HashContext64 = struct {
target: Target,
mod: *Module,
pub fn hash(self: @This(), t: Type) u64 {
return t.hash(self.target);
return t.hash(self.mod);
}
pub fn eql(self: @This(), a: Type, b: Type) bool {
return a.eql(b, self.target);
return a.eql(b, self.mod);
}
};
pub const HashContext32 = struct {
target: Target,
mod: *Module,
pub fn hash(self: @This(), t: Type) u32 {
return @truncate(u32, t.hash(self.target));
return @truncate(u32, t.hash(self.mod));
}
pub fn eql(self: @This(), a: Type, b: Type, b_index: usize) bool {
_ = b_index;
return a.eql(b, self.target);
return a.eql(b, self.mod);
}
};
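These context structs let hash maps keyed by Type thread a *Module through hashing and equality instead of a bare Target. A sketch of passing the context through a map operation (the helper and map shape are illustrative; Type.HashContext64 is the one defined above):

const std = @import("std");

fn countType(gpa: std.mem.Allocator, mod: *Module, ty: Type) !void {
    var map: std.HashMapUnmanaged(Type, u32, Type.HashContext64, std.hash_map.default_max_load_percentage) = .{};
    defer map.deinit(gpa);

    // Every hash/eql call during getOrPutContext sees the module, so the
    // map itself never needs to store a context.
    const gop = try map.getOrPutContext(gpa, ty, .{ .mod = mod });
    if (!gop.found_existing) gop.value_ptr.* = 0;
    gop.value_ptr.* += 1;
}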
@ -1483,16 +1483,16 @@ pub const Type = extern union {
@compileError("do not format types directly; use either ty.fmtDebug() or ty.fmt()");
}
pub fn fmt(ty: Type, target: Target) std.fmt.Formatter(format2) {
pub fn fmt(ty: Type, module: *Module) std.fmt.Formatter(format2) {
return .{ .data = .{
.ty = ty,
.target = target,
.module = module,
} };
}
const FormatContext = struct {
ty: Type,
target: Target,
module: *Module,
};
fn format2(
@ -1503,7 +1503,7 @@ pub const Type = extern union {
) !void {
comptime assert(unused_format_string.len == 0);
_ = options;
return print(ctx.ty, writer, ctx.target);
return print(ctx.ty, writer, ctx.module);
}
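Callers then print types through the normal std.fmt machinery; format2 asserts an empty format specifier, so {} is the only valid placeholder. A usage sketch (some_ty and mod assumed in scope):

std.debug.print("element type: {}\n", .{some_ty.fmt(mod)});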
pub fn fmtDebug(ty: Type) std.fmt.Formatter(dump) {
@ -1579,27 +1579,39 @@ pub const Type = extern union {
.@"struct" => {
const struct_obj = ty.castTag(.@"struct").?.data;
return struct_obj.owner_decl.renderFullyQualifiedName(writer);
return writer.print("({s} decl={d})", .{
@tagName(t), struct_obj.owner_decl,
});
},
.@"union", .union_tagged => {
const union_obj = ty.cast(Payload.Union).?.data;
return union_obj.owner_decl.renderFullyQualifiedName(writer);
return writer.print("({s} decl={d})", .{
@tagName(t), union_obj.owner_decl,
});
},
.enum_full, .enum_nonexhaustive => {
const enum_full = ty.cast(Payload.EnumFull).?.data;
return enum_full.owner_decl.renderFullyQualifiedName(writer);
return writer.print("({s} decl={d})", .{
@tagName(t), enum_full.owner_decl,
});
},
.enum_simple => {
const enum_simple = ty.castTag(.enum_simple).?.data;
return enum_simple.owner_decl.renderFullyQualifiedName(writer);
return writer.print("({s} decl={d})", .{
@tagName(t), enum_simple.owner_decl,
});
},
.enum_numbered => {
const enum_numbered = ty.castTag(.enum_numbered).?.data;
return enum_numbered.owner_decl.renderFullyQualifiedName(writer);
return writer.print("({s} decl={d})", .{
@tagName(t), enum_numbered.owner_decl,
});
},
.@"opaque" => {
// TODO use declaration name
return writer.writeAll("opaque {}");
const opaque_obj = ty.castTag(.@"opaque").?.data;
return writer.print("({s} decl={d})", .{
@tagName(t), opaque_obj.owner_decl,
});
},
.anyerror_void_error_union => return writer.writeAll("anyerror!void"),
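These dump renderings fall back to the raw index because dump has no *Module to resolve it with; the module-aware print further down does the resolution instead. The contrast, using the struct case from this file:

// dump(): no module available, so only the integer index can be shown.
try writer.print("({s} decl={d})", .{ @tagName(t), struct_obj.owner_decl });

// print(): the module resolves the index and renders the decl's name.
const decl = mod.declPtr(struct_obj.owner_decl);
try decl.renderFullyQualifiedName(mod, writer);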
@ -1845,7 +1857,9 @@ pub const Type = extern union {
},
.error_set_inferred => {
const func = ty.castTag(.error_set_inferred).?.data.func;
return writer.print("@typeInfo(@typeInfo(@TypeOf({s})).Fn.return_type.?).ErrorUnion.error_set", .{func.owner_decl.name});
return writer.print("({s} func={d})", .{
@tagName(t), func.owner_decl,
});
},
.error_set_merged => {
const names = ty.castTag(.error_set_merged).?.data.keys();
@ -1871,15 +1885,15 @@ pub const Type = extern union {
pub const nameAllocArena = nameAlloc;
pub fn nameAlloc(ty: Type, ally: Allocator, target: Target) Allocator.Error![:0]const u8 {
pub fn nameAlloc(ty: Type, ally: Allocator, module: *Module) Allocator.Error![:0]const u8 {
var buffer = std.ArrayList(u8).init(ally);
defer buffer.deinit();
try ty.print(buffer.writer(), target);
try ty.print(buffer.writer(), module);
return buffer.toOwnedSliceSentinel(0);
}
/// Prints a name suitable for `@typeName`.
pub fn print(ty: Type, writer: anytype, target: Target) @TypeOf(writer).Error!void {
pub fn print(ty: Type, writer: anytype, mod: *Module) @TypeOf(writer).Error!void {
const t = ty.tag();
switch (t) {
.inferred_alloc_const => unreachable,
@ -1946,32 +1960,38 @@ pub const Type = extern union {
.empty_struct => {
const namespace = ty.castTag(.empty_struct).?.data;
try namespace.renderFullyQualifiedName("", writer);
try namespace.renderFullyQualifiedName(mod, "", writer);
},
.@"struct" => {
const struct_obj = ty.castTag(.@"struct").?.data;
try struct_obj.owner_decl.renderFullyQualifiedName(writer);
const decl = mod.declPtr(struct_obj.owner_decl);
try decl.renderFullyQualifiedName(mod, writer);
},
.@"union", .union_tagged => {
const union_obj = ty.cast(Payload.Union).?.data;
try union_obj.owner_decl.renderFullyQualifiedName(writer);
const decl = mod.declPtr(union_obj.owner_decl);
try decl.renderFullyQualifiedName(mod, writer);
},
.enum_full, .enum_nonexhaustive => {
const enum_full = ty.cast(Payload.EnumFull).?.data;
try enum_full.owner_decl.renderFullyQualifiedName(writer);
const decl = mod.declPtr(enum_full.owner_decl);
try decl.renderFullyQualifiedName(mod, writer);
},
.enum_simple => {
const enum_simple = ty.castTag(.enum_simple).?.data;
try enum_simple.owner_decl.renderFullyQualifiedName(writer);
const decl = mod.declPtr(enum_simple.owner_decl);
try decl.renderFullyQualifiedName(mod, writer);
},
.enum_numbered => {
const enum_numbered = ty.castTag(.enum_numbered).?.data;
try enum_numbered.owner_decl.renderFullyQualifiedName(writer);
const decl = mod.declPtr(enum_numbered.owner_decl);
try decl.renderFullyQualifiedName(mod, writer);
},
.@"opaque" => {
const opaque_obj = ty.cast(Payload.Opaque).?.data;
try opaque_obj.owner_decl.renderFullyQualifiedName(writer);
const decl = mod.declPtr(opaque_obj.owner_decl);
try decl.renderFullyQualifiedName(mod, writer);
},
.anyerror_void_error_union => try writer.writeAll("anyerror!void"),
@ -1990,7 +2010,8 @@ pub const Type = extern union {
const func = ty.castTag(.error_set_inferred).?.data.func;
try writer.writeAll("@typeInfo(@typeInfo(@TypeOf(");
try func.owner_decl.renderFullyQualifiedName(writer);
const owner_decl = mod.declPtr(func.owner_decl);
try owner_decl.renderFullyQualifiedName(mod, writer);
try writer.writeAll(")).Fn.return_type.?).ErrorUnion.error_set");
},
@ -1999,7 +2020,7 @@ pub const Type = extern union {
try writer.writeAll("fn(");
for (fn_info.param_types) |param_ty, i| {
if (i != 0) try writer.writeAll(", ");
try print(param_ty, writer, target);
try print(param_ty, writer, mod);
}
if (fn_info.is_var_args) {
if (fn_info.param_types.len != 0) {
@ -2016,14 +2037,14 @@ pub const Type = extern union {
if (fn_info.alignment != 0) {
try writer.print("align({d}) ", .{fn_info.alignment});
}
try print(fn_info.return_type, writer, target);
try print(fn_info.return_type, writer, mod);
},
.error_union => {
const error_union = ty.castTag(.error_union).?.data;
try print(error_union.error_set, writer, target);
try print(error_union.error_set, writer, mod);
try writer.writeAll("!");
try print(error_union.payload, writer, target);
try print(error_union.payload, writer, mod);
},
.array_u8 => {
@ -2037,21 +2058,21 @@ pub const Type = extern union {
.vector => {
const payload = ty.castTag(.vector).?.data;
try writer.print("@Vector({d}, ", .{payload.len});
try print(payload.elem_type, writer, target);
try print(payload.elem_type, writer, mod);
try writer.writeAll(")");
},
.array => {
const payload = ty.castTag(.array).?.data;
try writer.print("[{d}]", .{payload.len});
try print(payload.elem_type, writer, target);
try print(payload.elem_type, writer, mod);
},
.array_sentinel => {
const payload = ty.castTag(.array_sentinel).?.data;
try writer.print("[{d}:{}]", .{
payload.len,
payload.sentinel.fmtValue(payload.elem_type, target),
payload.sentinel.fmtValue(payload.elem_type, mod),
});
try print(payload.elem_type, writer, target);
try print(payload.elem_type, writer, mod);
},
.tuple => {
const tuple = ty.castTag(.tuple).?.data;
@ -2063,9 +2084,9 @@ pub const Type = extern union {
if (val.tag() != .unreachable_value) {
try writer.writeAll("comptime ");
}
try print(field_ty, writer, target);
try print(field_ty, writer, mod);
if (val.tag() != .unreachable_value) {
try writer.print(" = {}", .{val.fmtValue(field_ty, target)});
try writer.print(" = {}", .{val.fmtValue(field_ty, mod)});
}
}
try writer.writeAll("}");
@ -2083,10 +2104,10 @@ pub const Type = extern union {
try writer.writeAll(anon_struct.names[i]);
try writer.writeAll(": ");
try print(field_ty, writer, target);
try print(field_ty, writer, mod);
if (val.tag() != .unreachable_value) {
try writer.print(" = {}", .{val.fmtValue(field_ty, target)});
try writer.print(" = {}", .{val.fmtValue(field_ty, mod)});
}
}
try writer.writeAll("}");
@ -2106,8 +2127,8 @@ pub const Type = extern union {
if (info.sentinel) |s| switch (info.size) {
.One, .C => unreachable,
.Many => try writer.print("[*:{}]", .{s.fmtValue(info.pointee_type, target)}),
.Slice => try writer.print("[:{}]", .{s.fmtValue(info.pointee_type, target)}),
.Many => try writer.print("[*:{}]", .{s.fmtValue(info.pointee_type, mod)}),
.Slice => try writer.print("[:{}]", .{s.fmtValue(info.pointee_type, mod)}),
} else switch (info.size) {
.One => try writer.writeAll("*"),
.Many => try writer.writeAll("[*]"),
@ -2129,7 +2150,7 @@ pub const Type = extern union {
if (info.@"volatile") try writer.writeAll("volatile ");
if (info.@"allowzero" and info.size != .C) try writer.writeAll("allowzero ");
try print(info.pointee_type, writer, target);
try print(info.pointee_type, writer, mod);
},
.int_signed => {
@ -2143,22 +2164,22 @@ pub const Type = extern union {
.optional => {
const child_type = ty.castTag(.optional).?.data;
try writer.writeByte('?');
try print(child_type, writer, target);
try print(child_type, writer, mod);
},
.optional_single_mut_pointer => {
const pointee_type = ty.castTag(.optional_single_mut_pointer).?.data;
try writer.writeAll("?*");
try print(pointee_type, writer, target);
try print(pointee_type, writer, mod);
},
.optional_single_const_pointer => {
const pointee_type = ty.castTag(.optional_single_const_pointer).?.data;
try writer.writeAll("?*const ");
try print(pointee_type, writer, target);
try print(pointee_type, writer, mod);
},
.anyframe_T => {
const return_type = ty.castTag(.anyframe_T).?.data;
try writer.print("anyframe->", .{});
try print(return_type, writer, target);
try print(return_type, writer, mod);
},
.error_set => {
const names = ty.castTag(.error_set).?.data.names.keys();
@ -3834,8 +3855,8 @@ pub const Type = extern union {
/// For [*]T, returns *T
/// For []T, returns *T
/// Handles const-ness and address spaces in particular.
pub fn elemPtrType(ptr_ty: Type, arena: Allocator, target: Target) !Type {
return try Type.ptr(arena, target, .{
pub fn elemPtrType(ptr_ty: Type, arena: Allocator, mod: *Module) !Type {
return try Type.ptr(arena, mod, .{
.pointee_type = ptr_ty.elemType2(),
.mutable = ptr_ty.ptrIsMutable(),
.@"addrspace" = ptr_ty.ptrAddressSpace(),
@ -3948,9 +3969,9 @@ pub const Type = extern union {
return union_obj.fields;
}
pub fn unionFieldType(ty: Type, enum_tag: Value, target: Target) Type {
pub fn unionFieldType(ty: Type, enum_tag: Value, mod: *Module) Type {
const union_obj = ty.cast(Payload.Union).?.data;
const index = union_obj.tag_ty.enumTagFieldIndex(enum_tag, target).?;
const index = union_obj.tag_ty.enumTagFieldIndex(enum_tag, mod).?;
assert(union_obj.haveFieldTypes());
return union_obj.fields.values()[index].ty;
}
@ -4970,20 +4991,20 @@ pub const Type = extern union {
/// Asserts `ty` is an enum. `enum_tag` can either be `enum_field_index` or
/// an integer which represents the enum value. Returns the field index in
/// declaration order, or `null` if `enum_tag` does not match any field.
pub fn enumTagFieldIndex(ty: Type, enum_tag: Value, target: Target) ?usize {
pub fn enumTagFieldIndex(ty: Type, enum_tag: Value, mod: *Module) ?usize {
if (enum_tag.castTag(.enum_field_index)) |payload| {
return @as(usize, payload.data);
}
const S = struct {
fn fieldWithRange(int_ty: Type, int_val: Value, end: usize, tg: Target) ?usize {
fn fieldWithRange(int_ty: Type, int_val: Value, end: usize, m: *Module) ?usize {
if (int_val.compareWithZero(.lt)) return null;
var end_payload: Value.Payload.U64 = .{
.base = .{ .tag = .int_u64 },
.data = end,
};
const end_val = Value.initPayload(&end_payload.base);
if (int_val.compare(.gte, end_val, int_ty, tg)) return null;
return @intCast(usize, int_val.toUnsignedInt(tg));
if (int_val.compare(.gte, end_val, int_ty, m)) return null;
return @intCast(usize, int_val.toUnsignedInt(m.getTarget()));
}
};
switch (ty.tag()) {
@ -4991,11 +5012,11 @@ pub const Type = extern union {
const enum_full = ty.cast(Payload.EnumFull).?.data;
const tag_ty = enum_full.tag_ty;
if (enum_full.values.count() == 0) {
return S.fieldWithRange(tag_ty, enum_tag, enum_full.fields.count(), target);
return S.fieldWithRange(tag_ty, enum_tag, enum_full.fields.count(), mod);
} else {
return enum_full.values.getIndexContext(enum_tag, .{
.ty = tag_ty,
.target = target,
.mod = mod,
});
}
},
@ -5003,11 +5024,11 @@ pub const Type = extern union {
const enum_obj = ty.castTag(.enum_numbered).?.data;
const tag_ty = enum_obj.tag_ty;
if (enum_obj.values.count() == 0) {
return S.fieldWithRange(tag_ty, enum_tag, enum_obj.fields.count(), target);
return S.fieldWithRange(tag_ty, enum_tag, enum_obj.fields.count(), mod);
} else {
return enum_obj.values.getIndexContext(enum_tag, .{
.ty = tag_ty,
.target = target,
.mod = mod,
});
}
},
@ -5020,7 +5041,7 @@ pub const Type = extern union {
.data = bits,
};
const tag_ty = Type.initPayload(&buffer.base);
return S.fieldWithRange(tag_ty, enum_tag, fields_len, target);
return S.fieldWithRange(tag_ty, enum_tag, fields_len, mod);
},
.atomic_order,
.atomic_rmw_op,
@ -5224,32 +5245,35 @@ pub const Type = extern union {
}
}
pub fn declSrcLoc(ty: Type) Module.SrcLoc {
return declSrcLocOrNull(ty).?;
pub fn declSrcLoc(ty: Type, mod: *Module) Module.SrcLoc {
return declSrcLocOrNull(ty, mod).?;
}
pub fn declSrcLocOrNull(ty: Type) ?Module.SrcLoc {
pub fn declSrcLocOrNull(ty: Type, mod: *Module) ?Module.SrcLoc {
switch (ty.tag()) {
.enum_full, .enum_nonexhaustive => {
const enum_full = ty.cast(Payload.EnumFull).?.data;
return enum_full.srcLoc();
return enum_full.srcLoc(mod);
},
.enum_numbered => {
const enum_numbered = ty.castTag(.enum_numbered).?.data;
return enum_numbered.srcLoc(mod);
},
.enum_numbered => return ty.castTag(.enum_numbered).?.data.srcLoc(),
.enum_simple => {
const enum_simple = ty.castTag(.enum_simple).?.data;
return enum_simple.srcLoc();
return enum_simple.srcLoc(mod);
},
.@"struct" => {
const struct_obj = ty.castTag(.@"struct").?.data;
return struct_obj.srcLoc();
return struct_obj.srcLoc(mod);
},
.error_set => {
const error_set = ty.castTag(.error_set).?.data;
return error_set.srcLoc();
return error_set.srcLoc(mod);
},
.@"union", .union_tagged => {
const union_obj = ty.cast(Payload.Union).?.data;
return union_obj.srcLoc();
return union_obj.srcLoc(mod);
},
.atomic_order,
.atomic_rmw_op,
@ -5268,7 +5292,7 @@ pub const Type = extern union {
}
}
pub fn getOwnerDecl(ty: Type) *Module.Decl {
pub fn getOwnerDecl(ty: Type) Module.Decl.Index {
switch (ty.tag()) {
.enum_full, .enum_nonexhaustive => {
const enum_full = ty.cast(Payload.EnumFull).?.data;
@ -5357,30 +5381,30 @@ pub const Type = extern union {
}
/// Asserts the type is an enum.
pub fn enumHasInt(ty: Type, int: Value, target: Target) bool {
pub fn enumHasInt(ty: Type, int: Value, mod: *Module) bool {
const S = struct {
fn intInRange(tag_ty: Type, int_val: Value, end: usize, tg: Target) bool {
fn intInRange(tag_ty: Type, int_val: Value, end: usize, m: *Module) bool {
if (int_val.compareWithZero(.lt)) return false;
var end_payload: Value.Payload.U64 = .{
.base = .{ .tag = .int_u64 },
.data = end,
};
const end_val = Value.initPayload(&end_payload.base);
if (int_val.compare(.gte, end_val, tag_ty, tg)) return false;
if (int_val.compare(.gte, end_val, tag_ty, m)) return false;
return true;
}
};
switch (ty.tag()) {
.enum_nonexhaustive => return int.intFitsInType(ty, target),
.enum_nonexhaustive => return int.intFitsInType(ty, mod.getTarget()),
.enum_full => {
const enum_full = ty.castTag(.enum_full).?.data;
const tag_ty = enum_full.tag_ty;
if (enum_full.values.count() == 0) {
return S.intInRange(tag_ty, int, enum_full.fields.count(), target);
return S.intInRange(tag_ty, int, enum_full.fields.count(), mod);
} else {
return enum_full.values.containsContext(int, .{
.ty = tag_ty,
.target = target,
.mod = mod,
});
}
},
@ -5388,11 +5412,11 @@ pub const Type = extern union {
const enum_obj = ty.castTag(.enum_numbered).?.data;
const tag_ty = enum_obj.tag_ty;
if (enum_obj.values.count() == 0) {
return S.intInRange(tag_ty, int, enum_obj.fields.count(), target);
return S.intInRange(tag_ty, int, enum_obj.fields.count(), mod);
} else {
return enum_obj.values.containsContext(int, .{
.ty = tag_ty,
.target = target,
.mod = mod,
});
}
},
@ -5405,7 +5429,7 @@ pub const Type = extern union {
.data = bits,
};
const tag_ty = Type.initPayload(&buffer.base);
return S.intInRange(tag_ty, int, fields_len, target);
return S.intInRange(tag_ty, int, fields_len, mod);
},
.atomic_order,
.atomic_rmw_op,
@ -5937,7 +5961,9 @@ pub const Type = extern union {
pub const @"anyopaque" = initTag(.anyopaque);
pub const @"null" = initTag(.@"null");
pub fn ptr(arena: Allocator, target: Target, data: Payload.Pointer.Data) !Type {
pub fn ptr(arena: Allocator, mod: *Module, data: Payload.Pointer.Data) !Type {
const target = mod.getTarget();
var d = data;
if (d.size == .C) {
@ -5967,7 +5993,7 @@ pub const Type = extern union {
d.bit_offset == 0 and d.host_size == 0 and !d.@"allowzero" and !d.@"volatile")
{
if (d.sentinel) |sent| {
if (!d.mutable and d.pointee_type.eql(Type.u8, target)) {
if (!d.mutable and d.pointee_type.eql(Type.u8, mod)) {
switch (d.size) {
.Slice => {
if (sent.compareWithZero(.eq)) {
@ -5982,7 +6008,7 @@ pub const Type = extern union {
else => {},
}
}
} else if (!d.mutable and d.pointee_type.eql(Type.u8, target)) {
} else if (!d.mutable and d.pointee_type.eql(Type.u8, mod)) {
switch (d.size) {
.Slice => return Type.initTag(.const_slice_u8),
.Many => return Type.initTag(.manyptr_const_u8),
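The first two lines of the new `ptr` show the recurring shape of this migration: accept `mod: *Module`, then derive `target` locally only where ABI queries still need it, while type equality becomes mod-aware. A minimal sketch mirroring the u8-pointer fast path above:

fn isConstU8Pointee(mod: *Module, d: Type.Payload.Pointer.Data) bool {
    // Interned u8-pointer fast paths now use mod-aware type equality.
    return !d.mutable and d.pointee_type.eql(Type.u8, mod);
}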
@ -6016,11 +6042,11 @@ pub const Type = extern union {
len: u64,
sent: ?Value,
elem_type: Type,
target: Target,
mod: *Module,
) Allocator.Error!Type {
if (elem_type.eql(Type.u8, target)) {
if (elem_type.eql(Type.u8, mod)) {
if (sent) |some| {
if (some.eql(Value.zero, elem_type, target)) {
if (some.eql(Value.zero, elem_type, mod)) {
return Tag.array_u8_sentinel_0.create(arena, len);
}
} else {
@ -6067,11 +6093,11 @@ pub const Type = extern union {
arena: Allocator,
error_set: Type,
payload: Type,
target: Target,
mod: *Module,
) Allocator.Error!Type {
assert(error_set.zigTypeTag() == .ErrorSet);
if (error_set.eql(Type.@"anyerror", target) and
payload.eql(Type.void, target))
if (error_set.eql(Type.@"anyerror", mod) and
payload.eql(Type.void, mod))
{
return Type.initTag(.anyerror_void_error_union);
}
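Both constructors above keep their interned fast paths, now keyed by mod-aware `eql`. A usage sketch, with illustrative argument values:

// Collapses to Tag.array_u8_sentinel_0 per the sentinel branch above.
const ty = try Type.array(arena, len, Value.zero, Type.u8, mod);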

View File

@ -731,16 +731,16 @@ pub const Value = extern union {
.int_i64 => return std.fmt.formatIntValue(val.castTag(.int_i64).?.data, "", options, out_stream),
.int_big_positive => return out_stream.print("{}", .{val.castTag(.int_big_positive).?.asBigInt()}),
.int_big_negative => return out_stream.print("{}", .{val.castTag(.int_big_negative).?.asBigInt()}),
.function => return out_stream.print("(function '{s}')", .{val.castTag(.function).?.data.owner_decl.name}),
.function => return out_stream.print("(function decl={d})", .{val.castTag(.function).?.data.owner_decl}),
.extern_fn => return out_stream.writeAll("(extern function)"),
.variable => return out_stream.writeAll("(variable)"),
.decl_ref_mut => {
const decl = val.castTag(.decl_ref_mut).?.data.decl;
return out_stream.print("(decl_ref_mut '{s}')", .{decl.name});
const decl_index = val.castTag(.decl_ref_mut).?.data.decl_index;
return out_stream.print("(decl_ref_mut {d})", .{decl_index});
},
.decl_ref => {
const decl = val.castTag(.decl_ref).?.data;
return out_stream.print("(decl ref '{s}')", .{decl.name});
const decl_index = val.castTag(.decl_ref).?.data;
return out_stream.print("(decl_ref {d})", .{decl_index});
},
.elem_ptr => {
const elem_ptr = val.castTag(.elem_ptr).?.data;
@ -798,16 +798,17 @@ pub const Value = extern union {
return .{ .data = val };
}
pub fn fmtValue(val: Value, ty: Type, target: Target) std.fmt.Formatter(TypedValue.format) {
pub fn fmtValue(val: Value, ty: Type, mod: *Module) std.fmt.Formatter(TypedValue.format) {
return .{ .data = .{
.tv = .{ .ty = ty, .val = val },
.target = target,
.mod = mod,
} };
}
/// Asserts that the value is representable as an array of bytes.
/// Copies the value into a freshly allocated slice of memory, which is owned by the caller.
pub fn toAllocatedBytes(val: Value, ty: Type, allocator: Allocator, target: Target) ![]u8 {
pub fn toAllocatedBytes(val: Value, ty: Type, allocator: Allocator, mod: *Module) ![]u8 {
const target = mod.getTarget();
switch (val.tag()) {
.bytes => {
const bytes = val.castTag(.bytes).?.data;
@ -823,25 +824,26 @@ pub const Value = extern union {
return result;
},
.decl_ref => {
const decl = val.castTag(.decl_ref).?.data;
const decl_index = val.castTag(.decl_ref).?.data;
const decl = mod.declPtr(decl_index);
const decl_val = try decl.value();
return decl_val.toAllocatedBytes(decl.ty, allocator, target);
return decl_val.toAllocatedBytes(decl.ty, allocator, mod);
},
.the_only_possible_value => return &[_]u8{},
.slice => {
const slice = val.castTag(.slice).?.data;
return arrayToAllocatedBytes(slice.ptr, slice.len.toUnsignedInt(target), allocator, target);
return arrayToAllocatedBytes(slice.ptr, slice.len.toUnsignedInt(target), allocator, mod);
},
else => return arrayToAllocatedBytes(val, ty.arrayLen(), allocator, target),
else => return arrayToAllocatedBytes(val, ty.arrayLen(), allocator, mod),
}
}
fn arrayToAllocatedBytes(val: Value, len: u64, allocator: Allocator, target: Target) ![]u8 {
fn arrayToAllocatedBytes(val: Value, len: u64, allocator: Allocator, mod: *Module) ![]u8 {
const result = try allocator.alloc(u8, @intCast(usize, len));
var elem_value_buf: ElemValueBuffer = undefined;
for (result) |*elem, i| {
const elem_val = val.elemValueBuffer(i, &elem_value_buf);
elem.* = @intCast(u8, elem_val.toUnsignedInt(target));
const elem_val = val.elemValueBuffer(mod, i, &elem_value_buf);
elem.* = @intCast(u8, elem_val.toUnsignedInt(mod.getTarget()));
}
return result;
}
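The `decl_ref` arm above shows the resolution step this commit threads through `value.zig`: a `decl_ref` payload is now a `Module.Decl.Index`, so following it requires `mod.declPtr`. Sketched in isolation, assuming `val`, `mod`, and `allocator` are in scope:

const decl_index = val.castTag(.decl_ref).?.data; // Module.Decl.Index, not *Decl
const decl = mod.declPtr(decl_index);             // index -> *Module.Decl
const decl_val = try decl.value();
const bytes = try decl_val.toAllocatedBytes(decl.ty, allocator, mod);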
@ -1164,7 +1166,7 @@ pub const Value = extern union {
var elem_value_buf: ElemValueBuffer = undefined;
var buf_off: usize = 0;
while (elem_i < len) : (elem_i += 1) {
const elem_val = val.elemValueBuffer(elem_i, &elem_value_buf);
const elem_val = val.elemValueBuffer(mod, elem_i, &elem_value_buf);
writeToMemory(elem_val, elem_ty, mod, buffer[buf_off..]);
buf_off += elem_size;
}
@ -1975,34 +1977,47 @@ pub const Value = extern union {
/// Asserts the values are comparable. Both operands have type `ty`.
/// Vector results will be reduced with AND.
pub fn compare(lhs: Value, op: std.math.CompareOperator, rhs: Value, ty: Type, target: Target) bool {
pub fn compare(lhs: Value, op: std.math.CompareOperator, rhs: Value, ty: Type, mod: *Module) bool {
if (ty.zigTypeTag() == .Vector) {
var i: usize = 0;
while (i < ty.vectorLen()) : (i += 1) {
if (!compareScalar(lhs.indexVectorlike(i), op, rhs.indexVectorlike(i), ty.scalarType(), target)) {
if (!compareScalar(lhs.indexVectorlike(i), op, rhs.indexVectorlike(i), ty.scalarType(), mod)) {
return false;
}
}
return true;
}
return compareScalar(lhs, op, rhs, ty, target);
return compareScalar(lhs, op, rhs, ty, mod);
}
/// Asserts the values are comparable. Both operands have type `ty`.
pub fn compareScalar(lhs: Value, op: std.math.CompareOperator, rhs: Value, ty: Type, target: Target) bool {
pub fn compareScalar(
lhs: Value,
op: std.math.CompareOperator,
rhs: Value,
ty: Type,
mod: *Module,
) bool {
return switch (op) {
.eq => lhs.eql(rhs, ty, target),
.neq => !lhs.eql(rhs, ty, target),
else => compareHetero(lhs, op, rhs, target),
.eq => lhs.eql(rhs, ty, mod),
.neq => !lhs.eql(rhs, ty, mod),
else => compareHetero(lhs, op, rhs, mod.getTarget()),
};
}
/// Asserts the values are comparable vectors of type `ty`.
pub fn compareVector(lhs: Value, op: std.math.CompareOperator, rhs: Value, ty: Type, allocator: Allocator, target: Target) !Value {
pub fn compareVector(
lhs: Value,
op: std.math.CompareOperator,
rhs: Value,
ty: Type,
allocator: Allocator,
mod: *Module,
) !Value {
assert(ty.zigTypeTag() == .Vector);
const result_data = try allocator.alloc(Value, ty.vectorLen());
for (result_data) |*scalar, i| {
const res_bool = compareScalar(lhs.indexVectorlike(i), op, rhs.indexVectorlike(i), ty.scalarType(), target);
const res_bool = compareScalar(lhs.indexVectorlike(i), op, rhs.indexVectorlike(i), ty.scalarType(), mod);
scalar.* = if (res_bool) Value.@"true" else Value.@"false";
}
return Value.Tag.aggregate.create(allocator, result_data);
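Per the doc comment on `compare`, vector comparisons are reduced with AND, so a single mismatching lane fails the whole comparison; `compareVector` instead materializes the lane-wise results. A hypothetical call site, assuming `lhs`, `rhs`, `vec_ty`, `allocator`, and `mod` in scope:

// true only if every lane satisfies `op`.
const all = lhs.compare(.eq, rhs, vec_ty, mod);
// per-lane bools, returned as an aggregate Value.
const lanes = try lhs.compareVector(.eq, rhs, vec_ty, allocator, mod);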
@ -2032,7 +2047,8 @@ pub const Value = extern union {
/// for `a`. This function must act *as if* `a` has been coerced to `ty`. This complication
is required in order to make generic function instantiation efficient - specifically
/// the insertion into the monomorphized function table.
pub fn eql(a: Value, b: Value, ty: Type, target: Target) bool {
pub fn eql(a: Value, b: Value, ty: Type, mod: *Module) bool {
const target = mod.getTarget();
const a_tag = a.tag();
const b_tag = b.tag();
if (a_tag == b_tag) switch (a_tag) {
@ -2052,31 +2068,31 @@ pub const Value = extern union {
const a_payload = a.castTag(.opt_payload).?.data;
const b_payload = b.castTag(.opt_payload).?.data;
var buffer: Type.Payload.ElemType = undefined;
return eql(a_payload, b_payload, ty.optionalChild(&buffer), target);
return eql(a_payload, b_payload, ty.optionalChild(&buffer), mod);
},
.slice => {
const a_payload = a.castTag(.slice).?.data;
const b_payload = b.castTag(.slice).?.data;
if (!eql(a_payload.len, b_payload.len, Type.usize, target)) return false;
if (!eql(a_payload.len, b_payload.len, Type.usize, mod)) return false;
var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined;
const ptr_ty = ty.slicePtrFieldType(&ptr_buf);
return eql(a_payload.ptr, b_payload.ptr, ptr_ty, target);
return eql(a_payload.ptr, b_payload.ptr, ptr_ty, mod);
},
.elem_ptr => {
const a_payload = a.castTag(.elem_ptr).?.data;
const b_payload = b.castTag(.elem_ptr).?.data;
if (a_payload.index != b_payload.index) return false;
return eql(a_payload.array_ptr, b_payload.array_ptr, ty, target);
return eql(a_payload.array_ptr, b_payload.array_ptr, ty, mod);
},
.field_ptr => {
const a_payload = a.castTag(.field_ptr).?.data;
const b_payload = b.castTag(.field_ptr).?.data;
if (a_payload.field_index != b_payload.field_index) return false;
return eql(a_payload.container_ptr, b_payload.container_ptr, ty, target);
return eql(a_payload.container_ptr, b_payload.container_ptr, ty, mod);
},
.@"error" => {
const a_name = a.castTag(.@"error").?.data.name;
@ -2086,7 +2102,7 @@ pub const Value = extern union {
.eu_payload => {
const a_payload = a.castTag(.eu_payload).?.data;
const b_payload = b.castTag(.eu_payload).?.data;
return eql(a_payload, b_payload, ty.errorUnionPayload(), target);
return eql(a_payload, b_payload, ty.errorUnionPayload(), mod);
},
.eu_payload_ptr => @panic("TODO: Implement more pointer eql cases"),
.opt_payload_ptr => @panic("TODO: Implement more pointer eql cases"),
@ -2104,7 +2120,7 @@ pub const Value = extern union {
const types = ty.tupleFields().types;
assert(types.len == a_field_vals.len);
for (types) |field_ty, i| {
if (!eql(a_field_vals[i], b_field_vals[i], field_ty, target)) return false;
if (!eql(a_field_vals[i], b_field_vals[i], field_ty, mod)) return false;
}
return true;
}
@ -2113,7 +2129,7 @@ pub const Value = extern union {
const fields = ty.structFields().values();
assert(fields.len == a_field_vals.len);
for (fields) |field, i| {
if (!eql(a_field_vals[i], b_field_vals[i], field.ty, target)) return false;
if (!eql(a_field_vals[i], b_field_vals[i], field.ty, mod)) return false;
}
return true;
}
@ -2122,7 +2138,7 @@ pub const Value = extern union {
for (a_field_vals) |a_elem, i| {
const b_elem = b_field_vals[i];
if (!eql(a_elem, b_elem, elem_ty, target)) return false;
if (!eql(a_elem, b_elem, elem_ty, mod)) return false;
}
return true;
},
@ -2132,7 +2148,7 @@ pub const Value = extern union {
switch (ty.containerLayout()) {
.Packed, .Extern => {
const tag_ty = ty.unionTagTypeHypothetical();
if (!a_union.tag.eql(b_union.tag, tag_ty, target)) {
if (!a_union.tag.eql(b_union.tag, tag_ty, mod)) {
// In this case, we must disregard mismatching tags and compare
// based on the in-memory bytes of the payloads.
@panic("TODO comptime comparison of extern union values with mismatching tags");
@ -2140,13 +2156,13 @@ pub const Value = extern union {
},
.Auto => {
const tag_ty = ty.unionTagTypeHypothetical();
if (!a_union.tag.eql(b_union.tag, tag_ty, target)) {
if (!a_union.tag.eql(b_union.tag, tag_ty, mod)) {
return false;
}
},
}
const active_field_ty = ty.unionFieldType(a_union.tag, target);
return a_union.val.eql(b_union.val, active_field_ty, target);
const active_field_ty = ty.unionFieldType(a_union.tag, mod);
return a_union.val.eql(b_union.val, active_field_ty, mod);
},
else => {},
} else if (a_tag == .null_value or b_tag == .null_value) {
@ -2171,7 +2187,7 @@ pub const Value = extern union {
var buf_b: ToTypeBuffer = undefined;
const a_type = a.toType(&buf_a);
const b_type = b.toType(&buf_b);
return a_type.eql(b_type, target);
return a_type.eql(b_type, mod);
},
.Enum => {
var buf_a: Payload.U64 = undefined;
@ -2180,7 +2196,7 @@ pub const Value = extern union {
const b_val = b.enumToInt(ty, &buf_b);
var buf_ty: Type.Payload.Bits = undefined;
const int_ty = ty.intTagType(&buf_ty);
return eql(a_val, b_val, int_ty, target);
return eql(a_val, b_val, int_ty, mod);
},
.Array, .Vector => {
const len = ty.arrayLen();
@ -2189,9 +2205,9 @@ pub const Value = extern union {
var a_buf: ElemValueBuffer = undefined;
var b_buf: ElemValueBuffer = undefined;
while (i < len) : (i += 1) {
const a_elem = elemValueBuffer(a, i, &a_buf);
const b_elem = elemValueBuffer(b, i, &b_buf);
if (!eql(a_elem, b_elem, elem_ty, target)) return false;
const a_elem = elemValueBuffer(a, mod, i, &a_buf);
const b_elem = elemValueBuffer(b, mod, i, &b_buf);
if (!eql(a_elem, b_elem, elem_ty, mod)) return false;
}
return true;
},
@ -2215,7 +2231,7 @@ pub const Value = extern union {
.base = .{ .tag = .opt_payload },
.data = a,
};
return eql(Value.initPayload(&buffer.base), b, ty, target);
return eql(Value.initPayload(&buffer.base), b, ty, mod);
}
},
else => {},
@ -2225,7 +2241,7 @@ pub const Value = extern union {
/// This function is used by hash maps and so treats floating-point NaNs as equal
/// to each other, and not equal to other floating-point values.
pub fn hash(val: Value, ty: Type, hasher: *std.hash.Wyhash, target: Target) void {
pub fn hash(val: Value, ty: Type, hasher: *std.hash.Wyhash, mod: *Module) void {
const zig_ty_tag = ty.zigTypeTag();
std.hash.autoHash(hasher, zig_ty_tag);
if (val.isUndef()) return;
@ -2242,7 +2258,7 @@ pub const Value = extern union {
.Type => {
var buf: ToTypeBuffer = undefined;
return val.toType(&buf).hashWithHasher(hasher, target);
return val.toType(&buf).hashWithHasher(hasher, mod);
},
.Float, .ComptimeFloat => {
// Normalize the float here because this hash must match eql semantics.
@ -2263,11 +2279,11 @@ pub const Value = extern union {
const slice = val.castTag(.slice).?.data;
var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined;
const ptr_ty = ty.slicePtrFieldType(&ptr_buf);
hash(slice.ptr, ptr_ty, hasher, target);
hash(slice.len, Type.usize, hasher, target);
hash(slice.ptr, ptr_ty, hasher, mod);
hash(slice.len, Type.usize, hasher, mod);
},
else => return hashPtr(val, hasher, target),
else => return hashPtr(val, hasher, mod.getTarget()),
},
.Array, .Vector => {
const len = ty.arrayLen();
@ -2275,15 +2291,15 @@ pub const Value = extern union {
var index: usize = 0;
var elem_value_buf: ElemValueBuffer = undefined;
while (index < len) : (index += 1) {
const elem_val = val.elemValueBuffer(index, &elem_value_buf);
elem_val.hash(elem_ty, hasher, target);
const elem_val = val.elemValueBuffer(mod, index, &elem_value_buf);
elem_val.hash(elem_ty, hasher, mod);
}
},
.Struct => {
if (ty.isTupleOrAnonStruct()) {
const fields = ty.tupleFields();
for (fields.values) |field_val, i| {
field_val.hash(fields.types[i], hasher, target);
field_val.hash(fields.types[i], hasher, mod);
}
return;
}
@ -2292,13 +2308,13 @@ pub const Value = extern union {
switch (val.tag()) {
.empty_struct_value => {
for (fields) |field| {
field.default_val.hash(field.ty, hasher, target);
field.default_val.hash(field.ty, hasher, mod);
}
},
.aggregate => {
const field_values = val.castTag(.aggregate).?.data;
for (field_values) |field_val, i| {
field_val.hash(fields[i].ty, hasher, target);
field_val.hash(fields[i].ty, hasher, mod);
}
},
else => unreachable,
@ -2310,7 +2326,7 @@ pub const Value = extern union {
const sub_val = payload.data;
var buffer: Type.Payload.ElemType = undefined;
const sub_ty = ty.optionalChild(&buffer);
sub_val.hash(sub_ty, hasher, target);
sub_val.hash(sub_ty, hasher, mod);
} else {
std.hash.autoHash(hasher, false); // non-null
}
@ -2319,14 +2335,14 @@ pub const Value = extern union {
if (val.tag() == .@"error") {
std.hash.autoHash(hasher, false); // error
const sub_ty = ty.errorUnionSet();
val.hash(sub_ty, hasher, target);
val.hash(sub_ty, hasher, mod);
return;
}
if (val.castTag(.eu_payload)) |payload| {
std.hash.autoHash(hasher, true); // payload
const sub_ty = ty.errorUnionPayload();
payload.data.hash(sub_ty, hasher, target);
payload.data.hash(sub_ty, hasher, mod);
return;
} else unreachable;
},
@ -2339,15 +2355,15 @@ pub const Value = extern union {
.Enum => {
var enum_space: Payload.U64 = undefined;
const int_val = val.enumToInt(ty, &enum_space);
hashInt(int_val, hasher, target);
hashInt(int_val, hasher, mod.getTarget());
},
.Union => {
const union_obj = val.cast(Payload.Union).?.data;
if (ty.unionTagType()) |tag_ty| {
union_obj.tag.hash(tag_ty, hasher, target);
union_obj.tag.hash(tag_ty, hasher, mod);
}
const active_field_ty = ty.unionFieldType(union_obj.tag, target);
union_obj.val.hash(active_field_ty, hasher, target);
const active_field_ty = ty.unionFieldType(union_obj.tag, mod);
union_obj.val.hash(active_field_ty, hasher, mod);
},
.Fn => {
const func: *Module.Fn = val.castTag(.function).?.data;
@ -2372,30 +2388,30 @@ pub const Value = extern union {
pub const ArrayHashContext = struct {
ty: Type,
target: Target,
mod: *Module,
pub fn hash(self: @This(), val: Value) u32 {
const other_context: HashContext = .{ .ty = self.ty, .target = self.target };
const other_context: HashContext = .{ .ty = self.ty, .mod = self.mod };
return @truncate(u32, other_context.hash(val));
}
pub fn eql(self: @This(), a: Value, b: Value, b_index: usize) bool {
_ = b_index;
return a.eql(b, self.ty, self.target);
return a.eql(b, self.ty, self.mod);
}
};
pub const HashContext = struct {
ty: Type,
target: Target,
mod: *Module,
pub fn hash(self: @This(), val: Value) u64 {
var hasher = std.hash.Wyhash.init(0);
val.hash(self.ty, &hasher, self.target);
val.hash(self.ty, &hasher, self.mod);
return hasher.final();
}
pub fn eql(self: @This(), a: Value, b: Value) bool {
return a.eql(b, self.ty, self.target);
return a.eql(b, self.ty, self.mod);
}
};
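Both contexts now carry `mod` rather than `target`, matching the call sites earlier in this diff; the enum-value lookup in `type.zig` is the canonical usage:

return enum_full.values.containsContext(int, .{
    .ty = tag_ty,
    .mod = mod,
});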
@ -2434,9 +2450,9 @@ pub const Value = extern union {
/// Gets the decl referenced by this pointer. If the pointer does not point
/// to a decl, or if it points to some part of a decl (like field_ptr or element_ptr),
/// this function returns null.
pub fn pointerDecl(val: Value) ?*Module.Decl {
pub fn pointerDecl(val: Value) ?Module.Decl.Index {
return switch (val.tag()) {
.decl_ref_mut => val.castTag(.decl_ref_mut).?.data.decl,
.decl_ref_mut => val.castTag(.decl_ref_mut).?.data.decl_index,
.extern_fn => val.castTag(.extern_fn).?.data.owner_decl,
.function => val.castTag(.function).?.data.owner_decl,
.variable => val.castTag(.variable).?.data.owner_decl,
@ -2462,7 +2478,7 @@ pub const Value = extern union {
.function,
.variable,
=> {
const decl: *Module.Decl = ptr_val.pointerDecl().?;
const decl: Module.Decl.Index = ptr_val.pointerDecl().?;
std.hash.autoHash(hasher, decl);
},
@ -2505,53 +2521,6 @@ pub const Value = extern union {
}
}
pub fn markReferencedDeclsAlive(val: Value) void {
switch (val.tag()) {
.decl_ref_mut => return val.castTag(.decl_ref_mut).?.data.decl.markAlive(),
.extern_fn => return val.castTag(.extern_fn).?.data.owner_decl.markAlive(),
.function => return val.castTag(.function).?.data.owner_decl.markAlive(),
.variable => return val.castTag(.variable).?.data.owner_decl.markAlive(),
.decl_ref => return val.cast(Payload.Decl).?.data.markAlive(),
.repeated,
.eu_payload,
.opt_payload,
.empty_array_sentinel,
=> return markReferencedDeclsAlive(val.cast(Payload.SubValue).?.data),
.eu_payload_ptr,
.opt_payload_ptr,
=> return markReferencedDeclsAlive(val.cast(Payload.PayloadPtr).?.data.container_ptr),
.slice => {
const slice = val.cast(Payload.Slice).?.data;
markReferencedDeclsAlive(slice.ptr);
markReferencedDeclsAlive(slice.len);
},
.elem_ptr => {
const elem_ptr = val.cast(Payload.ElemPtr).?.data;
return markReferencedDeclsAlive(elem_ptr.array_ptr);
},
.field_ptr => {
const field_ptr = val.cast(Payload.FieldPtr).?.data;
return markReferencedDeclsAlive(field_ptr.container_ptr);
},
.aggregate => {
for (val.castTag(.aggregate).?.data) |field_val| {
markReferencedDeclsAlive(field_val);
}
},
.@"union" => {
const data = val.cast(Payload.Union).?.data;
markReferencedDeclsAlive(data.tag);
markReferencedDeclsAlive(data.val);
},
else => {},
}
}
pub fn slicePtr(val: Value) Value {
return switch (val.tag()) {
.slice => val.castTag(.slice).?.data.ptr,
@ -2561,11 +2530,12 @@ pub const Value = extern union {
};
}
pub fn sliceLen(val: Value, target: Target) u64 {
pub fn sliceLen(val: Value, mod: *Module) u64 {
return switch (val.tag()) {
.slice => val.castTag(.slice).?.data.len.toUnsignedInt(target),
.slice => val.castTag(.slice).?.data.len.toUnsignedInt(mod.getTarget()),
.decl_ref => {
const decl = val.castTag(.decl_ref).?.data;
const decl_index = val.castTag(.decl_ref).?.data;
const decl = mod.declPtr(decl_index);
if (decl.ty.zigTypeTag() == .Array) {
return decl.ty.arrayLen();
} else {
@ -2599,18 +2569,19 @@ pub const Value = extern union {
/// Asserts the value is a single-item pointer to an array, or an array,
/// or an unknown-length pointer, and returns the element value at the index.
pub fn elemValue(val: Value, arena: Allocator, index: usize) !Value {
return elemValueAdvanced(val, index, arena, undefined);
pub fn elemValue(val: Value, mod: *Module, arena: Allocator, index: usize) !Value {
return elemValueAdvanced(val, mod, index, arena, undefined);
}
pub const ElemValueBuffer = Payload.U64;
pub fn elemValueBuffer(val: Value, index: usize, buffer: *ElemValueBuffer) Value {
return elemValueAdvanced(val, index, null, buffer) catch unreachable;
pub fn elemValueBuffer(val: Value, mod: *Module, index: usize, buffer: *ElemValueBuffer) Value {
return elemValueAdvanced(val, mod, index, null, buffer) catch unreachable;
}
pub fn elemValueAdvanced(
val: Value,
mod: *Module,
index: usize,
arena: ?Allocator,
buffer: *ElemValueBuffer,
@ -2643,13 +2614,13 @@ pub const Value = extern union {
.repeated => return val.castTag(.repeated).?.data,
.aggregate => return val.castTag(.aggregate).?.data[index],
.slice => return val.castTag(.slice).?.data.ptr.elemValueAdvanced(index, arena, buffer),
.slice => return val.castTag(.slice).?.data.ptr.elemValueAdvanced(mod, index, arena, buffer),
.decl_ref => return val.castTag(.decl_ref).?.data.val.elemValueAdvanced(index, arena, buffer),
.decl_ref_mut => return val.castTag(.decl_ref_mut).?.data.decl.val.elemValueAdvanced(index, arena, buffer),
.decl_ref => return mod.declPtr(val.castTag(.decl_ref).?.data).val.elemValueAdvanced(mod, index, arena, buffer),
.decl_ref_mut => return mod.declPtr(val.castTag(.decl_ref_mut).?.data.decl_index).val.elemValueAdvanced(mod, index, arena, buffer),
.elem_ptr => {
const data = val.castTag(.elem_ptr).?.data;
return data.array_ptr.elemValueAdvanced(index + data.index, arena, buffer);
return data.array_ptr.elemValueAdvanced(mod, index + data.index, arena, buffer);
},
// The child type of arrays which have only one possible value need
@ -2661,18 +2632,24 @@ pub const Value = extern union {
}
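A caller-side sketch of the buffer-based element access above, mirroring the loop in `arrayToAllocatedBytes` and assuming `val`, `mod`, and `i` in scope:

var buf: Value.ElemValueBuffer = undefined;
const elem_val = val.elemValueBuffer(mod, i, &buf); // no allocation needed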
// Asserts that the provided start/end are in-bounds.
pub fn sliceArray(val: Value, arena: Allocator, start: usize, end: usize) error{OutOfMemory}!Value {
pub fn sliceArray(
val: Value,
mod: *Module,
arena: Allocator,
start: usize,
end: usize,
) error{OutOfMemory}!Value {
return switch (val.tag()) {
.empty_array_sentinel => if (start == 0 and end == 1) val else Value.initTag(.empty_array),
.bytes => Tag.bytes.create(arena, val.castTag(.bytes).?.data[start..end]),
.aggregate => Tag.aggregate.create(arena, val.castTag(.aggregate).?.data[start..end]),
.slice => sliceArray(val.castTag(.slice).?.data.ptr, arena, start, end),
.slice => sliceArray(val.castTag(.slice).?.data.ptr, mod, arena, start, end),
.decl_ref => sliceArray(val.castTag(.decl_ref).?.data.val, arena, start, end),
.decl_ref_mut => sliceArray(val.castTag(.decl_ref_mut).?.data.decl.val, arena, start, end),
.decl_ref => sliceArray(mod.declPtr(val.castTag(.decl_ref).?.data).val, mod, arena, start, end),
.decl_ref_mut => sliceArray(mod.declPtr(val.castTag(.decl_ref_mut).?.data.decl_index).val, mod, arena, start, end),
.elem_ptr => blk: {
const elem_ptr = val.castTag(.elem_ptr).?.data;
break :blk sliceArray(elem_ptr.array_ptr, arena, start + elem_ptr.index, end + elem_ptr.index);
break :blk sliceArray(elem_ptr.array_ptr, mod, arena, start + elem_ptr.index, end + elem_ptr.index);
},
.repeated,
@ -2718,7 +2695,13 @@ pub const Value = extern union {
}
/// Returns a pointer to the element value at the index.
pub fn elemPtr(val: Value, ty: Type, arena: Allocator, index: usize, target: Target) Allocator.Error!Value {
pub fn elemPtr(
val: Value,
ty: Type,
arena: Allocator,
index: usize,
mod: *Module,
) Allocator.Error!Value {
const elem_ty = ty.elemType2();
const ptr_val = switch (val.tag()) {
.slice => val.castTag(.slice).?.data.ptr,
@ -2727,7 +2710,7 @@ pub const Value = extern union {
if (ptr_val.tag() == .elem_ptr) {
const elem_ptr = ptr_val.castTag(.elem_ptr).?.data;
if (elem_ptr.elem_ty.eql(elem_ty, target)) {
if (elem_ptr.elem_ty.eql(elem_ty, mod)) {
return Tag.elem_ptr.create(arena, .{
.array_ptr = elem_ptr.array_ptr,
.elem_ty = elem_ptr.elem_ty,
@ -5059,7 +5042,7 @@ pub const Value = extern union {
pub const Decl = struct {
base: Payload,
data: *Module.Decl,
data: Module.Decl.Index,
};
pub const Variable = struct {
@ -5079,7 +5062,7 @@ pub const Value = extern union {
data: Data,
pub const Data = struct {
decl: *Module.Decl,
decl_index: Module.Decl.Index,
runtime_index: u32,
};
};
@ -5215,7 +5198,7 @@ pub const Value = extern union {
base: Payload = .{ .tag = base_tag },
data: struct {
decl: *Module.Decl,
decl_index: Module.Decl.Index,
/// 0 means ABI-aligned.
alignment: u16,
},