Merge pull request #21700 from ziglang/cli-lib-dirs

move linker input file parsing to the frontend
This commit is contained in:
Andrew Kelley 2024-10-23 22:56:04 -07:00 committed by GitHub
commit c563ba6b15
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
39 changed files with 2901 additions and 2306 deletions

View File

@ -522,6 +522,7 @@ set(ZIG_STAGE2_SOURCES
src/Sema.zig
src/Sema/bitcast.zig
src/Sema/comptime_ptr_access.zig
src/ThreadSafeQueue.zig
src/Type.zig
src/Value.zig
src/Zcu.zig
@ -601,7 +602,6 @@ set(ZIG_STAGE2_SOURCES
src/link/Elf/Archive.zig
src/link/Elf/Atom.zig
src/link/Elf/AtomList.zig
src/link/Elf/LdScript.zig
src/link/Elf/LinkerDefined.zig
src/link/Elf/Merge.zig
src/link/Elf/Object.zig
@ -615,6 +615,7 @@ set(ZIG_STAGE2_SOURCES
src/link/Elf/relocatable.zig
src/link/Elf/relocation.zig
src/link/Elf/synthetic_sections.zig
src/link/LdScript.zig
src/link/MachO.zig
src/link/MachO/Archive.zig
src/link/MachO/Atom.zig

View File

@ -38,8 +38,6 @@ comptime {
@export(&strncpy, .{ .name = "strncpy", .linkage = .strong });
@export(&strcat, .{ .name = "strcat", .linkage = .strong });
@export(&strncat, .{ .name = "strncat", .linkage = .strong });
} else if (is_msvc) {
@export(&_fltused, .{ .name = "_fltused", .linkage = .strong });
}
}
@ -62,8 +60,6 @@ fn wasm_start() callconv(.C) void {
_ = main(0, undefined);
}
var _fltused: c_int = 1;
fn strcpy(dest: [*:0]u8, src: [*:0]const u8) callconv(.C) [*:0]u8 {
var i: usize = 0;
while (src[i] != 0) : (i += 1) {

View File

@ -280,6 +280,10 @@ pub fn main() !void {
builder.enable_darling = true;
} else if (mem.eql(u8, arg, "-fno-darling")) {
builder.enable_darling = false;
} else if (mem.eql(u8, arg, "-fallow-so-scripts")) {
graph.allow_so_scripts = true;
} else if (mem.eql(u8, arg, "-fno-allow-so-scripts")) {
graph.allow_so_scripts = false;
} else if (mem.eql(u8, arg, "-freference-trace")) {
builder.reference_trace = 256;
} else if (mem.startsWith(u8, arg, "-freference-trace=")) {
@ -1341,6 +1345,8 @@ fn usage(b: *std.Build, out_stream: anytype) !void {
\\Advanced Options:
\\ -freference-trace[=num] How many lines of reference trace should be shown per compile error
\\ -fno-reference-trace Disable reference trace
\\ -fallow-so-scripts Allows .so files to be GNU ld scripts
\\ -fno-allow-so-scripts (default) .so files must be ELF files
\\ --build-file [file] Override path to build.zig
\\ --cache-dir [path] Override path to local Zig cache directory
\\ --global-cache-dir [path] Override path to global Zig cache directory

View File

@ -1,6 +1,7 @@
const builtin = @import("builtin");
const common = @import("compiler_rt/common.zig");
pub const panic = @import("compiler_rt/common.zig").panic;
pub const panic = common.panic;
comptime {
// Integer routines
@ -236,4 +237,10 @@ comptime {
_ = @import("compiler_rt/bcmp.zig");
_ = @import("compiler_rt/ssp.zig");
}
if (!builtin.link_libc and builtin.abi == .msvc) {
@export(&_fltused, .{ .name = "_fltused", .linkage = common.linkage, .visibility = common.visibility });
}
}
var _fltused: c_int = 1;

View File

@ -123,6 +123,7 @@ pub const Graph = struct {
incremental: ?bool = null,
random_seed: u32 = 0,
dependency_cache: InitializedDepMap = .empty,
allow_so_scripts: ?bool = null,
};
const AvailableDeps = []const struct { []const u8, []const u8 };

View File

@ -142,6 +142,9 @@ pub const hasher_init: Hasher = Hasher.init(&[_]u8{
pub const File = struct {
prefixed_path: PrefixedPath,
max_file_size: ?usize,
/// Populated if the user calls `addOpenedFile`.
/// The handle is not owned here.
handle: ?fs.File,
stat: Stat,
bin_digest: BinDigest,
contents: ?[]const u8,
@ -173,6 +176,11 @@ pub const File = struct {
const new = new_max_size orelse return;
file.max_file_size = if (file.max_file_size) |old| @max(old, new) else new;
}
pub fn updateHandle(file: *File, new_handle: ?fs.File) void {
const handle = new_handle orelse return;
file.handle = handle;
}
};
pub const HashHelper = struct {
@ -363,15 +371,20 @@ pub const Manifest = struct {
/// var file_contents = cache_hash.files.keys()[file_index].contents.?;
/// ```
pub fn addFilePath(m: *Manifest, file_path: Path, max_file_size: ?usize) !usize {
return addOpenedFile(m, file_path, null, max_file_size);
}
/// Same as `addFilePath` except the file has already been opened.
pub fn addOpenedFile(m: *Manifest, path: Path, handle: ?fs.File, max_file_size: ?usize) !usize {
const gpa = m.cache.gpa;
try m.files.ensureUnusedCapacity(gpa, 1);
const resolved_path = try fs.path.resolve(gpa, &.{
file_path.root_dir.path orelse ".",
file_path.subPathOrDot(),
path.root_dir.path orelse ".",
path.subPathOrDot(),
});
errdefer gpa.free(resolved_path);
const prefixed_path = try m.cache.findPrefixResolved(resolved_path);
return addFileInner(m, prefixed_path, max_file_size);
return addFileInner(m, prefixed_path, handle, max_file_size);
}
/// Deprecated; use `addFilePath`.
@ -383,13 +396,14 @@ pub const Manifest = struct {
const prefixed_path = try self.cache.findPrefix(file_path);
errdefer gpa.free(prefixed_path.sub_path);
return addFileInner(self, prefixed_path, max_file_size);
return addFileInner(self, prefixed_path, null, max_file_size);
}
fn addFileInner(self: *Manifest, prefixed_path: PrefixedPath, max_file_size: ?usize) !usize {
fn addFileInner(self: *Manifest, prefixed_path: PrefixedPath, handle: ?fs.File, max_file_size: ?usize) usize {
const gop = self.files.getOrPutAssumeCapacityAdapted(prefixed_path, FilesAdapter{});
if (gop.found_existing) {
gop.key_ptr.updateMaxSize(max_file_size);
gop.key_ptr.updateHandle(handle);
return gop.index;
}
gop.key_ptr.* = .{
@ -398,6 +412,7 @@ pub const Manifest = struct {
.max_file_size = max_file_size,
.stat = undefined,
.bin_digest = undefined,
.handle = handle,
};
self.hash.add(prefixed_path.prefix);
@ -565,6 +580,7 @@ pub const Manifest = struct {
},
.contents = null,
.max_file_size = null,
.handle = null,
.stat = .{
.size = stat_size,
.inode = stat_inode,
@ -708,12 +724,19 @@ pub const Manifest = struct {
}
fn populateFileHash(self: *Manifest, ch_file: *File) !void {
const pp = ch_file.prefixed_path;
const dir = self.cache.prefixes()[pp.prefix].handle;
const file = try dir.openFile(pp.sub_path, .{});
defer file.close();
if (ch_file.handle) |handle| {
return populateFileHashHandle(self, ch_file, handle);
} else {
const pp = ch_file.prefixed_path;
const dir = self.cache.prefixes()[pp.prefix].handle;
const handle = try dir.openFile(pp.sub_path, .{});
defer handle.close();
return populateFileHashHandle(self, ch_file, handle);
}
}
const actual_stat = try file.stat();
fn populateFileHashHandle(self: *Manifest, ch_file: *File, handle: fs.File) !void {
const actual_stat = try handle.stat();
ch_file.stat = .{
.size = actual_stat.size,
.mtime = actual_stat.mtime,
@ -739,8 +762,7 @@ pub const Manifest = struct {
var hasher = hasher_init;
var off: usize = 0;
while (true) {
// give me everything you've got, captain
const bytes_read = try file.read(contents[off..]);
const bytes_read = try handle.pread(contents[off..], off);
if (bytes_read == 0) break;
hasher.update(contents[off..][0..bytes_read]);
off += bytes_read;
@ -749,7 +771,7 @@ pub const Manifest = struct {
ch_file.contents = contents;
} else {
try hashFile(file, &ch_file.bin_digest);
try hashFile(handle, &ch_file.bin_digest);
}
self.hash.hasher.update(&ch_file.bin_digest);
@ -813,6 +835,7 @@ pub const Manifest = struct {
gop.key_ptr.* = .{
.prefixed_path = prefixed_path,
.max_file_size = null,
.handle = null,
.stat = undefined,
.bin_digest = undefined,
.contents = null,
@ -851,6 +874,7 @@ pub const Manifest = struct {
new_file.* = .{
.prefixed_path = prefixed_path,
.max_file_size = null,
.handle = null,
.stat = stat,
.bin_digest = undefined,
.contents = null,
@ -1067,6 +1091,7 @@ pub const Manifest = struct {
gop.key_ptr.* = .{
.prefixed_path = prefixed_path,
.max_file_size = file.max_file_size,
.handle = file.handle,
.stat = file.stat,
.bin_digest = file.bin_digest,
.contents = null,
@ -1103,14 +1128,14 @@ pub fn writeSmallFile(dir: fs.Dir, sub_path: []const u8, data: []const u8) !void
fn hashFile(file: fs.File, bin_digest: *[Hasher.mac_length]u8) !void {
var buf: [1024]u8 = undefined;
var hasher = hasher_init;
var off: u64 = 0;
while (true) {
const bytes_read = try file.read(&buf);
const bytes_read = try file.pread(&buf, off);
if (bytes_read == 0) break;
hasher.update(buf[0..bytes_read]);
off += bytes_read;
}
hasher.final(bin_digest);
}

View File

@ -186,6 +186,15 @@ want_lto: ?bool = null,
use_llvm: ?bool,
use_lld: ?bool,
/// Corresponds to the `-fallow-so-scripts` / `-fno-allow-so-scripts` CLI
/// flags, overriding the global user setting provided to the `zig build`
/// command.
///
/// The compiler defaults this value to off so that users whose system shared
/// libraries are all ELF files don't have to pay the cost of checking every
/// file to find out if it is a text file instead.
allow_so_scripts: ?bool = null,
/// This is an advanced setting that can change the intent of this Compile step.
/// If this value is non-null, it means that this Compile step exists to
/// check for compile errors and return *success* if they match, and failure
@ -236,6 +245,7 @@ pub const ExpectedCompileErrors = union(enum) {
contains: []const u8,
exact: []const []const u8,
starts_with: []const u8,
stderr_contains: []const u8,
};
pub const Entry = union(enum) {
@ -1035,6 +1045,7 @@ fn getZigArgs(compile: *Compile, fuzz: bool) ![][]const u8 {
if (b.reference_trace) |some| {
try zig_args.append(try std.fmt.allocPrint(arena, "-freference-trace={d}", .{some}));
}
try addFlag(&zig_args, "allow-so-scripts", compile.allow_so_scripts orelse b.graph.allow_so_scripts);
try addFlag(&zig_args, "llvm", compile.use_llvm);
try addFlag(&zig_args, "lld", compile.use_lld);
@ -1945,24 +1956,24 @@ fn checkCompileErrors(compile: *Compile) !void {
const arena = compile.step.owner.allocator;
var actual_stderr_list = std.ArrayList(u8).init(arena);
var actual_errors_list = std.ArrayList(u8).init(arena);
try actual_eb.renderToWriter(.{
.ttyconf = .no_color,
.include_reference_trace = false,
.include_source_line = false,
}, actual_stderr_list.writer());
const actual_stderr = try actual_stderr_list.toOwnedSlice();
}, actual_errors_list.writer());
const actual_errors = try actual_errors_list.toOwnedSlice();
// Render the expected lines into a string that we can compare verbatim.
var expected_generated = std.ArrayList(u8).init(arena);
const expect_errors = compile.expect_errors.?;
var actual_line_it = mem.splitScalar(u8, actual_stderr, '\n');
var actual_line_it = mem.splitScalar(u8, actual_errors, '\n');
// TODO merge this with the testing.expectEqualStrings logic, and also CheckFile
switch (expect_errors) {
.starts_with => |expect_starts_with| {
if (std.mem.startsWith(u8, actual_stderr, expect_starts_with)) return;
if (std.mem.startsWith(u8, actual_errors, expect_starts_with)) return;
return compile.step.fail(
\\
\\========= should start with: ============
@ -1970,7 +1981,7 @@ fn checkCompileErrors(compile: *Compile) !void {
\\========= but not found: ================
\\{s}
\\=========================================
, .{ expect_starts_with, actual_stderr });
, .{ expect_starts_with, actual_errors });
},
.contains => |expect_line| {
while (actual_line_it.next()) |actual_line| {
@ -1978,6 +1989,29 @@ fn checkCompileErrors(compile: *Compile) !void {
return;
}
return compile.step.fail(
\\
\\========= should contain: ===============
\\{s}
\\========= but not found: ================
\\{s}
\\=========================================
, .{ expect_line, actual_errors });
},
.stderr_contains => |expect_line| {
const actual_stderr: []const u8 = if (compile.step.result_error_msgs.items.len > 0)
compile.step.result_error_msgs.items[0]
else
&.{};
compile.step.result_error_msgs.clearRetainingCapacity();
var stderr_line_it = mem.splitScalar(u8, actual_stderr, '\n');
while (stderr_line_it.next()) |actual_line| {
if (!matchCompileError(actual_line, expect_line)) continue;
return;
}
return compile.step.fail(
\\
\\========= should contain: ===============
@ -2003,7 +2037,7 @@ fn checkCompileErrors(compile: *Compile) !void {
try expected_generated.append('\n');
}
if (mem.eql(u8, expected_generated.items, actual_stderr)) return;
if (mem.eql(u8, expected_generated.items, actual_errors)) return;
return compile.step.fail(
\\
@ -2012,7 +2046,7 @@ fn checkCompileErrors(compile: *Compile) !void {
\\========= but found: ====================
\\{s}
\\=========================================
, .{ expected_generated.items, actual_stderr });
, .{ expected_generated.items, actual_errors });
},
}
}

View File

@ -14,6 +14,11 @@ pub fn start(self: *WaitGroup) void {
assert((state / one_pending) < (std.math.maxInt(usize) / one_pending));
}
/// Registers `n` units of pending work with a single atomic add,
/// equivalent to calling `start` `n` times. Each unit must later be
/// balanced by one call to `finish`.
pub fn startMany(self: *WaitGroup, n: usize) void {
const state = self.state.fetchAdd(one_pending * n, .monotonic);
// The previous pending count must have been below the representable
// maximum; otherwise the counter has overflowed. (Debug-only check:
// `assert` compiles away in unsafe release modes.)
assert((state / one_pending) < (std.math.maxInt(usize) / one_pending));
}
pub fn finish(self: *WaitGroup) void {
const state = self.state.fetchSub(one_pending, .acq_rel);
assert((state / one_pending) > 0);

File diff suppressed because it is too large Load Diff

View File

@ -2899,6 +2899,7 @@ fn zirStructDecl(
codegen_type: {
if (zcu.comp.config.use_llvm) break :codegen_type;
if (block.ownerModule().strip) break :codegen_type;
// This job depends on any resolve_type_fully jobs queued up before it.
try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
}
try sema.declareDependency(.{ .interned = wip_ty.index });
@ -3149,6 +3150,7 @@ fn zirEnumDecl(
codegen_type: {
if (zcu.comp.config.use_llvm) break :codegen_type;
if (block.ownerModule().strip) break :codegen_type;
// This job depends on any resolve_type_fully jobs queued up before it.
try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
}
return Air.internedToRef(wip_ty.index);
@ -3272,6 +3274,7 @@ fn zirUnionDecl(
codegen_type: {
if (zcu.comp.config.use_llvm) break :codegen_type;
if (block.ownerModule().strip) break :codegen_type;
// This job depends on any resolve_type_fully jobs queued up before it.
try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
}
try sema.declareDependency(.{ .interned = wip_ty.index });
@ -3357,6 +3360,7 @@ fn zirOpaqueDecl(
codegen_type: {
if (zcu.comp.config.use_llvm) break :codegen_type;
if (block.ownerModule().strip) break :codegen_type;
// This job depends on any resolve_type_fully jobs queued up before it.
try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
}
try sema.addTypeReferenceEntry(src, wip_ty.index);
@ -9595,7 +9599,7 @@ fn resolveGenericBody(
}
/// Given a library name, examines if the library name should end up in
/// `link.File.Options.system_libs` table (for example, libc is always
/// `link.File.Options.windows_libs` table (for example, libc is always
/// specified via dedicated flag `link_libc` instead),
/// and puts it there if it doesn't exist.
/// It also dupes the library name which can then be saved as part of the
@ -22456,6 +22460,7 @@ fn reifyEnum(
codegen_type: {
if (zcu.comp.config.use_llvm) break :codegen_type;
if (block.ownerModule().strip) break :codegen_type;
// This job depends on any resolve_type_fully jobs queued up before it.
try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
}
return Air.internedToRef(wip_ty.index);
@ -22713,6 +22718,7 @@ fn reifyUnion(
codegen_type: {
if (zcu.comp.config.use_llvm) break :codegen_type;
if (block.ownerModule().strip) break :codegen_type;
// This job depends on any resolve_type_fully jobs queued up before it.
try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
}
try sema.declareDependency(.{ .interned = wip_ty.index });
@ -22997,6 +23003,7 @@ fn reifyStruct(
codegen_type: {
if (zcu.comp.config.use_llvm) break :codegen_type;
if (block.ownerModule().strip) break :codegen_type;
// This job depends on any resolve_type_fully jobs queued up before it.
try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
}
try sema.declareDependency(.{ .interned = wip_ty.index });

72
src/ThreadSafeQueue.zig Normal file
View File

@ -0,0 +1,72 @@
const std = @import("std");
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;

/// A multi-producer, single-consumer queue with a built-in run/wait state
/// machine. Producers call `enqueue` from any thread; exactly one worker
/// thread drains items via `check`. The boolean results of `enqueue` and
/// `start` tell the caller when a worker needs to be (re)started.
pub fn ThreadSafeQueue(comptime T: type) type {
return struct {
/// Owned exclusively by the worker thread; `check` moves the shared
/// items here so they can be consumed without holding `mutex`.
worker_owned: std.ArrayListUnmanaged(T),
/// Protected by `mutex`.
shared: std.ArrayListUnmanaged(T),
/// Guards `shared` and `state`.
mutex: std.Thread.Mutex,
/// Protected by `mutex`. `.run` means a worker is (or is about to be)
/// processing the queue; `.wait` means no worker is active.
state: State,

const Self = @This();

pub const State = enum { wait, run };

/// An initialized, empty queue in the `.wait` state.
pub const empty: Self = .{
.worker_owned = .empty,
.shared = .empty,
.mutex = .{},
.state = .wait,
};

/// Frees both backing lists. Not thread-safe; caller must ensure no
/// other thread is using the queue.
pub fn deinit(self: *Self, gpa: Allocator) void {
self.worker_owned.deinit(gpa);
self.shared.deinit(gpa);
self.* = undefined;
}

/// Must be called from the worker thread.
/// Returns the next batch of items to process, or null if the queue is
/// empty — in which case the state transitions to `.wait` and the worker
/// must stop (a subsequent `enqueue` returning true restarts it).
/// The returned slice is valid until the next call to `check`.
pub fn check(self: *Self) ?[]T {
// The previous batch must have been fully consumed before asking
// for another one.
assert(self.worker_owned.items.len == 0);
{
self.mutex.lock();
defer self.mutex.unlock();
assert(self.state == .run);
if (self.shared.items.len == 0) {
// Going idle and returning null happen under the same lock
// acquisition, so an `enqueue` cannot slip in between and
// be missed.
self.state = .wait;
return null;
}
// O(1) handoff: steal the shared buffer wholesale rather than
// copying items while holding the lock.
std.mem.swap(std.ArrayListUnmanaged(T), &self.worker_owned, &self.shared);
}
const result = self.worker_owned.items;
// Keep capacity so it can be swapped back and reused by producers.
self.worker_owned.clearRetainingCapacity();
return result;
}

/// Adds items to the queue, returning true if and only if the worker
/// thread is waiting. Thread-safe.
/// Not safe to call from the worker thread.
pub fn enqueue(self: *Self, gpa: Allocator, items: []const T) error{OutOfMemory}!bool {
self.mutex.lock();
defer self.mutex.unlock();
try self.shared.appendSlice(gpa, items);
return switch (self.state) {
.run => false,
.wait => {
// The caller is now responsible for starting a worker;
// mark it running so concurrent enqueuers don't also try.
self.state = .run;
return true;
},
};
}

/// Safe only to call exactly once when initially starting the worker.
/// Returns true if there is already queued work, in which case the
/// state is switched to `.run` and a worker must be spawned.
pub fn start(self: *Self) bool {
assert(self.state == .wait);
if (self.shared.items.len == 0) return false;
self.state = .run;
return true;
}
};
}

View File

@ -845,6 +845,7 @@ fn ensureFuncBodyAnalyzedInner(
return .{ .ies_outdated = ies_outdated };
}
// This job depends on any resolve_type_fully jobs queued up before it.
try comp.queueJob(.{ .codegen_func = .{
.func = func_index,
.air = air,
@ -1016,6 +1017,7 @@ fn createFileRootStruct(
codegen_type: {
if (zcu.comp.config.use_llvm) break :codegen_type;
if (file.mod.strip) break :codegen_type;
// This job depends on any resolve_type_fully jobs queued up before it.
try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
}
zcu.setFileRootType(file_index, wip_ty.index);
@ -1362,6 +1364,7 @@ fn semaCau(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) !SemaCauResult {
if (file.mod.strip) break :queue_codegen;
}
// This job depends on any resolve_type_fully jobs queued up before it.
try zcu.comp.queueJob(.{ .codegen_nav = nav_index });
}
@ -2593,7 +2596,7 @@ pub fn populateTestFunctions(
}
}
pub fn linkerUpdateNav(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) !void {
pub fn linkerUpdateNav(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) error{OutOfMemory}!void {
const zcu = pt.zcu;
const comp = zcu.comp;
const ip = &zcu.intern_pool;
@ -3163,6 +3166,7 @@ pub fn navPtrType(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) Allocator.
pub fn getExtern(pt: Zcu.PerThread, key: InternPool.Key.Extern) Allocator.Error!InternPool.Index {
const result = try pt.zcu.intern_pool.getExtern(pt.zcu.gpa, pt.tid, key);
if (result.new_nav.unwrap()) |nav| {
// This job depends on any resolve_type_fully jobs queued up before it.
try pt.zcu.comp.queueJob(.{ .codegen_nav = nav });
}
return result.index;

View File

@ -133,7 +133,7 @@ const Owner = union(enum) {
switch (owner) {
.nav_index => |nav_index| {
const elf_file = func.bin_file.cast(.elf).?;
return elf_file.zigObjectPtr().?.getOrCreateMetadataForNav(elf_file, nav_index);
return elf_file.zigObjectPtr().?.getOrCreateMetadataForNav(pt.zcu, nav_index);
},
.lazy_sym => |lazy_sym| {
const elf_file = func.bin_file.cast(.elf).?;
@ -5002,7 +5002,7 @@ fn genCall(
.func => |func_val| {
if (func.bin_file.cast(.elf)) |elf_file| {
const zo = elf_file.zigObjectPtr().?;
const sym_index = try zo.getOrCreateMetadataForNav(elf_file, func_val.owner_nav);
const sym_index = try zo.getOrCreateMetadataForNav(zcu, func_val.owner_nav);
if (func.mod.pic) {
return func.fail("TODO: genCall pic", .{});

View File

@ -126,7 +126,7 @@ const Owner = union(enum) {
const pt = ctx.pt;
switch (owner) {
.nav_index => |nav_index| if (ctx.bin_file.cast(.elf)) |elf_file| {
return elf_file.zigObjectPtr().?.getOrCreateMetadataForNav(elf_file, nav_index);
return elf_file.zigObjectPtr().?.getOrCreateMetadataForNav(pt.zcu, nav_index);
} else if (ctx.bin_file.cast(.macho)) |macho_file| {
return macho_file.getZigObject().?.getOrCreateMetadataForNav(macho_file, nav_index);
} else if (ctx.bin_file.cast(.coff)) |coff_file| {
@ -12605,7 +12605,7 @@ fn genCall(self: *Self, info: union(enum) {
.func => |func| {
if (self.bin_file.cast(.elf)) |elf_file| {
const zo = elf_file.zigObjectPtr().?;
const sym_index = try zo.getOrCreateMetadataForNav(elf_file, func.owner_nav);
const sym_index = try zo.getOrCreateMetadataForNav(zcu, func.owner_nav);
try self.asmImmediate(.{ ._, .call }, Immediate.rel(.{ .sym_index = sym_index }));
} else if (self.bin_file.cast(.coff)) |coff_file| {
const atom = try coff_file.getOrCreateAtomForNav(func.owner_nav);

View File

@ -866,7 +866,7 @@ fn genNavRef(
zo.symbol(sym_index).flags.is_extern_ptr = true;
return .{ .mcv = .{ .lea_symbol = sym_index } };
}
const sym_index = try zo.getOrCreateMetadataForNav(elf_file, nav_index);
const sym_index = try zo.getOrCreateMetadataForNav(zcu, nav_index);
if (!single_threaded and is_threadlocal) {
return .{ .mcv = .{ .load_tlv = sym_index } };
}

View File

@ -6,12 +6,14 @@ const fs = std.fs;
const path = fs.path;
const assert = std.debug.assert;
const Version = std.SemanticVersion;
const Path = std.Build.Cache.Path;
const Compilation = @import("Compilation.zig");
const build_options = @import("build_options");
const trace = @import("tracy.zig").trace;
const Cache = std.Build.Cache;
const Module = @import("Package/Module.zig");
const link = @import("link.zig");
pub const Lib = struct {
name: []const u8,
@ -717,11 +719,11 @@ fn lib_path(comp: *Compilation, arena: Allocator, sub_path: []const u8) ![]const
pub const BuiltSharedObjects = struct {
lock: Cache.Lock,
dir_path: []u8,
dir_path: Path,
pub fn deinit(self: *BuiltSharedObjects, gpa: Allocator) void {
self.lock.release();
gpa.free(self.dir_path);
gpa.free(self.dir_path.sub_path);
self.* = undefined;
}
};
@ -742,7 +744,9 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) !voi
return error.ZigCompilerNotBuiltWithLLVMExtensions;
}
var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
const gpa = comp.gpa;
var arena_allocator = std.heap.ArenaAllocator.init(gpa);
defer arena_allocator.deinit();
const arena = arena_allocator.allocator();
@ -751,7 +755,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) !voi
// Use the global cache directory.
var cache: Cache = .{
.gpa = comp.gpa,
.gpa = gpa,
.manifest_dir = try comp.global_cache_directory.handle.makeOpenPath("h", .{}),
};
cache.addPrefix(.{ .path = null, .handle = fs.cwd() });
@ -772,12 +776,13 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) !voi
if (try man.hit()) {
const digest = man.final();
assert(comp.glibc_so_files == null);
comp.glibc_so_files = BuiltSharedObjects{
return queueSharedObjects(comp, .{
.lock = man.toOwnedLock(),
.dir_path = try comp.global_cache_directory.join(comp.gpa, &.{ "o", &digest }),
};
return;
.dir_path = .{
.root_dir = comp.global_cache_directory,
.sub_path = try gpa.dupe(u8, "o" ++ fs.path.sep_str ++ digest),
},
});
}
const digest = man.final();
@ -790,8 +795,8 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) !voi
defer o_directory.handle.close();
const abilists_contents = man.files.keys()[abilists_index].contents.?;
const metadata = try loadMetaData(comp.gpa, abilists_contents);
defer metadata.destroy(comp.gpa);
const metadata = try loadMetaData(gpa, abilists_contents);
defer metadata.destroy(gpa);
const target_targ_index = for (metadata.all_targets, 0..) |targ, i| {
if (targ.arch == target.cpu.arch and
@ -835,7 +840,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) !voi
map_contents.deinit(); // The most recent allocation of an arena can be freed :)
}
var stubs_asm = std.ArrayList(u8).init(comp.gpa);
var stubs_asm = std.ArrayList(u8).init(gpa);
defer stubs_asm.deinit();
for (libs, 0..) |lib, lib_i| {
@ -1195,7 +1200,6 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) !voi
var lib_name_buf: [32]u8 = undefined; // Larger than each of the names "c", "pthread", etc.
const asm_file_basename = std.fmt.bufPrint(&lib_name_buf, "{s}.s", .{lib.name}) catch unreachable;
try o_directory.handle.writeFile(.{ .sub_path = asm_file_basename, .data = stubs_asm.items });
try buildSharedLib(comp, arena, comp.global_cache_directory, o_directory, asm_file_basename, lib, prog_node);
}
@ -1203,14 +1207,45 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) !voi
log.warn("failed to write cache manifest for glibc stubs: {s}", .{@errorName(err)});
};
assert(comp.glibc_so_files == null);
comp.glibc_so_files = BuiltSharedObjects{
return queueSharedObjects(comp, .{
.lock = man.toOwnedLock(),
.dir_path = try comp.global_cache_directory.join(comp.gpa, &.{ "o", &digest }),
};
.dir_path = .{
.root_dir = comp.global_cache_directory,
.sub_path = try gpa.dupe(u8, "o" ++ fs.path.sep_str ++ digest),
},
});
}
// zig fmt: on
/// Records the freshly built glibc stub shared objects on `comp` and queues
/// one `load_dso` linker task per library that exists for the target glibc
/// version. Allocation failure is reported via `comp.setAllocFailure()`
/// rather than returned, hence the `void` result.
fn queueSharedObjects(comp: *Compilation, so_files: BuiltSharedObjects) void {
const target_version = comp.getTarget().os.version_range.linux.glibc;

assert(comp.glibc_so_files == null);
comp.glibc_so_files = so_files;

// `libs` is a comptime-known list, so a fixed-size stack buffer suffices;
// `task_buffer_i` counts how many slots were actually filled.
var task_buffer: [libs.len]link.Task = undefined;
var task_buffer_i: usize = 0;

{
comp.mutex.lock(); // protect comp.arena
defer comp.mutex.unlock();
for (libs) |lib| {
// Skip libraries that were removed before the target glibc version.
if (lib.removed_in) |rem_in| {
if (target_version.order(rem_in) != .lt) continue;
}
// e.g. "<dir>/libc.so.6"; allocated in comp.arena under comp.mutex.
const so_path: Path = .{
.root_dir = so_files.dir_path.root_dir,
.sub_path = std.fmt.allocPrint(comp.arena, "{s}{c}lib{s}.so.{d}", .{
so_files.dir_path.sub_path, fs.path.sep, lib.name, lib.sover,
}) catch return comp.setAllocFailure(),
};
task_buffer[task_buffer_i] = .{ .load_dso = so_path };
task_buffer_i += 1;
}
}

// Hand off only the populated prefix; queueLinkTasks copies as needed
// (NOTE(review): assumed from the by-value slice parameter — confirm).
comp.queueLinkTasks(task_buffer[0..task_buffer_i]);
}
fn buildSharedLib(
comp: *Compilation,

View File

@ -355,7 +355,9 @@ pub fn buildLibCXX(comp: *Compilation, prog_node: std.Progress.Node) BuildError!
};
assert(comp.libcxx_static_lib == null);
comp.libcxx_static_lib = try sub_compilation.toCrtFile();
const crt_file = try sub_compilation.toCrtFile();
comp.libcxx_static_lib = crt_file;
comp.queueLinkTaskMode(crt_file.full_object_path, output_mode);
}
pub fn buildLibCXXABI(comp: *Compilation, prog_node: std.Progress.Node) BuildError!void {
@ -584,7 +586,9 @@ pub fn buildLibCXXABI(comp: *Compilation, prog_node: std.Progress.Node) BuildErr
};
assert(comp.libcxxabi_static_lib == null);
comp.libcxxabi_static_lib = try sub_compilation.toCrtFile();
const crt_file = try sub_compilation.toCrtFile();
comp.libcxxabi_static_lib = crt_file;
comp.queueLinkTaskMode(crt_file.full_object_path, output_mode);
}
pub fn hardeningModeFlag(optimize_mode: std.builtin.OptimizeMode) []const u8 {

View File

@ -342,8 +342,10 @@ pub fn buildTsan(comp: *Compilation, prog_node: std.Progress.Node) BuildError!vo
},
};
const crt_file = try sub_compilation.toCrtFile();
comp.queueLinkTaskMode(crt_file.full_object_path, output_mode);
assert(comp.tsan_lib == null);
comp.tsan_lib = try sub_compilation.toCrtFile();
comp.tsan_lib = crt_file;
}
const tsan_sources = [_][]const u8{

View File

@ -199,8 +199,10 @@ pub fn buildStaticLib(comp: *Compilation, prog_node: std.Progress.Node) BuildErr
},
};
const crt_file = try sub_compilation.toCrtFile();
comp.queueLinkTaskMode(crt_file.full_object_path, output_mode);
assert(comp.libunwind_static_lib == null);
comp.libunwind_static_lib = try sub_compilation.toCrtFile();
comp.libunwind_static_lib = crt_file;
}
const unwind_src_list = [_][]const u8{

File diff suppressed because it is too large Load Diff

View File

@ -16,7 +16,7 @@ dynamicbase: bool,
/// default or populated together. They should not be separate fields.
major_subsystem_version: u16,
minor_subsystem_version: u16,
lib_dirs: []const []const u8,
lib_directories: []const Directory,
entry: link.File.OpenOptions.Entry,
entry_addr: ?u32,
module_definition_file: ?[]const u8,
@ -297,7 +297,7 @@ pub fn createEmpty(
.dynamicbase = options.dynamicbase,
.major_subsystem_version = options.major_subsystem_version orelse 6,
.minor_subsystem_version = options.minor_subsystem_version orelse 0,
.lib_dirs = options.lib_dirs,
.lib_directories = options.lib_directories,
.entry_addr = math.cast(u32, options.entry_addr orelse 0) orelse
return error.EntryAddressTooBig,
.module_definition_file = options.module_definition_file,
@ -2727,6 +2727,7 @@ const mem = std.mem;
const Allocator = std.mem.Allocator;
const Path = std.Build.Cache.Path;
const Directory = std.Build.Cache.Directory;
const codegen = @import("../codegen.zig");
const link = @import("../link.zig");

View File

@ -8,6 +8,7 @@ const log = std.log.scoped(.link);
const mem = std.mem;
const Cache = std.Build.Cache;
const Path = std.Build.Cache.Path;
const Directory = std.Build.Cache.Directory;
const mingw = @import("../../mingw.zig");
const link = @import("../../link.zig");
@ -74,10 +75,7 @@ pub fn linkWithLLD(self: *Coff, arena: Allocator, tid: Zcu.PerThread.Id, prog_no
comptime assert(Compilation.link_hash_implementation_version == 14);
for (comp.objects) |obj| {
_ = try man.addFilePath(obj.path, null);
man.hash.add(obj.must_link);
}
try link.hashInputs(&man, comp.link_inputs);
for (comp.c_object_table.keys()) |key| {
_ = try man.addFilePath(key.status.success.object_path, null);
}
@ -88,7 +86,10 @@ pub fn linkWithLLD(self: *Coff, arena: Allocator, tid: Zcu.PerThread.Id, prog_no
man.hash.addOptionalBytes(entry_name);
man.hash.add(self.base.stack_size);
man.hash.add(self.image_base);
man.hash.addListOfBytes(self.lib_dirs);
{
// TODO remove this, libraries must instead be resolved by the frontend.
for (self.lib_directories) |lib_directory| man.hash.addOptionalBytes(lib_directory.path);
}
man.hash.add(comp.skip_linker_dependencies);
if (comp.config.link_libc) {
man.hash.add(comp.libc_installation != null);
@ -100,7 +101,7 @@ pub fn linkWithLLD(self: *Coff, arena: Allocator, tid: Zcu.PerThread.Id, prog_no
}
}
}
try link.hashAddSystemLibs(&man, comp.system_libs);
man.hash.addListOfBytes(comp.windows_libs.keys());
man.hash.addListOfBytes(comp.force_undefined_symbols.keys());
man.hash.addOptional(self.subsystem);
man.hash.add(comp.config.is_test);
@ -148,8 +149,7 @@ pub fn linkWithLLD(self: *Coff, arena: Allocator, tid: Zcu.PerThread.Id, prog_no
// here. TODO: think carefully about how we can avoid this redundant operation when doing
// build-obj. See also the corresponding TODO in linkAsArchive.
const the_object_path = blk: {
if (comp.objects.len != 0)
break :blk comp.objects[0].path;
if (link.firstObjectInput(comp.link_inputs)) |obj| break :blk obj.path;
if (comp.c_object_table.count() != 0)
break :blk comp.c_object_table.keys()[0].status.success.object_path;
@ -266,18 +266,24 @@ pub fn linkWithLLD(self: *Coff, arena: Allocator, tid: Zcu.PerThread.Id, prog_no
}
}
for (self.lib_dirs) |lib_dir| {
try argv.append(try allocPrint(arena, "-LIBPATH:{s}", .{lib_dir}));
for (self.lib_directories) |lib_directory| {
try argv.append(try allocPrint(arena, "-LIBPATH:{s}", .{lib_directory.path orelse "."}));
}
try argv.ensureUnusedCapacity(comp.objects.len);
for (comp.objects) |obj| {
if (obj.must_link) {
argv.appendAssumeCapacity(try allocPrint(arena, "-WHOLEARCHIVE:{}", .{@as(Path, obj.path)}));
} else {
argv.appendAssumeCapacity(try obj.path.toString(arena));
}
}
try argv.ensureUnusedCapacity(comp.link_inputs.len);
for (comp.link_inputs) |link_input| switch (link_input) {
.dso_exact => unreachable, // not applicable to PE/COFF
inline .dso, .res => |x| {
argv.appendAssumeCapacity(try x.path.toString(arena));
},
.object, .archive => |obj| {
if (obj.must_link) {
argv.appendAssumeCapacity(try allocPrint(arena, "-WHOLEARCHIVE:{}", .{@as(Path, obj.path)}));
} else {
argv.appendAssumeCapacity(try obj.path.toString(arena));
}
},
};
for (comp.c_object_table.keys()) |key| {
try argv.append(try key.status.success.object_path.toString(arena));
@ -484,20 +490,20 @@ pub fn linkWithLLD(self: *Coff, arena: Allocator, tid: Zcu.PerThread.Id, prog_no
if (comp.compiler_rt_lib) |lib| try argv.append(try lib.full_object_path.toString(arena));
}
try argv.ensureUnusedCapacity(comp.system_libs.count());
for (comp.system_libs.keys()) |key| {
try argv.ensureUnusedCapacity(comp.windows_libs.count());
for (comp.windows_libs.keys()) |key| {
const lib_basename = try allocPrint(arena, "{s}.lib", .{key});
if (comp.crt_files.get(lib_basename)) |crt_file| {
argv.appendAssumeCapacity(try crt_file.full_object_path.toString(arena));
continue;
}
if (try findLib(arena, lib_basename, self.lib_dirs)) |full_path| {
if (try findLib(arena, lib_basename, self.lib_directories)) |full_path| {
argv.appendAssumeCapacity(full_path);
continue;
}
if (target.abi.isGnu()) {
const fallback_name = try allocPrint(arena, "lib{s}.dll.a", .{key});
if (try findLib(arena, fallback_name, self.lib_dirs)) |full_path| {
if (try findLib(arena, fallback_name, self.lib_directories)) |full_path| {
argv.appendAssumeCapacity(full_path);
continue;
}
@ -530,14 +536,13 @@ pub fn linkWithLLD(self: *Coff, arena: Allocator, tid: Zcu.PerThread.Id, prog_no
}
}
fn findLib(arena: Allocator, name: []const u8, lib_dirs: []const []const u8) !?[]const u8 {
for (lib_dirs) |lib_dir| {
const full_path = try fs.path.join(arena, &.{ lib_dir, name });
fs.cwd().access(full_path, .{}) catch |err| switch (err) {
fn findLib(arena: Allocator, name: []const u8, lib_directories: []const Directory) !?[]const u8 {
for (lib_directories) |lib_directory| {
lib_directory.handle.access(name, .{}) catch |err| switch (err) {
error.FileNotFound => continue,
else => |e| return e,
};
return full_path;
return try lib_directory.join(arena, &.{name});
}
return null;
}

File diff suppressed because it is too large Load Diff

View File

@ -1,29 +1,46 @@
objects: std.ArrayListUnmanaged(Object) = .empty,
strtab: std.ArrayListUnmanaged(u8) = .empty,
objects: []const Object,
/// '\n'-delimited
strtab: []const u8,
pub fn deinit(self: *Archive, allocator: Allocator) void {
self.objects.deinit(allocator);
self.strtab.deinit(allocator);
pub fn deinit(a: *Archive, gpa: Allocator) void {
gpa.free(a.objects);
gpa.free(a.strtab);
a.* = undefined;
}
pub fn parse(self: *Archive, elf_file: *Elf, path: Path, handle_index: File.HandleIndex) !void {
const comp = elf_file.base.comp;
const gpa = comp.gpa;
const diags = &comp.link_diags;
const handle = elf_file.fileHandle(handle_index);
pub fn parse(
gpa: Allocator,
diags: *Diags,
file_handles: *const std.ArrayListUnmanaged(File.Handle),
path: Path,
handle_index: File.HandleIndex,
) !Archive {
const handle = file_handles.items[handle_index];
var pos: usize = 0;
{
var magic_buffer: [elf.ARMAG.len]u8 = undefined;
const n = try handle.preadAll(&magic_buffer, pos);
if (n != magic_buffer.len) return error.BadMagic;
if (!mem.eql(u8, &magic_buffer, elf.ARMAG)) return error.BadMagic;
pos += magic_buffer.len;
}
const size = (try handle.stat()).size;
var pos: usize = elf.ARMAG.len;
while (true) {
if (pos >= size) break;
if (!mem.isAligned(pos, 2)) pos += 1;
var objects: std.ArrayListUnmanaged(Object) = .empty;
defer objects.deinit(gpa);
var hdr_buffer: [@sizeOf(elf.ar_hdr)]u8 = undefined;
var strtab: std.ArrayListUnmanaged(u8) = .empty;
defer strtab.deinit(gpa);
while (pos < size) {
pos = mem.alignForward(usize, pos, 2);
var hdr: elf.ar_hdr = undefined;
{
const amt = try handle.preadAll(&hdr_buffer, pos);
if (amt != @sizeOf(elf.ar_hdr)) return error.InputOutput;
const n = try handle.preadAll(mem.asBytes(&hdr), pos);
if (n != @sizeOf(elf.ar_hdr)) return error.UnexpectedEndOfFile;
}
const hdr = @as(*align(1) const elf.ar_hdr, @ptrCast(&hdr_buffer)).*;
pos += @sizeOf(elf.ar_hdr);
if (!mem.eql(u8, &hdr.ar_fmag, elf.ARFMAG)) {
@ -37,8 +54,8 @@ pub fn parse(self: *Archive, elf_file: *Elf, path: Path, handle_index: File.Hand
if (hdr.isSymtab() or hdr.isSymtab64()) continue;
if (hdr.isStrtab()) {
try self.strtab.resize(gpa, obj_size);
const amt = try handle.preadAll(self.strtab.items, pos);
try strtab.resize(gpa, obj_size);
const amt = try handle.preadAll(strtab.items, pos);
if (amt != obj_size) return error.InputOutput;
continue;
}
@ -47,7 +64,7 @@ pub fn parse(self: *Archive, elf_file: *Elf, path: Path, handle_index: File.Hand
const name = if (hdr.name()) |name|
name
else if (try hdr.nameOffset()) |off|
self.getString(off)
stringTableLookup(strtab.items, off)
else
unreachable;
@ -70,14 +87,18 @@ pub fn parse(self: *Archive, elf_file: *Elf, path: Path, handle_index: File.Hand
@as(Path, object.path), @as(Path, path),
});
try self.objects.append(gpa, object);
try objects.append(gpa, object);
}
return .{
.objects = try objects.toOwnedSlice(gpa),
.strtab = try strtab.toOwnedSlice(gpa),
};
}
fn getString(self: Archive, off: u32) []const u8 {
assert(off < self.strtab.items.len);
const name = mem.sliceTo(@as([*:'\n']const u8, @ptrCast(self.strtab.items.ptr + off)), 0);
return name[0 .. name.len - 1];
pub fn stringTableLookup(strtab: []const u8, off: u32) [:'\n']const u8 {
const slice = strtab[off..];
return slice[0..mem.indexOfScalar(u8, slice, '\n').? :'\n'];
}
pub fn setArHdr(opts: struct {
@ -290,8 +311,9 @@ const fs = std.fs;
const log = std.log.scoped(.link);
const mem = std.mem;
const Path = std.Build.Cache.Path;
const Allocator = std.mem.Allocator;
const Allocator = mem.Allocator;
const Diags = @import("../../link.zig").Diags;
const Archive = @This();
const Elf = @import("../Elf.zig");
const File = @import("file.zig").File;

View File

@ -102,9 +102,13 @@ pub fn relocsShndx(self: Atom) ?u32 {
return self.relocs_section_index;
}
pub fn priority(self: Atom, elf_file: *Elf) u64 {
const index = self.file(elf_file).?.index();
return (@as(u64, @intCast(index)) << 32) | @as(u64, @intCast(self.input_section_index));
pub fn priority(atom: Atom, elf_file: *Elf) u64 {
const index = atom.file(elf_file).?.index();
return priorityLookup(index, atom.input_section_index);
}
pub fn priorityLookup(file_index: File.Index, input_section_index: u32) u64 {
return (@as(u64, @intCast(file_index)) << 32) | @as(u64, @intCast(input_section_index));
}
/// Returns how much room there is to grow in virtual address space.
@ -255,19 +259,13 @@ pub fn writeRelocs(self: Atom, elf_file: *Elf, out_relocs: *std.ArrayList(elf.El
}
}
pub fn fdes(self: Atom, elf_file: *Elf) []Fde {
const extras = self.extra(elf_file);
return switch (self.file(elf_file).?) {
.shared_object => unreachable,
.linker_defined, .zig_object => &[0]Fde{},
.object => |x| x.fdes.items[extras.fde_start..][0..extras.fde_count],
};
pub fn fdes(atom: Atom, object: *Object) []Fde {
const extras = object.atomExtra(atom.extra_index);
return object.fdes.items[extras.fde_start..][0..extras.fde_count];
}
pub fn markFdesDead(self: Atom, elf_file: *Elf) void {
for (self.fdes(elf_file)) |*fde| {
fde.alive = false;
}
pub fn markFdesDead(self: Atom, object: *Object) void {
for (self.fdes(object)) |*fde| fde.alive = false;
}
pub fn addReloc(self: Atom, alloc: Allocator, reloc: elf.Elf64_Rela, zo: *ZigObject) !void {
@ -946,16 +944,21 @@ fn format2(
atom.output_section_index, atom.alignment.toByteUnits() orelse 0, atom.size,
atom.prev_atom_ref, atom.next_atom_ref,
});
if (atom.fdes(elf_file).len > 0) {
try writer.writeAll(" : fdes{ ");
const extras = atom.extra(elf_file);
for (atom.fdes(elf_file), extras.fde_start..) |fde, i| {
try writer.print("{d}", .{i});
if (!fde.alive) try writer.writeAll("([*])");
if (i - extras.fde_start < extras.fde_count - 1) try writer.writeAll(", ");
}
try writer.writeAll(" }");
}
if (atom.file(elf_file)) |atom_file| switch (atom_file) {
.object => |object| {
if (atom.fdes(object).len > 0) {
try writer.writeAll(" : fdes{ ");
const extras = atom.extra(elf_file);
for (atom.fdes(object), extras.fde_start..) |fde, i| {
try writer.print("{d}", .{i});
if (!fde.alive) try writer.writeAll("([*])");
if (i - extras.fde_start < extras.fde_count - 1) try writer.writeAll(", ");
}
try writer.writeAll(" }");
}
},
else => {},
};
if (!atom.alive) {
try writer.writeAll(" : [*]");
}

View File

@ -37,72 +37,84 @@ num_dynrelocs: u32 = 0,
output_symtab_ctx: Elf.SymtabCtx = .{},
output_ar_state: Archive.ArState = .{},
pub fn deinit(self: *Object, allocator: Allocator) void {
if (self.archive) |*ar| allocator.free(ar.path.sub_path);
allocator.free(self.path.sub_path);
self.shdrs.deinit(allocator);
self.symtab.deinit(allocator);
self.strtab.deinit(allocator);
self.symbols.deinit(allocator);
self.symbols_extra.deinit(allocator);
self.symbols_resolver.deinit(allocator);
self.atoms.deinit(allocator);
self.atoms_indexes.deinit(allocator);
self.atoms_extra.deinit(allocator);
self.comdat_groups.deinit(allocator);
self.comdat_group_data.deinit(allocator);
self.relocs.deinit(allocator);
self.fdes.deinit(allocator);
self.cies.deinit(allocator);
self.eh_frame_data.deinit(allocator);
pub fn deinit(self: *Object, gpa: Allocator) void {
if (self.archive) |*ar| gpa.free(ar.path.sub_path);
gpa.free(self.path.sub_path);
self.shdrs.deinit(gpa);
self.symtab.deinit(gpa);
self.strtab.deinit(gpa);
self.symbols.deinit(gpa);
self.symbols_extra.deinit(gpa);
self.symbols_resolver.deinit(gpa);
self.atoms.deinit(gpa);
self.atoms_indexes.deinit(gpa);
self.atoms_extra.deinit(gpa);
self.comdat_groups.deinit(gpa);
self.comdat_group_data.deinit(gpa);
self.relocs.deinit(gpa);
self.fdes.deinit(gpa);
self.cies.deinit(gpa);
self.eh_frame_data.deinit(gpa);
for (self.input_merge_sections.items) |*isec| {
isec.deinit(allocator);
isec.deinit(gpa);
}
self.input_merge_sections.deinit(allocator);
self.input_merge_sections_indexes.deinit(allocator);
self.input_merge_sections.deinit(gpa);
self.input_merge_sections_indexes.deinit(gpa);
}
pub fn parse(self: *Object, elf_file: *Elf) !void {
const gpa = elf_file.base.comp.gpa;
const cpu_arch = elf_file.getTarget().cpu.arch;
const handle = elf_file.fileHandle(self.file_handle);
try self.parseCommon(gpa, handle, elf_file);
pub fn parse(
self: *Object,
gpa: Allocator,
diags: *Diags,
/// For error reporting purposes only.
path: Path,
handle: fs.File,
target: std.Target,
debug_fmt_strip: bool,
default_sym_version: elf.Versym,
) !void {
// Append null input merge section
try self.input_merge_sections.append(gpa, .{});
// Allocate atom index 0 to null atom
try self.atoms.append(gpa, .{ .extra_index = try self.addAtomExtra(gpa, .{}) });
try self.initAtoms(gpa, handle, elf_file);
try self.initSymbols(gpa, elf_file);
try self.initAtoms(gpa, diags, path, handle, debug_fmt_strip, target);
try self.initSymbols(gpa, default_sym_version);
for (self.shdrs.items, 0..) |shdr, i| {
const atom_ptr = self.atom(self.atoms_indexes.items[i]) orelse continue;
if (!atom_ptr.alive) continue;
if ((cpu_arch == .x86_64 and shdr.sh_type == elf.SHT_X86_64_UNWIND) or
mem.eql(u8, atom_ptr.name(elf_file), ".eh_frame"))
if ((target.cpu.arch == .x86_64 and shdr.sh_type == elf.SHT_X86_64_UNWIND) or
mem.eql(u8, self.getString(atom_ptr.name_offset), ".eh_frame"))
{
try self.parseEhFrame(gpa, handle, @as(u32, @intCast(i)), elf_file);
try self.parseEhFrame(gpa, handle, @intCast(i), target);
}
}
}
fn parseCommon(self: *Object, allocator: Allocator, handle: std.fs.File, elf_file: *Elf) !void {
pub fn parseCommon(
self: *Object,
gpa: Allocator,
diags: *Diags,
path: Path,
handle: fs.File,
target: std.Target,
first_eflags: *?elf.Word,
) !void {
const offset = if (self.archive) |ar| ar.offset else 0;
const file_size = (try handle.stat()).size;
const header_buffer = try Elf.preadAllAlloc(allocator, handle, offset, @sizeOf(elf.Elf64_Ehdr));
defer allocator.free(header_buffer);
const header_buffer = try Elf.preadAllAlloc(gpa, handle, offset, @sizeOf(elf.Elf64_Ehdr));
defer gpa.free(header_buffer);
self.header = @as(*align(1) const elf.Elf64_Ehdr, @ptrCast(header_buffer)).*;
const em = elf_file.base.comp.root_mod.resolved_target.result.toElfMachine();
const em = target.toElfMachine();
if (em != self.header.?.e_machine) {
return elf_file.failFile(self.index, "invalid ELF machine type: {s}", .{
return diags.failParse(path, "invalid ELF machine type: {s}", .{
@tagName(self.header.?.e_machine),
});
}
try elf_file.validateEFlags(self.index, self.header.?.e_flags);
try validateEFlags(diags, path, target, self.header.?.e_flags, first_eflags);
if (self.header.?.e_shnum == 0) return;
@ -110,30 +122,30 @@ fn parseCommon(self: *Object, allocator: Allocator, handle: std.fs.File, elf_fil
const shnum = math.cast(usize, self.header.?.e_shnum) orelse return error.Overflow;
const shsize = shnum * @sizeOf(elf.Elf64_Shdr);
if (file_size < offset + shoff or file_size < offset + shoff + shsize) {
return elf_file.failFile(self.index, "corrupt header: section header table extends past the end of file", .{});
return diags.failParse(path, "corrupt header: section header table extends past the end of file", .{});
}
const shdrs_buffer = try Elf.preadAllAlloc(allocator, handle, offset + shoff, shsize);
defer allocator.free(shdrs_buffer);
const shdrs_buffer = try Elf.preadAllAlloc(gpa, handle, offset + shoff, shsize);
defer gpa.free(shdrs_buffer);
const shdrs = @as([*]align(1) const elf.Elf64_Shdr, @ptrCast(shdrs_buffer.ptr))[0..shnum];
try self.shdrs.appendUnalignedSlice(allocator, shdrs);
try self.shdrs.appendUnalignedSlice(gpa, shdrs);
for (self.shdrs.items) |shdr| {
if (shdr.sh_type != elf.SHT_NOBITS) {
if (file_size < offset + shdr.sh_offset or file_size < offset + shdr.sh_offset + shdr.sh_size) {
return elf_file.failFile(self.index, "corrupt section: extends past the end of file", .{});
return diags.failParse(path, "corrupt section: extends past the end of file", .{});
}
}
}
const shstrtab = try self.preadShdrContentsAlloc(allocator, handle, self.header.?.e_shstrndx);
defer allocator.free(shstrtab);
const shstrtab = try self.preadShdrContentsAlloc(gpa, handle, self.header.?.e_shstrndx);
defer gpa.free(shstrtab);
for (self.shdrs.items) |shdr| {
if (shdr.sh_name >= shstrtab.len) {
return elf_file.failFile(self.index, "corrupt section name offset", .{});
return diags.failParse(path, "corrupt section name offset", .{});
}
}
try self.strtab.appendSlice(allocator, shstrtab);
try self.strtab.appendSlice(gpa, shstrtab);
const symtab_index = for (self.shdrs.items, 0..) |shdr, i| switch (shdr.sh_type) {
elf.SHT_SYMTAB => break @as(u32, @intCast(i)),
@ -144,19 +156,19 @@ fn parseCommon(self: *Object, allocator: Allocator, handle: std.fs.File, elf_fil
const shdr = self.shdrs.items[index];
self.first_global = shdr.sh_info;
const raw_symtab = try self.preadShdrContentsAlloc(allocator, handle, index);
defer allocator.free(raw_symtab);
const raw_symtab = try self.preadShdrContentsAlloc(gpa, handle, index);
defer gpa.free(raw_symtab);
const nsyms = math.divExact(usize, raw_symtab.len, @sizeOf(elf.Elf64_Sym)) catch {
return elf_file.failFile(self.index, "symbol table not evenly divisible", .{});
return diags.failParse(path, "symbol table not evenly divisible", .{});
};
const symtab = @as([*]align(1) const elf.Elf64_Sym, @ptrCast(raw_symtab.ptr))[0..nsyms];
const strtab_bias = @as(u32, @intCast(self.strtab.items.len));
const strtab = try self.preadShdrContentsAlloc(allocator, handle, shdr.sh_link);
defer allocator.free(strtab);
try self.strtab.appendSlice(allocator, strtab);
const strtab = try self.preadShdrContentsAlloc(gpa, handle, shdr.sh_link);
defer gpa.free(strtab);
try self.strtab.appendSlice(gpa, strtab);
try self.symtab.ensureUnusedCapacity(allocator, symtab.len);
try self.symtab.ensureUnusedCapacity(gpa, symtab.len);
for (symtab) |sym| {
const out_sym = self.symtab.addOneAssumeCapacity();
out_sym.* = sym;
@ -168,15 +180,56 @@ fn parseCommon(self: *Object, allocator: Allocator, handle: std.fs.File, elf_fil
}
}
fn initAtoms(self: *Object, allocator: Allocator, handle: std.fs.File, elf_file: *Elf) !void {
const comp = elf_file.base.comp;
const debug_fmt_strip = comp.config.debug_format == .strip;
const target = comp.root_mod.resolved_target.result;
fn validateEFlags(
diags: *Diags,
path: Path,
target: std.Target,
e_flags: elf.Word,
first_eflags: *?elf.Word,
) error{LinkFailure}!void {
if (first_eflags.*) |*self_eflags| {
switch (target.cpu.arch) {
.riscv64 => {
if (e_flags != self_eflags.*) {
const riscv_eflags: riscv.RiscvEflags = @bitCast(e_flags);
const self_riscv_eflags: *riscv.RiscvEflags = @ptrCast(self_eflags);
self_riscv_eflags.rvc = self_riscv_eflags.rvc or riscv_eflags.rvc;
self_riscv_eflags.tso = self_riscv_eflags.tso or riscv_eflags.tso;
var any_errors: bool = false;
if (self_riscv_eflags.fabi != riscv_eflags.fabi) {
any_errors = true;
diags.addParseError(path, "cannot link object files with different float-point ABIs", .{});
}
if (self_riscv_eflags.rve != riscv_eflags.rve) {
any_errors = true;
diags.addParseError(path, "cannot link object files with different RVEs", .{});
}
if (any_errors) return error.LinkFailure;
}
},
else => {},
}
} else {
first_eflags.* = e_flags;
}
}
fn initAtoms(
self: *Object,
gpa: Allocator,
diags: *Diags,
path: Path,
handle: fs.File,
debug_fmt_strip: bool,
target: std.Target,
) !void {
const shdrs = self.shdrs.items;
try self.atoms.ensureTotalCapacityPrecise(allocator, shdrs.len);
try self.atoms_extra.ensureTotalCapacityPrecise(allocator, shdrs.len * @sizeOf(Atom.Extra));
try self.atoms_indexes.ensureTotalCapacityPrecise(allocator, shdrs.len);
try self.atoms_indexes.resize(allocator, shdrs.len);
try self.atoms.ensureTotalCapacityPrecise(gpa, shdrs.len);
try self.atoms_extra.ensureTotalCapacityPrecise(gpa, shdrs.len * @sizeOf(Atom.Extra));
try self.atoms_indexes.ensureTotalCapacityPrecise(gpa, shdrs.len);
try self.atoms_indexes.resize(gpa, shdrs.len);
@memset(self.atoms_indexes.items, 0);
for (shdrs, 0..) |shdr, i| {
@ -201,24 +254,24 @@ fn initAtoms(self: *Object, allocator: Allocator, handle: std.fs.File, elf_file:
};
const shndx: u32 = @intCast(i);
const group_raw_data = try self.preadShdrContentsAlloc(allocator, handle, shndx);
defer allocator.free(group_raw_data);
const group_raw_data = try self.preadShdrContentsAlloc(gpa, handle, shndx);
defer gpa.free(group_raw_data);
const group_nmembers = math.divExact(usize, group_raw_data.len, @sizeOf(u32)) catch {
return elf_file.failFile(self.index, "corrupt section group: not evenly divisible ", .{});
return diags.failParse(path, "corrupt section group: not evenly divisible ", .{});
};
if (group_nmembers == 0) {
return elf_file.failFile(self.index, "corrupt section group: empty section", .{});
return diags.failParse(path, "corrupt section group: empty section", .{});
}
const group_members = @as([*]align(1) const u32, @ptrCast(group_raw_data.ptr))[0..group_nmembers];
if (group_members[0] != elf.GRP_COMDAT) {
return elf_file.failFile(self.index, "corrupt section group: unknown SHT_GROUP format", .{});
return diags.failParse(path, "corrupt section group: unknown SHT_GROUP format", .{});
}
const group_start: u32 = @intCast(self.comdat_group_data.items.len);
try self.comdat_group_data.appendUnalignedSlice(allocator, group_members[1..]);
try self.comdat_group_data.appendUnalignedSlice(gpa, group_members[1..]);
const comdat_group_index = try self.addComdatGroup(allocator);
const comdat_group_index = try self.addComdatGroup(gpa);
const comdat_group = self.comdatGroup(comdat_group_index);
comdat_group.* = .{
.signature_off = group_signature,
@ -242,8 +295,8 @@ fn initAtoms(self: *Object, allocator: Allocator, handle: std.fs.File, elf_file:
const shndx: u32 = @intCast(i);
if (self.skipShdr(shndx, debug_fmt_strip)) continue;
const size, const alignment = if (shdr.sh_flags & elf.SHF_COMPRESSED != 0) blk: {
const data = try self.preadShdrContentsAlloc(allocator, handle, shndx);
defer allocator.free(data);
const data = try self.preadShdrContentsAlloc(gpa, handle, shndx);
defer gpa.free(data);
const chdr = @as(*align(1) const elf.Elf64_Chdr, @ptrCast(data.ptr)).*;
break :blk .{ chdr.ch_size, Alignment.fromNonzeroByteUnits(chdr.ch_addralign) };
} else .{ shdr.sh_size, Alignment.fromNonzeroByteUnits(shdr.sh_addralign) };
@ -263,13 +316,13 @@ fn initAtoms(self: *Object, allocator: Allocator, handle: std.fs.File, elf_file:
elf.SHT_REL, elf.SHT_RELA => {
const atom_index = self.atoms_indexes.items[shdr.sh_info];
if (self.atom(atom_index)) |atom_ptr| {
const relocs = try self.preadRelocsAlloc(allocator, handle, @intCast(i));
defer allocator.free(relocs);
const relocs = try self.preadRelocsAlloc(gpa, handle, @intCast(i));
defer gpa.free(relocs);
atom_ptr.relocs_section_index = @intCast(i);
const rel_index: u32 = @intCast(self.relocs.items.len);
const rel_count: u32 = @intCast(relocs.len);
self.setAtomFields(atom_ptr, .{ .rel_index = rel_index, .rel_count = rel_count });
try self.relocs.appendUnalignedSlice(allocator, relocs);
try self.relocs.appendUnalignedSlice(gpa, relocs);
if (target.cpu.arch == .riscv64) {
sortRelocs(self.relocs.items[rel_index..][0..rel_count]);
}
@ -293,14 +346,18 @@ fn skipShdr(self: *Object, index: u32, debug_fmt_strip: bool) bool {
return ignore;
}
fn initSymbols(self: *Object, allocator: Allocator, elf_file: *Elf) !void {
fn initSymbols(
self: *Object,
gpa: Allocator,
default_sym_version: elf.Versym,
) !void {
const first_global = self.first_global orelse self.symtab.items.len;
const nglobals = self.symtab.items.len - first_global;
try self.symbols.ensureTotalCapacityPrecise(allocator, self.symtab.items.len);
try self.symbols_extra.ensureTotalCapacityPrecise(allocator, self.symtab.items.len * @sizeOf(Symbol.Extra));
try self.symbols_resolver.ensureTotalCapacityPrecise(allocator, nglobals);
self.symbols_resolver.resize(allocator, nglobals) catch unreachable;
try self.symbols.ensureTotalCapacityPrecise(gpa, self.symtab.items.len);
try self.symbols_extra.ensureTotalCapacityPrecise(gpa, self.symtab.items.len * @sizeOf(Symbol.Extra));
try self.symbols_resolver.ensureTotalCapacityPrecise(gpa, nglobals);
self.symbols_resolver.resize(gpa, nglobals) catch unreachable;
@memset(self.symbols_resolver.items, 0);
for (self.symtab.items, 0..) |sym, i| {
@ -310,7 +367,7 @@ fn initSymbols(self: *Object, allocator: Allocator, elf_file: *Elf) !void {
sym_ptr.name_offset = sym.st_name;
sym_ptr.esym_index = @intCast(i);
sym_ptr.extra_index = self.addSymbolExtraAssumeCapacity(.{});
sym_ptr.version_index = if (i >= first_global) elf_file.default_sym_version else .LOCAL;
sym_ptr.version_index = if (i >= first_global) default_sym_version else .LOCAL;
sym_ptr.flags.weak = sym.st_bind() == elf.STB_WEAK;
if (sym.st_shndx != elf.SHN_ABS and sym.st_shndx != elf.SHN_COMMON) {
sym_ptr.ref = .{ .index = self.atoms_indexes.items[sym.st_shndx], .file = self.index };
@ -318,24 +375,30 @@ fn initSymbols(self: *Object, allocator: Allocator, elf_file: *Elf) !void {
}
}
fn parseEhFrame(self: *Object, allocator: Allocator, handle: std.fs.File, shndx: u32, elf_file: *Elf) !void {
fn parseEhFrame(
self: *Object,
gpa: Allocator,
handle: fs.File,
shndx: u32,
target: std.Target,
) !void {
const relocs_shndx = for (self.shdrs.items, 0..) |shdr, i| switch (shdr.sh_type) {
elf.SHT_RELA => if (shdr.sh_info == shndx) break @as(u32, @intCast(i)),
else => {},
} else null;
const raw = try self.preadShdrContentsAlloc(allocator, handle, shndx);
defer allocator.free(raw);
const data_start = @as(u32, @intCast(self.eh_frame_data.items.len));
try self.eh_frame_data.appendSlice(allocator, raw);
const raw = try self.preadShdrContentsAlloc(gpa, handle, shndx);
defer gpa.free(raw);
const data_start: u32 = @intCast(self.eh_frame_data.items.len);
try self.eh_frame_data.appendSlice(gpa, raw);
const relocs = if (relocs_shndx) |index|
try self.preadRelocsAlloc(allocator, handle, index)
try self.preadRelocsAlloc(gpa, handle, index)
else
&[0]elf.Elf64_Rela{};
defer allocator.free(relocs);
const rel_start = @as(u32, @intCast(self.relocs.items.len));
try self.relocs.appendUnalignedSlice(allocator, relocs);
if (elf_file.getTarget().cpu.arch == .riscv64) {
defer gpa.free(relocs);
const rel_start: u32 = @intCast(self.relocs.items.len);
try self.relocs.appendUnalignedSlice(gpa, relocs);
if (target.cpu.arch == .riscv64) {
sortRelocs(self.relocs.items[rel_start..][0..relocs.len]);
}
const fdes_start = self.fdes.items.len;
@ -345,11 +408,11 @@ fn parseEhFrame(self: *Object, allocator: Allocator, handle: std.fs.File, shndx:
while (try it.next()) |rec| {
const rel_range = filterRelocs(self.relocs.items[rel_start..][0..relocs.len], rec.offset, rec.size + 4);
switch (rec.tag) {
.cie => try self.cies.append(allocator, .{
.cie => try self.cies.append(gpa, .{
.offset = data_start + rec.offset,
.size = rec.size,
.rel_index = rel_start + @as(u32, @intCast(rel_range.start)),
.rel_num = @as(u32, @intCast(rel_range.len)),
.rel_num = @intCast(rel_range.len),
.input_section_index = shndx,
.file_index = self.index,
}),
@ -361,12 +424,12 @@ fn parseEhFrame(self: *Object, allocator: Allocator, handle: std.fs.File, shndx:
// this can happen for object files built with -r flag by the linker.
continue;
}
try self.fdes.append(allocator, .{
try self.fdes.append(gpa, .{
.offset = data_start + rec.offset,
.size = rec.size,
.cie_index = undefined,
.rel_index = rel_start + @as(u32, @intCast(rel_range.start)),
.rel_num = @as(u32, @intCast(rel_range.len)),
.rel_num = @intCast(rel_range.len),
.input_section_index = shndx,
.file_index = self.index,
});
@ -376,7 +439,7 @@ fn parseEhFrame(self: *Object, allocator: Allocator, handle: std.fs.File, shndx:
// Tie each FDE to its CIE
for (self.fdes.items[fdes_start..]) |*fde| {
const cie_ptr = fde.offset + 4 - fde.ciePointer(elf_file);
const cie_ptr = fde.offset + 4 - fde.ciePointer(self);
const cie_index = for (self.cies.items[cies_start..], cies_start..) |cie, cie_index| {
if (cie.offset == cie_ptr) break @as(u32, @intCast(cie_index));
} else {
@ -392,26 +455,26 @@ fn parseEhFrame(self: *Object, allocator: Allocator, handle: std.fs.File, shndx:
// Tie each FDE record to its matching atom
const SortFdes = struct {
pub fn lessThan(ctx: *Elf, lhs: Fde, rhs: Fde) bool {
pub fn lessThan(ctx: *Object, lhs: Fde, rhs: Fde) bool {
const lhs_atom = lhs.atom(ctx);
const rhs_atom = rhs.atom(ctx);
return lhs_atom.priority(ctx) < rhs_atom.priority(ctx);
return Atom.priorityLookup(ctx.index, lhs_atom.input_section_index) < Atom.priorityLookup(ctx.index, rhs_atom.input_section_index);
}
};
mem.sort(Fde, self.fdes.items[fdes_start..], elf_file, SortFdes.lessThan);
mem.sort(Fde, self.fdes.items[fdes_start..], self, SortFdes.lessThan);
// Create a back-link from atom to FDEs
var i: u32 = @as(u32, @intCast(fdes_start));
var i: u32 = @intCast(fdes_start);
while (i < self.fdes.items.len) {
const fde = self.fdes.items[i];
const atom_ptr = fde.atom(elf_file);
const atom_ptr = fde.atom(self);
const start = i;
i += 1;
while (i < self.fdes.items.len) : (i += 1) {
const next_fde = self.fdes.items[i];
if (atom_ptr.atom_index != next_fde.atom(elf_file).atom_index) break;
if (atom_ptr.atom_index != next_fde.atom(self).atom_index) break;
}
atom_ptr.addExtra(.{ .fde_start = start, .fde_count = i - start }, elf_file);
self.setAtomFields(atom_ptr, .{ .fde_start = start, .fde_count = i - start });
}
}
@ -904,7 +967,7 @@ pub fn markComdatGroupsDead(self: *Object, elf_file: *Elf) void {
const atom_index = self.atoms_indexes.items[shndx];
if (self.atom(atom_index)) |atom_ptr| {
atom_ptr.alive = false;
atom_ptr.markFdesDead(elf_file);
atom_ptr.markFdesDead(self);
}
}
}
@ -970,12 +1033,6 @@ pub fn addAtomsToRelaSections(self: *Object, elf_file: *Elf) !void {
}
}
pub fn parseAr(self: *Object, elf_file: *Elf) !void {
const gpa = elf_file.base.comp.gpa;
const handle = elf_file.fileHandle(self.file_handle);
try self.parseCommon(gpa, handle, elf_file);
}
pub fn updateArSymtab(self: Object, ar_symtab: *Archive.ArSymtab, elf_file: *Elf) !void {
const comp = elf_file.base.comp;
const gpa = comp.gpa;
@ -1000,7 +1057,7 @@ pub fn updateArSize(self: *Object, elf_file: *Elf) !void {
pub fn writeAr(self: Object, elf_file: *Elf, writer: anytype) !void {
const size = std.math.cast(usize, self.output_ar_state.size) orelse return error.Overflow;
const offset: u64 = if (self.archive) |ar| ar.offset else 0;
const name = std.fs.path.basename(self.path.sub_path);
const name = fs.path.basename(self.path.sub_path);
const hdr = Archive.setArHdr(.{
.name = if (name.len <= Archive.max_member_name_len)
.{ .name = name }
@ -1136,8 +1193,8 @@ pub fn resolveSymbol(self: Object, index: Symbol.Index, elf_file: *Elf) Elf.Ref
return elf_file.resolver.get(resolv).?;
}
fn addSymbol(self: *Object, allocator: Allocator) !Symbol.Index {
try self.symbols.ensureUnusedCapacity(allocator, 1);
fn addSymbol(self: *Object, gpa: Allocator) !Symbol.Index {
try self.symbols.ensureUnusedCapacity(gpa, 1);
return self.addSymbolAssumeCapacity();
}
@ -1147,9 +1204,9 @@ fn addSymbolAssumeCapacity(self: *Object) Symbol.Index {
return index;
}
pub fn addSymbolExtra(self: *Object, allocator: Allocator, extra: Symbol.Extra) !u32 {
pub fn addSymbolExtra(self: *Object, gpa: Allocator, extra: Symbol.Extra) !u32 {
const fields = @typeInfo(Symbol.Extra).@"struct".fields;
try self.symbols_extra.ensureUnusedCapacity(allocator, fields.len);
try self.symbols_extra.ensureUnusedCapacity(gpa, fields.len);
return self.addSymbolExtraAssumeCapacity(extra);
}
@ -1198,27 +1255,27 @@ pub fn getString(self: Object, off: u32) [:0]const u8 {
return mem.sliceTo(@as([*:0]const u8, @ptrCast(self.strtab.items.ptr + off)), 0);
}
fn addString(self: *Object, allocator: Allocator, str: []const u8) !u32 {
fn addString(self: *Object, gpa: Allocator, str: []const u8) !u32 {
const off: u32 = @intCast(self.strtab.items.len);
try self.strtab.ensureUnusedCapacity(allocator, str.len + 1);
try self.strtab.ensureUnusedCapacity(gpa, str.len + 1);
self.strtab.appendSliceAssumeCapacity(str);
self.strtab.appendAssumeCapacity(0);
return off;
}
/// Caller owns the memory.
fn preadShdrContentsAlloc(self: Object, allocator: Allocator, handle: std.fs.File, index: u32) ![]u8 {
fn preadShdrContentsAlloc(self: Object, gpa: Allocator, handle: fs.File, index: u32) ![]u8 {
assert(index < self.shdrs.items.len);
const offset = if (self.archive) |ar| ar.offset else 0;
const shdr = self.shdrs.items[index];
const sh_offset = math.cast(u64, shdr.sh_offset) orelse return error.Overflow;
const sh_size = math.cast(u64, shdr.sh_size) orelse return error.Overflow;
return Elf.preadAllAlloc(allocator, handle, offset + sh_offset, sh_size);
return Elf.preadAllAlloc(gpa, handle, offset + sh_offset, sh_size);
}
/// Caller owns the memory.
fn preadRelocsAlloc(self: Object, allocator: Allocator, handle: std.fs.File, shndx: u32) ![]align(1) const elf.Elf64_Rela {
const raw = try self.preadShdrContentsAlloc(allocator, handle, shndx);
fn preadRelocsAlloc(self: Object, gpa: Allocator, handle: fs.File, shndx: u32) ![]align(1) const elf.Elf64_Rela {
const raw = try self.preadShdrContentsAlloc(gpa, handle, shndx);
const num = @divExact(raw.len, @sizeOf(elf.Elf64_Rela));
return @as([*]align(1) const elf.Elf64_Rela, @ptrCast(raw.ptr))[0..num];
}
@ -1230,9 +1287,9 @@ const AddAtomArgs = struct {
alignment: Alignment,
};
fn addAtom(self: *Object, allocator: Allocator, args: AddAtomArgs) !Atom.Index {
try self.atoms.ensureUnusedCapacity(allocator, 1);
try self.atoms_extra.ensureUnusedCapacity(allocator, @sizeOf(Atom.Extra));
fn addAtom(self: *Object, gpa: Allocator, args: AddAtomArgs) !Atom.Index {
try self.atoms.ensureUnusedCapacity(gpa, 1);
try self.atoms_extra.ensureUnusedCapacity(gpa, @sizeOf(Atom.Extra));
return self.addAtomAssumeCapacity(args);
}
@ -1257,9 +1314,9 @@ pub fn atom(self: *Object, atom_index: Atom.Index) ?*Atom {
return &self.atoms.items[atom_index];
}
pub fn addAtomExtra(self: *Object, allocator: Allocator, extra: Atom.Extra) !u32 {
pub fn addAtomExtra(self: *Object, gpa: Allocator, extra: Atom.Extra) !u32 {
const fields = @typeInfo(Atom.Extra).@"struct".fields;
try self.atoms_extra.ensureUnusedCapacity(allocator, fields.len);
try self.atoms_extra.ensureUnusedCapacity(gpa, fields.len);
return self.addAtomExtraAssumeCapacity(extra);
}
@ -1308,9 +1365,9 @@ fn setAtomFields(o: *Object, atom_ptr: *Atom, opts: Atom.Extra.AsOptionals) void
o.setAtomExtra(atom_ptr.extra_index, extras);
}
fn addInputMergeSection(self: *Object, allocator: Allocator) !Merge.InputSection.Index {
fn addInputMergeSection(self: *Object, gpa: Allocator) !Merge.InputSection.Index {
const index: Merge.InputSection.Index = @intCast(self.input_merge_sections.items.len);
const msec = try self.input_merge_sections.addOne(allocator);
const msec = try self.input_merge_sections.addOne(gpa);
msec.* = .{};
return index;
}
@ -1320,9 +1377,9 @@ fn inputMergeSection(self: *Object, index: Merge.InputSection.Index) ?*Merge.Inp
return &self.input_merge_sections.items[index];
}
fn addComdatGroup(self: *Object, allocator: Allocator) !Elf.ComdatGroup.Index {
fn addComdatGroup(self: *Object, gpa: Allocator) !Elf.ComdatGroup.Index {
const index = @as(Elf.ComdatGroup.Index, @intCast(self.comdat_groups.items.len));
_ = try self.comdat_groups.addOne(allocator);
_ = try self.comdat_groups.addOne(gpa);
return index;
}
@ -1516,8 +1573,9 @@ const log = std.log.scoped(.link);
const math = std.math;
const mem = std.mem;
const Path = std.Build.Cache.Path;
const Allocator = mem.Allocator;
const Allocator = std.mem.Allocator;
const Diags = @import("../../link.zig").Diags;
const Archive = @import("Archive.zig");
const Atom = @import("Atom.zig");
const AtomList = @import("AtomList.zig");
@ -1528,3 +1586,4 @@ const File = @import("file.zig").File;
const Merge = @import("Merge.zig");
const Symbol = @import("Symbol.zig");
const Alignment = Atom.Alignment;
const riscv = @import("../riscv.zig");

View File

@ -928,7 +928,7 @@ pub fn getNavVAddr(
nav.name.toSlice(ip),
@"extern".lib_name.toSlice(ip),
),
else => try self.getOrCreateMetadataForNav(elf_file, nav_index),
else => try self.getOrCreateMetadataForNav(zcu, nav_index),
};
const this_sym = self.symbol(this_sym_index);
const vaddr = this_sym.address(.{}, elf_file);
@ -1102,21 +1102,15 @@ pub fn freeNav(self: *ZigObject, elf_file: *Elf, nav_index: InternPool.Nav.Index
}
}
pub fn getOrCreateMetadataForNav(
self: *ZigObject,
elf_file: *Elf,
nav_index: InternPool.Nav.Index,
) !Symbol.Index {
const gpa = elf_file.base.comp.gpa;
pub fn getOrCreateMetadataForNav(self: *ZigObject, zcu: *Zcu, nav_index: InternPool.Nav.Index) !Symbol.Index {
const gpa = zcu.gpa;
const gop = try self.navs.getOrPut(gpa, nav_index);
if (!gop.found_existing) {
const any_non_single_threaded = elf_file.base.comp.config.any_non_single_threaded;
const symbol_index = try self.newSymbolWithAtom(gpa, 0);
const zcu = elf_file.base.comp.zcu.?;
const nav_val = Value.fromInterned(zcu.intern_pool.getNav(nav_index).status.resolved.val);
const sym = self.symbol(symbol_index);
if (nav_val.getVariable(zcu)) |variable| {
if (variable.is_threadlocal and any_non_single_threaded) {
if (variable.is_threadlocal and zcu.comp.config.any_non_single_threaded) {
sym.flags.is_tls = true;
}
}
@ -1425,8 +1419,8 @@ pub fn updateFunc(
log.debug("updateFunc {}({d})", .{ ip.getNav(func.owner_nav).fqn.fmt(ip), func.owner_nav });
const sym_index = try self.getOrCreateMetadataForNav(elf_file, func.owner_nav);
self.symbol(sym_index).atom(elf_file).?.freeRelocs(self);
const sym_index = try self.getOrCreateMetadataForNav(zcu, func.owner_nav);
self.atom(self.symbol(sym_index).ref.index).?.freeRelocs(self);
var code_buffer = std.ArrayList(u8).init(gpa);
defer code_buffer.deinit();
@ -1460,12 +1454,12 @@ pub fn updateFunc(
ip.getNav(func.owner_nav).fqn.fmt(ip),
});
const old_rva, const old_alignment = blk: {
const atom_ptr = self.symbol(sym_index).atom(elf_file).?;
const atom_ptr = self.atom(self.symbol(sym_index).ref.index).?;
break :blk .{ atom_ptr.value, atom_ptr.alignment };
};
try self.updateNavCode(elf_file, pt, func.owner_nav, sym_index, shndx, code, elf.STT_FUNC);
const new_rva, const new_alignment = blk: {
const atom_ptr = self.symbol(sym_index).atom(elf_file).?;
const atom_ptr = self.atom(self.symbol(sym_index).ref.index).?;
break :blk .{ atom_ptr.value, atom_ptr.alignment };
};
@ -1477,7 +1471,7 @@ pub fn updateFunc(
.{
.index = sym_index,
.addr = @intCast(sym.address(.{}, elf_file)),
.size = sym.atom(elf_file).?.size,
.size = self.atom(sym.ref.index).?.size,
},
wip_nav,
);
@ -1500,7 +1494,7 @@ pub fn updateFunc(
});
defer gpa.free(name);
const osec = if (self.text_index) |sect_sym_index|
self.symbol(sect_sym_index).atom(elf_file).?.output_section_index
self.atom(self.symbol(sect_sym_index).ref.index).?.output_section_index
else osec: {
const osec = try elf_file.addSection(.{
.name = try elf_file.insertShString(".text"),
@ -1565,7 +1559,7 @@ pub fn updateNav(
};
if (nav_init != .none and Value.fromInterned(nav_init).typeOf(zcu).hasRuntimeBits(zcu)) {
const sym_index = try self.getOrCreateMetadataForNav(elf_file, nav_index);
const sym_index = try self.getOrCreateMetadataForNav(zcu, nav_index);
self.symbol(sym_index).atom(elf_file).?.freeRelocs(self);
var code_buffer = std.ArrayList(u8).init(zcu.gpa);
@ -1789,7 +1783,7 @@ pub fn updateExports(
const gpa = elf_file.base.comp.gpa;
const metadata = switch (exported) {
.nav => |nav| blk: {
_ = try self.getOrCreateMetadataForNav(elf_file, nav);
_ = try self.getOrCreateMetadataForNav(zcu, nav);
break :blk self.navs.getPtr(nav).?;
},
.uav => |uav| self.uavs.getPtr(uav) orelse blk: {

View File

@ -19,18 +19,16 @@ pub const Fde = struct {
return base + fde.out_offset;
}
pub fn data(fde: Fde, elf_file: *Elf) []u8 {
const object = elf_file.file(fde.file_index).?.object;
pub fn data(fde: Fde, object: *Object) []u8 {
return object.eh_frame_data.items[fde.offset..][0..fde.calcSize()];
}
pub fn cie(fde: Fde, elf_file: *Elf) Cie {
const object = elf_file.file(fde.file_index).?.object;
pub fn cie(fde: Fde, object: *Object) Cie {
return object.cies.items[fde.cie_index];
}
pub fn ciePointer(fde: Fde, elf_file: *Elf) u32 {
const fde_data = fde.data(elf_file);
pub fn ciePointer(fde: Fde, object: *Object) u32 {
const fde_data = fde.data(object);
return std.mem.readInt(u32, fde_data[4..8], .little);
}
@ -38,16 +36,14 @@ pub const Fde = struct {
return fde.size + 4;
}
pub fn atom(fde: Fde, elf_file: *Elf) *Atom {
const object = elf_file.file(fde.file_index).?.object;
const rel = fde.relocs(elf_file)[0];
pub fn atom(fde: Fde, object: *Object) *Atom {
const rel = fde.relocs(object)[0];
const sym = object.symtab.items[rel.r_sym()];
const atom_index = object.atoms_indexes.items[sym.st_shndx];
return object.atom(atom_index).?;
}
pub fn relocs(fde: Fde, elf_file: *Elf) []align(1) const elf.Elf64_Rela {
const object = elf_file.file(fde.file_index).?.object;
pub fn relocs(fde: Fde, object: *Object) []const elf.Elf64_Rela {
return object.relocs.items[fde.rel_index..][0..fde.rel_num];
}
@ -87,7 +83,8 @@ pub const Fde = struct {
const fde = ctx.fde;
const elf_file = ctx.elf_file;
const base_addr = fde.address(elf_file);
const atom_name = fde.atom(elf_file).name(elf_file);
const object = elf_file.file(fde.file_index).?.object;
const atom_name = fde.atom(object).name(elf_file);
try writer.print("@{x} : size({x}) : cie({d}) : {s}", .{
base_addr + fde.out_offset,
fde.calcSize(),
@ -306,7 +303,7 @@ pub fn calcEhFrameRelocs(elf_file: *Elf) usize {
}
for (object.fdes.items) |fde| {
if (!fde.alive) continue;
count += fde.relocs(elf_file).len;
count += fde.relocs(object).len;
}
}
return count;
@ -369,16 +366,16 @@ pub fn writeEhFrame(elf_file: *Elf, writer: anytype) !void {
for (object.fdes.items) |fde| {
if (!fde.alive) continue;
const contents = fde.data(elf_file);
const contents = fde.data(object);
std.mem.writeInt(
i32,
contents[4..8],
@truncate(@as(i64, @intCast(fde.out_offset + 4)) - @as(i64, @intCast(fde.cie(elf_file).out_offset))),
@truncate(@as(i64, @intCast(fde.out_offset + 4)) - @as(i64, @intCast(fde.cie(object).out_offset))),
.little,
);
for (fde.relocs(elf_file)) |rel| {
for (fde.relocs(object)) |rel| {
const ref = object.resolveSymbol(rel.r_sym(), elf_file);
const sym = elf_file.symbol(ref).?;
resolveReloc(fde, sym, rel, elf_file, contents) catch |err| switch (err) {
@ -412,12 +409,12 @@ pub fn writeEhFrameRelocatable(elf_file: *Elf, writer: anytype) !void {
for (object.fdes.items) |fde| {
if (!fde.alive) continue;
const contents = fde.data(elf_file);
const contents = fde.data(object);
std.mem.writeInt(
i32,
contents[4..8],
@truncate(@as(i64, @intCast(fde.out_offset + 4)) - @as(i64, @intCast(fde.cie(elf_file).out_offset))),
@truncate(@as(i64, @intCast(fde.out_offset + 4)) - @as(i64, @intCast(fde.cie(object).out_offset))),
.little,
);
@ -490,7 +487,7 @@ pub fn writeEhFrameRelocs(elf_file: *Elf, writer: anytype) !void {
for (object.fdes.items) |fde| {
if (!fde.alive) continue;
for (fde.relocs(elf_file)) |rel| {
for (fde.relocs(object)) |rel| {
const ref = object.resolveSymbol(rel.r_sym(), elf_file);
const sym = elf_file.symbol(ref).?;
const r_offset = fde.address(elf_file) + rel.r_offset - fde.offset;
@ -548,7 +545,7 @@ pub fn writeEhFrameHdr(elf_file: *Elf, writer: anytype) !void {
for (object.fdes.items) |fde| {
if (!fde.alive) continue;
const relocs = fde.relocs(elf_file);
const relocs = fde.relocs(object);
assert(relocs.len > 0); // Should this be an error? Things are completely broken anyhow if this trips...
const rel = relocs[0];
const ref = object.resolveSymbol(rel.r_sym(), elf_file);

View File

@ -279,8 +279,8 @@ pub const File = union(enum) {
pub const Index = u32;
pub const Entry = union(enum) {
null: void,
zig_object: ZigObject,
null,
zig_object,
linker_defined: LinkerDefined,
object: Object,
shared_object: SharedObject,

View File

@ -103,15 +103,20 @@ fn markLive(atom: *Atom, elf_file: *Elf) void {
assert(atom.visited);
const file = atom.file(elf_file).?;
for (atom.fdes(elf_file)) |fde| {
for (fde.relocs(elf_file)[1..]) |rel| {
const ref = file.resolveSymbol(rel.r_sym(), elf_file);
const target_sym = elf_file.symbol(ref) orelse continue;
const target_atom = target_sym.atom(elf_file) orelse continue;
target_atom.alive = true;
gc_track_live_log.debug("{}marking live atom({d})", .{ track_live_level, target_atom.atom_index });
if (markAtom(target_atom)) markLive(target_atom, elf_file);
}
switch (file) {
.object => |object| {
for (atom.fdes(object)) |fde| {
for (fde.relocs(object)[1..]) |rel| {
const ref = file.resolveSymbol(rel.r_sym(), elf_file);
const target_sym = elf_file.symbol(ref) orelse continue;
const target_atom = target_sym.atom(elf_file) orelse continue;
target_atom.alive = true;
gc_track_live_log.debug("{}marking live atom({d})", .{ track_live_level, target_atom.atom_index });
if (markAtom(target_atom)) markLive(target_atom, elf_file);
}
}
},
else => {},
}
for (atom.relocs(elf_file)) |rel| {
@ -135,23 +140,25 @@ fn mark(roots: std.ArrayList(*Atom), elf_file: *Elf) void {
}
}
fn prune(elf_file: *Elf) void {
const pruneInFile = struct {
fn pruneInFile(file: File, ef: *Elf) void {
for (file.atoms()) |atom_index| {
const atom = file.atom(atom_index) orelse continue;
if (atom.alive and !atom.visited) {
atom.alive = false;
atom.markFdesDead(ef);
}
fn pruneInFile(file: File) void {
for (file.atoms()) |atom_index| {
const atom = file.atom(atom_index) orelse continue;
if (atom.alive and !atom.visited) {
atom.alive = false;
switch (file) {
.object => |object| atom.markFdesDead(object),
else => {},
}
}
}.pruneInFile;
}
}
fn prune(elf_file: *Elf) void {
if (elf_file.zigObjectPtr()) |zo| {
pruneInFile(zo.asFile(), elf_file);
pruneInFile(zo.asFile());
}
for (elf_file.objects.items) |index| {
pruneInFile(elf_file.file(index).?, elf_file);
pruneInFile(elf_file.file(index).?);
}
}

View File

@ -1,27 +1,7 @@
pub fn flushStaticLib(elf_file: *Elf, comp: *Compilation, module_obj_path: ?Path) link.File.FlushError!void {
pub fn flushStaticLib(elf_file: *Elf, comp: *Compilation) link.File.FlushError!void {
const gpa = comp.gpa;
const diags = &comp.link_diags;
for (comp.objects) |obj| {
switch (Compilation.classifyFileExt(obj.path.sub_path)) {
.object => parseObjectStaticLibReportingFailure(elf_file, obj.path),
.static_library => parseArchiveStaticLibReportingFailure(elf_file, obj.path),
else => diags.addParseError(obj.path, "unrecognized file extension", .{}),
}
}
for (comp.c_object_table.keys()) |key| {
parseObjectStaticLibReportingFailure(elf_file, key.status.success.object_path);
}
if (module_obj_path) |path| {
parseObjectStaticLibReportingFailure(elf_file, path);
}
if (comp.include_compiler_rt) {
parseObjectStaticLibReportingFailure(elf_file, comp.compiler_rt_obj.?.full_object_path);
}
if (diags.hasErrors()) return error.FlushFailure;
// First, we flush relocatable object file generated with our backends.
@ -150,22 +130,9 @@ pub fn flushStaticLib(elf_file: *Elf, comp: *Compilation, module_obj_path: ?Path
if (diags.hasErrors()) return error.FlushFailure;
}
pub fn flushObject(elf_file: *Elf, comp: *Compilation, module_obj_path: ?Path) link.File.FlushError!void {
pub fn flushObject(elf_file: *Elf, comp: *Compilation) link.File.FlushError!void {
const diags = &comp.link_diags;
for (comp.objects) |obj| {
elf_file.parseInputReportingFailure(obj.path, false, obj.must_link);
}
// This is a set of object files emitted by clang in a single `build-exe` invocation.
// For instance, the implicit `a.o` as compiled by `zig build-exe a.c` will end up
// in this set.
for (comp.c_object_table.keys()) |key| {
elf_file.parseObjectReportingFailure(key.status.success.object_path);
}
if (module_obj_path) |path| elf_file.parseObjectReportingFailure(path);
if (diags.hasErrors()) return error.FlushFailure;
// Now, we are ready to resolve the symbols across all input files.
@ -215,64 +182,6 @@ pub fn flushObject(elf_file: *Elf, comp: *Compilation, module_obj_path: ?Path) l
if (diags.hasErrors()) return error.FlushFailure;
}
fn parseObjectStaticLibReportingFailure(elf_file: *Elf, path: Path) void {
const diags = &elf_file.base.comp.link_diags;
parseObjectStaticLib(elf_file, path) catch |err| switch (err) {
error.LinkFailure => return,
else => |e| diags.addParseError(path, "parsing object failed: {s}", .{@errorName(e)}),
};
}
fn parseArchiveStaticLibReportingFailure(elf_file: *Elf, path: Path) void {
const diags = &elf_file.base.comp.link_diags;
parseArchiveStaticLib(elf_file, path) catch |err| switch (err) {
error.LinkFailure => return,
else => |e| diags.addParseError(path, "parsing static library failed: {s}", .{@errorName(e)}),
};
}
fn parseObjectStaticLib(elf_file: *Elf, path: Path) Elf.ParseError!void {
const gpa = elf_file.base.comp.gpa;
const handle = try path.root_dir.handle.openFile(path.sub_path, .{});
const fh = try elf_file.addFileHandle(handle);
const index: File.Index = @intCast(try elf_file.files.addOne(gpa));
elf_file.files.set(index, .{ .object = .{
.path = .{
.root_dir = path.root_dir,
.sub_path = try gpa.dupe(u8, path.sub_path),
},
.file_handle = fh,
.index = index,
} });
try elf_file.objects.append(gpa, index);
const object = elf_file.file(index).?.object;
try object.parseAr(elf_file);
}
fn parseArchiveStaticLib(elf_file: *Elf, path: Path) Elf.ParseError!void {
const gpa = elf_file.base.comp.gpa;
const handle = try path.root_dir.handle.openFile(path.sub_path, .{});
const fh = try elf_file.addFileHandle(handle);
var archive = Archive{};
defer archive.deinit(gpa);
try archive.parse(elf_file, path, fh);
const objects = try archive.objects.toOwnedSlice(gpa);
defer gpa.free(objects);
for (objects) |extracted| {
const index = @as(File.Index, @intCast(try elf_file.files.addOne(gpa)));
elf_file.files.set(index, .{ .object = extracted });
const object = &elf_file.files.items(.data)[index].object;
object.index = index;
try object.parseAr(elf_file);
try elf_file.objects.append(gpa, index);
}
}
fn claimUnresolved(elf_file: *Elf) void {
if (elf_file.zigObjectPtr()) |zig_object| {
zig_object.claimUnresolvedRelocatable(elf_file);
@ -473,11 +382,12 @@ fn writeSyntheticSections(elf_file: *Elf) !void {
const SortRelocs = struct {
pub fn lessThan(ctx: void, lhs: elf.Elf64_Rela, rhs: elf.Elf64_Rela) bool {
_ = ctx;
assert(lhs.r_offset != rhs.r_offset);
return lhs.r_offset < rhs.r_offset;
}
};
mem.sort(elf.Elf64_Rela, relocs.items, {}, SortRelocs.lessThan);
mem.sortUnstable(elf.Elf64_Rela, relocs.items, {}, SortRelocs.lessThan);
log.debug("writing {s} from 0x{x} to 0x{x}", .{
elf_file.getShString(shdr.sh_name),

View File

@ -1,45 +1,47 @@
path: Path,
cpu_arch: ?std.Target.Cpu.Arch = null,
args: std.ArrayListUnmanaged(Arg) = .empty,
cpu_arch: ?std.Target.Cpu.Arch,
args: []const Arg,
pub const Arg = struct {
needed: bool = false,
path: []const u8,
};
pub fn deinit(scr: *LdScript, allocator: Allocator) void {
scr.args.deinit(allocator);
pub fn deinit(ls: *LdScript, gpa: Allocator) void {
gpa.free(ls.args);
ls.* = undefined;
}
pub const Error = error{
LinkFailure,
UnexpectedToken,
UnknownCpuArch,
OutOfMemory,
};
pub fn parse(scr: *LdScript, data: []const u8, elf_file: *Elf) Error!void {
const comp = elf_file.base.comp;
const gpa = comp.gpa;
const diags = &comp.link_diags;
pub fn parse(
gpa: Allocator,
diags: *Diags,
/// For error reporting.
path: Path,
data: []const u8,
) Error!LdScript {
var tokenizer = Tokenizer{ .source = data };
var tokens = std.ArrayList(Token).init(gpa);
defer tokens.deinit();
var line_col = std.ArrayList(LineColumn).init(gpa);
defer line_col.deinit();
var tokens: std.ArrayListUnmanaged(Token) = .empty;
defer tokens.deinit(gpa);
var line_col: std.ArrayListUnmanaged(LineColumn) = .empty;
defer line_col.deinit(gpa);
var line: usize = 0;
var prev_line_last_col: usize = 0;
while (true) {
const tok = tokenizer.next();
try tokens.append(tok);
try tokens.append(gpa, tok);
const column = tok.start - prev_line_last_col;
try line_col.append(.{ .line = line, .column = column });
try line_col.append(gpa, .{ .line = line, .column = column });
switch (tok.id) {
.invalid => {
return diags.failParse(scr.path, "invalid token in LD script: '{s}' ({d}:{d})", .{
return diags.failParse(path, "invalid token in LD script: '{s}' ({d}:{d})", .{
std.fmt.fmtSliceEscapeLower(tok.get(data)), line, column,
});
},
@ -52,18 +54,22 @@ pub fn parse(scr: *LdScript, data: []const u8, elf_file: *Elf) Error!void {
}
}
var it = TokenIterator{ .tokens = tokens.items };
var parser = Parser{ .source = data, .it = &it };
var args = std.ArrayList(Arg).init(gpa);
scr.doParse(.{
.parser = &parser,
.args = &args,
}) catch |err| switch (err) {
var it: TokenIterator = .{ .tokens = tokens.items };
var parser: Parser = .{
.gpa = gpa,
.source = data,
.it = &it,
.args = .empty,
.cpu_arch = null,
};
defer parser.args.deinit(gpa);
parser.start() catch |err| switch (err) {
error.UnexpectedToken => {
const last_token_id = parser.it.pos - 1;
const last_token = parser.it.get(last_token_id);
const lcol = line_col.items[last_token_id];
return diags.failParse(scr.path, "unexpected token in LD script: {s}: '{s}' ({d}:{d})", .{
return diags.failParse(path, "unexpected token in LD script: {s}: '{s}' ({d}:{d})", .{
@tagName(last_token.id),
last_token.get(data),
lcol.line,
@ -72,30 +78,10 @@ pub fn parse(scr: *LdScript, data: []const u8, elf_file: *Elf) Error!void {
},
else => |e| return e,
};
scr.args = args.moveToUnmanaged();
}
fn doParse(scr: *LdScript, ctx: struct {
parser: *Parser,
args: *std.ArrayList(Arg),
}) !void {
while (true) {
ctx.parser.skipAny(&.{ .comment, .new_line });
if (ctx.parser.maybe(.command)) |cmd_id| {
const cmd = ctx.parser.getCommand(cmd_id);
switch (cmd) {
.output_format => scr.cpu_arch = try ctx.parser.outputFormat(),
// TODO we should verify that group only contains libraries
.input, .group => try ctx.parser.group(ctx.args),
else => return error.UnexpectedToken,
}
} else break;
}
if (ctx.parser.it.next()) |tok| switch (tok.id) {
.eof => {},
else => return error.UnexpectedToken,
return .{
.path = path,
.cpu_arch = parser.cpu_arch,
.args = try parser.args.toOwnedSlice(gpa),
};
}
@ -126,9 +112,34 @@ const Command = enum {
};
const Parser = struct {
gpa: Allocator,
source: []const u8,
it: *TokenIterator,
cpu_arch: ?std.Target.Cpu.Arch,
args: std.ArrayListUnmanaged(Arg),
fn start(parser: *Parser) !void {
while (true) {
parser.skipAny(&.{ .comment, .new_line });
if (parser.maybe(.command)) |cmd_id| {
const cmd = parser.getCommand(cmd_id);
switch (cmd) {
.output_format => parser.cpu_arch = try parser.outputFormat(),
// TODO we should verify that group only contains libraries
.input, .group => try parser.group(),
else => return error.UnexpectedToken,
}
} else break;
}
if (parser.it.next()) |tok| switch (tok.id) {
.eof => {},
else => return error.UnexpectedToken,
};
}
fn outputFormat(p: *Parser) !std.Target.Cpu.Arch {
const value = value: {
if (p.skip(&.{.lparen})) {
@ -149,18 +160,19 @@ const Parser = struct {
return error.UnknownCpuArch;
}
fn group(p: *Parser, args: *std.ArrayList(Arg)) !void {
fn group(p: *Parser) !void {
const gpa = p.gpa;
if (!p.skip(&.{.lparen})) return error.UnexpectedToken;
while (true) {
if (p.maybe(.literal)) |tok_id| {
const tok = p.it.get(tok_id);
const path = tok.get(p.source);
try args.append(.{ .path = path, .needed = true });
try p.args.append(gpa, .{ .path = path, .needed = true });
} else if (p.maybe(.command)) |cmd_id| {
const cmd = p.getCommand(cmd_id);
switch (cmd) {
.as_needed => try p.asNeeded(args),
.as_needed => try p.asNeeded(),
else => return error.UnexpectedToken,
}
} else break;
@ -169,13 +181,14 @@ const Parser = struct {
_ = try p.require(.rparen);
}
fn asNeeded(p: *Parser, args: *std.ArrayList(Arg)) !void {
fn asNeeded(p: *Parser) !void {
const gpa = p.gpa;
if (!p.skip(&.{.lparen})) return error.UnexpectedToken;
while (p.maybe(.literal)) |tok_id| {
const tok = p.it.get(tok_id);
const path = tok.get(p.source);
try args.append(.{ .path = path, .needed = false });
try p.args.append(gpa, .{ .path = path, .needed = false });
}
_ = try p.require(.rparen);
@ -227,21 +240,19 @@ const Token = struct {
end: usize,
const Id = enum {
// zig fmt: off
eof,
invalid,
new_line,
lparen, // (
rparen, // )
lbrace, // {
rbrace, // }
lparen, // (
rparen, // )
lbrace, // {
rbrace, // }
comment, // /* */
comment, // /* */
command, // literal with special meaning, see Command
command, // literal with special meaning, see Command
literal,
// zig fmt: on
};
const Index = usize;
@ -430,10 +441,9 @@ const TokenIterator = struct {
};
const LdScript = @This();
const Diags = @import("../link.zig").Diags;
const std = @import("std");
const assert = std.debug.assert;
const Path = std.Build.Cache.Path;
const Allocator = std.mem.Allocator;
const Elf = @import("../Elf.zig");

View File

@ -1,3 +1,7 @@
pub const Atom = @import("MachO/Atom.zig");
pub const DebugSymbols = @import("MachO/DebugSymbols.zig");
pub const Relocation = @import("MachO/Relocation.zig");
base: link.File,
rpath_list: []const []const u8,
@ -114,8 +118,8 @@ headerpad_max_install_names: bool,
dead_strip_dylibs: bool,
/// Treatment of undefined symbols
undefined_treatment: UndefinedTreatment,
/// Resolved list of library search directories
lib_dirs: []const []const u8,
/// TODO: delete this, libraries need to be resolved by the frontend instead
lib_directories: []const Directory,
/// Resolved list of framework search directories
framework_dirs: []const []const u8,
/// List of input frameworks
@ -213,7 +217,8 @@ pub fn createEmpty(
.platform = Platform.fromTarget(target),
.sdk_version = if (options.darwin_sdk_layout) |layout| inferSdkVersion(comp, layout) else null,
.undefined_treatment = if (allow_shlib_undefined) .dynamic_lookup else .@"error",
.lib_dirs = options.lib_dirs,
// TODO delete this, directories must instead be resolved by the frontend
.lib_directories = options.lib_directories,
.framework_dirs = options.framework_dirs,
.force_load_objc = options.force_load_objc,
};
@ -371,48 +376,44 @@ pub fn flushModule(self: *MachO, arena: Allocator, tid: Zcu.PerThread.Id, prog_n
if (self.base.isStaticLib()) return relocatable.flushStaticLib(self, comp, module_obj_path);
if (self.base.isObject()) return relocatable.flushObject(self, comp, module_obj_path);
var positionals = std.ArrayList(Compilation.LinkObject).init(gpa);
var positionals = std.ArrayList(link.Input).init(gpa);
defer positionals.deinit();
try positionals.ensureUnusedCapacity(comp.objects.len);
positionals.appendSliceAssumeCapacity(comp.objects);
try positionals.ensureUnusedCapacity(comp.link_inputs.len);
for (comp.link_inputs) |link_input| switch (link_input) {
.dso => continue, // handled below
.object, .archive => positionals.appendAssumeCapacity(link_input),
.dso_exact => @panic("TODO"),
.res => unreachable,
};
// This is a set of object files emitted by clang in a single `build-exe` invocation.
// For instance, the implicit `a.o` as compiled by `zig build-exe a.c` will end up
// in this set.
try positionals.ensureUnusedCapacity(comp.c_object_table.keys().len);
for (comp.c_object_table.keys()) |key| {
positionals.appendAssumeCapacity(.{ .path = key.status.success.object_path });
positionals.appendAssumeCapacity(try link.openObjectInput(diags, key.status.success.object_path));
}
if (module_obj_path) |path| try positionals.append(.{ .path = path });
if (module_obj_path) |path| try positionals.append(try link.openObjectInput(diags, path));
if (comp.config.any_sanitize_thread) {
try positionals.append(.{ .path = comp.tsan_lib.?.full_object_path });
try positionals.append(try link.openObjectInput(diags, comp.tsan_lib.?.full_object_path));
}
if (comp.config.any_fuzz) {
try positionals.append(.{ .path = comp.fuzzer_lib.?.full_object_path });
try positionals.append(try link.openObjectInput(diags, comp.fuzzer_lib.?.full_object_path));
}
for (positionals.items) |obj| {
self.classifyInputFile(obj.path, .{ .path = obj.path }, obj.must_link) catch |err|
diags.addParseError(obj.path, "failed to read input file: {s}", .{@errorName(err)});
for (positionals.items) |link_input| {
self.classifyInputFile(link_input) catch |err|
diags.addParseError(link_input.path().?, "failed to read input file: {s}", .{@errorName(err)});
}
var system_libs = std.ArrayList(SystemLib).init(gpa);
defer system_libs.deinit();
// libs
try system_libs.ensureUnusedCapacity(comp.system_libs.values().len);
for (comp.system_libs.values()) |info| {
system_libs.appendAssumeCapacity(.{
.needed = info.needed,
.weak = info.weak,
.path = info.path.?,
});
}
// frameworks
try system_libs.ensureUnusedCapacity(self.frameworks.len);
for (self.frameworks) |info| {
@ -436,20 +437,40 @@ pub fn flushModule(self: *MachO, arena: Allocator, tid: Zcu.PerThread.Id, prog_n
else => |e| return e, // TODO: convert into an error
};
for (comp.link_inputs) |link_input| switch (link_input) {
.object, .archive, .dso_exact => continue,
.res => unreachable,
.dso => {
self.classifyInputFile(link_input) catch |err|
diags.addParseError(link_input.path().?, "failed to parse input file: {s}", .{@errorName(err)});
},
};
for (system_libs.items) |lib| {
self.classifyInputFile(lib.path, lib, false) catch |err|
diags.addParseError(lib.path, "failed to parse input file: {s}", .{@errorName(err)});
switch (Compilation.classifyFileExt(lib.path.sub_path)) {
.shared_library => {
const dso_input = try link.openDsoInput(diags, lib.path, lib.needed, lib.weak, lib.reexport);
self.classifyInputFile(dso_input) catch |err|
diags.addParseError(lib.path, "failed to parse input file: {s}", .{@errorName(err)});
},
.static_library => {
const archive_input = try link.openArchiveInput(diags, lib.path, lib.must_link, lib.hidden);
self.classifyInputFile(archive_input) catch |err|
diags.addParseError(lib.path, "failed to parse input file: {s}", .{@errorName(err)});
},
else => unreachable,
}
}
// Finally, link against compiler_rt.
const compiler_rt_path: ?Path = blk: {
if (comp.compiler_rt_lib) |x| break :blk x.full_object_path;
if (comp.compiler_rt_obj) |x| break :blk x.full_object_path;
break :blk null;
};
if (compiler_rt_path) |path| {
self.classifyInputFile(path, .{ .path = path }, false) catch |err|
diags.addParseError(path, "failed to parse input file: {s}", .{@errorName(err)});
if (comp.compiler_rt_lib) |crt_file| {
const path = crt_file.full_object_path;
self.classifyInputFile(try link.openArchiveInput(diags, path, false, false)) catch |err|
diags.addParseError(path, "failed to parse archive: {s}", .{@errorName(err)});
} else if (comp.compiler_rt_obj) |crt_file| {
const path = crt_file.full_object_path;
self.classifyInputFile(try link.openObjectInput(diags, path)) catch |err|
diags.addParseError(path, "failed to parse archive: {s}", .{@errorName(err)});
}
try self.parseInputFiles();
@ -596,9 +617,12 @@ fn dumpArgv(self: *MachO, comp: *Compilation) !void {
}
if (self.base.isRelocatable()) {
for (comp.objects) |obj| {
try argv.append(try obj.path.toString(arena));
}
for (comp.link_inputs) |link_input| switch (link_input) {
.object, .archive => |obj| try argv.append(try obj.path.toString(arena)),
.res => |res| try argv.append(try res.path.toString(arena)),
.dso => |dso| try argv.append(try dso.path.toString(arena)),
.dso_exact => |dso_exact| try argv.appendSlice(&.{ "-l", dso_exact.name }),
};
for (comp.c_object_table.keys()) |key| {
try argv.append(try key.status.success.object_path.toString(arena));
@ -678,13 +702,15 @@ fn dumpArgv(self: *MachO, comp: *Compilation) !void {
try argv.append("dynamic_lookup");
}
for (comp.objects) |obj| {
// TODO: verify this
if (obj.must_link) {
try argv.append("-force_load");
}
try argv.append(try obj.path.toString(arena));
}
for (comp.link_inputs) |link_input| switch (link_input) {
.dso => continue, // handled below
.res => unreachable, // windows only
.object, .archive => |obj| {
if (obj.must_link) try argv.append("-force_load"); // TODO: verify this
try argv.append(try obj.path.toString(arena));
},
.dso_exact => |dso_exact| try argv.appendSlice(&.{ "-l", dso_exact.name }),
};
for (comp.c_object_table.keys()) |key| {
try argv.append(try key.status.success.object_path.toString(arena));
@ -703,21 +729,25 @@ fn dumpArgv(self: *MachO, comp: *Compilation) !void {
try argv.append(try comp.fuzzer_lib.?.full_object_path.toString(arena));
}
for (self.lib_dirs) |lib_dir| {
const arg = try std.fmt.allocPrint(arena, "-L{s}", .{lib_dir});
for (self.lib_directories) |lib_directory| {
// TODO delete this, directories must instead be resolved by the frontend
const arg = try std.fmt.allocPrint(arena, "-L{s}", .{lib_directory.path orelse "."});
try argv.append(arg);
}
for (comp.system_libs.keys()) |l_name| {
const info = comp.system_libs.get(l_name).?;
const arg = if (info.needed)
try std.fmt.allocPrint(arena, "-needed-l{s}", .{l_name})
else if (info.weak)
try std.fmt.allocPrint(arena, "-weak-l{s}", .{l_name})
else
try std.fmt.allocPrint(arena, "-l{s}", .{l_name});
try argv.append(arg);
}
for (comp.link_inputs) |link_input| switch (link_input) {
.object, .archive, .dso_exact => continue, // handled above
.res => unreachable, // windows only
.dso => |dso| {
if (dso.needed) {
try argv.appendSlice(&.{ "-needed-l", try dso.path.toString(arena) });
} else if (dso.weak) {
try argv.appendSlice(&.{ "-weak-l", try dso.path.toString(arena) });
} else {
try argv.appendSlice(&.{ "-l", try dso.path.toString(arena) });
}
},
};
for (self.framework_dirs) |f_dir| {
try argv.append("-F");
@ -751,6 +781,7 @@ fn dumpArgv(self: *MachO, comp: *Compilation) !void {
Compilation.dump_argv(argv.items);
}
/// TODO delete this, libsystem must be resolved when setting up the compilation pipeline
pub fn resolveLibSystem(
self: *MachO,
arena: Allocator,
@ -774,8 +805,8 @@ pub fn resolveLibSystem(
},
};
for (self.lib_dirs) |dir| {
if (try accessLibPath(arena, &test_path, &checked_paths, dir, "System")) break :success;
for (self.lib_directories) |directory| {
if (try accessLibPath(arena, &test_path, &checked_paths, directory.path orelse ".", "System")) break :success;
}
diags.addMissingLibraryError(checked_paths.items, "unable to find libSystem system library", .{});
@ -789,13 +820,14 @@ pub fn resolveLibSystem(
});
}
pub fn classifyInputFile(self: *MachO, path: Path, lib: SystemLib, must_link: bool) !void {
pub fn classifyInputFile(self: *MachO, input: link.Input) !void {
const tracy = trace(@src());
defer tracy.end();
const path, const file = input.pathAndFile().?;
// TODO don't classify now, it's too late. The input file has already been classified
log.debug("classifying input file {}", .{path});
const file = try path.root_dir.handle.openFile(path.sub_path, .{});
const fh = try self.addFileHandle(file);
var buffer: [Archive.SARMAG]u8 = undefined;
@ -806,17 +838,17 @@ pub fn classifyInputFile(self: *MachO, path: Path, lib: SystemLib, must_link: bo
if (h.magic != macho.MH_MAGIC_64) break :blk;
switch (h.filetype) {
macho.MH_OBJECT => try self.addObject(path, fh, offset),
macho.MH_DYLIB => _ = try self.addDylib(lib, true, fh, offset),
macho.MH_DYLIB => _ = try self.addDylib(.fromLinkInput(input), true, fh, offset),
else => return error.UnknownFileType,
}
return;
}
if (readArMagic(file, offset, &buffer) catch null) |ar_magic| blk: {
if (!mem.eql(u8, ar_magic, Archive.ARMAG)) break :blk;
try self.addArchive(lib, must_link, fh, fat_arch);
try self.addArchive(input.archive, fh, fat_arch);
return;
}
_ = try self.addTbd(lib, true, fh);
_ = try self.addTbd(.fromLinkInput(input), true, fh);
}
fn parseFatFile(self: *MachO, file: std.fs.File, path: Path) !?fat.Arch {
@ -903,7 +935,7 @@ fn parseInputFileWorker(self: *MachO, file: File) void {
};
}
fn addArchive(self: *MachO, lib: SystemLib, must_link: bool, handle: File.HandleIndex, fat_arch: ?fat.Arch) !void {
fn addArchive(self: *MachO, lib: link.Input.Object, handle: File.HandleIndex, fat_arch: ?fat.Arch) !void {
const tracy = trace(@src());
defer tracy.end();
@ -918,7 +950,7 @@ fn addArchive(self: *MachO, lib: SystemLib, must_link: bool, handle: File.Handle
self.files.set(index, .{ .object = unpacked });
const object = &self.files.items(.data)[index].object;
object.index = index;
object.alive = must_link or lib.needed; // TODO: or self.options.all_load;
object.alive = lib.must_link; // TODO: or self.options.all_load;
object.hidden = lib.hidden;
try self.objects.append(gpa, index);
}
@ -993,6 +1025,7 @@ fn isHoisted(self: *MachO, install_name: []const u8) bool {
return false;
}
/// TODO delete this, libraries must be instead resolved when instantiating the compilation pipeline
fn accessLibPath(
arena: Allocator,
test_path: *std.ArrayList(u8),
@ -1051,9 +1084,11 @@ fn parseDependentDylibs(self: *MachO) !void {
if (self.dylibs.items.len == 0) return;
const gpa = self.base.comp.gpa;
const lib_dirs = self.lib_dirs;
const framework_dirs = self.framework_dirs;
// TODO delete this, directories must instead be resolved by the frontend
const lib_directories = self.lib_directories;
var arena_alloc = std.heap.ArenaAllocator.init(gpa);
defer arena_alloc.deinit();
const arena = arena_alloc.allocator();
@ -1094,9 +1129,9 @@ fn parseDependentDylibs(self: *MachO) !void {
// Library
const lib_name = eatPrefix(stem, "lib") orelse stem;
for (lib_dirs) |dir| {
for (lib_directories) |lib_directory| {
test_path.clearRetainingCapacity();
if (try accessLibPath(arena, &test_path, &checked_paths, dir, lib_name)) break :full_path test_path.items;
if (try accessLibPath(arena, &test_path, &checked_paths, lib_directory.path orelse ".", lib_name)) break :full_path test_path.items;
}
}
@ -4366,6 +4401,24 @@ const SystemLib = struct {
hidden: bool = false,
reexport: bool = false,
must_link: bool = false,
/// Translates a frontend-resolved `link.Input` into the MachO-local
/// `SystemLib` representation, carrying over only the flags each input
/// kind actually has.
fn fromLinkInput(link_input: link.Input) SystemLib {
    switch (link_input) {
        // Forbidden by the frontend for MachO links; `.res` is Windows-only.
        .dso_exact, .res => unreachable,
        .object, .archive => |obj| return .{
            .path = obj.path,
            .must_link = obj.must_link,
            .hidden = obj.hidden,
        },
        .dso => |dso| return .{
            .path = dso.path,
            .needed = dso.needed,
            .weak = dso.weak,
            .reexport = dso.reexport,
        },
    }
}
};
pub const SdkLayout = std.zig.LibCDirs.DarwinSdkLayout;
@ -5303,17 +5356,16 @@ const Air = @import("../Air.zig");
const Alignment = Atom.Alignment;
const Allocator = mem.Allocator;
const Archive = @import("MachO/Archive.zig");
pub const Atom = @import("MachO/Atom.zig");
const AtomicBool = std.atomic.Value(bool);
const Bind = bind.Bind;
const Cache = std.Build.Cache;
const Path = Cache.Path;
const CodeSignature = @import("MachO/CodeSignature.zig");
const Compilation = @import("../Compilation.zig");
const DataInCode = synthetic.DataInCode;
pub const DebugSymbols = @import("MachO/DebugSymbols.zig");
const Directory = Cache.Directory;
const Dylib = @import("MachO/Dylib.zig");
const ExportTrie = @import("MachO/dyld_info/Trie.zig");
const Path = Cache.Path;
const File = @import("MachO/file.zig").File;
const GotSection = synthetic.GotSection;
const Hash = std.hash.Wyhash;
@ -5329,7 +5381,6 @@ const Md5 = std.crypto.hash.Md5;
const Zcu = @import("../Zcu.zig");
const InternPool = @import("../InternPool.zig");
const Rebase = @import("MachO/dyld_info/Rebase.zig");
pub const Relocation = @import("MachO/Relocation.zig");
const StringTable = @import("StringTable.zig");
const StubsSection = synthetic.StubsSection;
const StubsHelperSection = synthetic.StubsHelperSection;

View File

@ -3,16 +3,16 @@ pub fn flushObject(macho_file: *MachO, comp: *Compilation, module_obj_path: ?Pat
const diags = &macho_file.base.comp.link_diags;
// TODO: "positional arguments" is a CLI concept, not a linker concept. Delete this unnecessary array list.
var positionals = std.ArrayList(Compilation.LinkObject).init(gpa);
var positionals = std.ArrayList(link.Input).init(gpa);
defer positionals.deinit();
try positionals.ensureUnusedCapacity(comp.objects.len);
positionals.appendSliceAssumeCapacity(comp.objects);
try positionals.ensureUnusedCapacity(comp.link_inputs.len);
positionals.appendSliceAssumeCapacity(comp.link_inputs);
for (comp.c_object_table.keys()) |key| {
try positionals.append(.{ .path = key.status.success.object_path });
try positionals.append(try link.openObjectInput(diags, key.status.success.object_path));
}
if (module_obj_path) |path| try positionals.append(.{ .path = path });
if (module_obj_path) |path| try positionals.append(try link.openObjectInput(diags, path));
if (macho_file.getZigObject() == null and positionals.items.len == 1) {
// Instead of invoking a full-blown `-r` mode on the input which sadly will strip all
@ -20,7 +20,7 @@ pub fn flushObject(macho_file: *MachO, comp: *Compilation, module_obj_path: ?Pat
// the *only* input file over.
// TODO: in the future, when we implement `dsymutil` alternative directly in the Zig
// compiler, investigate if we can get rid of this `if` prong here.
const path = positionals.items[0].path;
const path = positionals.items[0].path().?;
const in_file = try path.root_dir.handle.openFile(path.sub_path, .{});
const stat = try in_file.stat();
const amt = try in_file.copyRangeAll(0, macho_file.base.file.?, 0, stat.size);
@ -28,9 +28,9 @@ pub fn flushObject(macho_file: *MachO, comp: *Compilation, module_obj_path: ?Pat
return;
}
for (positionals.items) |obj| {
macho_file.classifyInputFile(obj.path, .{ .path = obj.path }, obj.must_link) catch |err|
diags.addParseError(obj.path, "failed to read input file: {s}", .{@errorName(err)});
for (positionals.items) |link_input| {
macho_file.classifyInputFile(link_input) catch |err|
diags.addParseError(link_input.path().?, "failed to read input file: {s}", .{@errorName(err)});
}
if (diags.hasErrors()) return error.FlushFailure;
@ -72,25 +72,25 @@ pub fn flushStaticLib(macho_file: *MachO, comp: *Compilation, module_obj_path: ?
const gpa = comp.gpa;
const diags = &macho_file.base.comp.link_diags;
var positionals = std.ArrayList(Compilation.LinkObject).init(gpa);
var positionals = std.ArrayList(link.Input).init(gpa);
defer positionals.deinit();
try positionals.ensureUnusedCapacity(comp.objects.len);
positionals.appendSliceAssumeCapacity(comp.objects);
try positionals.ensureUnusedCapacity(comp.link_inputs.len);
positionals.appendSliceAssumeCapacity(comp.link_inputs);
for (comp.c_object_table.keys()) |key| {
try positionals.append(.{ .path = key.status.success.object_path });
try positionals.append(try link.openObjectInput(diags, key.status.success.object_path));
}
if (module_obj_path) |path| try positionals.append(.{ .path = path });
if (module_obj_path) |path| try positionals.append(try link.openObjectInput(diags, path));
if (comp.include_compiler_rt) {
try positionals.append(.{ .path = comp.compiler_rt_obj.?.full_object_path });
try positionals.append(try link.openObjectInput(diags, comp.compiler_rt_obj.?.full_object_path));
}
for (positionals.items) |obj| {
macho_file.classifyInputFile(obj.path, .{ .path = obj.path }, obj.must_link) catch |err|
diags.addParseError(obj.path, "failed to read input file: {s}", .{@errorName(err)});
for (positionals.items) |link_input| {
macho_file.classifyInputFile(link_input) catch |err|
diags.addParseError(link_input.path().?, "failed to read input file: {s}", .{@errorName(err)});
}
if (diags.hasErrors()) return error.FlushFailure;
@ -745,20 +745,15 @@ fn writeHeader(macho_file: *MachO, ncmds: usize, sizeofcmds: usize) !void {
try macho_file.base.file.?.pwriteAll(mem.asBytes(&header), 0);
}
const std = @import("std");
const Path = std.Build.Cache.Path;
const WaitGroup = std.Thread.WaitGroup;
const assert = std.debug.assert;
const build_options = @import("build_options");
const eh_frame = @import("eh_frame.zig");
const fat = @import("fat.zig");
const link = @import("../../link.zig");
const load_commands = @import("load_commands.zig");
const log = std.log.scoped(.link);
const macho = std.macho;
const math = std.math;
const mem = std.mem;
const state_log = std.log.scoped(.link_state);
const std = @import("std");
const trace = @import("../../tracy.zig").trace;
const Path = std.Build.Cache.Path;
const Archive = @import("Archive.zig");
const Atom = @import("Atom.zig");
@ -767,3 +762,9 @@ const File = @import("file.zig").File;
const MachO = @import("../MachO.zig");
const Object = @import("Object.zig");
const Symbol = @import("Symbol.zig");
const build_options = @import("build_options");
const eh_frame = @import("eh_frame.zig");
const fat = @import("fat.zig");
const link = @import("../../link.zig");
const load_commands = @import("load_commands.zig");
const trace = @import("../../tracy.zig").trace;

View File

@ -637,14 +637,6 @@ fn createSyntheticSymbolOffset(wasm: *Wasm, name_offset: u32, tag: Symbol.Tag) !
return loc;
}
fn parseInputFiles(wasm: *Wasm, files: []const []const u8) !void {
for (files) |path| {
if (try wasm.parseObjectFile(path)) continue;
if (try wasm.parseArchive(path, false)) continue; // load archives lazily
log.warn("Unexpected file format at path: '{s}'", .{path});
}
}
/// Parses the object file from given path. Returns true when the given file was an object
/// file and parsed successfully. Returns false when file is not an object file.
/// May return an error instead when parsing failed.
@ -2522,7 +2514,7 @@ pub fn flushModule(wasm: *Wasm, arena: Allocator, tid: Zcu.PerThread.Id, prog_no
// Positional arguments to the linker such as object files and static archives.
// TODO: "positional arguments" is a CLI concept, not a linker concept. Delete this unnecessary array list.
var positionals = std.ArrayList([]const u8).init(arena);
try positionals.ensureUnusedCapacity(comp.objects.len);
try positionals.ensureUnusedCapacity(comp.link_inputs.len);
const target = comp.root_mod.resolved_target.result;
const output_mode = comp.config.output_mode;
@ -2566,9 +2558,12 @@ pub fn flushModule(wasm: *Wasm, arena: Allocator, tid: Zcu.PerThread.Id, prog_no
try positionals.append(path);
}
for (comp.objects) |object| {
try positionals.append(try object.path.toString(arena));
}
for (comp.link_inputs) |link_input| switch (link_input) {
.object, .archive => |obj| try positionals.append(try obj.path.toString(arena)),
.dso => |dso| try positionals.append(try dso.path.toString(arena)),
.dso_exact => unreachable, // forbidden by frontend
.res => unreachable, // windows only
};
for (comp.c_object_table.keys()) |c_object| {
try positionals.append(try c_object.status.success.object_path.toString(arena));
@ -2577,7 +2572,11 @@ pub fn flushModule(wasm: *Wasm, arena: Allocator, tid: Zcu.PerThread.Id, prog_no
if (comp.compiler_rt_lib) |lib| try positionals.append(try lib.full_object_path.toString(arena));
if (comp.compiler_rt_obj) |obj| try positionals.append(try obj.full_object_path.toString(arena));
try wasm.parseInputFiles(positionals.items);
for (positionals.items) |path| {
if (try wasm.parseObjectFile(path)) continue;
if (try wasm.parseArchive(path, false)) continue; // load archives lazily
log.warn("Unexpected file format at path: '{s}'", .{path});
}
if (wasm.zig_object_index != .null) {
try wasm.resolveSymbolsInObject(wasm.zig_object_index);
@ -3401,10 +3400,7 @@ fn linkWithLLD(wasm: *Wasm, arena: Allocator, tid: Zcu.PerThread.Id, prog_node:
comptime assert(Compilation.link_hash_implementation_version == 14);
for (comp.objects) |obj| {
_ = try man.addFilePath(obj.path, null);
man.hash.add(obj.must_link);
}
try link.hashInputs(&man, comp.link_inputs);
for (comp.c_object_table.keys()) |key| {
_ = try man.addFilePath(key.status.success.object_path, null);
}
@ -3458,8 +3454,7 @@ fn linkWithLLD(wasm: *Wasm, arena: Allocator, tid: Zcu.PerThread.Id, prog_node:
// here. TODO: think carefully about how we can avoid this redundant operation when doing
// build-obj. See also the corresponding TODO in linkAsArchive.
const the_object_path = blk: {
if (comp.objects.len != 0)
break :blk comp.objects[0].path;
if (link.firstObjectInput(comp.link_inputs)) |obj| break :blk obj.path;
if (comp.c_object_table.count() != 0)
break :blk comp.c_object_table.keys()[0].status.success.object_path;
@ -3621,16 +3616,23 @@ fn linkWithLLD(wasm: *Wasm, arena: Allocator, tid: Zcu.PerThread.Id, prog_node:
// Positional arguments to the linker such as object files.
var whole_archive = false;
for (comp.objects) |obj| {
if (obj.must_link and !whole_archive) {
try argv.append("-whole-archive");
whole_archive = true;
} else if (!obj.must_link and whole_archive) {
try argv.append("-no-whole-archive");
whole_archive = false;
}
try argv.append(try obj.path.toString(arena));
}
for (comp.link_inputs) |link_input| switch (link_input) {
.object, .archive => |obj| {
if (obj.must_link and !whole_archive) {
try argv.append("-whole-archive");
whole_archive = true;
} else if (!obj.must_link and whole_archive) {
try argv.append("-no-whole-archive");
whole_archive = false;
}
try argv.append(try obj.path.toString(arena));
},
.dso => |dso| {
try argv.append(try dso.path.toString(arena));
},
.dso_exact => unreachable,
.res => unreachable,
};
if (whole_archive) {
try argv.append("-no-whole-archive");
whole_archive = false;
@ -3643,11 +3645,8 @@ fn linkWithLLD(wasm: *Wasm, arena: Allocator, tid: Zcu.PerThread.Id, prog_node:
try argv.append(p);
}
if (comp.config.output_mode != .Obj and
!comp.skip_linker_dependencies and
!comp.config.link_libc)
{
try argv.append(try comp.libc_static_lib.?.full_object_path.toString(arena));
if (comp.libc_static_lib) |crt_file| {
try argv.append(try crt_file.full_object_path.toString(arena));
}
if (compiler_rt_path) |p| {

File diff suppressed because it is too large Load Diff

View File

@ -19,7 +19,7 @@ pub const CrtFile = enum {
libc_so,
};
pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progress.Node) !void {
pub fn buildCrtFile(comp: *Compilation, in_crt_file: CrtFile, prog_node: std.Progress.Node) !void {
if (!build_options.have_llvm) {
return error.ZigCompilerNotBuiltWithLLVMExtensions;
}
@ -28,7 +28,7 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
defer arena_allocator.deinit();
const arena = arena_allocator.allocator();
switch (crt_file) {
switch (in_crt_file) {
.crti_o => {
var args = std.ArrayList([]const u8).init(arena);
try addCcArgs(comp, arena, &args, false);
@ -195,8 +195,9 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
.libc_so => {
const optimize_mode = comp.compilerRtOptMode();
const strip = comp.compilerRtStrip();
const output_mode: std.builtin.OutputMode = .Lib;
const config = try Compilation.Config.resolve(.{
.output_mode = .Lib,
.output_mode = output_mode,
.link_mode = .dynamic,
.resolved_target = comp.root_mod.resolved_target,
.is_test = false,
@ -276,28 +277,39 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
try comp.updateSubCompilation(sub_compilation, .@"musl libc.so", prog_node);
try comp.crt_files.ensureUnusedCapacity(comp.gpa, 1);
const basename = try comp.gpa.dupe(u8, "libc.so");
errdefer comp.gpa.free(basename);
comp.crt_files.putAssumeCapacityNoClobber(basename, try sub_compilation.toCrtFile());
const crt_file = try sub_compilation.toCrtFile();
comp.queueLinkTaskMode(crt_file.full_object_path, output_mode);
{
comp.mutex.lock();
defer comp.mutex.unlock();
try comp.crt_files.ensureUnusedCapacity(comp.gpa, 1);
comp.crt_files.putAssumeCapacityNoClobber(basename, crt_file);
}
},
}
}
/// Return true if musl has arch-specific crti/crtn sources.
/// See lib/libc/musl/crt/ARCH/crt?.s .
pub fn needsCrtiCrtn(target: std.Target) bool {
    return switch (target.cpu.arch) {
        // These architectures ship no crti.s/crtn.s in musl's source tree.
        .riscv32, .riscv64, .wasm32, .wasm64 => false,
        .loongarch64 => false,
        else => true,
    };
}
/// Returns which musl C runtime startup object ("crt0") a link needs,
/// or null when none is required (objects and libraries have no entry
/// point and therefore need no startup object).
pub fn needsCrt0(output_mode: std.builtin.OutputMode, link_mode: std.builtin.LinkMode, pie: bool) ?CrtFile {
    switch (output_mode) {
        .Obj, .Lib => return null,
        .Exe => {
            // PIE executables use the position-independent variants:
            // Scrt1.o for dynamic links, rcrt1.o for static-PIE.
            if (pie) return switch (link_mode) {
                .dynamic => .scrt1_o,
                .static => .rcrt1_o,
            };
            return .crt1_o;
        },
    }
}
fn isMuslArchName(name: []const u8) bool {

View File

@ -1,4 +1,6 @@
const std = @import("std");
const assert = std.debug.assert;
const Type = @import("Type.zig");
const AddressSpace = std.builtin.AddressSpace;
const Alignment = @import("InternPool.zig").Alignment;
@ -284,40 +286,18 @@ pub fn hasRedZone(target: std.Target) bool {
/// Returns the full set of `-l` link flags a libc link requires for the
/// given target OS/ABI.
/// The linking order of these is significant and should match the order other
/// c compilers such as gcc or clang use.
pub fn libcFullLinkFlags(target: std.Target) []const []const u8 {
    const result: []const []const u8 = switch (target.os.tag) {
        .netbsd, .openbsd => &.{ "-lm", "-lpthread", "-lc", "-lutil" },
        // Solaris releases after 10 merged the threading libraries into libc.
        .solaris, .illumos => &.{ "-lm", "-lsocket", "-lnsl", "-lc" },
        .haiku => &.{ "-lm", "-lroot", "-lpthread", "-lc", "-lnetwork" },
        .linux => switch (target.abi) {
            // Bionic (Android) and OpenHarmony fold pthread/rt/util into libc.
            .android, .androideabi, .ohos, .ohoseabi => &.{ "-lm", "-lc", "-ldl" },
            else => &.{ "-lm", "-lpthread", "-lc", "-ldl", "-lrt", "-lutil" },
        },
        else => &.{},
    };
    return result;
}
pub fn clangMightShellOutForAssembly(target: std.Target) bool {

View File

@ -51,6 +51,7 @@ pub fn testAll(b: *Build, build_opts: BuildOptions) *Step {
elf_step.dependOn(testEmitRelocatable(b, .{ .target = musl_target }));
elf_step.dependOn(testRelocatableArchive(b, .{ .target = musl_target }));
elf_step.dependOn(testRelocatableEhFrame(b, .{ .target = musl_target }));
elf_step.dependOn(testRelocatableEhFrameComdatHeavy(b, .{ .target = musl_target }));
elf_step.dependOn(testRelocatableNoEhFrame(b, .{ .target = musl_target }));
// Exercise linker in ar mode
@ -2145,6 +2146,7 @@ fn testLdScript(b: *Build, opts: Options) *Step {
exe.addLibraryPath(dso.getEmittedBinDirectory());
exe.addRPath(dso.getEmittedBinDirectory());
exe.linkLibC();
exe.allow_so_scripts = true;
const run = addRunArtifact(exe);
run.expectExitCode(0);
@ -2164,14 +2166,13 @@ fn testLdScriptPathError(b: *Build, opts: Options) *Step {
exe.linkSystemLibrary2("a", .{});
exe.addLibraryPath(scripts.getDirectory());
exe.linkLibC();
exe.allow_so_scripts = true;
expectLinkErrors(
exe,
test_step,
.{
.contains = "error: missing library dependency: GNU ld script '/?/liba.so' requires 'libfoo.so', but file not found",
},
);
// TODO: A future enhancement could make this error message also mention
// the file that references the missing library.
expectLinkErrors(exe, test_step, .{
.stderr_contains = "error: unable to find dynamic system library 'foo' using strategy 'no_fallback'. searched paths:",
});
return test_step;
}
@ -2203,6 +2204,7 @@ fn testLdScriptAllowUndefinedVersion(b: *Build, opts: Options) *Step {
});
exe.linkLibrary(so);
exe.linkLibC();
exe.allow_so_scripts = true;
const run = addRunArtifact(exe);
run.expectStdErrEqual("3\n");
@ -2225,6 +2227,7 @@ fn testLdScriptDisallowUndefinedVersion(b: *Build, opts: Options) *Step {
const ld = b.addWriteFiles().add("add.ld", "VERSION { ADD_1.0 { global: add; sub; local: *; }; }");
so.setLinkerScript(ld);
so.linker_allow_undefined_version = false;
so.allow_so_scripts = true;
expectLinkErrors(
so,
@ -2721,86 +2724,110 @@ fn testRelocatableArchive(b: *Build, opts: Options) *Step {
/// Links two C++ objects into one relocatable object (`-r`) and verifies
/// that exception unwinding (.eh_frame) still works end to end: an
/// exception thrown in obj1 is caught in main and printed.
fn testRelocatableEhFrame(b: *Build, opts: Options) *Step {
    const test_step = addTestStep(b, "relocatable-eh-frame", opts);

    // First translation unit: throws a C++ exception.
    const obj1 = addObject(b, opts, .{
        .name = "obj1",
        .cpp_source_bytes =
        \\#include <stdexcept>
        \\int try_me() {
        \\  throw std::runtime_error("Oh no!");
        \\}
        ,
    });
    obj1.linkLibCpp();

    // Second translation unit: calls across object boundaries.
    const obj2 = addObject(b, opts, .{
        .name = "obj2",
        .cpp_source_bytes =
        \\extern int try_me();
        \\int try_again() {
        \\  return try_me();
        \\}
        ,
    });
    obj2.linkLibCpp();

    // Merge both into a single relocatable object; this is what exercises
    // .eh_frame handling in `-r` mode.
    const obj = addObject(b, opts, .{ .name = "obj" });
    obj.addObject(obj1);
    obj.addObject(obj2);
    obj.linkLibCpp();

    const exe = addExecutable(b, opts, .{ .name = "test1" });
    addCppSourceBytes(exe,
        \\#include <iostream>
        \\#include <stdexcept>
        \\extern int try_again();
        \\int main() {
        \\  try {
        \\    try_again();
        \\  } catch (const std::exception &e) {
        \\    std::cout << "exception=" << e.what();
        \\  }
        \\  return 0;
        \\}
    , &.{});
    exe.addObject(obj);
    exe.linkLibCpp();

    const run = addRunArtifact(exe);
    run.expectStdOutEqual("exception=Oh no!");
    test_step.dependOn(&run.step);

    return test_step;
}
/// Variant of the relocatable .eh_frame test with three C++ inputs merged
/// into one relocatable object. Presumably this makes the intermediate
/// object "COMDAT group heavy" (per the test name) so the `-r` link must
/// deduplicate COMDAT groups while keeping unwind info consistent —
/// verified by throwing an exception in one unit and catching it in main.
fn testRelocatableEhFrameComdatHeavy(b: *Build, opts: Options) *Step {
    const test_step = addTestStep(b, "relocatable-eh-frame-comdat-heavy", opts);

    // obj1: throws a C++ exception.
    const obj1 = addObject(b, opts, .{
        .name = "obj1",
        .cpp_source_bytes =
        \\#include <stdexcept>
        \\int try_me() {
        \\  throw std::runtime_error("Oh no!");
        \\}
        ,
    });
    obj1.linkLibCpp();

    // obj2: calls into obj1 across translation units.
    const obj2 = addObject(b, opts, .{
        .name = "obj2",
        .cpp_source_bytes =
        \\extern int try_me();
        \\int try_again() {
        \\  return try_me();
        \\}
        ,
    });
    obj2.linkLibCpp();

    // obj3: main, which catches the exception and prints its message.
    const obj3 = addObject(b, opts, .{
        .name = "obj3",
        .cpp_source_bytes =
        \\#include <iostream>
        \\#include <stdexcept>
        \\extern int try_again();
        \\int main() {
        \\  try {
        \\    try_again();
        \\  } catch (const std::exception &e) {
        \\    std::cout << "exception=" << e.what();
        \\  }
        \\  return 0;
        \\}
        ,
    });
    obj3.linkLibCpp();

    // Merge all three into a single relocatable object before the final link.
    const obj = addObject(b, opts, .{ .name = "obj" });
    obj.addObject(obj1);
    obj.addObject(obj2);
    obj.addObject(obj3);
    obj.linkLibCpp();

    const exe = addExecutable(b, opts, .{ .name = "test2" });
    exe.addObject(obj);
    exe.linkLibCpp();

    // Success criterion: unwinding worked through the merged object.
    const run = addRunArtifact(exe);
    run.expectStdOutEqual("exception=Oh no!");
    test_step.dependOn(&run.step);

    return test_step;
}
@ -3730,11 +3757,15 @@ fn testTlsOffsetAlignment(b: *Build, opts: Options) *Step {
\\#include <pthread.h>
\\#include <dlfcn.h>
\\#include <assert.h>
\\#include <stdio.h>
\\void *(*verify)(void *);
\\
\\int main() {
\\ void *handle = dlopen("liba.so", RTLD_NOW);
\\ assert(handle);
\\ if (!handle) {
\\ fprintf(stderr, "dlopen failed: %s\n", dlerror());
\\ return 1;
\\ }
\\ *(void**)(&verify) = dlsym(handle, "verify");
\\ assert(verify);
\\
@ -3907,16 +3938,8 @@ fn testUnknownFileTypeError(b: *Build, opts: Options) *Step {
exe.linkLibrary(dylib);
exe.linkLibC();
// TODO: improve the test harness to be able to selectively match lines in error output
// while avoiding jankiness
// expectLinkErrors(exe, test_step, .{ .exact = &.{
// "error: invalid token in LD script: '\\x00\\x00\\x00\\x0c\\x00\\x00\\x00/usr/lib/dyld\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x0d' (0:989)",
// "note: while parsing /?/liba.dylib",
// "error: unexpected error: parsing input file failed with error InvalidLdScript",
// "note: while parsing /?/liba.dylib",
// } });
expectLinkErrors(exe, test_step, .{
.starts_with = "error: invalid token in LD script: '\\x00\\x00\\x00\\x0c\\x00\\x00\\x00/usr/lib/dyld\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x0d' (",
.contains = "error: failed to parse shared library: BadMagic",
});
return test_step;