elf: enable adding support for additional cpu archs

Jakub Konka 2024-02-16 12:43:37 +01:00
parent 88d4b5cb18
commit fc7dd3e285
9 changed files with 863 additions and 565 deletions

CMakeLists.txt

@ -594,6 +594,7 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/src/link/Elf/file.zig"
"${CMAKE_SOURCE_DIR}/src/link/Elf/gc.zig"
"${CMAKE_SOURCE_DIR}/src/link/Elf/relocatable.zig"
"${CMAKE_SOURCE_DIR}/src/link/Elf/relocation.zig"
"${CMAKE_SOURCE_DIR}/src/link/Elf/synthetic_sections.zig"
"${CMAKE_SOURCE_DIR}/src/link/MachO.zig"
"${CMAKE_SOURCE_DIR}/src/link/MachO/Archive.zig"

src/link/Elf.zig

@ -1357,6 +1357,10 @@ pub fn flushModule(self: *Elf, arena: Allocator, prog_node: *std.Progress.Node)
error.RelaxFail, error.InvalidInstruction, error.CannotEncode => {
log.err("relaxing intructions failed; TODO this should be a fatal linker error", .{});
},
error.UnsupportedCpuArch => {
try self.reportUnsupportedCpuArch();
return error.FlushFailure;
},
else => |e| return e,
};
try self.base.file.?.pwriteAll(code, file_offset);
@ -1366,7 +1370,14 @@ pub fn flushModule(self: *Elf, arena: Allocator, prog_node: *std.Progress.Node)
try self.writePhdrTable();
try self.writeShdrTable();
try self.writeAtoms();
try self.writeSyntheticSections();
self.writeSyntheticSections() catch |err| switch (err) {
error.UnsupportedCpuArch => {
try self.reportUnsupportedCpuArch();
return error.FlushFailure;
},
else => |e| return e,
};
if (self.entry_index == null and self.base.isExe()) {
log.debug("flushing. no_entry_point_found = true", .{});
@ -2032,12 +2043,19 @@ fn scanRelocs(self: *Elf) !void {
undefs.deinit();
}
if (self.zigObjectPtr()) |zig_object| {
try zig_object.scanRelocs(self, &undefs);
}
for (self.objects.items) |index| {
const object = self.file(index).?.object;
try object.scanRelocs(self, &undefs);
var objects = try std.ArrayList(File.Index).initCapacity(gpa, self.objects.items.len + 1);
defer objects.deinit();
if (self.zigObjectPtr()) |zo| objects.appendAssumeCapacity(zo.index);
objects.appendSliceAssumeCapacity(self.objects.items);
for (objects.items) |index| {
self.file(index).?.scanRelocs(self, &undefs) catch |err| switch (err) {
error.UnsupportedCpuArch => {
try self.reportUnsupportedCpuArch();
return error.FlushFailure;
},
else => |e| return e,
};
}
try self.reportUndefinedSymbols(&undefs);
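The scanRelocs rewrite above replaces two separate loops (one for the ZigObject, one for regular objects) with a single pre-sized list of file indices, so every input goes through the same scanRelocs call and the same error handling. A rough sketch of that idiom, using the managed ArrayList API from this era of the standard library (the index values are made up):

```zig
const std = @import("std");

pub fn main() !void {
    var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa_state.deinit();
    const gpa = gpa_state.allocator();

    const zig_object_index: ?u32 = 0; // the in-progress Zig module, if any
    const object_indexes = [_]u32{ 1, 2, 3 }; // regular input objects

    // Size once up front, then append without further error handling.
    var objects = try std.ArrayList(u32).initCapacity(gpa, object_indexes.len + 1);
    defer objects.deinit();
    if (zig_object_index) |index| objects.appendAssumeCapacity(index);
    objects.appendSliceAssumeCapacity(&object_indexes);

    for (objects.items) |index| {
        std.debug.print("scan relocs in file {d}\n", .{index});
    }
}
```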
@ -4470,18 +4488,22 @@ fn writeAtoms(self: *Elf) !void {
defer gpa.free(in_code);
@memcpy(out_code, in_code);
if (shdr.sh_flags & elf.SHF_ALLOC == 0) {
try atom_ptr.resolveRelocsNonAlloc(self, out_code, &undefs);
} else {
atom_ptr.resolveRelocsAlloc(self, out_code) catch |err| switch (err) {
const res = if (shdr.sh_flags & elf.SHF_ALLOC == 0)
atom_ptr.resolveRelocsNonAlloc(self, out_code, &undefs)
else
atom_ptr.resolveRelocsAlloc(self, out_code);
_ = res catch |err| switch (err) {
// TODO
error.RelaxFail, error.InvalidInstruction, error.CannotEncode => {
log.err("relaxing intructions failed; TODO this should be a fatal linker error", .{});
},
error.UnsupportedCpuArch => {
try self.reportUnsupportedCpuArch();
return error.FlushFailure;
},
else => |e| return e,
};
}
}
try self.base.file.?.pwriteAll(buffer, sh_offset);
}
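In writeAtoms, the former if/else statement becomes a single `const res = if (...) ... else ...;` expression so both resolver paths share one catch block. A small sketch of how Zig's peer type resolution merges the two error unions (the function names are placeholders):

```zig
const std = @import("std");

fn resolveRelocsNonAlloc() error{ RelaxFail, UnsupportedCpuArch }!void {}

fn resolveRelocsAlloc() error{ RelaxFail, UnsupportedCpuArch }!void {
    return error.UnsupportedCpuArch;
}

fn writeOne(is_alloc: bool) !void {
    // Both branches yield the same error-union type, so one catch handles both.
    const res = if (!is_alloc) resolveRelocsNonAlloc() else resolveRelocsAlloc();
    _ = res catch |err| switch (err) {
        error.RelaxFail => std.log.err("relaxing instructions failed", .{}),
        error.UnsupportedCpuArch => return error.FlushFailure,
    };
}

pub fn main() void {
    writeOne(true) catch |err| std.debug.print("{s}\n", .{@errorName(err)});
}
```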
@ -5271,24 +5293,26 @@ pub fn addRelaDynAssumeCapacity(self: *Elf, opts: RelaDyn) void {
fn sortRelaDyn(self: *Elf) void {
const Sort = struct {
fn rank(rel: elf.Elf64_Rela) u2 {
return switch (rel.r_type()) {
elf.R_X86_64_RELATIVE => 0,
elf.R_X86_64_IRELATIVE => 2,
fn rank(rel: elf.Elf64_Rela, ctx: *Elf) u2 {
const cpu_arch = ctx.getTarget().cpu.arch;
const r_type = rel.r_type();
const r_kind = relocation.decode(r_type, cpu_arch).?;
return switch (r_kind) {
.rel => 0,
.irel => 2,
else => 1,
};
}
pub fn lessThan(ctx: void, lhs: elf.Elf64_Rela, rhs: elf.Elf64_Rela) bool {
_ = ctx;
if (rank(lhs) == rank(rhs)) {
pub fn lessThan(ctx: *Elf, lhs: elf.Elf64_Rela, rhs: elf.Elf64_Rela) bool {
if (rank(lhs, ctx) == rank(rhs, ctx)) {
if (lhs.r_sym() == rhs.r_sym()) return lhs.r_offset < rhs.r_offset;
return lhs.r_sym() < rhs.r_sym();
}
return rank(lhs) < rank(rhs);
return rank(lhs, ctx) < rank(rhs, ctx);
}
};
mem.sort(elf.Elf64_Rela, self.rela_dyn.items, {}, Sort.lessThan);
mem.sort(elf.Elf64_Rela, self.rela_dyn.items, self, Sort.lessThan);
}
fn calcNumIRelativeRelocs(self: *Elf) usize {
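sortRelaDyn now ranks entries by decoded relocation kind instead of hard-coded x86_64 values, so RELATIVE relocations group at the front of .rela.dyn and IRELATIVE at the back, with symbol index and offset as tie-breakers. A self-contained sketch of the comparator (the Rela struct here is a simplified stand-in):

```zig
const std = @import("std");

const Kind = enum { rel, irel, other };
const Rela = struct { kind: Kind, sym: u32, offset: u64 };

fn rank(kind: Kind) u2 {
    return switch (kind) {
        .rel => 0,
        .irel => 2,
        .other => 1,
    };
}

fn lessThan(_: void, lhs: Rela, rhs: Rela) bool {
    if (rank(lhs.kind) == rank(rhs.kind)) {
        if (lhs.sym == rhs.sym) return lhs.offset < rhs.offset;
        return lhs.sym < rhs.sym;
    }
    return rank(lhs.kind) < rank(rhs.kind);
}

pub fn main() void {
    var relas = [_]Rela{
        .{ .kind = .irel, .sym = 1, .offset = 0x10 },
        .{ .kind = .rel, .sym = 2, .offset = 0x20 },
        .{ .kind = .other, .sym = 1, .offset = 0x08 },
    };
    std.mem.sort(Rela, &relas, {}, lessThan);
    for (relas) |r| std.debug.print("{s} sym={d} off=0x{x}\n", .{ @tagName(r.kind), r.sym, r.offset });
}
```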
@ -5667,6 +5691,13 @@ fn reportMissingLibraryError(
}
}
pub fn reportUnsupportedCpuArch(self: *Elf) error{OutOfMemory}!void {
var err = try self.addErrorWithNotes(0);
try err.addMsg(self, "fatal linker error: unsupported CPU architecture {s}", .{
@tagName(self.getTarget().cpu.arch),
});
}
pub fn reportParseError(
self: *Elf,
path: []const u8,
@ -5932,6 +5963,10 @@ pub fn lsearch(comptime T: type, haystack: []align(1) const T, predicate: anytyp
return i;
}
pub fn getTarget(self: Elf) std.Target {
return self.base.comp.root_mod.resolved_target.result;
}
/// The following three values are only observed at compile-time and used to emit a compile error
/// to remind the programmer to update expected maximum numbers of different program header types
/// so that we reserve enough space for the program header table up-front.
@ -6059,6 +6094,7 @@ const link = @import("../link.zig");
const lldMain = @import("../main.zig").lldMain;
const musl = @import("../musl.zig");
const relocatable = @import("Elf/relocatable.zig");
const relocation = @import("Elf/relocation.zig");
const target_util = @import("../target.zig");
const trace = @import("../tracy.zig").trace;
const synthetic_sections = @import("Elf/synthetic_sections.zig");

File diff suppressed because it is too large

src/link/Elf/Object.zig

@ -55,6 +55,7 @@ pub fn deinit(self: *Object, allocator: Allocator) void {
pub fn parse(self: *Object, elf_file: *Elf) !void {
const gpa = elf_file.base.comp.gpa;
const cpu_arch = elf_file.getTarget().cpu.arch;
const handle = elf_file.fileHandle(self.file_handle);
try self.parseCommon(gpa, handle, elf_file);
@ -64,10 +65,13 @@ pub fn parse(self: *Object, elf_file: *Elf) !void {
for (self.shdrs.items, 0..) |shdr, i| {
const atom = elf_file.atom(self.atoms.items[i]) orelse continue;
if (!atom.flags.alive) continue;
if (shdr.sh_type == elf.SHT_X86_64_UNWIND or mem.eql(u8, atom.name(elf_file), ".eh_frame"))
if ((cpu_arch == .x86_64 and shdr.sh_type == elf.SHT_X86_64_UNWIND) or
mem.eql(u8, atom.name(elf_file), ".eh_frame"))
{
try self.parseEhFrame(gpa, handle, @as(u32, @intCast(i)), elf_file);
}
}
}
fn parseCommon(self: *Object, allocator: Allocator, handle: std.fs.File, elf_file: *Elf) !void {
const offset = if (self.archive) |ar| ar.offset else 0;
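The parse() change above stops treating SHT_X86_64_UNWIND as universal: it is only consulted when the target is x86_64, and every other architecture identifies .eh_frame by section name. A tiny sketch of that predicate, using the SHT_* constants from std.elf:

```zig
const std = @import("std");
const elf = std.elf;

fn isEhFrame(cpu_arch: std.Target.Cpu.Arch, sh_type: u32, name: []const u8) bool {
    return (cpu_arch == .x86_64 and sh_type == elf.SHT_X86_64_UNWIND) or
        std.mem.eql(u8, name, ".eh_frame");
}

pub fn main() void {
    // x86_64 recognizes the dedicated section type even without the name.
    std.debug.print("{}\n", .{isEhFrame(.x86_64, elf.SHT_X86_64_UNWIND, ".text")}); // true
    // Other architectures fall back to the section name.
    std.debug.print("{}\n", .{isEhFrame(.aarch64, elf.SHT_PROGBITS, ".eh_frame")}); // true
}
```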
@ -286,6 +290,10 @@ fn initOutputSection(self: Object, elf_file: *Elf, shdr: elf.Elf64_Shdr) error{O
}
break :blk name;
};
const @"type" = tt: {
if (elf_file.getTarget().cpu.arch == .x86_64 and
shdr.sh_type == elf.SHT_X86_64_UNWIND) break :tt elf.SHT_PROGBITS;
const @"type" = switch (shdr.sh_type) {
elf.SHT_NULL => unreachable,
elf.SHT_PROGBITS => blk: {
@ -295,9 +303,10 @@ fn initOutputSection(self: Object, elf_file: *Elf, shdr: elf.Elf64_Shdr) error{O
break :blk elf.SHT_FINI_ARRAY;
break :blk shdr.sh_type;
},
elf.SHT_X86_64_UNWIND => elf.SHT_PROGBITS,
else => shdr.sh_type,
};
break :tt @"type";
};
const flags = blk: {
var flags = shdr.sh_flags;
if (!elf_file.base.isRelocatable()) {
@ -596,9 +605,10 @@ pub fn markLive(self: *Object, elf_file: *Elf) void {
}
pub fn markEhFrameAtomsDead(self: Object, elf_file: *Elf) void {
const cpu_arch = elf_file.getTarget().cpu.arch;
for (self.atoms.items) |atom_index| {
const atom = elf_file.atom(atom_index) orelse continue;
const is_eh_frame = atom.inputShdr(elf_file).sh_type == elf.SHT_X86_64_UNWIND or
const is_eh_frame = (cpu_arch == .x86_64 and atom.inputShdr(elf_file).sh_type == elf.SHT_X86_64_UNWIND) or
mem.eql(u8, atom.name(elf_file), ".eh_frame");
if (atom.flags.alive and is_eh_frame) atom.flags.alive = false;
}

src/link/Elf/ZigObject.zig

@ -653,9 +653,10 @@ pub fn getDeclVAddr(
const this_sym = elf_file.symbol(this_sym_index);
const vaddr = this_sym.address(.{}, elf_file);
const parent_atom = elf_file.symbol(reloc_info.parent_atom_index).atom(elf_file).?;
const r_type = relocation.encode(.abs, elf_file.getTarget().cpu.arch);
try parent_atom.addReloc(elf_file, .{
.r_offset = reloc_info.offset,
.r_info = (@as(u64, @intCast(this_sym.esym_index)) << 32) | elf.R_X86_64_64,
.r_info = (@as(u64, @intCast(this_sym.esym_index)) << 32) | r_type,
.r_addend = reloc_info.addend,
});
return vaddr;
@ -671,9 +672,10 @@ pub fn getAnonDeclVAddr(
const sym = elf_file.symbol(sym_index);
const vaddr = sym.address(.{}, elf_file);
const parent_atom = elf_file.symbol(reloc_info.parent_atom_index).atom(elf_file).?;
const r_type = relocation.encode(.abs, elf_file.getTarget().cpu.arch);
try parent_atom.addReloc(elf_file, .{
.r_offset = reloc_info.offset,
.r_info = (@as(u64, @intCast(sym.esym_index)) << 32) | elf.R_X86_64_64,
.r_info = (@as(u64, @intCast(sym.esym_index)) << 32) | r_type,
.r_addend = reloc_info.addend,
});
return vaddr;
@ -1636,6 +1638,7 @@ const elf = std.elf;
const link = @import("../../link.zig");
const log = std.log.scoped(.link);
const mem = std.mem;
const relocation = @import("relocation.zig");
const trace = @import("../../tracy.zig").trace;
const std = @import("std");

src/link/Elf/eh_frame.zig

@ -302,26 +302,23 @@ pub fn calcEhFrameRelocs(elf_file: *Elf) usize {
}
fn resolveReloc(rec: anytype, sym: *const Symbol, rel: elf.Elf64_Rela, elf_file: *Elf, contents: []u8) !void {
const cpu_arch = elf_file.getTarget().cpu.arch;
const offset = std.math.cast(usize, rel.r_offset - rec.offset) orelse return error.Overflow;
const P = @as(i64, @intCast(rec.address(elf_file) + offset));
const S = @as(i64, @intCast(sym.address(.{}, elf_file)));
const P = math.cast(i64, rec.address(elf_file) + offset) orelse return error.Overflow;
const S = math.cast(i64, sym.address(.{}, elf_file)) orelse return error.Overflow;
const A = rel.r_addend;
relocs_log.debug(" {s}: {x}: [{x} => {x}] ({s})", .{
Atom.fmtRelocType(rel.r_type()),
relocation.fmtRelocType(rel.r_type(), cpu_arch),
offset,
P,
S + A,
sym.name(elf_file),
});
var where = contents[offset..];
switch (rel.r_type()) {
elf.R_X86_64_32 => std.mem.writeInt(i32, where[0..4], @as(i32, @truncate(S + A)), .little),
elf.R_X86_64_64 => std.mem.writeInt(i64, where[0..8], S + A, .little),
elf.R_X86_64_PC32 => std.mem.writeInt(i32, where[0..4], @as(i32, @intCast(S - P + A)), .little),
elf.R_X86_64_PC64 => std.mem.writeInt(i64, where[0..8], S - P + A, .little),
else => unreachable,
switch (cpu_arch) {
.x86_64 => x86_64.resolveReloc(rel, P, S + A, contents[offset..]),
else => return error.UnsupportedCpuArch,
}
}
@ -403,6 +400,7 @@ pub fn writeEhFrameObject(elf_file: *Elf, writer: anytype) !void {
}
fn emitReloc(elf_file: *Elf, rec: anytype, sym: *const Symbol, rel: elf.Elf64_Rela) elf.Elf64_Rela {
const cpu_arch = elf_file.getTarget().cpu.arch;
const r_offset = rec.address(elf_file) + rel.r_offset - rec.offset;
const r_type = rel.r_type();
var r_addend = rel.r_addend;
@ -418,7 +416,7 @@ fn emitReloc(elf_file: *Elf, rec: anytype, sym: *const Symbol, rel: elf.Elf64_Re
}
relocs_log.debug(" {s}: [{x} => {d}({s})] + {x}", .{
Atom.fmtRelocType(r_type),
relocation.fmtRelocType(r_type, cpu_arch),
r_offset,
r_sym,
sym.name(elf_file),
@ -541,10 +539,24 @@ const EH_PE = struct {
pub const omit = 0xFF;
};
const x86_64 = struct {
fn resolveReloc(rel: elf.Elf64_Rela, source: i64, target: i64, data: []u8) void {
switch (rel.r_type()) {
elf.R_X86_64_32 => std.mem.writeInt(i32, data[0..4], @as(i32, @truncate(target)), .little),
elf.R_X86_64_64 => std.mem.writeInt(i64, data[0..8], target, .little),
elf.R_X86_64_PC32 => std.mem.writeInt(i32, data[0..4], @as(i32, @intCast(target - source)), .little),
elf.R_X86_64_PC64 => std.mem.writeInt(i64, data[0..8], target - source, .little),
else => unreachable,
}
}
};
const std = @import("std");
const assert = std.debug.assert;
const elf = std.elf;
const math = std.math;
const relocs_log = std.log.scoped(.link_relocs);
const relocation = @import("relocation.zig");
const Allocator = std.mem.Allocator;
const Atom = @import("Atom.zig");
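resolveReloc above computes the classic ELF quantities P (address of the place being patched), S (resolved symbol address), and A (addend), then dispatches to a per-arch namespace to write the bytes. A worked sketch of the arithmetic for a PC-relative 32-bit slot (the addresses are invented for illustration):

```zig
const std = @import("std");

pub fn main() void {
    var code = [_]u8{0} ** 8;
    const P: i64 = 0x201000; // address of the patched location
    const S: i64 = 0x202010; // resolved symbol address
    const A: i64 = -4; // addend from the Elf64_Rela entry
    // A PC32-style relocation stores S + A - P at the target offset.
    std.mem.writeInt(i32, code[0..4], @intCast(S + A - P), .little);
    std.debug.print("stored 0x{x}\n", .{std.mem.readInt(i32, code[0..4], .little)});
}
```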

src/link/Elf/file.zig

@ -91,6 +91,13 @@ pub const File = union(enum) {
}
}
pub fn scanRelocs(file: File, elf_file: *Elf, undefs: anytype) !void {
switch (file) {
.linker_defined, .shared_object => unreachable,
inline else => |x| try x.scanRelocs(elf_file, undefs),
}
}
pub fn atoms(file: File) []const Atom.Index {
return switch (file) {
.linker_defined, .shared_object => &[0]Atom.Index{},
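File.scanRelocs above uses Zig's `inline else` to forward the call to whichever union member is active, while members that can never reach this code path trap with `unreachable`. A condensed, self-contained sketch of that dispatch:

```zig
const std = @import("std");

const Object = struct {
    fn scan(_: Object) void {
        std.debug.print("scanning object\n", .{});
    }
};
const ZigObject = struct {
    fn scan(_: ZigObject) void {
        std.debug.print("scanning zig object\n", .{});
    }
};
const SharedObject = struct {};

const File = union(enum) {
    object: Object,
    zig_object: ZigObject,
    shared_object: SharedObject,

    fn scan(file: File) void {
        switch (file) {
            .shared_object => unreachable, // never scanned for relocs
            inline else => |x| x.scan(),
        }
    }
};

pub fn main() void {
    const f: File = .{ .object = .{} };
    f.scan();
}
```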

src/link/Elf/relocation.zig (new file, 264 lines)

@ -0,0 +1,264 @@
pub const Kind = enum {
abs,
copy,
rel,
irel,
glob_dat,
jump_slot,
dtpmod,
dtpoff,
tpoff,
tlsdesc,
};
const x86_64_relocs = [_]struct { Kind, u32 }{
.{ .abs, elf.R_X86_64_64 },
.{ .copy, elf.R_X86_64_COPY },
.{ .rel, elf.R_X86_64_RELATIVE },
.{ .irel, elf.R_X86_64_IRELATIVE },
.{ .glob_dat, elf.R_X86_64_GLOB_DAT },
.{ .jump_slot, elf.R_X86_64_JUMP_SLOT },
.{ .dtpmod, elf.R_X86_64_DTPMOD64 },
.{ .dtpoff, elf.R_X86_64_DTPOFF64 },
.{ .tpoff, elf.R_X86_64_TPOFF64 },
.{ .tlsdesc, elf.R_X86_64_TLSDESC },
};
const aarch64_relocs = [_]struct { Kind, u32 }{
.{ .abs, elf.R_AARCH64_ABS64 },
.{ .copy, elf.R_AARCH64_COPY },
.{ .rel, elf.R_AARCH64_RELATIVE },
.{ .irel, elf.R_AARCH64_IRELATIVE },
.{ .glob_dat, elf.R_AARCH64_GLOB_DAT },
.{ .jump_slot, elf.R_AARCH64_JUMP_SLOT },
.{ .dtpmod, elf.R_AARCH64_TLS_DTPMOD },
.{ .dtpoff, elf.R_AARCH64_TLS_DTPREL },
.{ .tpoff, elf.R_AARCH64_TLS_TPREL },
.{ .tlsdesc, elf.R_AARCH64_TLSDESC },
};
pub fn decode(r_type: u32, cpu_arch: std.Target.Cpu.Arch) ?Kind {
const relocs = switch (cpu_arch) {
.x86_64 => &x86_64_relocs,
.aarch64 => &aarch64_relocs,
else => @panic("TODO unhandled cpu arch"),
};
inline for (relocs) |entry| {
if (entry[1] == r_type) return entry[0];
}
return null;
}
pub fn encode(comptime kind: Kind, cpu_arch: std.Target.Cpu.Arch) u32 {
const relocs = switch (cpu_arch) {
.x86_64 => &x86_64_relocs,
.aarch64 => &aarch64_relocs,
else => @panic("TODO unhandled cpu arch"),
};
inline for (relocs) |entry| {
if (entry[0] == kind) return entry[1];
}
unreachable;
}
const FormatRelocTypeCtx = struct {
r_type: u32,
cpu_arch: std.Target.Cpu.Arch,
};
pub fn fmtRelocType(r_type: u32, cpu_arch: std.Target.Cpu.Arch) std.fmt.Formatter(formatRelocType) {
return .{ .data = .{
.r_type = r_type,
.cpu_arch = cpu_arch,
} };
}
fn formatRelocType(
ctx: FormatRelocTypeCtx,
comptime unused_fmt_string: []const u8,
options: std.fmt.FormatOptions,
writer: anytype,
) !void {
_ = unused_fmt_string;
_ = options;
const r_type = ctx.r_type;
const str = switch (ctx.cpu_arch) {
.x86_64 => switch (r_type) {
elf.R_X86_64_NONE => "R_X86_64_NONE",
elf.R_X86_64_64 => "R_X86_64_64",
elf.R_X86_64_PC32 => "R_X86_64_PC32",
elf.R_X86_64_GOT32 => "R_X86_64_GOT32",
elf.R_X86_64_PLT32 => "R_X86_64_PLT32",
elf.R_X86_64_COPY => "R_X86_64_COPY",
elf.R_X86_64_GLOB_DAT => "R_X86_64_GLOB_DAT",
elf.R_X86_64_JUMP_SLOT => "R_X86_64_JUMP_SLOT",
elf.R_X86_64_RELATIVE => "R_X86_64_RELATIVE",
elf.R_X86_64_GOTPCREL => "R_X86_64_GOTPCREL",
elf.R_X86_64_32 => "R_X86_64_32",
elf.R_X86_64_32S => "R_X86_64_32S",
elf.R_X86_64_16 => "R_X86_64_16",
elf.R_X86_64_PC16 => "R_X86_64_PC16",
elf.R_X86_64_8 => "R_X86_64_8",
elf.R_X86_64_PC8 => "R_X86_64_PC8",
elf.R_X86_64_DTPMOD64 => "R_X86_64_DTPMOD64",
elf.R_X86_64_DTPOFF64 => "R_X86_64_DTPOFF64",
elf.R_X86_64_TPOFF64 => "R_X86_64_TPOFF64",
elf.R_X86_64_TLSGD => "R_X86_64_TLSGD",
elf.R_X86_64_TLSLD => "R_X86_64_TLSLD",
elf.R_X86_64_DTPOFF32 => "R_X86_64_DTPOFF32",
elf.R_X86_64_GOTTPOFF => "R_X86_64_GOTTPOFF",
elf.R_X86_64_TPOFF32 => "R_X86_64_TPOFF32",
elf.R_X86_64_PC64 => "R_X86_64_PC64",
elf.R_X86_64_GOTOFF64 => "R_X86_64_GOTOFF64",
elf.R_X86_64_GOTPC32 => "R_X86_64_GOTPC32",
elf.R_X86_64_GOT64 => "R_X86_64_GOT64",
elf.R_X86_64_GOTPCREL64 => "R_X86_64_GOTPCREL64",
elf.R_X86_64_GOTPC64 => "R_X86_64_GOTPC64",
elf.R_X86_64_GOTPLT64 => "R_X86_64_GOTPLT64",
elf.R_X86_64_PLTOFF64 => "R_X86_64_PLTOFF64",
elf.R_X86_64_SIZE32 => "R_X86_64_SIZE32",
elf.R_X86_64_SIZE64 => "R_X86_64_SIZE64",
elf.R_X86_64_GOTPC32_TLSDESC => "R_X86_64_GOTPC32_TLSDESC",
elf.R_X86_64_TLSDESC_CALL => "R_X86_64_TLSDESC_CALL",
elf.R_X86_64_TLSDESC => "R_X86_64_TLSDESC",
elf.R_X86_64_IRELATIVE => "R_X86_64_IRELATIVE",
elf.R_X86_64_RELATIVE64 => "R_X86_64_RELATIVE64",
elf.R_X86_64_GOTPCRELX => "R_X86_64_GOTPCRELX",
elf.R_X86_64_REX_GOTPCRELX => "R_X86_64_REX_GOTPCRELX",
elf.R_X86_64_NUM => "R_X86_64_NUM",
else => "R_X86_64_UNKNOWN",
},
.aarch64 => switch (r_type) {
elf.R_AARCH64_NONE => "R_AARCH64_NONE",
elf.R_AARCH64_ABS64 => "R_AARCH64_ABS64",
elf.R_AARCH64_ABS32 => "R_AARCH64_ABS32",
elf.R_AARCH64_ABS16 => "R_AARCH64_ABS16",
elf.R_AARCH64_PREL64 => "R_AARCH64_PREL64",
elf.R_AARCH64_PREL32 => "R_AARCH64_PREL32",
elf.R_AARCH64_PREL16 => "R_AARCH64_PREL16",
elf.R_AARCH64_MOVW_UABS_G0 => "R_AARCH64_MOVW_UABS_G0",
elf.R_AARCH64_MOVW_UABS_G0_NC => "R_AARCH64_MOVW_UABS_G0_NC",
elf.R_AARCH64_MOVW_UABS_G1 => "R_AARCH64_MOVW_UABS_G1",
elf.R_AARCH64_MOVW_UABS_G1_NC => "R_AARCH64_MOVW_UABS_G1_NC",
elf.R_AARCH64_MOVW_UABS_G2 => "R_AARCH64_MOVW_UABS_G2",
elf.R_AARCH64_MOVW_UABS_G2_NC => "R_AARCH64_MOVW_UABS_G2_NC",
elf.R_AARCH64_MOVW_UABS_G3 => "R_AARCH64_MOVW_UABS_G3",
elf.R_AARCH64_MOVW_SABS_G0 => "R_AARCH64_MOVW_SABS_G0",
elf.R_AARCH64_MOVW_SABS_G1 => "R_AARCH64_MOVW_SABS_G1",
elf.R_AARCH64_MOVW_SABS_G2 => "R_AARCH64_MOVW_SABS_G2",
elf.R_AARCH64_LD_PREL_LO19 => "R_AARCH64_LD_PREL_LO19",
elf.R_AARCH64_ADR_PREL_LO21 => "R_AARCH64_ADR_PREL_LO21",
elf.R_AARCH64_ADR_PREL_PG_HI21 => "R_AARCH64_ADR_PREL_PG_HI21",
elf.R_AARCH64_ADR_PREL_PG_HI21_NC => "R_AARCH64_ADR_PREL_PG_HI21_NC",
elf.R_AARCH64_ADD_ABS_LO12_NC => "R_AARCH64_ADD_ABS_LO12_NC",
elf.R_AARCH64_LDST8_ABS_LO12_NC => "R_AARCH64_LDST8_ABS_LO12_NC",
elf.R_AARCH64_TSTBR14 => "R_AARCH64_TSTBR14",
elf.R_AARCH64_CONDBR19 => "R_AARCH64_CONDBR19",
elf.R_AARCH64_JUMP26 => "R_AARCH64_JUMP26",
elf.R_AARCH64_CALL26 => "R_AARCH64_CALL26",
elf.R_AARCH64_LDST16_ABS_LO12_NC => "R_AARCH64_LDST16_ABS_LO12_NC",
elf.R_AARCH64_LDST32_ABS_LO12_NC => "R_AARCH64_LDST32_ABS_LO12_NC",
elf.R_AARCH64_LDST64_ABS_LO12_NC => "R_AARCH64_LDST64_ABS_LO12_NC",
elf.R_AARCH64_MOVW_PREL_G0 => "R_AARCH64_MOVW_PREL_G0",
elf.R_AARCH64_MOVW_PREL_G0_NC => "R_AARCH64_MOVW_PREL_G0_NC",
elf.R_AARCH64_MOVW_PREL_G1 => "R_AARCH64_MOVW_PREL_G1",
elf.R_AARCH64_MOVW_PREL_G1_NC => "R_AARCH64_MOVW_PREL_G1_NC",
elf.R_AARCH64_MOVW_PREL_G2 => "R_AARCH64_MOVW_PREL_G2",
elf.R_AARCH64_MOVW_PREL_G2_NC => "R_AARCH64_MOVW_PREL_G2_NC",
elf.R_AARCH64_MOVW_PREL_G3 => "R_AARCH64_MOVW_PREL_G3",
elf.R_AARCH64_LDST128_ABS_LO12_NC => "R_AARCH64_LDST128_ABS_LO12_NC",
elf.R_AARCH64_MOVW_GOTOFF_G0 => "R_AARCH64_MOVW_GOTOFF_G0",
elf.R_AARCH64_MOVW_GOTOFF_G0_NC => "R_AARCH64_MOVW_GOTOFF_G0_NC",
elf.R_AARCH64_MOVW_GOTOFF_G1 => "R_AARCH64_MOVW_GOTOFF_G1",
elf.R_AARCH64_MOVW_GOTOFF_G1_NC => "R_AARCH64_MOVW_GOTOFF_G1_NC",
elf.R_AARCH64_MOVW_GOTOFF_G2 => "R_AARCH64_MOVW_GOTOFF_G2",
elf.R_AARCH64_MOVW_GOTOFF_G2_NC => "R_AARCH64_MOVW_GOTOFF_G2_NC",
elf.R_AARCH64_MOVW_GOTOFF_G3 => "R_AARCH64_MOVW_GOTOFF_G3",
elf.R_AARCH64_GOTREL64 => "R_AARCH64_GOTREL64",
elf.R_AARCH64_GOTREL32 => "R_AARCH64_GOTREL32",
elf.R_AARCH64_GOT_LD_PREL19 => "R_AARCH64_GOT_LD_PREL19",
elf.R_AARCH64_LD64_GOTOFF_LO15 => "R_AARCH64_LD64_GOTOFF_LO15",
elf.R_AARCH64_ADR_GOT_PAGE => "R_AARCH64_ADR_GOT_PAGE",
elf.R_AARCH64_LD64_GOT_LO12_NC => "R_AARCH64_LD64_GOT_LO12_NC",
elf.R_AARCH64_LD64_GOTPAGE_LO15 => "R_AARCH64_LD64_GOTPAGE_LO15",
elf.R_AARCH64_TLSGD_ADR_PREL21 => "R_AARCH64_TLSGD_ADR_PREL21",
elf.R_AARCH64_TLSGD_ADR_PAGE21 => "R_AARCH64_TLSGD_ADR_PAGE21",
elf.R_AARCH64_TLSGD_ADD_LO12_NC => "R_AARCH64_TLSGD_ADD_LO12_NC",
elf.R_AARCH64_TLSGD_MOVW_G1 => "R_AARCH64_TLSGD_MOVW_G1",
elf.R_AARCH64_TLSGD_MOVW_G0_NC => "R_AARCH64_TLSGD_MOVW_G0_NC",
elf.R_AARCH64_TLSLD_ADR_PREL21 => "R_AARCH64_TLSLD_ADR_PREL21",
elf.R_AARCH64_TLSLD_ADR_PAGE21 => "R_AARCH64_TLSLD_ADR_PAGE21",
elf.R_AARCH64_TLSLD_ADD_LO12_NC => "R_AARCH64_TLSLD_ADD_LO12_NC",
elf.R_AARCH64_TLSLD_MOVW_G1 => "R_AARCH64_TLSLD_MOVW_G1",
elf.R_AARCH64_TLSLD_MOVW_G0_NC => "R_AARCH64_TLSLD_MOVW_G0_NC",
elf.R_AARCH64_TLSLD_LD_PREL19 => "R_AARCH64_TLSLD_LD_PREL19",
elf.R_AARCH64_TLSLD_MOVW_DTPREL_G2 => "R_AARCH64_TLSLD_MOVW_DTPREL_G2",
elf.R_AARCH64_TLSLD_MOVW_DTPREL_G1 => "R_AARCH64_TLSLD_MOVW_DTPREL_G1",
elf.R_AARCH64_TLSLD_MOVW_DTPREL_G1_NC => "R_AARCH64_TLSLD_MOVW_DTPREL_G1_NC",
elf.R_AARCH64_TLSLD_MOVW_DTPREL_G0 => "R_AARCH64_TLSLD_MOVW_DTPREL_G0",
elf.R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC => "R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC",
elf.R_AARCH64_TLSLD_ADD_DTPREL_HI12 => "R_AARCH64_TLSLD_ADD_DTPREL_HI12",
elf.R_AARCH64_TLSLD_ADD_DTPREL_LO12 => "R_AARCH64_TLSLD_ADD_DTPREL_LO12",
elf.R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC => "R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC",
elf.R_AARCH64_TLSLD_LDST8_DTPREL_LO12 => "R_AARCH64_TLSLD_LDST8_DTPREL_LO12",
elf.R_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC => "R_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC",
elf.R_AARCH64_TLSLD_LDST16_DTPREL_LO12 => "R_AARCH64_TLSLD_LDST16_DTPREL_LO12",
elf.R_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC => "R_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC",
elf.R_AARCH64_TLSLD_LDST32_DTPREL_LO12 => "R_AARCH64_TLSLD_LDST32_DTPREL_LO12",
elf.R_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC => "R_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC",
elf.R_AARCH64_TLSLD_LDST64_DTPREL_LO12 => "R_AARCH64_TLSLD_LDST64_DTPREL_LO12",
elf.R_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC => "R_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC",
elf.R_AARCH64_TLSIE_MOVW_GOTTPREL_G1 => "R_AARCH64_TLSIE_MOVW_GOTTPREL_G1",
elf.R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC => "R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC",
elf.R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 => "R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21",
elf.R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC => "R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC",
elf.R_AARCH64_TLSIE_LD_GOTTPREL_PREL19 => "R_AARCH64_TLSIE_LD_GOTTPREL_PREL19",
elf.R_AARCH64_TLSLE_MOVW_TPREL_G2 => "R_AARCH64_TLSLE_MOVW_TPREL_G2",
elf.R_AARCH64_TLSLE_MOVW_TPREL_G1 => "R_AARCH64_TLSLE_MOVW_TPREL_G1",
elf.R_AARCH64_TLSLE_MOVW_TPREL_G1_NC => "R_AARCH64_TLSLE_MOVW_TPREL_G1_NC",
elf.R_AARCH64_TLSLE_MOVW_TPREL_G0 => "R_AARCH64_TLSLE_MOVW_TPREL_G0",
elf.R_AARCH64_TLSLE_MOVW_TPREL_G0_NC => "R_AARCH64_TLSLE_MOVW_TPREL_G0_NC",
elf.R_AARCH64_TLSLE_ADD_TPREL_HI12 => "R_AARCH64_TLSLE_ADD_TPREL_HI12",
elf.R_AARCH64_TLSLE_ADD_TPREL_LO12 => "R_AARCH64_TLSLE_ADD_TPREL_LO12",
elf.R_AARCH64_TLSLE_ADD_TPREL_LO12_NC => "R_AARCH64_TLSLE_ADD_TPREL_LO12_NC",
elf.R_AARCH64_TLSLE_LDST8_TPREL_LO12 => "R_AARCH64_TLSLE_LDST8_TPREL_LO12",
elf.R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC => "R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC",
elf.R_AARCH64_TLSLE_LDST16_TPREL_LO12 => "R_AARCH64_TLSLE_LDST16_TPREL_LO12",
elf.R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC => "R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC",
elf.R_AARCH64_TLSLE_LDST32_TPREL_LO12 => "R_AARCH64_TLSLE_LDST32_TPREL_LO12",
elf.R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC => "R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC",
elf.R_AARCH64_TLSLE_LDST64_TPREL_LO12 => "R_AARCH64_TLSLE_LDST64_TPREL_LO12",
elf.R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC => "R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC",
elf.R_AARCH64_TLSDESC_LD_PREL19 => "R_AARCH64_TLSDESC_LD_PREL19",
elf.R_AARCH64_TLSDESC_ADR_PREL21 => "R_AARCH64_TLSDESC_ADR_PREL21",
elf.R_AARCH64_TLSDESC_ADR_PAGE21 => "R_AARCH64_TLSDESC_ADR_PAGE21",
elf.R_AARCH64_TLSDESC_LD64_LO12 => "R_AARCH64_TLSDESC_LD64_LO12",
elf.R_AARCH64_TLSDESC_ADD_LO12 => "R_AARCH64_TLSDESC_ADD_LO12",
elf.R_AARCH64_TLSDESC_OFF_G1 => "R_AARCH64_TLSDESC_OFF_G1",
elf.R_AARCH64_TLSDESC_OFF_G0_NC => "R_AARCH64_TLSDESC_OFF_G0_NC",
elf.R_AARCH64_TLSDESC_LDR => "R_AARCH64_TLSDESC_LDR",
elf.R_AARCH64_TLSDESC_ADD => "R_AARCH64_TLSDESC_ADD",
elf.R_AARCH64_TLSDESC_CALL => "R_AARCH64_TLSDESC_CALL",
elf.R_AARCH64_TLSLE_LDST128_TPREL_LO12 => "R_AARCH64_TLSLE_LDST128_TPREL_LO12",
elf.R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC => "R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC",
elf.R_AARCH64_TLSLD_LDST128_DTPREL_LO12 => "R_AARCH64_TLSLD_LDST128_DTPREL_LO12",
elf.R_AARCH64_TLSLD_LDST128_DTPREL_LO12_NC => "R_AARCH64_TLSLD_LDST128_DTPREL_LO12_NC",
elf.R_AARCH64_COPY => "R_AARCH64_COPY",
elf.R_AARCH64_GLOB_DAT => "R_AARCH64_GLOB_DAT",
elf.R_AARCH64_JUMP_SLOT => "R_AARCH64_JUMP_SLOT",
elf.R_AARCH64_RELATIVE => "R_AARCH64_RELATIVE",
elf.R_AARCH64_TLS_DTPMOD => "R_AARCH64_TLS_DTPMOD",
elf.R_AARCH64_TLS_DTPREL => "R_AARCH64_TLS_DTPREL",
elf.R_AARCH64_TLS_TPREL => "R_AARCH64_TLS_TPREL",
elf.R_AARCH64_TLSDESC => "R_AARCH64_TLSDESC",
elf.R_AARCH64_IRELATIVE => "R_AARCH64_IRELATIVE",
else => "R_AARCH64_UNKNOWN",
},
else => unreachable,
};
try writer.print("{s}", .{str});
}
const assert = std.debug.assert;
const elf = std.elf;
const std = @import("std");
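The mapping tables above let the rest of the linker speak in generic kinds while each architecture keeps its own numeric relocation codes, and decode is just the reverse lookup. A self-contained sketch of the same table-driven encode/decode, using a subset of the kinds (the constants come from std.elf; R_X86_64_RELATIVE is 8 and R_AARCH64_RELATIVE is 1027):

```zig
const std = @import("std");
const elf = std.elf;

const Kind = enum { abs, rel, irel };
const Entry = struct { Kind, u32 };

fn table(cpu_arch: std.Target.Cpu.Arch) []const Entry {
    return switch (cpu_arch) {
        .x86_64 => &.{ .{ .abs, elf.R_X86_64_64 }, .{ .rel, elf.R_X86_64_RELATIVE }, .{ .irel, elf.R_X86_64_IRELATIVE } },
        .aarch64 => &.{ .{ .abs, elf.R_AARCH64_ABS64 }, .{ .rel, elf.R_AARCH64_RELATIVE }, .{ .irel, elf.R_AARCH64_IRELATIVE } },
        else => unreachable,
    };
}

fn encode(kind: Kind, cpu_arch: std.Target.Cpu.Arch) u32 {
    for (table(cpu_arch)) |entry| {
        if (entry[0] == kind) return entry[1];
    }
    unreachable;
}

fn decode(r_type: u32, cpu_arch: std.Target.Cpu.Arch) ?Kind {
    for (table(cpu_arch)) |entry| {
        if (entry[1] == r_type) return entry[0];
    }
    return null;
}

pub fn main() void {
    std.debug.print("rel on x86_64 -> {d}\n", .{encode(.rel, .x86_64)}); // 8
    std.debug.print("rel on aarch64 -> {d}\n", .{encode(.rel, .aarch64)}); // 1027
    std.debug.print("decoded -> {s}\n", .{@tagName(decode(encode(.rel, .aarch64), .aarch64).?)});
}
```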

src/link/Elf/synthetic_sections.zig

@ -292,7 +292,7 @@ pub const ZigGotSection = struct {
zig_got.flags.dirty = false;
}
const entry_size: u16 = elf_file.archPtrWidthBytes();
const target = elf_file.base.comp.root_mod.resolved_target.result;
const target = elf_file.getTarget();
const endian = target.cpu.arch.endian();
const off = zig_got.entryOffset(index, elf_file);
const vaddr = zig_got.entryAddress(index, elf_file);
@ -354,13 +354,14 @@ pub const ZigGotSection = struct {
pub fn addRela(zig_got: ZigGotSection, elf_file: *Elf) !void {
const comp = elf_file.base.comp;
const gpa = comp.gpa;
const cpu_arch = elf_file.getTarget().cpu.arch;
try elf_file.rela_dyn.ensureUnusedCapacity(gpa, zig_got.numRela());
for (zig_got.entries.items) |entry| {
const symbol = elf_file.symbol(entry);
const offset = symbol.zigGotAddress(elf_file);
elf_file.addRelaDynAssumeCapacity(.{
.offset = offset,
.type = elf.R_X86_64_RELATIVE,
.type = relocation.encode(.rel, cpu_arch),
.addend = @intCast(symbol.address(.{ .plt = false }, elf_file)),
});
}
@ -644,6 +645,7 @@ pub const GotSection = struct {
const comp = elf_file.base.comp;
const gpa = comp.gpa;
const is_dyn_lib = elf_file.base.isDynLib();
const cpu_arch = elf_file.getTarget().cpu.arch;
try elf_file.rela_dyn.ensureUnusedCapacity(gpa, got.numRela(elf_file));
for (got.entries.items) |entry| {
@ -660,14 +662,14 @@ pub const GotSection = struct {
elf_file.addRelaDynAssumeCapacity(.{
.offset = offset,
.sym = extra.?.dynamic,
.type = elf.R_X86_64_GLOB_DAT,
.type = relocation.encode(.glob_dat, cpu_arch),
});
continue;
}
if (symbol.?.isIFunc(elf_file)) {
elf_file.addRelaDynAssumeCapacity(.{
.offset = offset,
.type = elf.R_X86_64_IRELATIVE,
.type = relocation.encode(.irel, cpu_arch),
.addend = @intCast(symbol.?.address(.{ .plt = false }, elf_file)),
});
continue;
@ -677,7 +679,7 @@ pub const GotSection = struct {
{
elf_file.addRelaDynAssumeCapacity(.{
.offset = offset,
.type = elf.R_X86_64_RELATIVE,
.type = relocation.encode(.rel, cpu_arch),
.addend = @intCast(symbol.?.address(.{ .plt = false }, elf_file)),
});
}
@ -688,7 +690,7 @@ pub const GotSection = struct {
const offset = entry.address(elf_file);
elf_file.addRelaDynAssumeCapacity(.{
.offset = offset,
.type = elf.R_X86_64_DTPMOD64,
.type = relocation.encode(.dtpmod, cpu_arch),
});
}
},
@ -699,18 +701,18 @@ pub const GotSection = struct {
elf_file.addRelaDynAssumeCapacity(.{
.offset = offset,
.sym = extra.?.dynamic,
.type = elf.R_X86_64_DTPMOD64,
.type = relocation.encode(.dtpmod, cpu_arch),
});
elf_file.addRelaDynAssumeCapacity(.{
.offset = offset + 8,
.sym = extra.?.dynamic,
.type = elf.R_X86_64_DTPOFF64,
.type = relocation.encode(.dtpoff, cpu_arch),
});
} else if (is_dyn_lib) {
elf_file.addRelaDynAssumeCapacity(.{
.offset = offset,
.sym = extra.?.dynamic,
.type = elf.R_X86_64_DTPMOD64,
.type = relocation.encode(.dtpmod, cpu_arch),
});
}
},
@ -721,12 +723,12 @@ pub const GotSection = struct {
elf_file.addRelaDynAssumeCapacity(.{
.offset = offset,
.sym = extra.?.dynamic,
.type = elf.R_X86_64_TPOFF64,
.type = relocation.encode(.tpoff, cpu_arch),
});
} else if (is_dyn_lib) {
elf_file.addRelaDynAssumeCapacity(.{
.offset = offset,
.type = elf.R_X86_64_TPOFF64,
.type = relocation.encode(.tpoff, cpu_arch),
.addend = @intCast(symbol.?.address(.{}, elf_file) - elf_file.tlsAddress()),
});
}
@ -737,7 +739,7 @@ pub const GotSection = struct {
elf_file.addRelaDynAssumeCapacity(.{
.offset = offset,
.sym = extra.?.dynamic,
.type = elf.R_X86_64_TLSDESC,
.type = relocation.encode(.tlsdesc, cpu_arch),
});
},
}
@ -914,6 +916,7 @@ pub const PltSection = struct {
pub fn addRela(plt: PltSection, elf_file: *Elf) !void {
const comp = elf_file.base.comp;
const gpa = comp.gpa;
const cpu_arch = elf_file.getTarget().cpu.arch;
try elf_file.rela_plt.ensureUnusedCapacity(gpa, plt.numRela());
for (plt.symbols.items) |sym_index| {
const sym = elf_file.symbol(sym_index);
@ -921,7 +924,7 @@ pub const PltSection = struct {
const extra = sym.extra(elf_file).?;
const r_offset = sym.gotPltAddress(elf_file);
const r_sym: u64 = extra.dynamic;
const r_type: u32 = elf.R_X86_64_JUMP_SLOT;
const r_type = relocation.encode(.jump_slot, cpu_arch);
elf_file.rela_plt.appendAssumeCapacity(.{
.r_offset = r_offset,
.r_info = (r_sym << 32) | r_type,
@ -1154,6 +1157,7 @@ pub const CopyRelSection = struct {
pub fn addRela(copy_rel: CopyRelSection, elf_file: *Elf) !void {
const comp = elf_file.base.comp;
const gpa = comp.gpa;
const cpu_arch = elf_file.getTarget().cpu.arch;
try elf_file.rela_dyn.ensureUnusedCapacity(gpa, copy_rel.numRela());
for (copy_rel.symbols.items) |sym_index| {
const sym = elf_file.symbol(sym_index);
@ -1162,7 +1166,7 @@ pub const CopyRelSection = struct {
elf_file.addRelaDynAssumeCapacity(.{
.offset = sym.address(.{}, elf_file),
.sym = extra.dynamic,
.type = elf.R_X86_64_COPY,
.type = relocation.encode(.copy, cpu_arch),
});
}
}
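PltSection.addRela above builds Elf64_Rela.r_info by packing the dynamic symbol index into the high 32 bits and the relocation type into the low 32. A short sketch showing the round trip through std.elf's accessors (the symbol index is invented; 7 is R_X86_64_JUMP_SLOT):

```zig
const std = @import("std");
const elf = std.elf;

pub fn main() void {
    const r_sym: u64 = 42; // index into .dynsym
    const r_type: u32 = 7; // R_X86_64_JUMP_SLOT
    const rela = elf.Elf64_Rela{
        .r_offset = 0x4010,
        .r_info = (r_sym << 32) | r_type,
        .r_addend = 0,
    };
    // r_sym() and r_type() unpack the same two fields.
    std.debug.print("sym={d} type={d}\n", .{ rela.r_sym(), rela.r_type() });
}
```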
@ -1612,7 +1616,7 @@ pub const ComdatGroupSection = struct {
fn writeInt(value: anytype, elf_file: *Elf, writer: anytype) !void {
const entry_size = elf_file.archPtrWidthBytes();
const target = elf_file.base.comp.root_mod.resolved_target.result;
const target = elf_file.getTarget();
const endian = target.cpu.arch.endian();
switch (entry_size) {
2 => try writer.writeInt(u16, @intCast(value), endian),
@ -1627,6 +1631,7 @@ const builtin = @import("builtin");
const elf = std.elf;
const mem = std.mem;
const log = std.log.scoped(.link);
const relocation = @import("relocation.zig");
const std = @import("std");
const Allocator = std.mem.Allocator;