Merge pull request #10451 from ziglang/cache-mode

stage2: introduce CacheMode
Andrew Kelley 2022-01-03 16:50:29 -05:00 committed by GitHub
commit 81fa31c054
45 changed files with 2046 additions and 1438 deletions

View File

@ -1361,7 +1361,7 @@ pub const Dir = struct {
.share_access = share_access,
.creation = creation,
.io_mode = .blocking,
.open_dir = true,
.filter = .dir_only,
}) catch |er| switch (er) {
error.WouldBlock => unreachable,
else => |e2| return e2,

View File

@ -401,7 +401,7 @@ pub fn Watch(comptime V: type) type {
.access_mask = windows.FILE_LIST_DIRECTORY,
.creation = windows.FILE_OPEN,
.io_mode = .evented,
.open_dir = true,
.filter = .dir_only,
});
errdefer windows.CloseHandle(dir_handle);

View File

@ -1353,7 +1353,7 @@ fn openOptionsFromFlags(flags: u32) windows.OpenFileOptions {
access_mask |= w.GENERIC_READ | w.GENERIC_WRITE;
}
const open_dir: bool = flags & O.DIRECTORY != 0;
const filter: windows.OpenFileOptions.Filter = if (flags & O.DIRECTORY != 0) .dir_only else .file_only;
const follow_symlinks: bool = flags & O.NOFOLLOW == 0;
const creation: w.ULONG = blk: {
@ -1369,7 +1369,7 @@ fn openOptionsFromFlags(flags: u32) windows.OpenFileOptions {
.access_mask = access_mask,
.io_mode = .blocking,
.creation = creation,
.open_dir = open_dir,
.filter = filter,
.follow_symlinks = follow_symlinks,
};
}
@ -2324,6 +2324,7 @@ pub fn renameatW(
.access_mask = windows.SYNCHRONIZE | windows.GENERIC_WRITE | windows.DELETE,
.creation = windows.FILE_OPEN,
.io_mode = .blocking,
.filter = .any, // This function is supposed to rename both files and directories.
}) catch |err| switch (err) {
error.WouldBlock => unreachable, // Not possible without `.share_access_nonblocking = true`.
else => |e| return e,
@ -2435,7 +2436,7 @@ pub fn mkdiratW(dir_fd: fd_t, sub_path_w: []const u16, mode: u32) MakeDirError!v
.access_mask = windows.GENERIC_READ | windows.SYNCHRONIZE,
.creation = windows.FILE_CREATE,
.io_mode = .blocking,
.open_dir = true,
.filter = .dir_only,
}) catch |err| switch (err) {
error.IsDir => unreachable,
error.PipeBusy => unreachable,
@ -2511,7 +2512,7 @@ pub fn mkdirW(dir_path_w: []const u16, mode: u32) MakeDirError!void {
.access_mask = windows.GENERIC_READ | windows.SYNCHRONIZE,
.creation = windows.FILE_CREATE,
.io_mode = .blocking,
.open_dir = true,
.filter = .dir_only,
}) catch |err| switch (err) {
error.IsDir => unreachable,
error.PipeBusy => unreachable,
@ -4693,7 +4694,7 @@ pub fn realpathW(pathname: []const u16, out_buffer: *[MAX_PATH_BYTES]u8) RealPat
.share_access = share_access,
.creation = creation,
.io_mode = .blocking,
.open_dir = true,
.filter = .dir_only,
}) catch |er| switch (er) {
error.WouldBlock => unreachable,
else => |e2| return e2,

View File

@ -53,17 +53,26 @@ pub const OpenFileOptions = struct {
io_mode: std.io.ModeOverride,
/// If true, tries to open path as a directory.
/// Defaults to false.
open_dir: bool = false,
filter: Filter = .file_only,
/// If false, tries to open path as a reparse point without dereferencing it.
/// Defaults to true.
follow_symlinks: bool = true,
pub const Filter = enum {
/// Causes `OpenFile` to return `error.IsDir` if the opened handle would be a directory.
file_only,
/// Causes `OpenFile` to return `error.NotDir` if the opened handle would be a file.
dir_only,
/// `OpenFile` does not discriminate between opening files and directories.
any,
};
};
pub fn OpenFile(sub_path_w: []const u16, options: OpenFileOptions) OpenError!HANDLE {
if (mem.eql(u16, sub_path_w, &[_]u16{'.'}) and !options.open_dir) {
if (mem.eql(u16, sub_path_w, &[_]u16{'.'}) and options.filter == .file_only) {
return error.IsDir;
}
if (mem.eql(u16, sub_path_w, &[_]u16{ '.', '.' }) and !options.open_dir) {
if (mem.eql(u16, sub_path_w, &[_]u16{ '.', '.' }) and options.filter == .file_only) {
return error.IsDir;
}
@ -87,7 +96,11 @@ pub fn OpenFile(sub_path_w: []const u16, options: OpenFileOptions) OpenError!HAN
};
var io: IO_STATUS_BLOCK = undefined;
const blocking_flag: ULONG = if (options.io_mode == .blocking) FILE_SYNCHRONOUS_IO_NONALERT else 0;
const file_or_dir_flag: ULONG = if (options.open_dir) FILE_DIRECTORY_FILE else FILE_NON_DIRECTORY_FILE;
const file_or_dir_flag: ULONG = switch (options.filter) {
.file_only => FILE_NON_DIRECTORY_FILE,
.dir_only => FILE_DIRECTORY_FILE,
.any => 0,
};
// If we're not following symlinks, we need to ensure we don't pass in any synchronization flags such as FILE_SYNCHRONOUS_IO_NONALERT.
const flags: ULONG = if (options.follow_symlinks) file_or_dir_flag | blocking_flag else file_or_dir_flag | FILE_OPEN_REPARSE_POINT;
@ -695,7 +708,7 @@ pub fn CreateSymbolicLink(
.dir = dir,
.creation = FILE_CREATE,
.io_mode = .blocking,
.open_dir = is_directory,
.filter = if (is_directory) .dir_only else .file_only,
}) catch |err| switch (err) {
error.IsDir => return error.PathAlreadyExists,
error.NotDir => unreachable,
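
Taken together, these call-site changes show the shape of the new API: the old boolean `open_dir` could only express file-vs-directory, while the `Filter` enum adds the third state that `renameatW` needs. A minimal sketch of a migrated call site, with options abbreviated and the surrounding context invented for illustration:

// Before: .open_dir = true (directories) or omitted (files); "either" was inexpressible.
// After: the intent is spelled out, and renameatW can say "any".
const handle = try windows.OpenFile(sub_path_w, .{
    .access_mask = windows.SYNCHRONIZE | windows.GENERIC_WRITE | windows.DELETE,
    .creation = windows.FILE_OPEN,
    .io_mode = .blocking,
    .filter = .any, // accept either a file or a directory handle
});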

File diff suppressed because it is too large.

View File

@ -2,31 +2,36 @@
// * @panic, if value can not be represented
// - absvXi4_generic for unoptimized version
fn absvXi_generic(comptime ST: type) fn (a: ST) callconv(.C) ST {
return struct {
fn f(a: ST) callconv(.C) ST {
const UT = switch (ST) {
i32 => u32,
i64 => u64,
i128 => u128,
else => unreachable,
};
// taken from Bit Twiddling Hacks
// compute the integer absolute value (abs) without branching
var x: ST = a;
const N: UT = @bitSizeOf(ST);
const sign: ST = a >> N - 1;
x +%= sign;
x ^= sign;
if (x < 0)
@panic("compiler_rt absv: overflow");
return x;
}
}.f;
inline fn absvXi(comptime ST: type, a: ST) ST {
const UT = switch (ST) {
i32 => u32,
i64 => u64,
i128 => u128,
else => unreachable,
};
// taken from Bit Twiddling Hacks
// compute the integer absolute value (abs) without branching
var x: ST = a;
const N: UT = @bitSizeOf(ST);
const sign: ST = a >> N - 1;
x +%= sign;
x ^= sign;
if (x < 0)
@panic("compiler_rt absv: overflow");
return x;
}
pub fn __absvsi2(a: i32) callconv(.C) i32 {
return absvXi(i32, a);
}
pub fn __absvdi2(a: i64) callconv(.C) i64 {
return absvXi(i64, a);
}
pub fn __absvti2(a: i128) callconv(.C) i128 {
return absvXi(i128, a);
}
pub const __absvsi2 = absvXi_generic(i32);
pub const __absvdi2 = absvXi_generic(i64);
pub const __absvti2 = absvXi_generic(i128);
test {
_ = @import("absvsi2_test.zig");

View File

@ -119,225 +119,311 @@ fn __atomic_compare_exchange(
return 0;
}
// Specialized versions of the GCC atomic builtin functions.
// LLVM emits those iff the object size is known and the pointers are correctly
// aligned.
inline fn atomic_load_N(comptime T: type, src: *T, model: i32) T {
_ = model;
if (@sizeOf(T) > largest_atomic_size) {
var sl = spinlocks.get(@ptrToInt(src));
defer sl.release();
return src.*;
} else {
return @atomicLoad(T, src, .SeqCst);
}
}
fn __atomic_load_1(src: *u8, model: i32) callconv(.C) u8 {
return atomic_load_N(u8, src, model);
}
fn __atomic_load_2(src: *u16, model: i32) callconv(.C) u16 {
return atomic_load_N(u16, src, model);
}
fn __atomic_load_4(src: *u32, model: i32) callconv(.C) u32 {
return atomic_load_N(u32, src, model);
}
fn __atomic_load_8(src: *u64, model: i32) callconv(.C) u64 {
return atomic_load_N(u64, src, model);
}
inline fn atomic_store_N(comptime T: type, dst: *T, value: T, model: i32) void {
_ = model;
if (@sizeOf(T) > largest_atomic_size) {
var sl = spinlocks.get(@ptrToInt(dst));
defer sl.release();
dst.* = value;
} else {
@atomicStore(T, dst, value, .SeqCst);
}
}
fn __atomic_store_1(dst: *u8, value: u8, model: i32) callconv(.C) void {
return atomic_store_N(u8, dst, value, model);
}
fn __atomic_store_2(dst: *u16, value: u16, model: i32) callconv(.C) void {
return atomic_store_N(u16, dst, value, model);
}
fn __atomic_store_4(dst: *u32, value: u32, model: i32) callconv(.C) void {
return atomic_store_N(u32, dst, value, model);
}
fn __atomic_store_8(dst: *u64, value: u64, model: i32) callconv(.C) void {
return atomic_store_N(u64, dst, value, model);
}
inline fn atomic_exchange_N(comptime T: type, ptr: *T, val: T, model: i32) T {
_ = model;
if (@sizeOf(T) > largest_atomic_size) {
var sl = spinlocks.get(@ptrToInt(ptr));
defer sl.release();
const value = ptr.*;
ptr.* = val;
return value;
} else {
return @atomicRmw(T, ptr, .Xchg, val, .SeqCst);
}
}
fn __atomic_exchange_1(ptr: *u8, val: u8, model: i32) callconv(.C) u8 {
return atomic_exchange_N(u8, ptr, val, model);
}
fn __atomic_exchange_2(ptr: *u16, val: u16, model: i32) callconv(.C) u16 {
return atomic_exchange_N(u16, ptr, val, model);
}
fn __atomic_exchange_4(ptr: *u32, val: u32, model: i32) callconv(.C) u32 {
return atomic_exchange_N(u32, ptr, val, model);
}
fn __atomic_exchange_8(ptr: *u64, val: u64, model: i32) callconv(.C) u64 {
return atomic_exchange_N(u64, ptr, val, model);
}
inline fn atomic_compare_exchange_N(
comptime T: type,
ptr: *T,
expected: *T,
desired: T,
success: i32,
failure: i32,
) i32 {
_ = success;
_ = failure;
if (@sizeOf(T) > largest_atomic_size) {
var sl = spinlocks.get(@ptrToInt(ptr));
defer sl.release();
const value = ptr.*;
if (value == expected.*) {
ptr.* = desired;
return 1;
}
expected.* = value;
return 0;
} else {
if (@cmpxchgStrong(T, ptr, expected.*, desired, .SeqCst, .SeqCst)) |old_value| {
expected.* = old_value;
return 0;
}
return 1;
}
}
fn __atomic_compare_exchange_1(ptr: *u8, expected: *u8, desired: u8, success: i32, failure: i32) callconv(.C) i32 {
return atomic_compare_exchange_N(u8, ptr, expected, desired, success, failure);
}
fn __atomic_compare_exchange_2(ptr: *u16, expected: *u16, desired: u16, success: i32, failure: i32) callconv(.C) i32 {
return atomic_compare_exchange_N(u16, ptr, expected, desired, success, failure);
}
fn __atomic_compare_exchange_4(ptr: *u32, expected: *u32, desired: u32, success: i32, failure: i32) callconv(.C) i32 {
return atomic_compare_exchange_N(u32, ptr, expected, desired, success, failure);
}
fn __atomic_compare_exchange_8(ptr: *u64, expected: *u64, desired: u64, success: i32, failure: i32) callconv(.C) i32 {
return atomic_compare_exchange_N(u64, ptr, expected, desired, success, failure);
}
inline fn fetch_op_N(comptime T: type, comptime op: std.builtin.AtomicRmwOp, ptr: *T, val: T, model: i32) T {
_ = model;
if (@sizeOf(T) > largest_atomic_size) {
var sl = spinlocks.get(@ptrToInt(ptr));
defer sl.release();
const value = ptr.*;
ptr.* = switch (op) {
.Add => value +% val,
.Sub => value -% val,
.And => value & val,
.Nand => ~(value & val),
.Or => value | val,
.Xor => value ^ val,
else => @compileError("unsupported atomic op"),
};
return value;
}
return @atomicRmw(T, ptr, op, val, .SeqCst);
}
fn __atomic_fetch_add_1(ptr: *u8, val: u8, model: i32) callconv(.C) u8 {
return fetch_op_N(u8, .Add, ptr, val, model);
}
fn __atomic_fetch_add_2(ptr: *u16, val: u16, model: i32) callconv(.C) u16 {
return fetch_op_N(u16, .Add, ptr, val, model);
}
fn __atomic_fetch_add_4(ptr: *u32, val: u32, model: i32) callconv(.C) u32 {
return fetch_op_N(u32, .Add, ptr, val, model);
}
fn __atomic_fetch_add_8(ptr: *u64, val: u64, model: i32) callconv(.C) u64 {
return fetch_op_N(u64, .Add, ptr, val, model);
}
fn __atomic_fetch_sub_1(ptr: *u8, val: u8, model: i32) callconv(.C) u8 {
return fetch_op_N(u8, .Sub, ptr, val, model);
}
fn __atomic_fetch_sub_2(ptr: *u16, val: u16, model: i32) callconv(.C) u16 {
return fetch_op_N(u16, .Sub, ptr, val, model);
}
fn __atomic_fetch_sub_4(ptr: *u32, val: u32, model: i32) callconv(.C) u32 {
return fetch_op_N(u32, .Sub, ptr, val, model);
}
fn __atomic_fetch_sub_8(ptr: *u64, val: u64, model: i32) callconv(.C) u64 {
return fetch_op_N(u64, .Sub, ptr, val, model);
}
fn __atomic_fetch_and_1(ptr: *u8, val: u8, model: i32) callconv(.C) u8 {
return fetch_op_N(u8, .And, ptr, val, model);
}
fn __atomic_fetch_and_2(ptr: *u16, val: u16, model: i32) callconv(.C) u16 {
return fetch_op_N(u16, .And, ptr, val, model);
}
fn __atomic_fetch_and_4(ptr: *u32, val: u32, model: i32) callconv(.C) u32 {
return fetch_op_N(u32, .And, ptr, val, model);
}
fn __atomic_fetch_and_8(ptr: *u64, val: u64, model: i32) callconv(.C) u64 {
return fetch_op_N(u64, .And, ptr, val, model);
}
fn __atomic_fetch_or_1(ptr: *u8, val: u8, model: i32) callconv(.C) u8 {
return fetch_op_N(u8, .Or, ptr, val, model);
}
fn __atomic_fetch_or_2(ptr: *u16, val: u16, model: i32) callconv(.C) u16 {
return fetch_op_N(u16, .Or, ptr, val, model);
}
fn __atomic_fetch_or_4(ptr: *u32, val: u32, model: i32) callconv(.C) u32 {
return fetch_op_N(u32, .Or, ptr, val, model);
}
fn __atomic_fetch_or_8(ptr: *u64, val: u64, model: i32) callconv(.C) u64 {
return fetch_op_N(u64, .Or, ptr, val, model);
}
fn __atomic_fetch_xor_1(ptr: *u8, val: u8, model: i32) callconv(.C) u8 {
return fetch_op_N(u8, .Xor, ptr, val, model);
}
fn __atomic_fetch_xor_2(ptr: *u16, val: u16, model: i32) callconv(.C) u16 {
return fetch_op_N(u16, .Xor, ptr, val, model);
}
fn __atomic_fetch_xor_4(ptr: *u32, val: u32, model: i32) callconv(.C) u32 {
return fetch_op_N(u32, .Xor, ptr, val, model);
}
fn __atomic_fetch_xor_8(ptr: *u64, val: u64, model: i32) callconv(.C) u64 {
return fetch_op_N(u64, .Xor, ptr, val, model);
}
fn __atomic_fetch_nand_1(ptr: *u8, val: u8, model: i32) callconv(.C) u8 {
return fetch_op_N(u8, .Nand, ptr, val, model);
}
fn __atomic_fetch_nand_2(ptr: *u16, val: u16, model: i32) callconv(.C) u16 {
return fetch_op_N(u16, .Nand, ptr, val, model);
}
fn __atomic_fetch_nand_4(ptr: *u32, val: u32, model: i32) callconv(.C) u32 {
return fetch_op_N(u32, .Nand, ptr, val, model);
}
fn __atomic_fetch_nand_8(ptr: *u64, val: u64, model: i32) callconv(.C) u64 {
return fetch_op_N(u64, .Nand, ptr, val, model);
}
comptime {
if (supports_atomic_ops) {
@export(__atomic_load, .{ .name = "__atomic_load", .linkage = linkage });
@export(__atomic_store, .{ .name = "__atomic_store", .linkage = linkage });
@export(__atomic_exchange, .{ .name = "__atomic_exchange", .linkage = linkage });
@export(__atomic_compare_exchange, .{ .name = "__atomic_compare_exchange", .linkage = linkage });
}
}
// Specialized versions of the GCC atomic builtin functions.
// LLVM emits those iff the object size is known and the pointers are correctly
// aligned.
fn atomicLoadFn(comptime T: type) fn (*T, i32) callconv(.C) T {
return struct {
fn atomic_load_N(src: *T, model: i32) callconv(.C) T {
_ = model;
if (@sizeOf(T) > largest_atomic_size) {
var sl = spinlocks.get(@ptrToInt(src));
defer sl.release();
return src.*;
} else {
return @atomicLoad(T, src, .SeqCst);
}
}
}.atomic_load_N;
}
comptime {
if (supports_atomic_ops) {
const atomicLoad_u8 = atomicLoadFn(u8);
const atomicLoad_u16 = atomicLoadFn(u16);
const atomicLoad_u32 = atomicLoadFn(u32);
const atomicLoad_u64 = atomicLoadFn(u64);
@export(atomicLoad_u8, .{ .name = "__atomic_load_1", .linkage = linkage });
@export(atomicLoad_u16, .{ .name = "__atomic_load_2", .linkage = linkage });
@export(atomicLoad_u32, .{ .name = "__atomic_load_4", .linkage = linkage });
@export(atomicLoad_u64, .{ .name = "__atomic_load_8", .linkage = linkage });
}
}
fn atomicStoreFn(comptime T: type) fn (*T, T, i32) callconv(.C) void {
return struct {
fn atomic_store_N(dst: *T, value: T, model: i32) callconv(.C) void {
_ = model;
if (@sizeOf(T) > largest_atomic_size) {
var sl = spinlocks.get(@ptrToInt(dst));
defer sl.release();
dst.* = value;
} else {
@atomicStore(T, dst, value, .SeqCst);
}
}
}.atomic_store_N;
}
comptime {
if (supports_atomic_ops) {
const atomicStore_u8 = atomicStoreFn(u8);
const atomicStore_u16 = atomicStoreFn(u16);
const atomicStore_u32 = atomicStoreFn(u32);
const atomicStore_u64 = atomicStoreFn(u64);
@export(atomicStore_u8, .{ .name = "__atomic_store_1", .linkage = linkage });
@export(atomicStore_u16, .{ .name = "__atomic_store_2", .linkage = linkage });
@export(atomicStore_u32, .{ .name = "__atomic_store_4", .linkage = linkage });
@export(atomicStore_u64, .{ .name = "__atomic_store_8", .linkage = linkage });
}
}
fn atomicExchangeFn(comptime T: type) fn (*T, T, i32) callconv(.C) T {
return struct {
fn atomic_exchange_N(ptr: *T, val: T, model: i32) callconv(.C) T {
_ = model;
if (@sizeOf(T) > largest_atomic_size) {
var sl = spinlocks.get(@ptrToInt(ptr));
defer sl.release();
const value = ptr.*;
ptr.* = val;
return value;
} else {
return @atomicRmw(T, ptr, .Xchg, val, .SeqCst);
}
}
}.atomic_exchange_N;
}
comptime {
if (supports_atomic_ops) {
const atomicExchange_u8 = atomicExchangeFn(u8);
const atomicExchange_u16 = atomicExchangeFn(u16);
const atomicExchange_u32 = atomicExchangeFn(u32);
const atomicExchange_u64 = atomicExchangeFn(u64);
@export(atomicExchange_u8, .{ .name = "__atomic_exchange_1", .linkage = linkage });
@export(atomicExchange_u16, .{ .name = "__atomic_exchange_2", .linkage = linkage });
@export(atomicExchange_u32, .{ .name = "__atomic_exchange_4", .linkage = linkage });
@export(atomicExchange_u64, .{ .name = "__atomic_exchange_8", .linkage = linkage });
}
}
fn atomicCompareExchangeFn(comptime T: type) fn (*T, *T, T, i32, i32) callconv(.C) i32 {
return struct {
fn atomic_compare_exchange_N(ptr: *T, expected: *T, desired: T, success: i32, failure: i32) callconv(.C) i32 {
_ = success;
_ = failure;
if (@sizeOf(T) > largest_atomic_size) {
var sl = spinlocks.get(@ptrToInt(ptr));
defer sl.release();
const value = ptr.*;
if (value == expected.*) {
ptr.* = desired;
return 1;
}
expected.* = value;
return 0;
} else {
if (@cmpxchgStrong(T, ptr, expected.*, desired, .SeqCst, .SeqCst)) |old_value| {
expected.* = old_value;
return 0;
}
return 1;
}
}
}.atomic_compare_exchange_N;
}
comptime {
if (supports_atomic_ops) {
const atomicCompareExchange_u8 = atomicCompareExchangeFn(u8);
const atomicCompareExchange_u16 = atomicCompareExchangeFn(u16);
const atomicCompareExchange_u32 = atomicCompareExchangeFn(u32);
const atomicCompareExchange_u64 = atomicCompareExchangeFn(u64);
@export(atomicCompareExchange_u8, .{ .name = "__atomic_compare_exchange_1", .linkage = linkage });
@export(atomicCompareExchange_u16, .{ .name = "__atomic_compare_exchange_2", .linkage = linkage });
@export(atomicCompareExchange_u32, .{ .name = "__atomic_compare_exchange_4", .linkage = linkage });
@export(atomicCompareExchange_u64, .{ .name = "__atomic_compare_exchange_8", .linkage = linkage });
}
}
fn fetchFn(comptime T: type, comptime op: std.builtin.AtomicRmwOp) fn (*T, T, i32) callconv(.C) T {
return struct {
pub fn fetch_op_N(ptr: *T, val: T, model: i32) callconv(.C) T {
_ = model;
if (@sizeOf(T) > largest_atomic_size) {
var sl = spinlocks.get(@ptrToInt(ptr));
defer sl.release();
const value = ptr.*;
ptr.* = switch (op) {
.Add => value +% val,
.Sub => value -% val,
.And => value & val,
.Nand => ~(value & val),
.Or => value | val,
.Xor => value ^ val,
else => @compileError("unsupported atomic op"),
};
return value;
}
return @atomicRmw(T, ptr, op, val, .SeqCst);
}
}.fetch_op_N;
}
comptime {
if (supports_atomic_ops) {
const fetch_add_u8 = fetchFn(u8, .Add);
const fetch_add_u16 = fetchFn(u16, .Add);
const fetch_add_u32 = fetchFn(u32, .Add);
const fetch_add_u64 = fetchFn(u64, .Add);
@export(fetch_add_u8, .{ .name = "__atomic_fetch_add_1", .linkage = linkage });
@export(fetch_add_u16, .{ .name = "__atomic_fetch_add_2", .linkage = linkage });
@export(fetch_add_u32, .{ .name = "__atomic_fetch_add_4", .linkage = linkage });
@export(fetch_add_u64, .{ .name = "__atomic_fetch_add_8", .linkage = linkage });
const fetch_sub_u8 = fetchFn(u8, .Sub);
const fetch_sub_u16 = fetchFn(u16, .Sub);
const fetch_sub_u32 = fetchFn(u32, .Sub);
const fetch_sub_u64 = fetchFn(u64, .Sub);
@export(fetch_sub_u8, .{ .name = "__atomic_fetch_sub_1", .linkage = linkage });
@export(fetch_sub_u16, .{ .name = "__atomic_fetch_sub_2", .linkage = linkage });
@export(fetch_sub_u32, .{ .name = "__atomic_fetch_sub_4", .linkage = linkage });
@export(fetch_sub_u64, .{ .name = "__atomic_fetch_sub_8", .linkage = linkage });
const fetch_and_u8 = fetchFn(u8, .And);
const fetch_and_u16 = fetchFn(u16, .And);
const fetch_and_u32 = fetchFn(u32, .And);
const fetch_and_u64 = fetchFn(u64, .And);
@export(fetch_and_u8, .{ .name = "__atomic_fetch_and_1", .linkage = linkage });
@export(fetch_and_u16, .{ .name = "__atomic_fetch_and_2", .linkage = linkage });
@export(fetch_and_u32, .{ .name = "__atomic_fetch_and_4", .linkage = linkage });
@export(fetch_and_u64, .{ .name = "__atomic_fetch_and_8", .linkage = linkage });
const fetch_or_u8 = fetchFn(u8, .Or);
const fetch_or_u16 = fetchFn(u16, .Or);
const fetch_or_u32 = fetchFn(u32, .Or);
const fetch_or_u64 = fetchFn(u64, .Or);
@export(fetch_or_u8, .{ .name = "__atomic_fetch_or_1", .linkage = linkage });
@export(fetch_or_u16, .{ .name = "__atomic_fetch_or_2", .linkage = linkage });
@export(fetch_or_u32, .{ .name = "__atomic_fetch_or_4", .linkage = linkage });
@export(fetch_or_u64, .{ .name = "__atomic_fetch_or_8", .linkage = linkage });
const fetch_xor_u8 = fetchFn(u8, .Xor);
const fetch_xor_u16 = fetchFn(u16, .Xor);
const fetch_xor_u32 = fetchFn(u32, .Xor);
const fetch_xor_u64 = fetchFn(u64, .Xor);
@export(fetch_xor_u8, .{ .name = "__atomic_fetch_xor_1", .linkage = linkage });
@export(fetch_xor_u16, .{ .name = "__atomic_fetch_xor_2", .linkage = linkage });
@export(fetch_xor_u32, .{ .name = "__atomic_fetch_xor_4", .linkage = linkage });
@export(fetch_xor_u64, .{ .name = "__atomic_fetch_xor_8", .linkage = linkage });
const fetch_nand_u8 = fetchFn(u8, .Nand);
const fetch_nand_u16 = fetchFn(u16, .Nand);
const fetch_nand_u32 = fetchFn(u32, .Nand);
const fetch_nand_u64 = fetchFn(u64, .Nand);
@export(fetch_nand_u8, .{ .name = "__atomic_fetch_nand_1", .linkage = linkage });
@export(fetch_nand_u16, .{ .name = "__atomic_fetch_nand_2", .linkage = linkage });
@export(fetch_nand_u32, .{ .name = "__atomic_fetch_nand_4", .linkage = linkage });
@export(fetch_nand_u64, .{ .name = "__atomic_fetch_nand_8", .linkage = linkage });
@export(__atomic_fetch_add_1, .{ .name = "__atomic_fetch_add_1", .linkage = linkage });
@export(__atomic_fetch_add_2, .{ .name = "__atomic_fetch_add_2", .linkage = linkage });
@export(__atomic_fetch_add_4, .{ .name = "__atomic_fetch_add_4", .linkage = linkage });
@export(__atomic_fetch_add_8, .{ .name = "__atomic_fetch_add_8", .linkage = linkage });
@export(__atomic_fetch_sub_1, .{ .name = "__atomic_fetch_sub_1", .linkage = linkage });
@export(__atomic_fetch_sub_2, .{ .name = "__atomic_fetch_sub_2", .linkage = linkage });
@export(__atomic_fetch_sub_4, .{ .name = "__atomic_fetch_sub_4", .linkage = linkage });
@export(__atomic_fetch_sub_8, .{ .name = "__atomic_fetch_sub_8", .linkage = linkage });
@export(__atomic_fetch_and_1, .{ .name = "__atomic_fetch_and_1", .linkage = linkage });
@export(__atomic_fetch_and_2, .{ .name = "__atomic_fetch_and_2", .linkage = linkage });
@export(__atomic_fetch_and_4, .{ .name = "__atomic_fetch_and_4", .linkage = linkage });
@export(__atomic_fetch_and_8, .{ .name = "__atomic_fetch_and_8", .linkage = linkage });
@export(__atomic_fetch_or_1, .{ .name = "__atomic_fetch_or_1", .linkage = linkage });
@export(__atomic_fetch_or_2, .{ .name = "__atomic_fetch_or_2", .linkage = linkage });
@export(__atomic_fetch_or_4, .{ .name = "__atomic_fetch_or_4", .linkage = linkage });
@export(__atomic_fetch_or_8, .{ .name = "__atomic_fetch_or_8", .linkage = linkage });
@export(__atomic_fetch_xor_1, .{ .name = "__atomic_fetch_xor_1", .linkage = linkage });
@export(__atomic_fetch_xor_2, .{ .name = "__atomic_fetch_xor_2", .linkage = linkage });
@export(__atomic_fetch_xor_4, .{ .name = "__atomic_fetch_xor_4", .linkage = linkage });
@export(__atomic_fetch_xor_8, .{ .name = "__atomic_fetch_xor_8", .linkage = linkage });
@export(__atomic_fetch_nand_1, .{ .name = "__atomic_fetch_nand_1", .linkage = linkage });
@export(__atomic_fetch_nand_2, .{ .name = "__atomic_fetch_nand_2", .linkage = linkage });
@export(__atomic_fetch_nand_4, .{ .name = "__atomic_fetch_nand_4", .linkage = linkage });
@export(__atomic_fetch_nand_8, .{ .name = "__atomic_fetch_nand_8", .linkage = linkage });
@export(__atomic_load_1, .{ .name = "__atomic_load_1", .linkage = linkage });
@export(__atomic_load_2, .{ .name = "__atomic_load_2", .linkage = linkage });
@export(__atomic_load_4, .{ .name = "__atomic_load_4", .linkage = linkage });
@export(__atomic_load_8, .{ .name = "__atomic_load_8", .linkage = linkage });
@export(__atomic_store_1, .{ .name = "__atomic_store_1", .linkage = linkage });
@export(__atomic_store_2, .{ .name = "__atomic_store_2", .linkage = linkage });
@export(__atomic_store_4, .{ .name = "__atomic_store_4", .linkage = linkage });
@export(__atomic_store_8, .{ .name = "__atomic_store_8", .linkage = linkage });
@export(__atomic_exchange_1, .{ .name = "__atomic_exchange_1", .linkage = linkage });
@export(__atomic_exchange_2, .{ .name = "__atomic_exchange_2", .linkage = linkage });
@export(__atomic_exchange_4, .{ .name = "__atomic_exchange_4", .linkage = linkage });
@export(__atomic_exchange_8, .{ .name = "__atomic_exchange_8", .linkage = linkage });
@export(__atomic_compare_exchange_1, .{ .name = "__atomic_compare_exchange_1", .linkage = linkage });
@export(__atomic_compare_exchange_2, .{ .name = "__atomic_compare_exchange_2", .linkage = linkage });
@export(__atomic_compare_exchange_4, .{ .name = "__atomic_compare_exchange_4", .linkage = linkage });
@export(__atomic_compare_exchange_8, .{ .name = "__atomic_compare_exchange_8", .linkage = linkage });
}
}
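
The mechanical change in this file is the move from named `__atomic_*` functions to comptime factories (`atomicLoadFn`, `fetchFn`, ...) whose results are exported in `comptime` blocks, keeping the exports conditional on `supports_atomic_ops`. The underlying trick is that a function returning a function can stamp out one concrete `callconv(.C)` body per type at compile time. A self-contained sketch of the same pattern, with invented names (`.linkage` shown explicitly where the real file uses its `linkage` constant):

fn addFn(comptime T: type) fn (T, T) callconv(.C) T {
    // The anonymous struct is how Zig materializes a new concrete
    // function inside a comptime factory.
    return struct {
        fn add(a: T, b: T) callconv(.C) T {
            return a +% b;
        }
    }.add;
}

comptime {
    const add_u8 = addFn(u8);
    const add_u32 = addFn(u32);
    @export(add_u8, .{ .name = "__example_add_1", .linkage = .Strong });
    @export(add_u32, .{ .name = "__example_add_4", .linkage = .Strong });
}

Note the asymmetry with the other compiler_rt files in this PR: files with only a handful of symbols move the opposite way (plain `inline fn` plus short `pub fn` wrappers), while the factory form is used here, where the atomics expand to dozens of symbol/width combinations.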

View File

@ -2,7 +2,7 @@ const std = @import("std");
const builtin = @import("builtin");
// bswap - byteswap
// - bswapXi2_generic for unoptimized big and little endian
// - bswapXi2 for unoptimized big and little endian
// ie for u32
// DE AD BE EF <- little|big endian
// EF BE AD DE <- big|little endian
@ -11,64 +11,64 @@ const builtin = @import("builtin");
// 00 00 ff 00 << 1*8 (2nd right byte)
// 00 00 00 ff << 3*8 (rightmost byte)
fn bswapXi2_generic(comptime T: type) fn (a: T) callconv(.C) T {
return struct {
fn f(a: T) callconv(.C) T {
@setRuntimeSafety(builtin.is_test);
switch (@bitSizeOf(T)) {
32 => {
// zig fmt: off
return (((a & 0xff000000) >> 24)
| ((a & 0x00ff0000) >> 8 )
| ((a & 0x0000ff00) << 8 )
| ((a & 0x000000ff) << 24));
// zig fmt: on
},
64 => {
// zig fmt: off
return (((a & 0xff00000000000000) >> 56)
| ((a & 0x00ff000000000000) >> 40 )
| ((a & 0x0000ff0000000000) >> 24 )
| ((a & 0x000000ff00000000) >> 8 )
| ((a & 0x00000000ff000000) << 8 )
| ((a & 0x0000000000ff0000) << 24 )
| ((a & 0x000000000000ff00) << 40 )
| ((a & 0x00000000000000ff) << 56));
// zig fmt: on
},
128 => {
// zig fmt: off
return (((a & 0xff000000000000000000000000000000) >> 120)
| ((a & 0x00ff0000000000000000000000000000) >> 104)
| ((a & 0x0000ff00000000000000000000000000) >> 88 )
| ((a & 0x000000ff000000000000000000000000) >> 72 )
| ((a & 0x00000000ff0000000000000000000000) >> 56 )
| ((a & 0x0000000000ff00000000000000000000) >> 40 )
| ((a & 0x000000000000ff000000000000000000) >> 24 )
| ((a & 0x00000000000000ff0000000000000000) >> 8 )
| ((a & 0x0000000000000000ff00000000000000) << 8 )
| ((a & 0x000000000000000000ff000000000000) << 24 )
| ((a & 0x00000000000000000000ff0000000000) << 40 )
| ((a & 0x0000000000000000000000ff00000000) << 56 )
| ((a & 0x000000000000000000000000ff000000) << 72 )
| ((a & 0x00000000000000000000000000ff0000) << 88 )
| ((a & 0x0000000000000000000000000000ff00) << 104)
| ((a & 0x000000000000000000000000000000ff) << 120));
// zig fmt: on
},
else => {
unreachable;
},
}
}
}.f;
inline fn bswapXi2(comptime T: type, a: T) T {
@setRuntimeSafety(builtin.is_test);
switch (@bitSizeOf(T)) {
32 => {
// zig fmt: off
return (((a & 0xff000000) >> 24)
| ((a & 0x00ff0000) >> 8 )
| ((a & 0x0000ff00) << 8 )
| ((a & 0x000000ff) << 24));
// zig fmt: on
},
64 => {
// zig fmt: off
return (((a & 0xff00000000000000) >> 56)
| ((a & 0x00ff000000000000) >> 40 )
| ((a & 0x0000ff0000000000) >> 24 )
| ((a & 0x000000ff00000000) >> 8 )
| ((a & 0x00000000ff000000) << 8 )
| ((a & 0x0000000000ff0000) << 24 )
| ((a & 0x000000000000ff00) << 40 )
| ((a & 0x00000000000000ff) << 56));
// zig fmt: on
},
128 => {
// zig fmt: off
return (((a & 0xff000000000000000000000000000000) >> 120)
| ((a & 0x00ff0000000000000000000000000000) >> 104)
| ((a & 0x0000ff00000000000000000000000000) >> 88 )
| ((a & 0x000000ff000000000000000000000000) >> 72 )
| ((a & 0x00000000ff0000000000000000000000) >> 56 )
| ((a & 0x0000000000ff00000000000000000000) >> 40 )
| ((a & 0x000000000000ff000000000000000000) >> 24 )
| ((a & 0x00000000000000ff0000000000000000) >> 8 )
| ((a & 0x0000000000000000ff00000000000000) << 8 )
| ((a & 0x000000000000000000ff000000000000) << 24 )
| ((a & 0x00000000000000000000ff0000000000) << 40 )
| ((a & 0x0000000000000000000000ff00000000) << 56 )
| ((a & 0x000000000000000000000000ff000000) << 72 )
| ((a & 0x00000000000000000000000000ff0000) << 88 )
| ((a & 0x0000000000000000000000000000ff00) << 104)
| ((a & 0x000000000000000000000000000000ff) << 120));
// zig fmt: on
},
else => unreachable,
}
}
pub const __bswapsi2 = bswapXi2_generic(u32);
pub fn __bswapsi2(a: u32) callconv(.C) u32 {
return bswapXi2(u32, a);
}
pub const __bswapdi2 = bswapXi2_generic(u64);
pub fn __bswapdi2(a: u64) callconv(.C) u64 {
return bswapXi2(u64, a);
}
pub const __bswapti2 = bswapXi2_generic(u128);
pub fn __bswapti2(a: u128) callconv(.C) u128 {
return bswapXi2(u128, a);
}
test {
_ = @import("bswapsi2_test.zig");

View File

@ -11,28 +11,40 @@ const builtin = @import("builtin");
// a == b => 1
// a > b => 2
fn XcmpXi2_generic(comptime T: type) fn (a: T, b: T) callconv(.C) i32 {
return struct {
fn f(a: T, b: T) callconv(.C) i32 {
@setRuntimeSafety(builtin.is_test);
var cmp1: i32 = 0;
var cmp2: i32 = 0;
if (a > b)
cmp1 = 1;
if (a < b)
cmp2 = 1;
return cmp1 - cmp2 + 1;
}
}.f;
inline fn XcmpXi2(comptime T: type, a: T, b: T) i32 {
@setRuntimeSafety(builtin.is_test);
var cmp1: i32 = 0;
var cmp2: i32 = 0;
if (a > b)
cmp1 = 1;
if (a < b)
cmp2 = 1;
return cmp1 - cmp2 + 1;
}
pub const __cmpsi2 = XcmpXi2_generic(i32);
pub const __cmpdi2 = XcmpXi2_generic(i64);
pub const __cmpti2 = XcmpXi2_generic(i128);
pub const __ucmpsi2 = XcmpXi2_generic(u32);
pub const __ucmpdi2 = XcmpXi2_generic(u64);
pub const __ucmpti2 = XcmpXi2_generic(u128);
pub fn __cmpsi2(a: i32, b: i32) callconv(.C) i32 {
return XcmpXi2(i32, a, b);
}
pub fn __cmpdi2(a: i64, b: i64) callconv(.C) i32 {
return XcmpXi2(i64, a, b);
}
pub fn __cmpti2(a: i128, b: i128) callconv(.C) i32 {
return XcmpXi2(i128, a, b);
}
pub fn __ucmpsi2(a: u32, b: u32) callconv(.C) i32 {
return XcmpXi2(u32, a, b);
}
pub fn __ucmpdi2(a: u64, b: u64) callconv(.C) i32 {
return XcmpXi2(u64, a, b);
}
pub fn __ucmpti2(a: u128, b: u128) callconv(.C) i32 {
return XcmpXi2(u128, a, b);
}
test {
_ = @import("cmpsi2_test.zig");

View File

@ -2,44 +2,40 @@ const std = @import("std");
const builtin = @import("builtin");
// clz - count leading zeroes
// - clzXi2_generic for unoptimized little and big endian
// - clzXi2 for unoptimized little and big endian
// - __clzsi2_thumb1: assume a != 0
// - __clzsi2_arm32: assume a != 0
// ctz - count trailing zeroes
// - ctzXi2_generic for unoptimized little and big endian
// - ctzXi2 for unoptimized little and big endian
// ffs - find first set
// * ffs = (a == 0) => 0, (a != 0) => ctz + 1
// * dont pay for `if (x == 0) return shift;` inside ctz
// - ffsXi2_generic for unoptimized little and big endian
// - ffsXi2 for unoptimized little and big endian
fn clzXi2_generic(comptime T: type) fn (a: T) callconv(.C) i32 {
return struct {
fn f(a: T) callconv(.C) i32 {
@setRuntimeSafety(builtin.is_test);
var x = switch (@bitSizeOf(T)) {
32 => @bitCast(u32, a),
64 => @bitCast(u64, a),
128 => @bitCast(u128, a),
else => unreachable,
};
var n: T = @bitSizeOf(T);
// Count first bit set using binary search, from Hacker's Delight
var y: @TypeOf(x) = 0;
comptime var shift: u8 = @bitSizeOf(T);
inline while (shift > 0) {
shift = shift >> 1;
y = x >> shift;
if (y != 0) {
n = n - shift;
x = y;
}
}
return @intCast(i32, n - @bitCast(T, x));
}
}.f;
}
inline fn clzXi2(comptime T: type, a: T) i32 {
@setRuntimeSafety(builtin.is_test);
var x = switch (@bitSizeOf(T)) {
32 => @bitCast(u32, a),
64 => @bitCast(u64, a),
128 => @bitCast(u128, a),
else => unreachable,
};
var n: T = @bitSizeOf(T);
// Count first bit set using binary search, from Hacker's Delight
var y: @TypeOf(x) = 0;
comptime var shift: u8 = @bitSizeOf(T);
inline while (shift > 0) {
shift = shift >> 1;
y = x >> shift;
if (y != 0) {
n = n - shift;
x = y;
}
}
return @intCast(i32, n - @bitCast(T, x));
}
fn __clzsi2_thumb1() callconv(.Naked) void {
@ -125,103 +121,113 @@ fn __clzsi2_arm32() callconv(.Naked) void {
unreachable;
}
pub const __clzsi2 = impl: {
switch (builtin.cpu.arch) {
.arm, .armeb, .thumb, .thumbeb => {
const use_thumb1 =
(builtin.cpu.arch.isThumb() or
std.Target.arm.featureSetHas(builtin.cpu.features, .noarm)) and
!std.Target.arm.featureSetHas(builtin.cpu.features, .thumb2);
if (use_thumb1) {
break :impl __clzsi2_thumb1;
}
// From here on we're either targeting Thumb2 or ARM.
else if (!builtin.cpu.arch.isThumb()) {
break :impl __clzsi2_arm32;
}
// Use the generic implementation otherwise.
else break :impl clzXi2_generic(i32);
},
else => break :impl clzXi2_generic(i32),
}
};
fn clzsi2_generic(a: i32) callconv(.C) i32 {
return clzXi2(i32, a);
}
pub const __clzsi2 = switch (builtin.cpu.arch) {
.arm, .armeb, .thumb, .thumbeb => impl: {
const use_thumb1 =
(builtin.cpu.arch.isThumb() or
std.Target.arm.featureSetHas(builtin.cpu.features, .noarm)) and
!std.Target.arm.featureSetHas(builtin.cpu.features, .thumb2);
if (use_thumb1) {
break :impl __clzsi2_thumb1;
}
// From here on we're either targeting Thumb2 or ARM.
else if (!builtin.cpu.arch.isThumb()) {
break :impl __clzsi2_arm32;
}
// Use the generic implementation otherwise.
else break :impl clzsi2_generic;
},
else => clzsi2_generic,
};
pub const __clzdi2 = clzXi2_generic(i64);
pub const __clzti2 = clzXi2_generic(i128);
fn ctzXi2_generic(comptime T: type) fn (a: T) callconv(.C) i32 {
return struct {
fn f(a: T) callconv(.C) i32 {
@setRuntimeSafety(builtin.is_test);
var x = switch (@bitSizeOf(T)) {
32 => @bitCast(u32, a),
64 => @bitCast(u64, a),
128 => @bitCast(u128, a),
else => unreachable,
};
var n: T = 1;
// Number of trailing zeroes as binary search, from Hacker's Delight
var mask: @TypeOf(x) = std.math.maxInt(@TypeOf(x));
comptime var shift = @bitSizeOf(T);
if (x == 0) return shift;
inline while (shift > 1) {
shift = shift >> 1;
mask = mask >> shift;
if ((x & mask) == 0) {
n = n + shift;
x = x >> shift;
}
}
return @intCast(i32, n - @bitCast(T, (x & 1)));
}
}.f;
}
pub const __ctzsi2 = ctzXi2_generic(i32);
pub const __ctzdi2 = ctzXi2_generic(i64);
pub const __ctzti2 = ctzXi2_generic(i128);
fn ffsXi2_generic(comptime T: type) fn (a: T) callconv(.C) i32 {
return struct {
fn f(a: T) callconv(.C) i32 {
@setRuntimeSafety(builtin.is_test);
var x = switch (@bitSizeOf(T)) {
32 => @bitCast(u32, a),
64 => @bitCast(u64, a),
128 => @bitCast(u128, a),
else => unreachable,
};
var n: T = 1;
// adapted from Number of trailing zeroes (see ctzXi2_generic)
var mask: @TypeOf(x) = std.math.maxInt(@TypeOf(x));
comptime var shift = @bitSizeOf(T);
// In contrast to ctz return 0
if (x == 0) return 0;
inline while (shift > 1) {
shift = shift >> 1;
mask = mask >> shift;
if ((x & mask) == 0) {
n = n + shift;
x = x >> shift;
}
}
// return ctz + 1
return @intCast(i32, n - @bitCast(T, (x & 1))) + @as(i32, 1);
}
}.f;
}
pub const __ffssi2 = ffsXi2_generic(i32);
pub const __ffsdi2 = ffsXi2_generic(i64);
pub const __ffsti2 = ffsXi2_generic(i128);
pub fn __clzdi2(a: i64) callconv(.C) i32 {
return clzXi2(i64, a);
}
pub fn __clzti2(a: i128) callconv(.C) i32 {
return clzXi2(i128, a);
}
inline fn ctzXi2(comptime T: type, a: T) i32 {
@setRuntimeSafety(builtin.is_test);
var x = switch (@bitSizeOf(T)) {
32 => @bitCast(u32, a),
64 => @bitCast(u64, a),
128 => @bitCast(u128, a),
else => unreachable,
};
var n: T = 1;
// Number of trailing zeroes as binary search, from Hacker's Delight
var mask: @TypeOf(x) = std.math.maxInt(@TypeOf(x));
comptime var shift = @bitSizeOf(T);
if (x == 0) return shift;
inline while (shift > 1) {
shift = shift >> 1;
mask = mask >> shift;
if ((x & mask) == 0) {
n = n + shift;
x = x >> shift;
}
}
return @intCast(i32, n - @bitCast(T, (x & 1)));
}
pub fn __ctzsi2(a: i32) callconv(.C) i32 {
return ctzXi2(i32, a);
}
pub fn __ctzdi2(a: i64) callconv(.C) i32 {
return ctzXi2(i64, a);
}
pub fn __ctzti2(a: i128) callconv(.C) i32 {
return ctzXi2(i128, a);
}
inline fn ffsXi2(comptime T: type, a: T) i32 {
@setRuntimeSafety(builtin.is_test);
var x = switch (@bitSizeOf(T)) {
32 => @bitCast(u32, a),
64 => @bitCast(u64, a),
128 => @bitCast(u128, a),
else => unreachable,
};
var n: T = 1;
// adapted from Number of trailing zeroes (see ctzXi2)
var mask: @TypeOf(x) = std.math.maxInt(@TypeOf(x));
comptime var shift = @bitSizeOf(T);
// In contrast to ctz return 0
if (x == 0) return 0;
inline while (shift > 1) {
shift = shift >> 1;
mask = mask >> shift;
if ((x & mask) == 0) {
n = n + shift;
x = x >> shift;
}
}
// return ctz + 1
return @intCast(i32, n - @bitCast(T, (x & 1))) + @as(i32, 1);
}
pub fn __ffssi2(a: i32) callconv(.C) i32 {
return ffsXi2(i32, a);
}
pub fn __ffsdi2(a: i64) callconv(.C) i32 {
return ffsXi2(i64, a);
}
pub fn __ffsti2(a: i128) callconv(.C) i32 {
return ffsXi2(i128, a);
}
test {
_ = @import("clzsi2_test.zig");

View File

@ -35,7 +35,7 @@ pub fn __divdf3(a: f64, b: f64) callconv(.C) f64 {
var scale: i32 = 0;
// Detect if a or b is zero, denormal, infinity, or NaN.
if (aExponent -% 1 >= maxExponent -% 1 or bExponent -% 1 >= maxExponent -% 1) {
if (aExponent -% 1 >= maxExponent - 1 or bExponent -% 1 >= maxExponent - 1) {
const aAbs: Z = @bitCast(Z, a) & absMask;
const bAbs: Z = @bitCast(Z, b) & absMask;
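
The expression being touched here is a classic range trick; the switch from `maxExponent -% 1` to `maxExponent - 1` is cosmetic on the constant side, while the left side still relies on wraparound. For a biased exponent field `e` in `[0, maxExponent]`, `e -% 1` wraps to the maximum integer when `e == 0`, so one unsigned comparison catches both zero/denormal (`e == 0`) and infinity/NaN (`e == maxExponent`). A standalone sketch (names invented):

// e == 0           -> e -% 1 == maxInt(u32)     >= max_exponent - 1  (special)
// e == maxExponent -> e -% 1 == max_exponent - 1 >= max_exponent - 1 (special)
// otherwise        -> e -% 1 <  max_exponent - 1                     (normal)
fn isSpecialExponent(e: u32, comptime max_exponent: u32) bool {
    return e -% 1 >= max_exponent - 1;
}

For f64, max_exponent is 0x7ff, so exponents 0 and 0x7ff are flagged while 1 through 0x7fe are not.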

View File

@ -34,7 +34,7 @@ pub fn __divsf3(a: f32, b: f32) callconv(.C) f32 {
var scale: i32 = 0;
// Detect if a or b is zero, denormal, infinity, or NaN.
if (aExponent -% 1 >= maxExponent -% 1 or bExponent -% 1 >= maxExponent -% 1) {
if (aExponent -% 1 >= maxExponent - 1 or bExponent -% 1 >= maxExponent - 1) {
const aAbs: Z = @bitCast(Z, a) & absMask;
const bAbs: Z = @bitCast(Z, b) & absMask;

View File

@ -33,7 +33,7 @@ pub fn __divtf3(a: f128, b: f128) callconv(.C) f128 {
var scale: i32 = 0;
// Detect if a or b is zero, denormal, infinity, or NaN.
if (aExponent -% 1 >= maxExponent -% 1 or bExponent -% 1 >= maxExponent -% 1) {
if (aExponent -% 1 >= maxExponent - 1 or bExponent -% 1 >= maxExponent - 1) {
const aAbs: Z = @bitCast(Z, a) & absMask;
const bAbs: Z = @bitCast(Z, b) & absMask;

View File

@ -1,7 +1,7 @@
const is_test = @import("builtin").is_test;
const Log2Int = @import("std").math.Log2Int;
pub fn fixuint(comptime fp_t: type, comptime fixuint_t: type, a: fp_t) fixuint_t {
pub inline fn fixuint(comptime fp_t: type, comptime fixuint_t: type, a: fp_t) fixuint_t {
@setRuntimeSafety(is_test);
const rep_t = switch (fp_t) {

View File

@ -4,7 +4,7 @@ const maxInt = std.math.maxInt;
const FLT_MANT_DIG = 24;
fn __floatXisf(comptime T: type, arg: T) f32 {
inline fn floatXisf(comptime T: type, arg: T) f32 {
@setRuntimeSafety(builtin.is_test);
const bits = @typeInfo(T).Int.bits;
@ -71,18 +71,15 @@ fn __floatXisf(comptime T: type, arg: T) f32 {
}
pub fn __floatdisf(arg: i64) callconv(.C) f32 {
@setRuntimeSafety(builtin.is_test);
return @call(.{ .modifier = .always_inline }, __floatXisf, .{ i64, arg });
return floatXisf(i64, arg);
}
pub fn __floattisf(arg: i128) callconv(.C) f32 {
@setRuntimeSafety(builtin.is_test);
return @call(.{ .modifier = .always_inline }, __floatXisf, .{ i128, arg });
return floatXisf(i128, arg);
}
pub fn __aeabi_l2f(arg: i64) callconv(.AAPCS) f32 {
@setRuntimeSafety(false);
return @call(.{ .modifier = .always_inline }, __floatdisf, .{arg});
return floatXisf(i64, arg);
}
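
This file establishes the refactor pattern repeated in the float-conversion diffs below: the body becomes a private `inline fn`, and each exported entry point, whatever its calling convention, is a one-line wrapper, removing the `@call(.{ .modifier = .always_inline }, ...)` indirection. The shape, sketched with an invented conversion:

inline fn convertBody(arg: i64) f32 {
    return @intToFloat(f32, arg); // stand-in for the real conversion body
}
pub fn __convert(arg: i64) callconv(.C) f32 {
    return convertBody(arg);
}
pub fn __aeabi_convert(arg: i64) callconv(.AAPCS) f32 {
    return convertBody(arg); // the AAPCS wrapper shares the same inline body
}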
test {

View File

@ -2,7 +2,7 @@ const builtin = @import("builtin");
const std = @import("std");
const maxInt = std.math.maxInt;
fn floatsiXf(comptime T: type, a: i32) T {
inline fn floatsiXf(comptime T: type, a: i32) T {
@setRuntimeSafety(builtin.is_test);
const bits = @typeInfo(T).Float.bits;
@ -56,27 +56,27 @@ fn floatsiXf(comptime T: type, a: i32) T {
pub fn __floatsisf(arg: i32) callconv(.C) f32 {
@setRuntimeSafety(builtin.is_test);
return @call(.{ .modifier = .always_inline }, floatsiXf, .{ f32, arg });
return floatsiXf(f32, arg);
}
pub fn __floatsidf(arg: i32) callconv(.C) f64 {
@setRuntimeSafety(builtin.is_test);
return @call(.{ .modifier = .always_inline }, floatsiXf, .{ f64, arg });
return floatsiXf(f64, arg);
}
pub fn __floatsitf(arg: i32) callconv(.C) f128 {
@setRuntimeSafety(builtin.is_test);
return @call(.{ .modifier = .always_inline }, floatsiXf, .{ f128, arg });
return floatsiXf(f128, arg);
}
pub fn __aeabi_i2d(arg: i32) callconv(.AAPCS) f64 {
@setRuntimeSafety(false);
return @call(.{ .modifier = .always_inline }, __floatsidf, .{arg});
return floatsiXf(f64, arg);
}
pub fn __aeabi_i2f(arg: i32) callconv(.AAPCS) f32 {
@setRuntimeSafety(false);
return @call(.{ .modifier = .always_inline }, __floatsisf, .{arg});
return floatsiXf(f32, arg);
}
fn test_one_floatsitf(a: i32, expected: u128) !void {

View File

@ -4,7 +4,7 @@ const maxInt = std.math.maxInt;
const FLT_MANT_DIG = 24;
pub fn __floatundisf(arg: u64) callconv(.C) f32 {
inline fn floatundisf(arg: u64) f32 {
@setRuntimeSafety(builtin.is_test);
if (arg == 0) return 0;
@ -56,9 +56,12 @@ pub fn __floatundisf(arg: u64) callconv(.C) f32 {
return @bitCast(f32, result);
}
pub fn __floatundisf(arg: u64) callconv(.C) f32 {
return floatundisf(arg);
}
pub fn __aeabi_ul2f(arg: u64) callconv(.AAPCS) f32 {
@setRuntimeSafety(false);
return @call(.{ .modifier = .always_inline }, __floatundisf, .{arg});
return floatundisf(arg);
}
fn test__floatundisf(a: u64, expected: f32) !void {

View File

@ -4,7 +4,7 @@ const maxInt = std.math.maxInt;
const implicitBit = @as(u64, 1) << 52;
pub fn __floatunsidf(arg: u32) callconv(.C) f64 {
inline fn floatunsidf(arg: u32) f64 {
@setRuntimeSafety(builtin.is_test);
if (arg == 0) return 0.0;
@ -18,9 +18,12 @@ pub fn __floatunsidf(arg: u32) callconv(.C) f64 {
return @bitCast(f64, mant | (exp + 1023) << 52);
}
pub fn __floatunsidf(arg: u32) callconv(.C) f64 {
return floatunsidf(arg);
}
pub fn __aeabi_ui2d(arg: u32) callconv(.AAPCS) f64 {
@setRuntimeSafety(false);
return @call(.{ .modifier = .always_inline }, __floatunsidf, .{arg});
return floatunsidf(arg);
}
fn test_one_floatunsidf(a: u32, expected: u64) !void {

View File

@ -6,7 +6,7 @@ const significandBits = 23;
const exponentBias = 127;
const implicitBit = @as(u32, 1) << significandBits;
pub fn __floatunsisf(arg: u32) callconv(.C) f32 {
inline fn floatunsisf(arg: u32) f32 {
@setRuntimeSafety(builtin.is_test);
if (arg == 0) return 0.0;
@ -38,9 +38,12 @@ pub fn __floatunsisf(arg: u32) callconv(.C) f32 {
return @bitCast(f32, result);
}
pub fn __floatunsisf(arg: u32) callconv(.C) f32 {
return floatunsisf(arg);
}
pub fn __aeabi_ui2f(arg: u32) callconv(.AAPCS) f32 {
@setRuntimeSafety(false);
return @call(.{ .modifier = .always_inline }, __floatunsisf, .{arg});
return floatunsisf(arg);
}
fn test_one_floatunsisf(a: u32, expected: u32) !void {

View File

@ -56,7 +56,7 @@ fn mulXf3(comptime T: type, a: T, b: T) T {
var scale: i32 = 0;
// Detect if a or b is zero, denormal, infinity, or NaN.
if (aExponent -% 1 >= maxExponent -% 1 or bExponent -% 1 >= maxExponent -% 1) {
if (aExponent -% 1 >= maxExponent - 1 or bExponent -% 1 >= maxExponent - 1) {
const aAbs: Z = @bitCast(Z, a) & absMask;
const bAbs: Z = @bitCast(Z, b) & absMask;

View File

@ -2,7 +2,7 @@ const std = @import("std");
const builtin = @import("builtin");
// neg - negate (the number)
// - negXi2_generic for unoptimized little and big endian
// - negXi2 for unoptimized little and big endian
// sfffffff = 2^31-1
// two's complement inverting bits and add 1 would result in -INT_MIN == 0
@ -11,20 +11,22 @@ const builtin = @import("builtin");
// * size optimized builds
// * machines that dont support carry operations
fn negXi2_generic(comptime T: type) fn (a: T) callconv(.C) T {
return struct {
fn f(a: T) callconv(.C) T {
@setRuntimeSafety(builtin.is_test);
return -a;
}
}.f;
inline fn negXi2(comptime T: type, a: T) T {
@setRuntimeSafety(builtin.is_test);
return -a;
}
pub const __negsi2 = negXi2_generic(i32);
pub fn __negsi2(a: i32) callconv(.C) i32 {
return negXi2(i32, a);
}
pub const __negdi2 = negXi2_generic(i64);
pub fn __negdi2(a: i64) callconv(.C) i64 {
return negXi2(i64, a);
}
pub const __negti2 = negXi2_generic(i128);
pub fn __negti2(a: i128) callconv(.C) i128 {
return negXi2(i128, a);
}
test {
_ = @import("negsi2_test.zig");

View File

@ -3,26 +3,31 @@
// - negvXi4_generic for unoptimized version
// assume -0 == 0 is gracefully handled by the hardware
fn negvXi_generic(comptime ST: type) fn (a: ST) callconv(.C) ST {
return struct {
fn f(a: ST) callconv(.C) ST {
const UT = switch (ST) {
i32 => u32,
i64 => u64,
i128 => u128,
else => unreachable,
};
const N: UT = @bitSizeOf(ST);
const min: ST = @bitCast(ST, (@as(UT, 1) << (N - 1)));
if (a == min)
@panic("compiler_rt negv: overflow");
return -a;
}
}.f;
inline fn negvXi(comptime ST: type, a: ST) ST {
const UT = switch (ST) {
i32 => u32,
i64 => u64,
i128 => u128,
else => unreachable,
};
const N: UT = @bitSizeOf(ST);
const min: ST = @bitCast(ST, (@as(UT, 1) << (N - 1)));
if (a == min)
@panic("compiler_rt negv: overflow");
return -a;
}
pub fn __negvsi2(a: i32) callconv(.C) i32 {
return negvXi(i32, a);
}
pub fn __negvdi2(a: i64) callconv(.C) i64 {
return negvXi(i64, a);
}
pub fn __negvti2(a: i128) callconv(.C) i128 {
return negvXi(i128, a);
}
pub const __negvsi2 = negvXi_generic(i32);
pub const __negvdi2 = negvXi_generic(i64);
pub const __negvti2 = negvXi_generic(i128);
test {
_ = @import("negvsi2_test.zig");

View File

@ -4,34 +4,36 @@ const builtin = @import("builtin");
// parity - if number of bits set is even => 0, else => 1
// - pariytXi2_generic for big and little endian
// - parityXi2 for big and little endian
fn parityXi2_generic(comptime T: type) fn (a: T) callconv(.C) i32 {
return struct {
fn f(a: T) callconv(.C) i32 {
@setRuntimeSafety(builtin.is_test);
var x = switch (@bitSizeOf(T)) {
32 => @bitCast(u32, a),
64 => @bitCast(u64, a),
128 => @bitCast(u128, a),
else => unreachable,
};
// Bit Twiddling Hacks: Compute parity in parallel
comptime var shift: u8 = @bitSizeOf(T) / 2;
inline while (shift > 2) {
x ^= x >> shift;
shift = shift >> 1;
}
x &= 0xf;
return (@intCast(u16, 0x6996) >> @intCast(u4, x)) & 1; // optimization for >>2 and >>1
}
}.f;
}
inline fn parityXi2(comptime T: type, a: T) i32 {
@setRuntimeSafety(builtin.is_test);
var x = switch (@bitSizeOf(T)) {
32 => @bitCast(u32, a),
64 => @bitCast(u64, a),
128 => @bitCast(u128, a),
else => unreachable,
};
// Bit Twiddling Hacks: Compute parity in parallel
comptime var shift: u8 = @bitSizeOf(T) / 2;
inline while (shift > 2) {
x ^= x >> shift;
shift = shift >> 1;
}
x &= 0xf;
return (@intCast(u16, 0x6996) >> @intCast(u4, x)) & 1; // optimization for >>2 and >>1
}
pub const __paritysi2 = parityXi2_generic(i32);
pub fn __paritysi2(a: i32) callconv(.C) i32 {
return parityXi2(i32, a);
}
pub const __paritydi2 = parityXi2_generic(i64);
pub fn __paritydi2(a: i64) callconv(.C) i32 {
return parityXi2(i64, a);
}
pub const __parityti2 = parityXi2_generic(i128);
pub fn __parityti2(a: i128) callconv(.C) i32 {
return parityXi2(i128, a);
}
test {
_ = @import("paritysi2_test.zig");

View File

@ -10,35 +10,37 @@ const std = @import("std");
// TAOCP: Combinational Algorithms, Bitwise Tricks And Techniques,
// subsubsection "Working with the rightmost bits" and "Sideways addition".
fn popcountXi2_generic(comptime ST: type) fn (a: ST) callconv(.C) i32 {
return struct {
fn f(a: ST) callconv(.C) i32 {
@setRuntimeSafety(builtin.is_test);
const UT = switch (ST) {
i32 => u32,
i64 => u64,
i128 => u128,
else => unreachable,
};
var x = @bitCast(UT, a);
x -= (x >> 1) & (~@as(UT, 0) / 3); // 0x55...55, aggregate duos
x = ((x >> 2) & (~@as(UT, 0) / 5)) // 0x33...33, aggregate nibbles
+ (x & (~@as(UT, 0) / 5));
x += x >> 4;
x &= ~@as(UT, 0) / 17; // 0x0F...0F, aggregate bytes
// 8 most significant bits of x + (x<<8) + (x<<16) + ..
x *%= ~@as(UT, 0) / 255; // 0x01...01
x >>= (@bitSizeOf(ST) - 8);
return @intCast(i32, x);
}
}.f;
inline fn popcountXi2(comptime ST: type, a: ST) i32 {
@setRuntimeSafety(builtin.is_test);
const UT = switch (ST) {
i32 => u32,
i64 => u64,
i128 => u128,
else => unreachable,
};
var x = @bitCast(UT, a);
x -= (x >> 1) & (~@as(UT, 0) / 3); // 0x55...55, aggregate duos
x = ((x >> 2) & (~@as(UT, 0) / 5)) // 0x33...33, aggregate nibbles
+ (x & (~@as(UT, 0) / 5));
x += x >> 4;
x &= ~@as(UT, 0) / 17; // 0x0F...0F, aggregate bytes
// 8 most significant bits of x + (x<<8) + (x<<16) + ..
x *%= ~@as(UT, 0) / 255; // 0x01...01
x >>= (@bitSizeOf(ST) - 8);
return @intCast(i32, x);
}
pub const __popcountsi2 = popcountXi2_generic(i32);
pub fn __popcountsi2(a: i32) callconv(.C) i32 {
return popcountXi2(i32, a);
}
pub const __popcountdi2 = popcountXi2_generic(i64);
pub fn __popcountdi2(a: i64) callconv(.C) i32 {
return popcountXi2(i64, a);
}
pub const __popcountti2 = popcountXi2_generic(i128);
pub fn __popcountti2(a: i128) callconv(.C) i32 {
return popcountXi2(i128, a);
}
test {
_ = @import("popcountsi2_test.zig");

View File

@ -19,7 +19,7 @@ fn Dwords(comptime T: type, comptime signed_half: bool) type {
// Arithmetic shift left
// Precondition: 0 <= b < bits_in_dword
pub fn ashlXi3(comptime T: type, a: T, b: i32) T {
pub inline fn ashlXi3(comptime T: type, a: T, b: i32) T {
const dwords = Dwords(T, false);
const S = Log2Int(dwords.HalfT);
@ -42,7 +42,7 @@ pub fn ashlXi3(comptime T: type, a: T, b: i32) T {
// Arithmetic shift right
// Precondition: 0 <= b < T.bit_count
pub fn ashrXi3(comptime T: type, a: T, b: i32) T {
pub inline fn ashrXi3(comptime T: type, a: T, b: i32) T {
const dwords = Dwords(T, true);
const S = Log2Int(dwords.HalfT);
@ -69,7 +69,7 @@ pub fn ashrXi3(comptime T: type, a: T, b: i32) T {
// Logical shift right
// Precondition: 0 <= b < T.bit_count
pub fn lshrXi3(comptime T: type, a: T, b: i32) T {
pub inline fn lshrXi3(comptime T: type, a: T, b: i32) T {
const dwords = Dwords(T, false);
const S = Log2Int(dwords.HalfT);
@ -91,32 +91,32 @@ pub fn lshrXi3(comptime T: type, a: T, b: i32) T {
}
pub fn __ashldi3(a: i64, b: i32) callconv(.C) i64 {
return @call(.{ .modifier = .always_inline }, ashlXi3, .{ i64, a, b });
return ashlXi3(i64, a, b);
}
pub fn __ashlti3(a: i128, b: i32) callconv(.C) i128 {
return @call(.{ .modifier = .always_inline }, ashlXi3, .{ i128, a, b });
return ashlXi3(i128, a, b);
}
pub fn __ashrdi3(a: i64, b: i32) callconv(.C) i64 {
return @call(.{ .modifier = .always_inline }, ashrXi3, .{ i64, a, b });
return ashrXi3(i64, a, b);
}
pub fn __ashrti3(a: i128, b: i32) callconv(.C) i128 {
return @call(.{ .modifier = .always_inline }, ashrXi3, .{ i128, a, b });
return ashrXi3(i128, a, b);
}
pub fn __lshrdi3(a: i64, b: i32) callconv(.C) i64 {
return @call(.{ .modifier = .always_inline }, lshrXi3, .{ i64, a, b });
return lshrXi3(i64, a, b);
}
pub fn __lshrti3(a: i128, b: i32) callconv(.C) i128 {
return @call(.{ .modifier = .always_inline }, lshrXi3, .{ i128, a, b });
return lshrXi3(i128, a, b);
}
pub fn __aeabi_llsl(a: i64, b: i32) callconv(.AAPCS) i64 {
return __ashldi3(a, b);
return ashlXi3(i64, a, b);
}
pub fn __aeabi_lasr(a: i64, b: i32) callconv(.AAPCS) i64 {
return __ashrdi3(a, b);
return ashrXi3(i64, a, b);
}
pub fn __aeabi_llsr(a: i64, b: i32) callconv(.AAPCS) i64 {
return __lshrdi3(a, b);
return lshrXi3(i64, a, b);
}
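
`ashlXi3`, `ashrXi3`, and `lshrXi3` all follow the same three-case split over half-words: shift amount at least the half width, zero, or straddling the boundary. A self-contained sketch of the logical-right-shift case, done by hand for u64 over two u32 halves (names invented):

fn lshr64(a: u64, b: u6) u64 {
    const hi = @truncate(u32, a >> 32);
    const lo = @truncate(u32, a);
    if (b >= 32) {
        // Whole high half moves down; the result's high half is zero.
        return hi >> @intCast(u5, b - 32);
    } else if (b == 0) {
        return a;
    } else {
        // Straddling case: bits leaving the high half top up the low half.
        const new_lo = (lo >> @intCast(u5, b)) | (hi << @intCast(u5, 32 - b));
        const new_hi = hi >> @intCast(u5, b);
        return (@as(u64, new_hi) << 32) | new_lo;
    }
}

test "straddling shift" {
    // Bit 32 shifted right once lands on bit 31.
    try @import("std").testing.expectEqual(@as(u64, 0x80000000), lshr64(@as(u64, 1) << 32, 1));
}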
test {

View File

@ -47,10 +47,16 @@ pub const hasher_init: Hasher = Hasher.init(&[_]u8{0} ** Hasher.key_length);
pub const File = struct {
path: ?[]const u8,
max_file_size: ?usize,
stat: fs.File.Stat,
stat: Stat,
bin_digest: BinDigest,
contents: ?[]const u8,
pub const Stat = struct {
inode: fs.File.INode,
size: u64,
mtime: i128,
};
pub fn deinit(self: *File, allocator: Allocator) void {
if (self.path) |owned_slice| {
allocator.free(owned_slice);
@ -424,7 +430,11 @@ pub const Manifest = struct {
if (!size_match or !mtime_match or !inode_match) {
self.manifest_dirty = true;
cache_hash_file.stat = actual_stat;
cache_hash_file.stat = .{
.size = actual_stat.size,
.mtime = actual_stat.mtime,
.inode = actual_stat.inode,
};
if (self.isProblematicTimestamp(cache_hash_file.stat.mtime)) {
// The actual file has an unreliable timestamp, force it to be hashed
@ -530,7 +540,12 @@ pub const Manifest = struct {
const file = try fs.cwd().openFile(ch_file.path.?, .{});
defer file.close();
ch_file.stat = try file.stat();
const actual_stat = try file.stat();
ch_file.stat = .{
.size = actual_stat.size,
.mtime = actual_stat.mtime,
.inode = actual_stat.inode,
};
if (self.isProblematicTimestamp(ch_file.stat.mtime)) {
// The actual file has an unreliable timestamp, force it to be hashed
@ -615,6 +630,42 @@ pub const Manifest = struct {
try self.populateFileHash(new_ch_file);
}
/// Like `addFilePost` but when the file contents have already been loaded from disk.
/// On success, cache takes ownership of `resolved_path`.
pub fn addFilePostContents(
self: *Manifest,
resolved_path: []const u8,
bytes: []const u8,
stat: File.Stat,
) error{OutOfMemory}!void {
assert(self.manifest_file != null);
const ch_file = try self.files.addOne(self.cache.gpa);
errdefer self.files.shrinkRetainingCapacity(self.files.items.len - 1);
ch_file.* = .{
.path = resolved_path,
.max_file_size = null,
.stat = stat,
.bin_digest = undefined,
.contents = null,
};
if (self.isProblematicTimestamp(ch_file.stat.mtime)) {
// The actual file has an unreliable timestamp, force it to be hashed
ch_file.stat.mtime = 0;
ch_file.stat.inode = 0;
}
{
var hasher = hasher_init;
hasher.update(bytes);
hasher.final(&ch_file.bin_digest);
}
self.hash.hasher.update(&ch_file.bin_digest);
}
pub fn addDepFilePost(self: *Manifest, dir: fs.Dir, dep_file_basename: []const u8) !void {
assert(self.manifest_file != null);
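
Two details here work together: `File.Stat` keeps only the fields the manifest actually compares (inode, size, mtime), and `addFilePostContents` lets a caller who has already read a file hand the cache both the bytes and the stat observed at read time, avoiding a second `openFile`/`stat` round trip and the window for the file to change between hashing and statting. A hedged sketch of the intended call pattern (the helper is invented; the `Cache` import path assumes the compiler source layout):

const std = @import("std");
const fs = std.fs;
const Allocator = std.mem.Allocator;
const Cache = @import("Cache.zig"); // as laid out in the compiler source tree

fn trackLoadedFile(
    man: *Cache.Manifest,
    gpa: Allocator,
    path: []const u8,
    bytes: []const u8,
    stat: fs.File.Stat,
) !void {
    const resolved = try gpa.dupe(u8, path);
    // The manifest takes ownership of `resolved` only on success.
    errdefer gpa.free(resolved);
    try man.addFilePostContents(resolved, bytes, .{
        .size = stat.size,
        .mtime = stat.mtime,
        .inode = stat.inode,
    });
}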

View File

@ -41,8 +41,8 @@ gpa: Allocator,
arena_state: std.heap.ArenaAllocator.State,
bin_file: *link.File,
c_object_table: std.AutoArrayHashMapUnmanaged(*CObject, void) = .{},
stage1_lock: ?Cache.Lock = null,
stage1_cache_manifest: *Cache.Manifest = undefined,
/// This is a pointer to a local variable inside `update()`.
whole_cache_manifest: ?*Cache.Manifest = null,
link_error_flags: link.File.ErrorFlags = .{},
@ -98,6 +98,13 @@ clang_argv: []const []const u8,
cache_parent: *Cache,
/// Path to own executable for invoking `zig clang`.
self_exe_path: ?[]const u8,
/// null means -fno-emit-bin.
/// This is mutable memory allocated into the Compilation-lifetime arena (`arena_state`)
/// of exactly the correct size for "o/[digest]/[basename]".
/// The basename is of the outputted binary file in case we don't know the directory yet.
whole_bin_sub_path: ?[]u8,
/// Same as `whole_bin_sub_path` but for implibs.
whole_implib_sub_path: ?[]u8,
zig_lib_directory: Directory,
local_cache_directory: Directory,
global_cache_directory: Directory,
@ -418,7 +425,7 @@ pub const AllErrors = struct {
const module_note = module_err_msg.notes[i];
const source = try module_note.src_loc.file_scope.getSource(module.gpa);
const byte_offset = try module_note.src_loc.byteOffset(module.gpa);
const loc = std.zig.findLineColumn(source, byte_offset);
const loc = std.zig.findLineColumn(source.bytes, byte_offset);
const file_path = try module_note.src_loc.file_scope.fullPath(allocator);
note.* = .{
.src = .{
@ -441,7 +448,7 @@ pub const AllErrors = struct {
}
const source = try module_err_msg.src_loc.file_scope.getSource(module.gpa);
const byte_offset = try module_err_msg.src_loc.byteOffset(module.gpa);
const loc = std.zig.findLineColumn(source, byte_offset);
const loc = std.zig.findLineColumn(source.bytes, byte_offset);
const file_path = try module_err_msg.src_loc.file_scope.fullPath(allocator);
try errors.append(.{
.src = .{
@ -612,6 +619,15 @@ pub const Directory = struct {
return std.fs.path.joinZ(allocator, paths);
}
}
/// Whether or not the handle should be closed, or the path should be freed
/// is determined by usage, however this function is provided for convenience
/// if it happens to be what the caller needs.
pub fn closeAndFree(self: *Directory, gpa: Allocator) void {
self.handle.close();
if (self.path) |p| gpa.free(p);
self.* = undefined;
}
};
pub const EmitLoc = struct {
@ -631,6 +647,7 @@ pub const ClangPreprocessorMode = enum {
};
pub const SystemLib = link.SystemLib;
pub const CacheMode = link.CacheMode;
pub const InitOptions = struct {
zig_lib_directory: Directory,
@ -668,6 +685,7 @@ pub const InitOptions = struct {
/// is externally modified - essentially anything other than zig-cache - then
/// this flag would be set to disable this machinery to avoid false positives.
disable_lld_caching: bool = false,
cache_mode: CacheMode = .incremental,
object_format: ?std.Target.ObjectFormat = null,
optimize_mode: std.builtin.Mode = .Debug,
keep_source_files_loaded: bool = false,
@ -885,6 +903,11 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
break :blk build_options.is_stage1;
};
const cache_mode = if (use_stage1 and !options.disable_lld_caching)
CacheMode.whole
else
options.cache_mode;
// Make a decision on whether to use LLVM or our own backend.
const use_llvm = build_options.have_llvm and blk: {
if (options.use_llvm) |explicit|
@ -1219,39 +1242,75 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
// modified between incremental updates.
var hash = cache.hash;
// Here we put the root source file path name, but *not* with addFile. We want the
// hash to be the same regardless of the contents of the source file, because
// incremental compilation will handle it, but we do want to namespace different
// source file names because they are likely different compilations and therefore this
// would be likely to cause cache hits.
hash.addBytes(main_pkg.root_src_path);
hash.addOptionalBytes(main_pkg.root_src_directory.path);
{
var local_arena = std.heap.ArenaAllocator.init(gpa);
defer local_arena.deinit();
var seen_table = std.AutoHashMap(*Package, void).init(local_arena.allocator());
try addPackageTableToCacheHash(&hash, &local_arena, main_pkg.table, &seen_table, .path_bytes);
switch (cache_mode) {
.incremental => {
// Here we put the root source file path name, but *not* with addFile.
// We want the hash to be the same regardless of the contents of the
// source file, because incremental compilation will handle it, but we
// do want to namespace different source file names because they are
// likely different compilations, and sharing one hash across them
// would cause false cache hits.
hash.addBytes(main_pkg.root_src_path);
hash.addOptionalBytes(main_pkg.root_src_directory.path);
{
var seen_table = std.AutoHashMap(*Package, void).init(arena);
try addPackageTableToCacheHash(&hash, &arena_allocator, main_pkg.table, &seen_table, .path_bytes);
}
},
.whole => {
// In this case, we postpone adding the input source file until
// we create the cache manifest, in update(), because we want to
// track it and packages as files.
},
}
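To restate the split: incremental mode namespaces the hash by the root source *path* up front, while whole mode defers all file tracking to the cache manifest in `update()`. A minimal standalone sketch of that decision, with `Sha256` standing in for the cache's real hasher:

const std = @import("std");

const CacheMode = enum { incremental, whole };

fn hashRootSource(
    hasher: *std.crypto.hash.sha2.Sha256,
    mode: CacheMode,
    root_src_path: []const u8,
) void {
    switch (mode) {
        // Incremental: namespace by the file *path* only; contents must not
        // affect this hash, because incremental compilation tracks them itself.
        .incremental => hasher.update(root_src_path),
        // Whole: add nothing here; the source file (path and contents) is
        // registered with the cache manifest later, in update().
        .whole => {},
    }
}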
// Synchronize with other matching comments: ZigOnlyHashStuff
hash.add(valgrind);
hash.add(single_threaded);
hash.add(use_stage1);
hash.add(use_llvm);
hash.add(dll_export_fns);
hash.add(options.is_test);
hash.add(options.test_evented_io);
hash.addOptionalBytes(options.test_filter);
hash.addOptionalBytes(options.test_name_prefix);
hash.add(options.skip_linker_dependencies);
hash.add(options.parent_compilation_link_libc);
// In the case of incremental cache mode, this `zig_cache_artifact_directory`
// is computed based on a hash of non-linker inputs, and it is where all
// build artifacts are stored (even while in-progress).
//
// For whole cache mode, it is still used for builtin.zig so that the file
// path to builtin.zig can remain consistent during a debugging session at
// runtime. However, we don't know where to put outputs from the linker
// or stage1 backend object files until the final cache hash, which is available
// after the compilation is complete.
//
// Therefore, in whole cache mode, we additionally create a temporary cache
// directory for these two kinds of build artifacts, and then rename it
// into place after the final hash is known. However, we don't want to
// create the temporary directory here, because in the case of a cache hit,
// making the directory only to never use it (or to delete it) would be
// wasted syscalls.
//
// In summary, for whole cache mode, we simulate `-fno-emit-bin` in this
// function, and `zig_cache_artifact_directory` is *wrong* except for builtin.zig,
// and then at the beginning of `update()` when we find out whether we need
// a temporary directory, we patch up all the places that the incorrect
// `zig_cache_artifact_directory` was passed to various components of the compiler.
const digest = hash.final();
const artifact_sub_dir = try std.fs.path.join(arena, &[_][]const u8{ "o", &digest });
var artifact_dir = try options.local_cache_directory.handle.makeOpenPath(artifact_sub_dir, .{});
errdefer artifact_dir.close();
const zig_cache_artifact_directory: Directory = .{
.handle = artifact_dir,
.path = if (options.local_cache_directory.path) |p|
try std.fs.path.join(arena, &[_][]const u8{ p, artifact_sub_dir })
else
artifact_sub_dir,
.path = try options.local_cache_directory.join(arena, &[_][]const u8{artifact_sub_dir}),
};
log.debug("zig_cache_artifact_directory='{s}' use_stage1={}", .{
zig_cache_artifact_directory.path, use_stage1,
});
const builtin_pkg = try Package.createWithDir(
gpa,
@ -1374,6 +1433,11 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
};
}
switch (cache_mode) {
.whole => break :blk null,
.incremental => {},
}
if (module) |zm| {
break :blk link.Emit{
.directory = zm.zig_cache_artifact_directory,
@ -1417,6 +1481,12 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
};
}
// This is here for the same reason as in `bin_file_emit` above.
switch (cache_mode) {
.whole => break :blk null,
.incremental => {},
}
// Use the same directory as the bin. The CLI already emits an
// error if -fno-emit-bin is combined with -femit-implib.
break :blk link.Emit{
@ -1425,6 +1495,16 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
};
};
// This is so that when doing `CacheMode.whole`, the mechanism in update()
// can use it for communicating the result directory via `bin_file.emit`.
// This is used to distinguish between -fno-emit-bin and -femit-bin
// for `CacheMode.whole`.
// This memory will be overwritten with the real digest in update() but
// the basename will be preserved.
const whole_bin_sub_path: ?[]u8 = try prepareWholeEmitSubPath(arena, options.emit_bin);
// Same thing but for implibs.
const whole_implib_sub_path: ?[]u8 = try prepareWholeEmitSubPath(arena, options.emit_implib);
var system_libs: std.StringArrayHashMapUnmanaged(SystemLib) = .{};
errdefer system_libs.deinit(gpa);
try system_libs.ensureTotalCapacity(gpa, options.system_lib_names.len);
@ -1512,7 +1592,8 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
.skip_linker_dependencies = options.skip_linker_dependencies,
.parent_compilation_link_libc = options.parent_compilation_link_libc,
.each_lib_rpath = options.each_lib_rpath orelse options.is_native_os,
.disable_lld_caching = options.disable_lld_caching,
.cache_mode = cache_mode,
.disable_lld_caching = options.disable_lld_caching or cache_mode == .whole,
.subsystem = options.subsystem,
.is_test = options.is_test,
.wasi_exec_model = wasi_exec_model,
@ -1529,6 +1610,8 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
.local_cache_directory = options.local_cache_directory,
.global_cache_directory = options.global_cache_directory,
.bin_file = bin_file,
.whole_bin_sub_path = whole_bin_sub_path,
.whole_implib_sub_path = whole_implib_sub_path,
.emit_asm = options.emit_asm,
.emit_llvm_ir = options.emit_llvm_ir,
.emit_llvm_bc = options.emit_llvm_bc,
@ -1593,7 +1676,9 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
comp.c_object_table.putAssumeCapacityNoClobber(c_object, {});
}
if (comp.bin_file.options.emit != null and !comp.bin_file.options.skip_linker_dependencies) {
const have_bin_emit = comp.bin_file.options.emit != null or comp.whole_bin_sub_path != null;
if (have_bin_emit and !comp.bin_file.options.skip_linker_dependencies) {
// If we need to build glibc for the target, add work items for it.
// We go through the work queue so that building can be done in parallel.
if (comp.wantBuildGLibCFromSource()) {
@ -1698,8 +1783,10 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
if (comp.bin_file.options.include_compiler_rt and capable_of_building_compiler_rt) {
if (is_exe_or_dyn_lib) {
log.debug("queuing a job to build compiler_rt_lib", .{});
try comp.work_queue.writeItem(.{ .compiler_rt_lib = {} });
} else if (options.output_mode != .Obj) {
log.debug("queuing a job to build compiler_rt_obj", .{});
// If build-obj with -fcompiler-rt is requested, that is handled specially
// elsewhere. In this case we are making a static library, so we ask
// for a compiler-rt object to put in it.
@ -1725,20 +1812,11 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
return comp;
}
fn releaseStage1Lock(comp: *Compilation) void {
if (comp.stage1_lock) |*lock| {
lock.release();
comp.stage1_lock = null;
}
}
pub fn destroy(self: *Compilation) void {
const optional_module = self.bin_file.options.module;
self.bin_file.destroy();
if (optional_module) |module| module.deinit();
self.releaseStage1Lock();
const gpa = self.gpa;
self.work_queue.deinit();
self.anon_work_queue.deinit();
@ -1815,22 +1893,126 @@ pub fn getTarget(self: Compilation) Target {
return self.bin_file.options.target;
}
fn restorePrevZigCacheArtifactDirectory(comp: *Compilation, directory: *Directory) void {
if (directory.path) |p| comp.gpa.free(p);
// Restore the Module's previous zig_cache_artifact_directory
// This is only for cleanup purposes; Module.deinit calls close
// on the handle of zig_cache_artifact_directory.
if (comp.bin_file.options.module) |module| {
const builtin_pkg = module.main_pkg.table.get("builtin").?;
module.zig_cache_artifact_directory = builtin_pkg.root_src_directory;
}
}
fn cleanupTmpArtifactDirectory(
comp: *Compilation,
tmp_artifact_directory: *?Directory,
tmp_dir_sub_path: []const u8,
) void {
comp.gpa.free(tmp_dir_sub_path);
if (tmp_artifact_directory.*) |*directory| {
directory.handle.close();
restorePrevZigCacheArtifactDirectory(comp, directory);
}
}
/// Detect changes to source files, perform semantic analysis, and update the output files.
pub fn update(self: *Compilation) !void {
pub fn update(comp: *Compilation) !void {
const tracy_trace = trace(@src());
defer tracy_trace.end();
self.clearMiscFailures();
comp.clearMiscFailures();
var man: Cache.Manifest = undefined;
defer if (comp.whole_cache_manifest != null) man.deinit();
var tmp_dir_sub_path: []const u8 = &.{};
var tmp_artifact_directory: ?Directory = null;
defer cleanupTmpArtifactDirectory(comp, &tmp_artifact_directory, tmp_dir_sub_path);
// If using the whole caching strategy, we check for *everything* up front, including
// C source files.
if (comp.bin_file.options.cache_mode == .whole) {
// We are about to obtain this lock, so here we give other processes a chance first.
comp.bin_file.releaseLock();
comp.whole_cache_manifest = &man;
man = comp.cache_parent.obtain();
try comp.addNonIncrementalStuffToCacheManifest(&man);
const is_hit = man.hit() catch |err| {
// TODO properly bubble these up instead of emitting a warning
const i = man.failed_file_index orelse return err;
const file_path = man.files.items[i].path orelse return err;
std.log.warn("{s}: {s}", .{ @errorName(err), file_path });
return err;
};
if (is_hit) {
log.debug("CacheMode.whole cache hit for {s}", .{comp.bin_file.options.root_name});
const digest = man.final();
comp.wholeCacheModeSetBinFilePath(&digest);
assert(comp.bin_file.lock == null);
comp.bin_file.lock = man.toOwnedLock();
return;
}
log.debug("CacheMode.whole cache miss for {s}", .{comp.bin_file.options.root_name});
// Initialize `bin_file.emit` with a temporary Directory so that compilation can
// continue on the same path as incremental, using the temporary Directory.
tmp_artifact_directory = d: {
const s = std.fs.path.sep_str;
const rand_int = std.crypto.random.int(u64);
tmp_dir_sub_path = try std.fmt.allocPrint(comp.gpa, "tmp" ++ s ++ "{x}", .{rand_int});
const path = try comp.local_cache_directory.join(comp.gpa, &.{tmp_dir_sub_path});
errdefer comp.gpa.free(path);
const handle = try comp.local_cache_directory.handle.makeOpenPath(tmp_dir_sub_path, .{});
errdefer handle.close();
break :d .{
.path = path,
.handle = handle,
};
};
// This updates the output directory for stage1 backend and linker outputs.
if (comp.bin_file.options.module) |module| {
module.zig_cache_artifact_directory = tmp_artifact_directory.?;
}
// This resets the link.File to operate as if we called openPath() in create()
// instead of simulating -fno-emit-bin.
var options = comp.bin_file.options.move();
if (comp.whole_bin_sub_path) |sub_path| {
options.emit = .{
.directory = tmp_artifact_directory.?,
.sub_path = std.fs.path.basename(sub_path),
};
}
if (comp.whole_implib_sub_path) |sub_path| {
options.implib_emit = .{
.directory = tmp_artifact_directory.?,
.sub_path = std.fs.path.basename(sub_path),
};
}
comp.bin_file.destroy();
comp.bin_file = try link.File.openPath(comp.gpa, options);
}
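The miss path above follows a build-into-`tmp/[random]`-then-rename discipline; the rename itself happens near the end of `update()` once the digest is known. A standalone sketch of just that file-system pattern, using only `std.fs` and a made-up digest in place of the manifest's:

const std = @import("std");

pub fn main() !void {
    var cache_dir = try std.fs.cwd().makeOpenPath("zig-cache", .{});
    defer cache_dir.close();

    // Cache miss: build artifacts land in tmp/[random] first.
    const rand_int = std.crypto.random.int(u64);
    var buf: [64]u8 = undefined;
    const tmp_sub = try std.fmt.bufPrint(&buf, "tmp" ++ std.fs.path.sep_str ++ "{x}", .{rand_int});
    var tmp_dir = try cache_dir.makeOpenPath(tmp_sub, .{});
    tmp_dir.close();

    // Once the final digest is known, rename the directory into place; on the
    // next run, a cache hit needs no build at all.
    const digest = "0123456789abcdef"; // stand-in for the real manifest digest
    try cache_dir.makePath("o");
    try std.fs.rename(cache_dir, tmp_sub, cache_dir, "o" ++ std.fs.path.sep_str ++ digest);
}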
// For compiling C objects, we rely on the cache hash system to avoid duplicating work.
// Add a Job for each C object.
try self.c_object_work_queue.ensureUnusedCapacity(self.c_object_table.count());
for (self.c_object_table.keys()) |key| {
self.c_object_work_queue.writeItemAssumeCapacity(key);
try comp.c_object_work_queue.ensureUnusedCapacity(comp.c_object_table.count());
for (comp.c_object_table.keys()) |key| {
comp.c_object_work_queue.writeItemAssumeCapacity(key);
}
const use_stage1 = build_options.is_stage1 and self.bin_file.options.use_stage1;
if (self.bin_file.options.module) |module| {
const use_stage1 = build_options.is_stage1 and comp.bin_file.options.use_stage1;
if (comp.bin_file.options.module) |module| {
module.compile_log_text.shrinkAndFree(module.gpa, 0);
module.generation += 1;
@ -1845,7 +2027,7 @@ pub fn update(self: *Compilation) !void {
// import_table here.
// Likewise, in the case of `zig test`, the test runner is the root source file,
// and so there is nothing to import the main file.
if (use_stage1 or self.bin_file.options.is_test) {
if (use_stage1 or comp.bin_file.options.is_test) {
_ = try module.importPkg(module.main_pkg);
}
@ -1854,34 +2036,34 @@ pub fn update(self: *Compilation) !void {
// to update it.
// We still want AstGen work items for stage1 so that we expose compile errors
// that are implemented in stage2 but not stage1.
try self.astgen_work_queue.ensureUnusedCapacity(module.import_table.count());
try comp.astgen_work_queue.ensureUnusedCapacity(module.import_table.count());
for (module.import_table.values()) |value| {
self.astgen_work_queue.writeItemAssumeCapacity(value);
comp.astgen_work_queue.writeItemAssumeCapacity(value);
}
if (!use_stage1) {
// Put a work item in for checking if any files used with `@embedFile` changed.
{
try self.embed_file_work_queue.ensureUnusedCapacity(module.embed_table.count());
try comp.embed_file_work_queue.ensureUnusedCapacity(module.embed_table.count());
var it = module.embed_table.iterator();
while (it.next()) |entry| {
const embed_file = entry.value_ptr.*;
self.embed_file_work_queue.writeItemAssumeCapacity(embed_file);
comp.embed_file_work_queue.writeItemAssumeCapacity(embed_file);
}
}
try self.work_queue.writeItem(.{ .analyze_pkg = std_pkg });
if (self.bin_file.options.is_test) {
try self.work_queue.writeItem(.{ .analyze_pkg = module.main_pkg });
try comp.work_queue.writeItem(.{ .analyze_pkg = std_pkg });
if (comp.bin_file.options.is_test) {
try comp.work_queue.writeItem(.{ .analyze_pkg = module.main_pkg });
}
}
}
try self.performAllTheWork();
try comp.performAllTheWork();
if (!use_stage1) {
if (self.bin_file.options.module) |module| {
if (self.bin_file.options.is_test and self.totalErrorCount() == 0) {
if (comp.bin_file.options.module) |module| {
if (comp.bin_file.options.is_test and comp.totalErrorCount() == 0) {
// The `test_functions` decl has been intentionally postponed until now,
// at which point we must populate it with the list of test functions that
// have been discovered and not filtered out.
@ -1910,41 +2092,241 @@ pub fn update(self: *Compilation) !void {
}
}
if (self.totalErrorCount() != 0) {
// Skip flushing.
self.link_error_flags = .{};
if (comp.totalErrorCount() != 0) {
// Skip flushing and keep source files loaded for error reporting.
comp.link_error_flags = .{};
return;
}
// This is needed before reading the error flags.
try self.bin_file.flush(self);
self.link_error_flags = self.bin_file.errorFlags();
if (!use_stage1) {
if (self.bin_file.options.module) |module| {
try link.File.C.flushEmitH(module);
}
}
// Flush takes care of -femit-bin, but we still have -femit-llvm-ir, -femit-llvm-bc, and
// -femit-asm to handle, in the case of C objects.
self.emitOthers();
comp.emitOthers();
// If there are any errors, we anticipate the source files being loaded
// to report error messages. Otherwise we unload all source files to save memory.
if (comp.whole_cache_manifest != null) {
const digest = man.final();
// Rename the temporary directory into place.
var directory = tmp_artifact_directory.?;
tmp_artifact_directory = null;
directory.handle.close();
defer restorePrevZigCacheArtifactDirectory(comp, &directory);
const o_sub_path = try std.fs.path.join(comp.gpa, &[_][]const u8{ "o", &digest });
defer comp.gpa.free(o_sub_path);
try comp.bin_file.renameTmpIntoCache(comp.local_cache_directory, tmp_dir_sub_path, o_sub_path);
comp.wholeCacheModeSetBinFilePath(&digest);
// This is intentionally sandwiched between renameTmpIntoCache() and writeManifest().
if (comp.bin_file.options.module) |module| {
// We need to set the zig_cache_artifact_directory for -femit-asm,
// -femit-llvm-ir, etc. to know where to output.
var artifact_dir = try comp.local_cache_directory.handle.openDir(o_sub_path, .{});
defer artifact_dir.close();
var dir_path = try comp.local_cache_directory.join(comp.gpa, &.{o_sub_path});
defer comp.gpa.free(dir_path);
module.zig_cache_artifact_directory = .{
.handle = artifact_dir,
.path = dir_path,
};
try comp.flush();
} else {
try comp.flush();
}
// Failure here only means an unnecessary cache miss.
man.writeManifest() catch |err| {
log.warn("failed to write cache manifest: {s}", .{@errorName(err)});
};
assert(comp.bin_file.lock == null);
comp.bin_file.lock = man.toOwnedLock();
} else {
try comp.flush();
}
// Unload all source files to save memory.
// The ZIR needs to stay loaded in memory because (1) Decl objects contain references
// to it, and (2) generic instantiations, comptime calls, inline calls will need
// to reference the ZIR.
if (self.totalErrorCount() == 0 and !self.keep_source_files_loaded) {
if (self.bin_file.options.module) |module| {
if (!comp.keep_source_files_loaded) {
if (comp.bin_file.options.module) |module| {
for (module.import_table.values()) |file| {
file.unloadTree(self.gpa);
file.unloadSource(self.gpa);
file.unloadTree(comp.gpa);
file.unloadSource(comp.gpa);
}
}
}
}
fn flush(comp: *Compilation) !void {
try comp.bin_file.flush(comp); // This is needed before reading the error flags.
comp.link_error_flags = comp.bin_file.errorFlags();
const use_stage1 = build_options.is_stage1 and comp.bin_file.options.use_stage1;
if (!use_stage1) {
if (comp.bin_file.options.module) |module| {
try link.File.C.flushEmitH(module);
}
}
}
/// Communicate the output binary location to parent Compilations.
fn wholeCacheModeSetBinFilePath(comp: *Compilation, digest: *const [Cache.hex_digest_len]u8) void {
const digest_start = 2; // "o/[digest]/[basename]"
if (comp.whole_bin_sub_path) |sub_path| {
mem.copy(u8, sub_path[digest_start..], digest);
comp.bin_file.options.emit = .{
.directory = comp.local_cache_directory,
.sub_path = sub_path,
};
}
if (comp.whole_implib_sub_path) |sub_path| {
mem.copy(u8, sub_path[digest_start..], digest);
comp.bin_file.options.implib_emit = .{
.directory = comp.local_cache_directory,
.sub_path = sub_path,
};
}
}
fn prepareWholeEmitSubPath(arena: Allocator, opt_emit: ?EmitLoc) error{OutOfMemory}!?[]u8 {
const emit = opt_emit orelse return null;
if (emit.directory != null) return null;
const s = std.fs.path.sep_str;
const format = "o" ++ s ++ ("x" ** Cache.hex_digest_len) ++ s ++ "{s}";
return try std.fmt.allocPrint(arena, format, .{emit.basename});
}
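These two functions cooperate: `prepareWholeEmitSubPath` reserves a sub path of exactly the right shape up front, and `wholeCacheModeSetBinFilePath` later patches the real digest over the `x` placeholder at byte offset 2, preserving the basename. A standalone sketch of the trick, assuming a hypothetical 16-character digest in place of the real `Cache.hex_digest_len`:

const std = @import("std");

pub fn main() !void {
    var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa_state.deinit();
    const gpa = gpa_state.allocator();

    const hex_digest_len = 16; // hypothetical; stands in for Cache.hex_digest_len
    const s = std.fs.path.sep_str;

    // Same shape as prepareWholeEmitSubPath: "o/[xxx...]/[basename]".
    const sub_path = try std.fmt.allocPrint(
        gpa,
        "o" ++ s ++ ("x" ** hex_digest_len) ++ s ++ "{s}",
        .{"main.o"},
    );
    defer gpa.free(sub_path);

    // Later, when the manifest digest is known, patch it in at offset 2,
    // preserving the basename (what wholeCacheModeSetBinFilePath does).
    const digest = "0123456789abcdef";
    std.mem.copy(u8, sub_path[2..], digest);
    std.debug.print("{s}\n", .{sub_path});
}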
/// This is only observed at compile-time and used to emit a compile error
/// to remind the programmer to update multiple related pieces of code that
/// are in different locations. Bump this number when adding or deleting
/// anything from the link cache manifest.
pub const link_hash_implementation_version = 1;
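A minimal sketch of how the guard works: every site that serializes link inputs repeats the `comptime` assertion (see `addNonIncrementalStuffToCacheManifest` below and the linker backends further down), so bumping the constant turns each stale site into a compile error pointing at itself:

const std = @import("std");

pub const link_hash_implementation_version = 1;

fn addLinkInputsToManifest() void {
    // If the version constant is bumped without updating this site,
    // the assertion fails at compile time, pointing here.
    comptime std.debug.assert(link_hash_implementation_version == 1);
    // ... hash the link inputs ...
}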
fn addNonIncrementalStuffToCacheManifest(comp: *Compilation, man: *Cache.Manifest) !void {
const gpa = comp.gpa;
const target = comp.getTarget();
var arena_allocator = std.heap.ArenaAllocator.init(gpa);
defer arena_allocator.deinit();
const arena = arena_allocator.allocator();
comptime assert(link_hash_implementation_version == 1);
if (comp.bin_file.options.module) |mod| {
const main_zig_file = try mod.main_pkg.root_src_directory.join(arena, &[_][]const u8{
mod.main_pkg.root_src_path,
});
_ = try man.addFile(main_zig_file, null);
{
var seen_table = std.AutoHashMap(*Package, void).init(arena);
// Skip builtin.zig; it is useless as an input, and we don't want to have to
// write it before checking for a cache hit.
const builtin_pkg = mod.main_pkg.table.get("builtin").?;
try seen_table.put(builtin_pkg, {});
try addPackageTableToCacheHash(&man.hash, &arena_allocator, mod.main_pkg.table, &seen_table, .{ .files = man });
}
// Synchronize with other matching comments: ZigOnlyHashStuff
man.hash.add(comp.bin_file.options.valgrind);
man.hash.add(comp.bin_file.options.single_threaded);
man.hash.add(comp.bin_file.options.use_stage1);
man.hash.add(comp.bin_file.options.use_llvm);
man.hash.add(comp.bin_file.options.dll_export_fns);
man.hash.add(comp.bin_file.options.is_test);
man.hash.add(comp.test_evented_io);
man.hash.addOptionalBytes(comp.test_filter);
man.hash.addOptionalBytes(comp.test_name_prefix);
man.hash.add(comp.bin_file.options.skip_linker_dependencies);
man.hash.add(comp.bin_file.options.parent_compilation_link_libc);
man.hash.add(mod.emit_h != null);
}
try man.addOptionalFile(comp.bin_file.options.linker_script);
try man.addOptionalFile(comp.bin_file.options.version_script);
try man.addListOfFiles(comp.bin_file.options.objects);
for (comp.c_object_table.keys()) |key| {
_ = try man.addFile(key.src.src_path, null);
man.hash.addListOfBytes(key.src.extra_flags);
}
man.hash.addOptionalEmitLoc(comp.emit_asm);
man.hash.addOptionalEmitLoc(comp.emit_llvm_ir);
man.hash.addOptionalEmitLoc(comp.emit_llvm_bc);
man.hash.addOptionalEmitLoc(comp.emit_analysis);
man.hash.addOptionalEmitLoc(comp.emit_docs);
man.hash.addListOfBytes(comp.clang_argv);
man.hash.addOptional(comp.bin_file.options.stack_size_override);
man.hash.addOptional(comp.bin_file.options.image_base_override);
man.hash.addOptional(comp.bin_file.options.gc_sections);
man.hash.add(comp.bin_file.options.eh_frame_hdr);
man.hash.add(comp.bin_file.options.emit_relocs);
man.hash.add(comp.bin_file.options.rdynamic);
man.hash.addListOfBytes(comp.bin_file.options.lib_dirs);
man.hash.addListOfBytes(comp.bin_file.options.rpath_list);
man.hash.add(comp.bin_file.options.each_lib_rpath);
man.hash.add(comp.bin_file.options.skip_linker_dependencies);
man.hash.add(comp.bin_file.options.z_nodelete);
man.hash.add(comp.bin_file.options.z_notext);
man.hash.add(comp.bin_file.options.z_defs);
man.hash.add(comp.bin_file.options.z_origin);
man.hash.add(comp.bin_file.options.z_noexecstack);
man.hash.add(comp.bin_file.options.z_now);
man.hash.add(comp.bin_file.options.z_relro);
man.hash.add(comp.bin_file.options.include_compiler_rt);
if (comp.bin_file.options.link_libc) {
man.hash.add(comp.bin_file.options.libc_installation != null);
if (comp.bin_file.options.libc_installation) |libc_installation| {
man.hash.addBytes(libc_installation.crt_dir.?);
if (target.abi == .msvc) {
man.hash.addBytes(libc_installation.msvc_lib_dir.?);
man.hash.addBytes(libc_installation.kernel32_lib_dir.?);
}
}
man.hash.addOptionalBytes(comp.bin_file.options.dynamic_linker);
}
man.hash.addOptionalBytes(comp.bin_file.options.soname);
man.hash.addOptional(comp.bin_file.options.version);
link.hashAddSystemLibs(&man.hash, comp.bin_file.options.system_libs);
man.hash.addOptional(comp.bin_file.options.allow_shlib_undefined);
man.hash.add(comp.bin_file.options.bind_global_refs_locally);
man.hash.add(comp.bin_file.options.tsan);
man.hash.addOptionalBytes(comp.bin_file.options.sysroot);
man.hash.add(comp.bin_file.options.linker_optimization);
// WASM specific stuff
man.hash.add(comp.bin_file.options.import_memory);
man.hash.addOptional(comp.bin_file.options.initial_memory);
man.hash.addOptional(comp.bin_file.options.max_memory);
man.hash.addOptional(comp.bin_file.options.global_base);
// Mach-O specific stuff
man.hash.addListOfBytes(comp.bin_file.options.framework_dirs);
man.hash.addListOfBytes(comp.bin_file.options.frameworks);
// COFF specific stuff
man.hash.addOptional(comp.bin_file.options.subsystem);
man.hash.add(comp.bin_file.options.tsaware);
man.hash.add(comp.bin_file.options.nxcompat);
man.hash.add(comp.bin_file.options.dynamicbase);
man.hash.addOptional(comp.bin_file.options.major_subsystem_version);
man.hash.addOptional(comp.bin_file.options.minor_subsystem_version);
}
fn emitOthers(comp: *Compilation) void {
if (comp.bin_file.options.output_mode != .Obj or comp.bin_file.options.module != null or
comp.c_object_table.count() == 0)
@ -2988,7 +3370,9 @@ pub fn cImport(comp: *Compilation, c_src: []const u8) !CImportResult {
const dep_basename = std.fs.path.basename(out_dep_path);
try man.addDepFilePost(zig_cache_tmp_dir, dep_basename);
if (build_options.is_stage1 and comp.bin_file.options.use_stage1) try comp.stage1_cache_manifest.addDepFilePost(zig_cache_tmp_dir, dep_basename);
if (comp.whole_cache_manifest) |whole_cache_manifest| {
try whole_cache_manifest.addDepFilePost(zig_cache_tmp_dir, dep_basename);
}
const digest = man.final();
const o_sub_path = try std.fs.path.join(arena, &[_][]const u8{ "o", &digest });
@ -3351,13 +3735,13 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: *std.P
};
}
pub fn tmpFilePath(comp: *Compilation, arena: Allocator, suffix: []const u8) error{OutOfMemory}![]const u8 {
pub fn tmpFilePath(comp: *Compilation, ally: Allocator, suffix: []const u8) error{OutOfMemory}![]const u8 {
const s = std.fs.path.sep_str;
const rand_int = std.crypto.random.int(u64);
if (comp.local_cache_directory.path) |p| {
return std.fmt.allocPrint(arena, "{s}" ++ s ++ "tmp" ++ s ++ "{x}-{s}", .{ p, rand_int, suffix });
return std.fmt.allocPrint(ally, "{s}" ++ s ++ "tmp" ++ s ++ "{x}-{s}", .{ p, rand_int, suffix });
} else {
return std.fmt.allocPrint(arena, "tmp" ++ s ++ "{x}-{s}", .{ rand_int, suffix });
return std.fmt.allocPrint(ally, "tmp" ++ s ++ "{x}-{s}", .{ rand_int, suffix });
}
}
@ -4424,6 +4808,7 @@ fn buildOutputFromZig(
.global_cache_directory = comp.global_cache_directory,
.local_cache_directory = comp.global_cache_directory,
.zig_lib_directory = comp.zig_lib_directory,
.cache_mode = .whole,
.target = target,
.root_name = root_name,
.main_pkg = &main_pkg,
@ -4501,10 +4886,7 @@ fn updateStage1Module(comp: *Compilation, main_progress_node: *std.Progress.Node
mod.main_pkg.root_src_path,
});
const zig_lib_dir = comp.zig_lib_directory.path.?;
const builtin_zig_path = try directory.join(arena, &[_][]const u8{"builtin.zig"});
const target = comp.getTarget();
const id_symlink_basename = "stage1.id";
const libs_txt_basename = "libs.txt";
// The include_compiler_rt stored in the bin file options here means that we need
// compiler-rt symbols *somehow*. However, in the context of using the stage1 backend
@ -4516,115 +4898,6 @@ fn updateStage1Module(comp: *Compilation, main_progress_node: *std.Progress.Node
const include_compiler_rt = comp.bin_file.options.output_mode == .Obj and
comp.bin_file.options.include_compiler_rt;
// We are about to obtain this lock, so here we give other processes a chance first.
comp.releaseStage1Lock();
// Unlike with the self-hosted Zig module, stage1 does not support incremental compilation,
// so we input all the zig source files into the cache hash system. We're going to keep
// the artifact directory the same, however, so we take the same strategy as linking
// does where we have a file which specifies the hash of the output directory so that we can
// skip the expensive compilation step if the hash matches.
var man = comp.cache_parent.obtain();
defer man.deinit();
_ = try man.addFile(main_zig_file, null);
{
var seen_table = std.AutoHashMap(*Package, void).init(arena_allocator.allocator());
try addPackageTableToCacheHash(&man.hash, &arena_allocator, mod.main_pkg.table, &seen_table, .{ .files = &man });
}
man.hash.add(comp.bin_file.options.valgrind);
man.hash.add(comp.bin_file.options.single_threaded);
man.hash.add(target.os.getVersionRange());
man.hash.add(comp.bin_file.options.dll_export_fns);
man.hash.add(comp.bin_file.options.function_sections);
man.hash.add(include_compiler_rt);
man.hash.add(comp.bin_file.options.is_test);
man.hash.add(comp.bin_file.options.emit != null);
man.hash.add(mod.emit_h != null);
if (mod.emit_h) |emit_h| {
man.hash.addEmitLoc(emit_h.loc);
}
man.hash.addOptionalEmitLoc(comp.emit_asm);
man.hash.addOptionalEmitLoc(comp.emit_llvm_ir);
man.hash.addOptionalEmitLoc(comp.emit_llvm_bc);
man.hash.addOptionalEmitLoc(comp.emit_analysis);
man.hash.addOptionalEmitLoc(comp.emit_docs);
man.hash.add(comp.test_evented_io);
man.hash.addOptionalBytes(comp.test_filter);
man.hash.addOptionalBytes(comp.test_name_prefix);
man.hash.addListOfBytes(comp.clang_argv);
// Capture the state in case we come back from this branch where the hash doesn't match.
const prev_hash_state = man.hash.peekBin();
const input_file_count = man.files.items.len;
const hit = man.hit() catch |err| {
const i = man.failed_file_index orelse return err;
const file_path = man.files.items[i].path orelse return err;
fatal("unable to build stage1 zig object: {s}: {s}", .{ @errorName(err), file_path });
};
if (hit) {
const digest = man.final();
// We use an extra hex-encoded byte here to store some flags.
var prev_digest_buf: [digest.len + 2]u8 = undefined;
const prev_digest: []u8 = Cache.readSmallFile(
directory.handle,
id_symlink_basename,
&prev_digest_buf,
) catch |err| blk: {
log.debug("stage1 {s} new_digest={s} error: {s}", .{
mod.main_pkg.root_src_path,
std.fmt.fmtSliceHexLower(&digest),
@errorName(err),
});
// Handle this as a cache miss.
break :blk prev_digest_buf[0..0];
};
if (prev_digest.len >= digest.len + 2) hit: {
if (!mem.eql(u8, prev_digest[0..digest.len], &digest))
break :hit;
log.debug("stage1 {s} digest={s} match - skipping invocation", .{
mod.main_pkg.root_src_path,
std.fmt.fmtSliceHexLower(&digest),
});
var flags_bytes: [1]u8 = undefined;
_ = std.fmt.hexToBytes(&flags_bytes, prev_digest[digest.len..]) catch {
log.warn("bad cache stage1 digest: '{s}'", .{std.fmt.fmtSliceHexLower(prev_digest)});
break :hit;
};
if (directory.handle.readFileAlloc(comp.gpa, libs_txt_basename, 10 * 1024 * 1024)) |libs_txt| {
var it = mem.tokenize(u8, libs_txt, "\n");
while (it.next()) |lib_name| {
try comp.stage1AddLinkLib(lib_name);
}
} else |err| switch (err) {
error.FileNotFound => {}, // That's OK, it just means 0 libs.
else => {
log.warn("unable to read cached list of link libs: {s}", .{@errorName(err)});
break :hit;
},
}
comp.stage1_lock = man.toOwnedLock();
mod.stage1_flags = @bitCast(@TypeOf(mod.stage1_flags), flags_bytes[0]);
return;
}
log.debug("stage1 {s} prev_digest={s} new_digest={s}", .{
mod.main_pkg.root_src_path,
std.fmt.fmtSliceHexLower(prev_digest),
std.fmt.fmtSliceHexLower(&digest),
});
man.unhit(prev_hash_state, input_file_count);
}
// We are about to change the output file to be different, so we invalidate the build hash now.
directory.handle.deleteFile(id_symlink_basename) catch |err| switch (err) {
error.FileNotFound => {},
else => |e| return e,
};
const stage2_target = try arena.create(stage1.Stage2Target);
stage2_target.* = .{
.arch = @enumToInt(target.cpu.arch) + 1, // skip over ZigLLVM_UnknownArch
@ -4637,9 +4910,9 @@ fn updateStage1Module(comp: *Compilation, main_progress_node: *std.Progress.Node
.llvm_target_abi = if (target_util.llvmMachineAbi(target)) |s| s.ptr else null,
};
comp.stage1_cache_manifest = &man;
const main_pkg_path = mod.main_pkg.root_src_directory.path orelse "";
const builtin_pkg = mod.main_pkg.table.get("builtin").?;
const builtin_zig_path = try builtin_pkg.root_src_directory.join(arena, &.{builtin_pkg.root_src_path});
const stage1_module = stage1.create(
@enumToInt(comp.bin_file.options.optimize_mode),
@ -4740,19 +5013,8 @@ fn updateStage1Module(comp: *Compilation, main_progress_node: *std.Progress.Node
.have_dllmain_crt_startup = false,
};
const inferred_lib_start_index = comp.bin_file.options.system_libs.count();
stage1_module.build_object();
if (comp.bin_file.options.system_libs.count() > inferred_lib_start_index) {
// We need to save the inferred link libs to the cache, otherwise if we get a cache hit
// next time we will be missing these libs.
var libs_txt = std.ArrayList(u8).init(arena);
for (comp.bin_file.options.system_libs.keys()[inferred_lib_start_index..]) |key| {
try libs_txt.writer().print("{s}\n", .{key});
}
try directory.handle.writeFile(libs_txt_basename, libs_txt.items);
}
mod.stage1_flags = .{
.have_c_main = stage1_module.have_c_main,
.have_winmain = stage1_module.have_winmain,
@ -4763,34 +5025,6 @@ fn updateStage1Module(comp: *Compilation, main_progress_node: *std.Progress.Node
};
stage1_module.destroy();
const digest = man.final();
// Update the small file with the digest. If it fails we can continue; it only
// means that the next invocation will have an unnecessary cache miss.
const stage1_flags_byte = @bitCast(u8, mod.stage1_flags);
log.debug("stage1 {s} final digest={s} flags={x}", .{
mod.main_pkg.root_src_path, std.fmt.fmtSliceHexLower(&digest), stage1_flags_byte,
});
var digest_plus_flags: [digest.len + 2]u8 = undefined;
digest_plus_flags[0..digest.len].* = digest;
assert(std.fmt.formatIntBuf(digest_plus_flags[digest.len..], stage1_flags_byte, 16, .lower, .{
.width = 2,
.fill = '0',
}) == 2);
log.debug("saved digest + flags: '{s}' (byte = {}) have_winmain_crt_startup={}", .{
digest_plus_flags, stage1_flags_byte, mod.stage1_flags.have_winmain_crt_startup,
});
Cache.writeSmallFile(directory.handle, id_symlink_basename, &digest_plus_flags) catch |err| {
log.warn("failed to save stage1 hash digest file: {s}", .{@errorName(err)});
};
// Failure here only means an unnecessary cache miss.
man.writeManifest() catch |err| {
log.warn("failed to write cache manifest when linking: {s}", .{@errorName(err)});
};
// We hang on to this lock so that the output file path can be used without
// other processes clobbering it.
comp.stage1_lock = man.toOwnedLock();
}
fn stage1LocPath(arena: Allocator, opt_loc: ?EmitLoc, cache_directory: Directory) ![]const u8 {
@ -4862,6 +5096,7 @@ pub fn build_crt_file(
.local_cache_directory = comp.global_cache_directory,
.global_cache_directory = comp.global_cache_directory,
.zig_lib_directory = comp.zig_lib_directory,
.cache_mode = .whole,
.target = target,
.root_name = root_name,
.main_pkg = null,

View File

@ -33,7 +33,7 @@ const build_options = @import("build_options");
gpa: Allocator,
comp: *Compilation,
/// Where our incremental compilation metadata serialization will go.
/// Where build artifacts and serialized incremental compilation metadata go.
zig_cache_artifact_directory: Compilation.Directory,
/// Pointer to externally managed resource.
root_pkg: *Package,
@ -1463,11 +1463,7 @@ pub const File = struct {
/// Whether this is populated depends on `source_loaded`.
source: [:0]const u8,
/// Whether this is populated depends on `status`.
stat_size: u64,
/// Whether this is populated depends on `status`.
stat_inode: std.fs.File.INode,
/// Whether this is populated depends on `status`.
stat_mtime: i128,
stat: Cache.File.Stat,
/// Whether this is populated or not depends on `tree_loaded`.
tree: Ast,
/// Whether this is populated or not depends on `zir_loaded`.
@ -1535,8 +1531,16 @@ pub const File = struct {
file.* = undefined;
}
pub fn getSource(file: *File, gpa: Allocator) ![:0]const u8 {
if (file.source_loaded) return file.source;
pub const Source = struct {
bytes: [:0]const u8,
stat: Cache.File.Stat,
};
pub fn getSource(file: *File, gpa: Allocator) !Source {
if (file.source_loaded) return Source{
.bytes = file.source,
.stat = file.stat,
};
const root_dir_path = file.pkg.root_src_directory.path orelse ".";
log.debug("File.getSource, not cached. pkgdir={s} sub_file_path={s}", .{
@ -1565,14 +1569,21 @@ pub const File = struct {
file.source = source;
file.source_loaded = true;
return source;
return Source{
.bytes = source,
.stat = .{
.size = stat.size,
.inode = stat.inode,
.mtime = stat.mtime,
},
};
}
pub fn getTree(file: *File, gpa: Allocator) !*const Ast {
if (file.tree_loaded) return &file.tree;
const source = try file.getSource(gpa);
file.tree = try std.zig.parse(gpa, source);
file.tree = try std.zig.parse(gpa, source.bytes);
file.tree_loaded = true;
return &file.tree;
}
@ -1631,9 +1642,7 @@ pub const EmbedFile = struct {
/// Memory is stored in gpa, owned by EmbedFile.
sub_file_path: []const u8,
bytes: [:0]const u8,
stat_size: u64,
stat_inode: std.fs.File.INode,
stat_mtime: i128,
stat: Cache.File.Stat,
/// Package that this file is a part of, managed externally.
pkg: *Package,
/// The Decl that was created from the `@embedFile` to own this resource.
@ -2704,9 +2713,11 @@ pub fn astGenFile(mod: *Module, file: *File) !void {
keep_zir = true;
file.zir = zir;
file.zir_loaded = true;
file.stat_size = header.stat_size;
file.stat_inode = header.stat_inode;
file.stat_mtime = header.stat_mtime;
file.stat = .{
.size = header.stat_size,
.inode = header.stat_inode,
.mtime = header.stat_mtime,
};
file.status = .success_zir;
log.debug("AstGen cached success: {s}", .{file.sub_file_path});
@ -2724,9 +2735,9 @@ pub fn astGenFile(mod: *Module, file: *File) !void {
},
.parse_failure, .astgen_failure, .success_zir => {
const unchanged_metadata =
stat.size == file.stat_size and
stat.mtime == file.stat_mtime and
stat.inode == file.stat_inode;
stat.size == file.stat.size and
stat.mtime == file.stat.mtime and
stat.inode == file.stat.inode;
if (unchanged_metadata) {
log.debug("unmodified metadata of file: {s}", .{file.sub_file_path});
@ -2787,9 +2798,11 @@ pub fn astGenFile(mod: *Module, file: *File) !void {
if (amt != stat.size)
return error.UnexpectedEndOfFile;
file.stat_size = stat.size;
file.stat_inode = stat.inode;
file.stat_mtime = stat.mtime;
file.stat = .{
.size = stat.size,
.inode = stat.inode,
.mtime = stat.mtime,
};
file.source = source;
file.source_loaded = true;
@ -3069,9 +3082,11 @@ pub fn populateBuiltinFile(mod: *Module) !void {
try writeBuiltinFile(file, builtin_pkg);
} else {
file.stat_size = stat.size;
file.stat_inode = stat.inode;
file.stat_mtime = stat.mtime;
file.stat = .{
.size = stat.size,
.inode = stat.inode,
.mtime = stat.mtime,
};
}
} else |err| switch (err) {
error.BadPathName => unreachable, // it's always "builtin.zig"
@ -3099,9 +3114,11 @@ pub fn writeBuiltinFile(file: *File, builtin_pkg: *Package) !void {
try af.file.writeAll(file.source);
try af.finish();
file.stat_size = file.source.len;
file.stat_inode = 0; // dummy value
file.stat_mtime = 0; // dummy value
file.stat = .{
.size = file.source.len,
.inode = 0, // dummy value
.mtime = 0, // dummy value
};
}
pub fn mapOldZirToNew(
@ -3380,6 +3397,19 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void {
error.OutOfMemory => return error.OutOfMemory,
error.AnalysisFail => {},
}
if (mod.comp.whole_cache_manifest) |man| {
const source = file.getSource(gpa) catch |err| {
try reportRetryableFileError(mod, file, "unable to load source: {s}", .{@errorName(err)});
return error.AnalysisFail;
};
const resolved_path = try file.pkg.root_src_directory.join(gpa, &.{
file.sub_file_path,
});
errdefer gpa.free(resolved_path);
try man.addFilePostContents(resolved_path, source.bytes, source.stat);
}
} else {
new_decl.analysis = .file_failure;
}
@ -3710,9 +3740,7 @@ pub fn importPkg(mod: *Module, pkg: *Package) !ImportFileResult {
.source_loaded = false,
.tree_loaded = false,
.zir_loaded = false,
.stat_size = undefined,
.stat_inode = undefined,
.stat_mtime = undefined,
.stat = undefined,
.tree = undefined,
.zir = undefined,
.status = .never_loaded,
@ -3780,9 +3808,7 @@ pub fn importFile(
.source_loaded = false,
.tree_loaded = false,
.zir_loaded = false,
.stat_size = undefined,
.stat_inode = undefined,
.stat_mtime = undefined,
.stat = undefined,
.tree = undefined,
.zir = undefined,
.status = .never_loaded,
@ -3827,8 +3853,13 @@ pub fn embedFile(mod: *Module, cur_file: *File, rel_file_path: []const u8) !*Emb
var file = try cur_file.pkg.root_src_directory.handle.openFile(sub_file_path, .{});
defer file.close();
const stat = try file.stat();
const size_usize = try std.math.cast(usize, stat.size);
const actual_stat = try file.stat();
const stat: Cache.File.Stat = .{
.size = actual_stat.size,
.inode = actual_stat.inode,
.mtime = actual_stat.mtime,
};
const size_usize = try std.math.cast(usize, actual_stat.size);
const bytes = try file.readToEndAllocOptions(gpa, std.math.maxInt(u32), size_usize, 1, 0);
errdefer gpa.free(bytes);
@ -3836,14 +3867,18 @@ pub fn embedFile(mod: *Module, cur_file: *File, rel_file_path: []const u8) !*Emb
resolved_root_path, resolved_path, sub_file_path, rel_file_path,
});
if (mod.comp.whole_cache_manifest) |man| {
const copied_resolved_path = try gpa.dupe(u8, resolved_path);
errdefer gpa.free(copied_resolved_path);
try man.addFilePostContents(copied_resolved_path, bytes, stat);
}
keep_resolved_path = true; // It's now owned by embed_table.
gop.value_ptr.* = new_file;
new_file.* = .{
.sub_file_path = sub_file_path,
.bytes = bytes,
.stat_size = stat.size,
.stat_inode = stat.inode,
.stat_mtime = stat.mtime,
.stat = stat,
.pkg = cur_file.pkg,
.owner_decl = undefined, // Set by Sema immediately after this function returns.
};
@ -3857,9 +3892,9 @@ pub fn detectEmbedFileUpdate(mod: *Module, embed_file: *EmbedFile) !void {
const stat = try file.stat();
const unchanged_metadata =
stat.size == embed_file.stat_size and
stat.mtime == embed_file.stat_mtime and
stat.inode == embed_file.stat_inode;
stat.size == embed_file.stat.size and
stat.mtime == embed_file.stat.mtime and
stat.inode == embed_file.stat.inode;
if (unchanged_metadata) return;
@ -3868,9 +3903,11 @@ pub fn detectEmbedFileUpdate(mod: *Module, embed_file: *EmbedFile) !void {
const bytes = try file.readToEndAllocOptions(gpa, std.math.maxInt(u32), size_usize, 1, 0);
gpa.free(embed_file.bytes);
embed_file.bytes = bytes;
embed_file.stat_size = stat.size;
embed_file.stat_mtime = stat.mtime;
embed_file.stat_inode = stat.inode;
embed_file.stat = .{
.size = stat.size,
.mtime = stat.mtime,
.inode = stat.inode,
};
mod.comp.mutex.lock();
defer mod.comp.mutex.unlock();
@ -5001,3 +5038,35 @@ pub fn linkerUpdateDecl(mod: *Module, decl: *Decl) !void {
},
};
}
fn reportRetryableFileError(
mod: *Module,
file: *File,
comptime format: []const u8,
args: anytype,
) error{OutOfMemory}!void {
file.status = .retryable_failure;
const err_msg = try ErrorMsg.create(
mod.gpa,
.{
.file_scope = file,
.parent_decl_node = 0,
.lazy = .entire_file,
},
format,
args,
);
errdefer err_msg.destroy(mod.gpa);
mod.comp.mutex.lock();
defer mod.comp.mutex.unlock();
const gop = try mod.failed_files.getOrPut(mod.gpa, file);
if (gop.found_existing) {
if (gop.value_ptr.*) |old_err_msg| {
old_err_msg.destroy(mod.gpa);
}
}
gop.value_ptr.* = err_msg;
}

View File

@ -14680,11 +14680,11 @@ fn resolvePeerTypes(
instructions: []Air.Inst.Ref,
candidate_srcs: Module.PeerTypeCandidateSrc,
) !Type {
if (instructions.len == 0)
return Type.initTag(.noreturn);
if (instructions.len == 1)
return sema.typeOf(instructions[0]);
switch (instructions.len) {
0 => return Type.initTag(.noreturn),
1 => return sema.typeOf(instructions[0]),
else => {},
}
const target = sema.mod.getTarget();
@ -14714,13 +14714,14 @@ fn resolvePeerTypes(
continue;
},
.Int => {
if (chosen_ty.isSignedInt() == candidate_ty.isSignedInt()) {
if (chosen_ty.intInfo(target).bits < candidate_ty.intInfo(target).bits) {
chosen = candidate;
chosen_i = candidate_i + 1;
}
continue;
const chosen_info = chosen_ty.intInfo(target);
const candidate_info = candidate_ty.intInfo(target);
if (chosen_info.bits < candidate_info.bits) {
chosen = candidate;
chosen_i = candidate_i + 1;
}
continue;
},
.Pointer => if (chosen_ty.ptrSize() == .C) continue,
else => {},

View File

@ -324,10 +324,12 @@ pub const Object = struct {
const mod = comp.bin_file.options.module.?;
const cache_dir = mod.zig_cache_artifact_directory;
const emit_bin_path: ?[*:0]const u8 = if (comp.bin_file.options.emit) |emit|
try emit.directory.joinZ(arena, &[_][]const u8{self.sub_path})
else
null;
const emit_bin_path: ?[*:0]const u8 = if (comp.bin_file.options.emit) |emit| blk: {
const full_out_path = try emit.directory.join(arena, &[_][]const u8{emit.sub_path});
break :blk try std.fs.path.joinZ(arena, &.{
std.fs.path.dirname(full_out_path).?, self.sub_path,
});
} else null;
const emit_asm_path = try locPath(arena, comp.emit_asm, cache_dir);
const emit_llvm_ir_path = try locPath(arena, comp.emit_llvm_ir, cache_dir);

View File

@ -1062,6 +1062,7 @@ fn buildSharedLib(
.local_cache_directory = zig_cache_directory,
.global_cache_directory = comp.global_cache_directory,
.zig_lib_directory = comp.zig_lib_directory,
.cache_mode = .whole,
.target = comp.getTarget(),
.root_name = lib.name,
.main_pkg = null,

View File

@ -177,6 +177,7 @@ pub fn buildLibCXX(comp: *Compilation) !void {
.local_cache_directory = comp.global_cache_directory,
.global_cache_directory = comp.global_cache_directory,
.zig_lib_directory = comp.zig_lib_directory,
.cache_mode = .whole,
.target = target,
.root_name = root_name,
.main_pkg = null,
@ -218,10 +219,9 @@ pub fn buildLibCXX(comp: *Compilation) !void {
assert(comp.libcxx_static_lib == null);
comp.libcxx_static_lib = Compilation.CRTFile{
.full_object_path = try sub_compilation.bin_file.options.emit.?.directory.join(
comp.gpa,
&[_][]const u8{basename},
),
.full_object_path = try sub_compilation.bin_file.options.emit.?.directory.join(comp.gpa, &[_][]const u8{
sub_compilation.bin_file.options.emit.?.sub_path,
}),
.lock = sub_compilation.bin_file.toOwnedLock(),
};
}
@ -309,6 +309,7 @@ pub fn buildLibCXXABI(comp: *Compilation) !void {
.local_cache_directory = comp.global_cache_directory,
.global_cache_directory = comp.global_cache_directory,
.zig_lib_directory = comp.zig_lib_directory,
.cache_mode = .whole,
.target = target,
.root_name = root_name,
.main_pkg = null,
@ -350,10 +351,9 @@ pub fn buildLibCXXABI(comp: *Compilation) !void {
assert(comp.libcxxabi_static_lib == null);
comp.libcxxabi_static_lib = Compilation.CRTFile{
.full_object_path = try sub_compilation.bin_file.options.emit.?.directory.join(
comp.gpa,
&[_][]const u8{basename},
),
.full_object_path = try sub_compilation.bin_file.options.emit.?.directory.join(comp.gpa, &[_][]const u8{
sub_compilation.bin_file.options.emit.?.sub_path,
}),
.lock = sub_compilation.bin_file.toOwnedLock(),
};
}

View File

@ -199,6 +199,7 @@ pub fn buildTsan(comp: *Compilation) !void {
.local_cache_directory = comp.global_cache_directory,
.global_cache_directory = comp.global_cache_directory,
.zig_lib_directory = comp.zig_lib_directory,
.cache_mode = .whole,
.target = target,
.root_name = root_name,
.main_pkg = null,
@ -237,10 +238,9 @@ pub fn buildTsan(comp: *Compilation) !void {
assert(comp.tsan_static_lib == null);
comp.tsan_static_lib = Compilation.CRTFile{
.full_object_path = try sub_compilation.bin_file.options.emit.?.directory.join(
comp.gpa,
&[_][]const u8{basename},
),
.full_object_path = try sub_compilation.bin_file.options.emit.?.directory.join(comp.gpa, &[_][]const u8{
sub_compilation.bin_file.options.emit.?.sub_path,
}),
.lock = sub_compilation.bin_file.toOwnedLock(),
};
}

View File

@ -101,6 +101,7 @@ pub fn buildStaticLib(comp: *Compilation) !void {
.local_cache_directory = comp.global_cache_directory,
.global_cache_directory = comp.global_cache_directory,
.zig_lib_directory = comp.zig_lib_directory,
.cache_mode = .whole,
.target = target,
.root_name = root_name,
.main_pkg = null,
@ -141,11 +142,11 @@ pub fn buildStaticLib(comp: *Compilation) !void {
try sub_compilation.updateSubCompilation();
assert(comp.libunwind_static_lib == null);
comp.libunwind_static_lib = Compilation.CRTFile{
.full_object_path = try sub_compilation.bin_file.options.emit.?.directory.join(
comp.gpa,
&[_][]const u8{basename},
),
.full_object_path = try sub_compilation.bin_file.options.emit.?.directory.join(comp.gpa, &[_][]const u8{
sub_compilation.bin_file.options.emit.?.sub_path,
}),
.lock = sub_compilation.bin_file.toOwnedLock(),
};
}

View File

@ -22,6 +22,8 @@ pub const SystemLib = struct {
needed: bool = false,
};
pub const CacheMode = enum { incremental, whole };
pub fn hashAddSystemLibs(
hh: *Cache.HashHelper,
hm: std.StringArrayHashMapUnmanaged(SystemLib),
@ -44,10 +46,9 @@ pub const Emit = struct {
};
pub const Options = struct {
/// This is `null` when -fno-emit-bin is used. When `openPath` or `flush` is called,
/// it will have already been null-checked.
/// This is `null` when `-fno-emit-bin` is used.
emit: ?Emit,
/// This is `null` not building a Windows DLL, or when -fno-emit-implib is used.
/// This is `null` when not building a Windows DLL, or when `-fno-emit-implib` is used.
implib_emit: ?Emit,
target: std.Target,
output_mode: std.builtin.OutputMode,
@ -70,6 +71,7 @@ pub const Options = struct {
entry_addr: ?u64 = null,
stack_size_override: ?u64,
image_base_override: ?u64,
cache_mode: CacheMode,
include_compiler_rt: bool,
/// Set to `true` to omit debug info.
strip: bool,
@ -165,6 +167,12 @@ pub const Options = struct {
pub fn effectiveOutputMode(options: Options) std.builtin.OutputMode {
return if (options.use_lld) .Obj else options.output_mode;
}
pub fn move(self: *Options) Options {
const copied_state = self.*;
self.system_libs = .{};
return copied_state;
}
};
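`move()` hands ownership of the heap-allocated `system_libs` map to the returned copy and leaves the source empty, so the old `link.File` can be destroyed without freeing memory the copy now owns; this is exactly how `update()` uses it before re-opening the file. A reduced standalone sketch of the pattern:

const std = @import("std");

const Options = struct {
    system_libs: std.StringArrayHashMapUnmanaged(void) = .{},

    // Return the current state by value and reset the source, so exactly
    // one owner is left responsible for deinitializing system_libs.
    pub fn move(self: *Options) Options {
        const copied_state = self.*;
        self.system_libs = .{};
        return copied_state;
    }
};

test "move leaves the source empty" {
    const gpa = std.testing.allocator;
    var a = Options{};
    try a.system_libs.put(gpa, "c", {});
    var b = a.move();
    defer b.system_libs.deinit(gpa);
    try std.testing.expect(a.system_libs.count() == 0);
    try std.testing.expect(b.system_libs.count() == 1);
}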
pub const File = struct {
@ -628,6 +636,36 @@ pub const File = struct {
}
}
/// This function is called by the frontend before flush(). It communicates that
/// the `bin_file.options.emit` directory needs to be renamed from
/// `[zig-cache]/tmp/[random]` to `[zig-cache]/o/[digest]`.
/// The frontend would like to simply perform a file system rename, however,
/// some linker backends care about the file paths of the objects they are linking.
/// So this function call tells linker backends to rename the paths of object files
/// to observe the new directory path.
/// Linker backends which do not have this requirement can fall back to the simple
/// implementation at the bottom of this function.
/// This function is only called when CacheMode is `whole`.
pub fn renameTmpIntoCache(
base: *File,
cache_directory: Compilation.Directory,
tmp_dir_sub_path: []const u8,
o_sub_path: []const u8,
) !void {
// So far, none of the linker backends need to respond to this event; however,
// it makes sense that they might want to. So we leave this mechanism here
// for now. Once the linker backends get more mature, if it turns out this
// is not needed we can refactor this into having the frontend do the rename
// directly, and remove this function from link.zig.
_ = base;
try std.fs.rename(
cache_directory.handle,
tmp_dir_sub_path,
cache_directory.handle,
o_sub_path,
);
}
pub fn linkAsArchive(base: *File, comp: *Compilation) !void {
const tracy = trace(@src());
defer tracy.end();
@ -637,9 +675,11 @@ pub const File = struct {
const arena = arena_allocator.allocator();
const directory = base.options.emit.?.directory; // Just an alias to make it shorter to type.
const full_out_path = try directory.join(arena, &[_][]const u8{base.options.emit.?.sub_path});
const full_out_path_z = try arena.dupeZ(u8, full_out_path);
// If there is no Zig code to compile, then we should skip flushing the output file because it
// will not be part of the linker line anyway.
// If there is no Zig code to compile, then we should skip flushing the output file
// because it will not be part of the linker line anyway.
const module_obj_path: ?[]const u8 = if (base.options.module) |module| blk: {
const use_stage1 = build_options.is_stage1 and base.options.use_stage1;
if (use_stage1) {
@ -648,20 +688,28 @@ pub const File = struct {
.target = base.options.target,
.output_mode = .Obj,
});
const o_directory = module.zig_cache_artifact_directory;
const full_obj_path = try o_directory.join(arena, &[_][]const u8{obj_basename});
break :blk full_obj_path;
switch (base.options.cache_mode) {
.incremental => break :blk try module.zig_cache_artifact_directory.join(
arena,
&[_][]const u8{obj_basename},
),
.whole => break :blk try fs.path.join(arena, &.{
fs.path.dirname(full_out_path_z).?, obj_basename,
}),
}
}
if (base.options.object_format == .macho) {
try base.cast(MachO).?.flushObject(comp);
} else {
try base.flushModule(comp);
}
const obj_basename = base.intermediary_basename.?;
const full_obj_path = try directory.join(arena, &[_][]const u8{obj_basename});
break :blk full_obj_path;
break :blk try fs.path.join(arena, &.{
fs.path.dirname(full_out_path_z).?, base.intermediary_basename.?,
});
} else null;
log.debug("module_obj_path={s}", .{if (module_obj_path) |s| s else "(null)"});
const compiler_rt_path: ?[]const u8 = if (base.options.include_compiler_rt)
comp.compiler_rt_obj.?.full_object_path
else
@ -734,9 +782,6 @@ pub const File = struct {
object_files.appendAssumeCapacity(try arena.dupeZ(u8, p));
}
const full_out_path = try directory.join(arena, &[_][]const u8{base.options.emit.?.sub_path});
const full_out_path_z = try arena.dupeZ(u8, full_out_path);
if (base.options.verbose_link) {
std.debug.print("ar rcs {s}", .{full_out_path_z});
for (object_files.items) |arg| {

View File

@ -880,6 +880,7 @@ fn linkWithLLD(self: *Coff, comp: *Compilation) !void {
const arena = arena_allocator.allocator();
const directory = self.base.options.emit.?.directory; // Just an alias to make it shorter to type.
const full_out_path = try directory.join(arena, &[_][]const u8{self.base.options.emit.?.sub_path});
// If there is no Zig code to compile, then we should skip flushing the output file because it
// will not be part of the linker line anyway.
@ -891,15 +892,22 @@ fn linkWithLLD(self: *Coff, comp: *Compilation) !void {
.target = self.base.options.target,
.output_mode = .Obj,
});
const o_directory = module.zig_cache_artifact_directory;
const full_obj_path = try o_directory.join(arena, &[_][]const u8{obj_basename});
break :blk full_obj_path;
switch (self.base.options.cache_mode) {
.incremental => break :blk try module.zig_cache_artifact_directory.join(
arena,
&[_][]const u8{obj_basename},
),
.whole => break :blk try fs.path.join(arena, &.{
fs.path.dirname(full_out_path).?, obj_basename,
}),
}
}
try self.flushModule(comp);
const obj_basename = self.base.intermediary_basename.?;
const full_obj_path = try directory.join(arena, &[_][]const u8{obj_basename});
break :blk full_obj_path;
break :blk try fs.path.join(arena, &.{
fs.path.dirname(full_out_path).?, self.base.intermediary_basename.?,
});
} else null;
const is_lib = self.base.options.output_mode == .Lib;
@ -920,6 +928,8 @@ fn linkWithLLD(self: *Coff, comp: *Compilation) !void {
man = comp.cache_parent.obtain();
self.base.releaseLock();
comptime assert(Compilation.link_hash_implementation_version == 1);
try man.addListOfFiles(self.base.options.objects);
for (comp.c_object_table.keys()) |key| {
_ = try man.addFile(key.status.success.object_path, null);
@ -976,7 +986,6 @@ fn linkWithLLD(self: *Coff, comp: *Compilation) !void {
};
}
const full_out_path = try directory.join(arena, &[_][]const u8{self.base.options.emit.?.sub_path});
if (self.base.options.output_mode == .Obj) {
// LLD's COFF driver does not support the equivalent of `-r` so we do a simple file copy
// here. TODO: think carefully about how we can avoid this redundant operation when doing

View File

@ -297,6 +297,7 @@ pub fn createEmpty(gpa: Allocator, options: link.Options) !*Elf {
else => return error.UnsupportedELFArchitecture,
};
const self = try gpa.create(Elf);
errdefer gpa.destroy(self);
self.* = .{
.base = .{
.tag = .elf,
@ -306,6 +307,9 @@ pub fn createEmpty(gpa: Allocator, options: link.Options) !*Elf {
},
.ptr_width = ptr_width,
};
// TODO get rid of the sub_path parameter to LlvmObject.create
// and create the llvm_object here. Also openPath needs to
// not override this field or there will be a memory leak.
return self;
}
@ -1298,6 +1302,7 @@ fn linkWithLLD(self: *Elf, comp: *Compilation) !void {
const arena = arena_allocator.allocator();
const directory = self.base.options.emit.?.directory; // Just an alias to make it shorter to type.
const full_out_path = try directory.join(arena, &[_][]const u8{self.base.options.emit.?.sub_path});
// If there is no Zig code to compile, then we should skip flushing the output file because it
// will not be part of the linker line anyway.
@ -1309,15 +1314,22 @@ fn linkWithLLD(self: *Elf, comp: *Compilation) !void {
.target = self.base.options.target,
.output_mode = .Obj,
});
const o_directory = module.zig_cache_artifact_directory;
const full_obj_path = try o_directory.join(arena, &[_][]const u8{obj_basename});
break :blk full_obj_path;
switch (self.base.options.cache_mode) {
.incremental => break :blk try module.zig_cache_artifact_directory.join(
arena,
&[_][]const u8{obj_basename},
),
.whole => break :blk try fs.path.join(arena, &.{
fs.path.dirname(full_out_path).?, obj_basename,
}),
}
}
try self.flushModule(comp);
const obj_basename = self.base.intermediary_basename.?;
const full_obj_path = try directory.join(arena, &[_][]const u8{obj_basename});
break :blk full_obj_path;
break :blk try fs.path.join(arena, &.{
fs.path.dirname(full_out_path).?, self.base.intermediary_basename.?,
});
} else null;
const is_obj = self.base.options.output_mode == .Obj;
@@ -1357,6 +1369,8 @@ fn linkWithLLD(self: *Elf, comp: *Compilation) !void {
// We are about to obtain this lock, so here we give other processes a chance first.
self.base.releaseLock();
comptime assert(Compilation.link_hash_implementation_version == 1);
try man.addOptionalFile(self.base.options.linker_script);
try man.addOptionalFile(self.base.options.version_script);
try man.addListOfFiles(self.base.options.objects);
@@ -1432,8 +1446,6 @@ fn linkWithLLD(self: *Elf, comp: *Compilation) !void {
};
}
const full_out_path = try directory.join(arena, &[_][]const u8{self.base.options.emit.?.sub_path});
// Due to a deficiency in LLD, we need to special-case BPF to a simple file copy when generating
// relocatables. Normally, we would expect `lld -r` to work. However, because LLD wants to resolve
// BPF relocations, which it shouldn't, it fails before even generating the relocatable.

View File

@@ -423,6 +423,7 @@ pub fn flushModule(self: *MachO, comp: *Compilation) !void {
const arena = arena_allocator.allocator();
const directory = self.base.options.emit.?.directory; // Just an alias to make it shorter to type.
const full_out_path = try directory.join(arena, &[_][]const u8{self.base.options.emit.?.sub_path});
// If there is no Zig code to compile, then we should skip flushing the output file because it
// will not be part of the linker line anyway.
@@ -433,15 +434,24 @@ pub fn flushModule(self: *MachO, comp: *Compilation) !void {
.target = self.base.options.target,
.output_mode = .Obj,
});
const o_directory = module.zig_cache_artifact_directory;
const full_obj_path = try o_directory.join(arena, &[_][]const u8{obj_basename});
break :blk full_obj_path;
switch (self.base.options.cache_mode) {
.incremental => break :blk try module.zig_cache_artifact_directory.join(
arena,
&[_][]const u8{obj_basename},
),
.whole => break :blk try fs.path.join(arena, &.{
fs.path.dirname(full_out_path).?, obj_basename,
}),
}
}
const obj_basename = self.base.intermediary_basename orelse break :blk null;
try self.flushObject(comp);
const full_obj_path = try directory.join(arena, &[_][]const u8{obj_basename});
break :blk full_obj_path;
break :blk try fs.path.join(arena, &.{
fs.path.dirname(full_out_path).?, obj_basename,
});
} else null;
const is_lib = self.base.options.output_mode == .Lib;
@@ -466,6 +476,8 @@ pub fn flushModule(self: *MachO, comp: *Compilation) !void {
// We are about to obtain this lock, so here we give other processes a chance first.
self.base.releaseLock();
comptime assert(Compilation.link_hash_implementation_version == 1);
try man.addListOfFiles(self.base.options.objects);
for (comp.c_object_table.keys()) |key| {
_ = try man.addFile(key.status.success.object_path, null);
@@ -532,7 +544,6 @@ pub fn flushModule(self: *MachO, comp: *Compilation) !void {
else => |e| return e,
};
}
const full_out_path = try directory.join(arena, &[_][]const u8{self.base.options.emit.?.sub_path});
if (self.base.options.output_mode == .Obj) {
// LLD's MachO driver does not support the equivalent of `-r`, so we do a simple file copy
@@ -1267,7 +1278,7 @@ fn parseInputFiles(self: *MachO, files: []const []const u8, syslibroot: ?[]const
for (files) |file_name| {
const full_path = full_path: {
var buffer: [fs.MAX_PATH_BYTES]u8 = undefined;
const path = try std.fs.realpath(file_name, &buffer);
const path = try fs.realpath(file_name, &buffer);
break :full_path try self.base.allocator.dupe(u8, path);
};
defer self.base.allocator.free(full_path);

View File

@@ -1050,6 +1050,7 @@ fn linkWithLLD(self: *Wasm, comp: *Compilation) !void {
const arena = arena_allocator.allocator();
const directory = self.base.options.emit.?.directory; // Just an alias to make it shorter to type.
const full_out_path = try directory.join(arena, &[_][]const u8{self.base.options.emit.?.sub_path});
// If there is no Zig code to compile, then we should skip flushing the output file because it
// will not be part of the linker line anyway.
@@ -1061,15 +1062,22 @@ fn linkWithLLD(self: *Wasm, comp: *Compilation) !void {
.target = self.base.options.target,
.output_mode = .Obj,
});
const o_directory = module.zig_cache_artifact_directory;
const full_obj_path = try o_directory.join(arena, &[_][]const u8{obj_basename});
break :blk full_obj_path;
switch (self.base.options.cache_mode) {
.incremental => break :blk try module.zig_cache_artifact_directory.join(
arena,
&[_][]const u8{obj_basename},
),
.whole => break :blk try fs.path.join(arena, &.{
fs.path.dirname(full_out_path).?, obj_basename,
}),
}
}
try self.flushModule(comp);
const obj_basename = self.base.intermediary_basename.?;
const full_obj_path = try directory.join(arena, &[_][]const u8{obj_basename});
break :blk full_obj_path;
break :blk try fs.path.join(arena, &.{
fs.path.dirname(full_out_path).?, self.base.intermediary_basename.?,
});
} else null;
const is_obj = self.base.options.output_mode == .Obj;
@@ -1094,6 +1102,8 @@ fn linkWithLLD(self: *Wasm, comp: *Compilation) !void {
// We are about to obtain this lock, so here we give other processes a chance first.
self.base.releaseLock();
comptime assert(Compilation.link_hash_implementation_version == 1);
try man.addListOfFiles(self.base.options.objects);
for (comp.c_object_table.keys()) |key| {
_ = try man.addFile(key.status.success.object_path, null);
@@ -1141,8 +1151,6 @@ fn linkWithLLD(self: *Wasm, comp: *Compilation) !void {
};
}
const full_out_path = try directory.join(arena, &[_][]const u8{self.base.options.emit.?.sub_path});
if (self.base.options.output_mode == .Obj) {
// LLD's WASM driver does not support the equivalent of `-r`, so we do a simple file copy
// here. TODO: think carefully about how we can avoid this redundant operation when doing

View File

@@ -2564,9 +2564,7 @@ fn buildOutputType(
switch (emit_bin) {
.no => break :blk .none,
.yes_default_path => break :blk .{
.print = comp.bin_file.options.emit.?.directory.path orelse ".",
},
.yes_default_path => break :blk .print_emit_bin_dir_path,
.yes => |full_path| break :blk .{ .update = full_path },
.yes_a_out => break :blk .{ .update = a_out_basename },
}
@@ -2578,10 +2576,6 @@ fn buildOutputType(
};
try comp.makeBinFileExecutable();
if (build_options.is_stage1 and comp.stage1_lock != null and watch) {
warn("--watch is not recommended with the stage1 backend; it leaks memory and is not capable of incremental compilation", .{});
}
if (test_exec_args.items.len == 0 and object_format == .c) default_exec_args: {
// Default to using `zig run` to execute the produced .c code from `zig test`.
const c_code_loc = emit_bin_loc orelse break :default_exec_args;
@@ -2602,7 +2596,6 @@ fn buildOutputType(
comp,
gpa,
arena,
emit_bin_loc,
test_exec_args.items,
self_exe_path,
arg_mode,
@@ -2675,7 +2668,6 @@ fn buildOutputType(
comp,
gpa,
arena,
emit_bin_loc,
test_exec_args.items,
self_exe_path,
arg_mode,
@@ -2701,7 +2693,6 @@ fn buildOutputType(
comp,
gpa,
arena,
emit_bin_loc,
test_exec_args.items,
self_exe_path,
arg_mode,
@@ -2766,7 +2757,6 @@ fn runOrTest(
comp: *Compilation,
gpa: Allocator,
arena: Allocator,
emit_bin_loc: ?Compilation.EmitLoc,
test_exec_args: []const ?[]const u8,
self_exe_path: []const u8,
arg_mode: ArgMode,
@@ -2777,10 +2767,11 @@ fn runOrTest(
runtime_args_start: ?usize,
link_libc: bool,
) !void {
const exe_loc = emit_bin_loc orelse return;
const exe_directory = exe_loc.directory orelse comp.bin_file.options.emit.?.directory;
const exe_emit = comp.bin_file.options.emit orelse return;
// A naive `directory.join` here will indeed get the correct path to the binary;
// however, in the case of cwd, we actually want `./foo` so that the path can be executed.
const exe_path = try fs.path.join(arena, &[_][]const u8{
exe_directory.path orelse ".", exe_loc.basename,
exe_emit.directory.path orelse ".", exe_emit.sub_path,
});
var argv = std.ArrayList([]const u8).init(gpa);
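The comment above is why `runOrTest` avoids a plain `directory.join`: a spawned path with no directory component is typically resolved via PATH lookup rather than against the cwd, so the cwd case must produce `./foo`. A self-contained check of the join behavior (file name hypothetical):

const std = @import("std");

test "cwd-relative exe path keeps an explicit directory component" {
    const gpa = std.testing.allocator;
    // Joining "." (the null emit-directory case) with the emit sub_path
    // yields "./foo" on POSIX, which spawning treats as cwd-relative.
    const exe_path = try std.fs.path.join(gpa, &.{ ".", "foo" });
    defer gpa.free(exe_path);
    try std.testing.expectEqualStrings("." ++ std.fs.path.sep_str ++ "foo", exe_path);
}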
@@ -2884,7 +2875,7 @@ fn runOrTest(
const AfterUpdateHook = union(enum) {
none,
print: []const u8,
print_emit_bin_dir_path,
update: []const u8,
};
@@ -2910,7 +2901,13 @@ fn updateModule(gpa: Allocator, comp: *Compilation, hook: AfterUpdateHook) !void
return error.SemanticAnalyzeFail;
} else switch (hook) {
.none => {},
.print => |bin_path| try io.getStdOut().writer().print("{s}\n", .{bin_path}),
.print_emit_bin_dir_path => {
const emit = comp.bin_file.options.emit.?;
const full_path = try emit.directory.join(gpa, &.{emit.sub_path});
defer gpa.free(full_path);
const dir_path = fs.path.dirname(full_path).?;
try io.getStdOut().writer().print("{s}\n", .{dir_path});
},
.update => |full_path| {
const bin_sub_path = comp.bin_file.options.emit.?.sub_path;
const cwd = fs.cwd();
@@ -3473,9 +3470,10 @@ pub fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !voi
};
try comp.makeBinFileExecutable();
child_argv.items[argv_index_exe] = try comp.bin_file.options.emit.?.directory.join(
const emit = comp.bin_file.options.emit.?;
child_argv.items[argv_index_exe] = try emit.directory.join(
arena,
&[_][]const u8{exe_basename},
&[_][]const u8{emit.sub_path},
);
break :argv child_argv.items;
@@ -3666,9 +3664,7 @@ pub fn cmdFmt(gpa: Allocator, arena: Allocator, args: []const []const u8) !void
.zir_loaded = false,
.sub_file_path = "<stdin>",
.source = source_code,
.stat_size = undefined,
.stat_inode = undefined,
.stat_mtime = undefined,
.stat = undefined,
.tree = tree,
.tree_loaded = true,
.zir = undefined,
@@ -3862,9 +3858,11 @@ fn fmtPathFile(
.zir_loaded = false,
.sub_file_path = file_path,
.source = source_code,
.stat_size = stat.size,
.stat_inode = stat.inode,
.stat_mtime = stat.mtime,
.stat = .{
.size = stat.size,
.inode = stat.inode,
.mtime = stat.mtime,
},
.tree = tree,
.tree_loaded = true,
.zir = undefined,
@@ -4460,9 +4458,7 @@ pub fn cmdAstCheck(
.zir_loaded = false,
.sub_file_path = undefined,
.source = undefined,
.stat_size = undefined,
.stat_inode = undefined,
.stat_mtime = undefined,
.stat = undefined,
.tree = undefined,
.zir = undefined,
.pkg = undefined,
@@ -4487,9 +4483,11 @@ pub fn cmdAstCheck(
file.sub_file_path = file_name;
file.source = source;
file.source_loaded = true;
file.stat_size = stat.size;
file.stat_inode = stat.inode;
file.stat_mtime = stat.mtime;
file.stat = .{
.size = stat.size,
.inode = stat.inode,
.mtime = stat.mtime,
};
} else {
const stdin = io.getStdIn();
const source = readSourceFileToEndAlloc(arena, &stdin, null) catch |err| {
@@ -4498,7 +4496,7 @@ pub fn cmdAstCheck(
file.sub_file_path = "<stdin>";
file.source = source;
file.source_loaded = true;
file.stat_size = source.len;
file.stat.size = source.len;
}
file.pkg = try Package.create(gpa, null, file.sub_file_path);
@@ -4611,9 +4609,11 @@ pub fn cmdChangelist(
.zir_loaded = false,
.sub_file_path = old_source_file,
.source = undefined,
.stat_size = stat.size,
.stat_inode = stat.inode,
.stat_mtime = stat.mtime,
.stat = .{
.size = stat.size,
.inode = stat.inode,
.mtime = stat.mtime,
},
.tree = undefined,
.zir = undefined,
.pkg = undefined,

View File

@@ -203,6 +203,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
const sub_compilation = try Compilation.create(comp.gpa, .{
.local_cache_directory = comp.global_cache_directory,
.global_cache_directory = comp.global_cache_directory,
.cache_mode = .whole,
.zig_lib_directory = comp.zig_lib_directory,
.target = target,
.root_name = "c",

View File

@@ -458,7 +458,10 @@ export fn stage2_fetch_file(
const comp = @intToPtr(*Compilation, stage1.userdata);
const file_path = path_ptr[0..path_len];
const max_file_size = std.math.maxInt(u32);
const contents = comp.stage1_cache_manifest.addFilePostFetch(file_path, max_file_size) catch return null;
const contents = if (comp.whole_cache_manifest) |man|
man.addFilePostFetch(file_path, max_file_size) catch return null
else
std.fs.cwd().readFileAlloc(comp.gpa, file_path, max_file_size) catch return null;
result_len.* = contents.len;
// TODO https://github.com/ziglang/zig/issues/3328#issuecomment-716749475
if (contents.len == 0) return @intToPtr(?[*]const u8, 0x1);
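When a whole-cache manifest exists, files fetched for stage1 are routed through it so their contents participate in the cache hash; otherwise the file is read directly. A sketch of that shape, with a hypothetical `Manifest` standing in for the real cache manifest type:

const std = @import("std");

const Manifest = struct {
    // Stand-in: the real manifest records the file in the cache hash
    // before handing back its contents.
    fn addFilePostFetch(self: *Manifest, gpa: std.mem.Allocator, path: []const u8, max: usize) ![]u8 {
        _ = self;
        return std.fs.cwd().readFileAlloc(gpa, path, max);
    }
};

fn fetchFile(gpa: std.mem.Allocator, manifest: ?*Manifest, path: []const u8) ![]u8 {
    const max_file_size = std.math.maxInt(u32);
    return if (manifest) |man|
        // Whole cache mode: track the file so its changes invalidate the cache.
        man.addFilePostFetch(gpa, path, max_file_size)
    else
        // Incremental mode: no whole-file manifest; just read it.
        std.fs.cwd().readFileAlloc(gpa, path, max_file_size);
}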

View File

@@ -3106,9 +3106,9 @@ pub const Type = extern union {
.c_ulonglong => return .{ .signedness = .unsigned, .bits = CType.ulonglong.sizeInBits(target) },
.enum_full, .enum_nonexhaustive => ty = ty.cast(Payload.EnumFull).?.data.tag_ty,
.enum_numbered => ty = self.castTag(.enum_numbered).?.data.tag_ty,
.enum_numbered => ty = ty.castTag(.enum_numbered).?.data.tag_ty,
.enum_simple => {
const enum_obj = self.castTag(.enum_simple).?.data;
const enum_obj = ty.castTag(.enum_simple).?.data;
const field_count = enum_obj.fields.count();
if (field_count == 0) return .{ .signedness = .unsigned, .bits = 0 };
return .{ .signedness = .unsigned, .bits = smallestUnsignedBits(field_count - 1) };
@@ -4603,7 +4603,18 @@ pub const CType = enum {
.longlong,
.ulonglong,
=> return 64,
.longdouble => @panic("TODO figure out what kind of float `long double` is on this target"),
.longdouble => switch (target.cpu.arch) {
.riscv64,
.aarch64,
.aarch64_be,
.aarch64_32,
.s390x,
.mips64,
.mips64el,
=> return 128,
else => return 80,
},
},
.windows, .uefi => switch (self) {
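The new non-Windows arm encodes a 128-bit `long double` for the listed architectures, matching the common ELF ABIs (AAPCS64, RISC-V LP64D, s390x, MIPS n64), and treats the rest as x87 80-bit extended precision. Restated as a free function with a spot-check:

const std = @import("std");

fn longDoubleBits(arch: std.Target.Cpu.Arch) u16 {
    return switch (arch) {
        // ABIs with a 128-bit `long double`.
        .riscv64, .aarch64, .aarch64_be, .aarch64_32, .s390x, .mips64, .mips64el => 128,
        // Everything else falls back to x87 80-bit extended precision.
        else => 80,
    };
}

test "long double bit widths" {
    try std.testing.expect(longDoubleBits(.aarch64) == 128);
    try std.testing.expect(longDoubleBits(.x86_64) == 80);
}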

View File

@@ -295,3 +295,12 @@ test "cast from ?[*]T to ??[*]T" {
const a: ??[*]u8 = @as(?[*]u8, null);
try expect(a != null and a.? == null);
}
test "peer type unsigned int to signed" {
var w: u31 = 5;
var x: u8 = 7;
var y: i32 = -5;
var a = w + y + x;
comptime try expect(@TypeOf(a) == i32);
try expect(a == 7);
}

View File

@@ -383,15 +383,6 @@ test "peer type resolve string lit with sentinel-terminated mutable slice" {
comptime try expect(@TypeOf("hi", slice) == [:0]const u8);
}
test "peer type unsigned int to signed" {
var w: u31 = 5;
var x: u8 = 7;
var y: i32 = -5;
var a = w + y + x;
comptime try expect(@TypeOf(a) == i32);
try expect(a == 7);
}
test "peer type resolve array pointers, one of them const" {
var array1: [4]u8 = undefined;
const array2: [5]u8 = undefined;