migrate from std.Target.current to @import("builtin").target

closes #9388
closes #9321
Andrew Kelley 2021-10-04 23:47:27 -07:00
parent 78902db68b
commit 6115cf2240
147 changed files with 624 additions and 596 deletions
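
For orientation, a minimal illustrative sketch (not part of this commit) of the pattern applied across the tree below: target information that was previously read through std.Target.current, and build flags that were previously re-exported by std.builtin, are now taken from the compiler-generated @import("builtin") module.

// Illustrative sketch only; not part of the diff.
const std = @import("std");
const builtin = @import("builtin");

// Old: const native_os = std.Target.current.os.tag;
// New: read target information from the compiler-provided builtin module.
const native_os = builtin.os.tag; // equivalently: builtin.target.os.tag
const native_arch = builtin.cpu.arch;

test "only runs on x86_64-linux" {
    if (native_os != .linux or native_arch != .x86_64) return error.SkipZigTest;
    try std.testing.expect(builtin.target.cpu.arch == .x86_64);
}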


@ -1334,7 +1334,7 @@ fn genHtml(
if (mem.startsWith(u8, triple, "wasm32") or
mem.startsWith(u8, triple, "riscv64-linux") or
(mem.startsWith(u8, triple, "x86_64-linux") and
std.Target.current.os.tag != .linux or std.Target.current.cpu.arch != .x86_64))
builtin.os.tag != .linux or builtin.cpu.arch != .x86_64))
{
// skip execution
break :code_block;
@ -1602,7 +1602,7 @@ fn genHtml(
Code.Id.Lib => {
const bin_basename = try std.zig.binNameAlloc(allocator, .{
.root_name = code.name,
.target = std.Target.current,
.target = builtin.target,
.output_mode = .Lib,
});


@ -2681,6 +2681,7 @@ test "pointer child type" {
</p>
{#code_begin|test|variable_alignment#}
const std = @import("std");
const builtin = @import("builtin");
const expect = std.testing.expect;
test "variable alignment" {
@ -2688,7 +2689,7 @@ test "variable alignment" {
const align_of_i32 = @alignOf(@TypeOf(x));
try expect(@TypeOf(&x) == *i32);
try expect(*i32 == *align(align_of_i32) i32);
if (std.Target.current.cpu.arch == .x86_64) {
if (builtin.target.cpu.arch == .x86_64) {
try expect(@typeInfo(*i32).Pointer.alignment == 4);
}
}
@ -3878,6 +3879,7 @@ test "separate scopes" {
{#header_open|switch#}
{#code_begin|test|switch#}
const std = @import("std");
const builtin = @import("builtin");
const expect = std.testing.expect;
test "switch simple" {
@ -3922,7 +3924,7 @@ test "switch simple" {
}
// Switch expressions can be used outside a function:
const os_msg = switch (std.Target.current.os.tag) {
const os_msg = switch (builtin.target.os.tag) {
.linux => "we found a linux user",
else => "not a linux user",
};
@ -3930,7 +3932,7 @@ const os_msg = switch (std.Target.current.os.tag) {
// Inside a function, switch statements implicitly are compile-time
// evaluated if the target expression is compile-time known.
test "switch inside function" {
switch (std.Target.current.os.tag) {
switch (builtin.target.os.tag) {
.fuchsia => {
// On an OS other than fuchsia, block is not even analyzed,
// so this compile error is not triggered.
@ -5690,6 +5692,7 @@ test "cast *[1][*]const u8 to [*]const ?[*]const u8" {
</p>
{#code_begin|test|test_integer_widening#}
const std = @import("std");
const builtin = @import("builtin");
const expect = std.testing.expect;
const mem = std.mem;
@ -5712,7 +5715,7 @@ test "implicit unsigned integer to signed integer" {
test "float widening" {
// Note: there is an open issue preventing this from working on aarch64:
// https://github.com/ziglang/zig/issues/3282
if (std.Target.current.cpu.arch == .aarch64) return error.SkipZigTest;
if (builtin.target.cpu.arch == .aarch64) return error.SkipZigTest;
var a: f16 = 12.34;
var b: f32 = a;


@ -7,6 +7,7 @@
//! * `initial_delay_ms`
const std = @import("std");
const builtin = @import("builtin");
const windows = std.os.windows;
const testing = std.testing;
const assert = std.debug.assert;
@ -144,10 +145,10 @@ pub fn start(self: *Progress, name: []const u8, estimated_total_items: usize) !*
if (stderr.supportsAnsiEscapeCodes()) {
self.terminal = stderr;
self.supports_ansi_escape_codes = true;
} else if (std.builtin.os.tag == .windows and stderr.isTty()) {
} else if (builtin.os.tag == .windows and stderr.isTty()) {
self.is_windows_terminal = true;
self.terminal = stderr;
} else if (std.builtin.os.tag != .windows) {
} else if (builtin.os.tag != .windows) {
// we are in a "dumb" terminal like in acme or writing to a file
self.terminal = stderr;
}
@ -200,7 +201,7 @@ fn refreshWithHeldLock(self: *Progress) void {
if (self.supports_ansi_escape_codes) {
end += (std.fmt.bufPrint(self.output_buffer[end..], "\x1b[{d}D", .{self.columns_written}) catch unreachable).len;
end += (std.fmt.bufPrint(self.output_buffer[end..], "\x1b[0K", .{}) catch unreachable).len;
} else if (std.builtin.os.tag == .windows) winapi: {
} else if (builtin.os.tag == .windows) winapi: {
std.debug.assert(self.is_windows_terminal);
var info: windows.CONSOLE_SCREEN_BUFFER_INFO = undefined;


@ -26,7 +26,7 @@
state: usize = UNSET,
const std = @import("../std.zig");
const builtin = std.builtin;
const builtin = @import("builtin");
const testing = std.testing;
const assert = std.debug.assert;
const StaticResetEvent = std.Thread.StaticResetEvent;


@ -5,6 +5,7 @@
impl: Impl = .{},
const std = @import("../std.zig");
const builtin = @import("builtin");
const Condition = @This();
const windows = std.os.windows;
const linux = std.os.linux;
@ -23,9 +24,9 @@ pub fn broadcast(cond: *Condition) void {
cond.impl.broadcast();
}
const Impl = if (std.builtin.single_threaded)
const Impl = if (builtin.single_threaded)
SingleThreadedCondition
else if (std.Target.current.os.tag == .windows)
else if (builtin.os.tag == .windows)
WindowsCondition
else if (std.Thread.use_pthreads)
PthreadCondition
@ -101,7 +102,7 @@ pub const AtomicCondition = struct {
fn wait(cond: *@This()) void {
while (@atomicLoad(i32, &cond.futex, .Acquire) == 0) {
switch (std.Target.current.os.tag) {
switch (builtin.os.tag) {
.linux => {
switch (linux.getErrno(linux.futex_wait(
&cond.futex,
@ -123,7 +124,7 @@ pub const AtomicCondition = struct {
fn notify(cond: *@This()) void {
@atomicStore(i32, &cond.futex, 1, .Release);
switch (std.Target.current.os.tag) {
switch (builtin.os.tag) {
.linux => {
switch (linux.getErrno(linux.futex_wake(
&cond.futex,


@ -4,10 +4,11 @@
//! Using Futex, other Thread synchronization primitives can be built which efficiently wait for cross-thread events or signals.
const std = @import("../std.zig");
const builtin = @import("builtin");
const Futex = @This();
const target = std.Target.current;
const single_threaded = std.builtin.single_threaded;
const target = builtin.target;
const single_threaded = builtin.single_threaded;
const assert = std.debug.assert;
const testing = std.testing;
@ -70,7 +71,7 @@ else if (target.os.tag == .linux)
LinuxFutex
else if (target.isDarwin())
DarwinFutex
else if (std.builtin.link_libc)
else if (builtin.link_libc)
PosixFutex
else
UnsupportedFutex;


@ -24,7 +24,7 @@ impl: Impl = .{},
const Mutex = @This();
const std = @import("../std.zig");
const builtin = std.builtin;
const builtin = @import("builtin");
const os = std.os;
const assert = std.debug.assert;
const windows = os.windows;
@ -160,7 +160,7 @@ pub const AtomicMutex = struct {
.unlocked => return,
else => {},
}
switch (std.Target.current.os.tag) {
switch (builtin.os.tag) {
.linux => {
switch (linux.getErrno(linux.futex_wait(
@ptrCast(*const i32, &m.state),
@ -182,7 +182,7 @@ pub const AtomicMutex = struct {
fn unlockSlow(m: *AtomicMutex) void {
@setCold(true);
switch (std.Target.current.os.tag) {
switch (builtin.os.tag) {
.linux => {
switch (linux.getErrno(linux.futex_wake(
@ptrCast(*const i32, &m.state),


@ -8,7 +8,7 @@
const ResetEvent = @This();
const std = @import("../std.zig");
const builtin = std.builtin;
const builtin = @import("builtin");
const testing = std.testing;
const assert = std.debug.assert;
const c = std.c;
@ -19,7 +19,7 @@ impl: Impl,
pub const Impl = if (builtin.single_threaded)
std.Thread.StaticResetEvent.DebugEvent
else if (std.Target.current.isDarwin())
else if (builtin.target.isDarwin())
DarwinEvent
else if (std.Thread.use_pthreads)
PosixEvent


@ -8,6 +8,7 @@
//! the logic needs stronger API guarantees.
const std = @import("../std.zig");
const builtin = @import("builtin");
const StaticResetEvent = @This();
const assert = std.debug.assert;
const os = std.os;
@ -18,7 +19,7 @@ const testing = std.testing;
impl: Impl = .{},
pub const Impl = if (std.builtin.single_threaded)
pub const Impl = if (builtin.single_threaded)
DebugEvent
else
AtomicEvent;
@ -162,7 +163,7 @@ pub const AtomicEvent = struct {
@atomicStore(u32, &ev.waiters, 0, .Monotonic);
}
pub const Futex = switch (std.Target.current.os.tag) {
pub const Futex = switch (builtin.os.tag) {
.windows => WindowsFutex,
.linux => LinuxFutex,
else => SpinFutex,
@ -322,7 +323,7 @@ test "basic usage" {
try testing.expectEqual(TimedWaitResult.event_set, event.timedWait(1));
// test cross-thread signaling
if (std.builtin.single_threaded)
if (builtin.single_threaded)
return;
const Context = struct {


@ -9,7 +9,6 @@ const trait = meta.trait;
const autoHash = std.hash.autoHash;
const Wyhash = std.hash.Wyhash;
const Allocator = mem.Allocator;
const builtin = std.builtin;
const hash_map = @This();
/// An ArrayHashMap with default hash and equal functions.


@ -1,5 +1,5 @@
const std = @import("std.zig");
const target = std.Target.current;
const target = @import("builtin").target;
pub const Ordering = std.builtin.AtomicOrder;


@ -1,7 +1,7 @@
const std = @import("../std.zig");
const testing = std.testing;
const target = std.Target.current;
const target = @import("builtin").target;
const Ordering = std.atomic.Ordering;
pub fn Atomic(comptime T: type) type {


@ -1,5 +1,5 @@
const std = @import("../std.zig");
const builtin = std.builtin;
const builtin = @import("builtin");
const assert = std.debug.assert;
const expect = std.testing.expect;


@ -1,5 +1,6 @@
const std = @import("../std.zig");
const builtin = @import("builtin");
const assert = std.debug.assert;
const builtin = std.builtin;
const expect = std.testing.expect;
/// Many reader, many writer, non-allocating, thread-safe
@ -67,7 +68,6 @@ pub fn Stack(comptime T: type) type {
};
}
const std = @import("../std.zig");
const Context = struct {
allocator: *std.mem.Allocator,
stack: *Stack(i32),


@ -1,5 +1,5 @@
const std = @import("std.zig");
const builtin = std.builtin;
const builtin = @import("builtin");
const io = std.io;
const fs = std.fs;
const mem = std.mem;
@ -62,7 +62,7 @@ pub const Builder = struct {
build_root: []const u8,
cache_root: []const u8,
global_cache_root: []const u8,
release_mode: ?builtin.Mode,
release_mode: ?std.builtin.Mode,
is_release: bool,
override_lib_dir: ?[]const u8,
vcpkg_root: VcpkgRoot,
@ -633,18 +633,18 @@ pub const Builder = struct {
}
/// This provides the -Drelease option to the build user and does not give them the choice.
pub fn setPreferredReleaseMode(self: *Builder, mode: builtin.Mode) void {
pub fn setPreferredReleaseMode(self: *Builder, mode: std.builtin.Mode) void {
if (self.release_mode != null) {
@panic("setPreferredReleaseMode must be called before standardReleaseOptions and may not be called twice");
}
const description = self.fmt("Create a release build ({s})", .{@tagName(mode)});
self.is_release = self.option(bool, "release", description) orelse false;
self.release_mode = if (self.is_release) mode else builtin.Mode.Debug;
self.release_mode = if (self.is_release) mode else std.builtin.Mode.Debug;
}
/// If you call this without first calling `setPreferredReleaseMode` then it gives the build user
/// the choice of what kind of release.
pub fn standardReleaseOptions(self: *Builder) builtin.Mode {
pub fn standardReleaseOptions(self: *Builder) std.builtin.Mode {
if (self.release_mode) |mode| return mode;
const release_safe = self.option(bool, "release-safe", "Optimizations on and safety on") orelse false;
@ -652,17 +652,17 @@ pub const Builder = struct {
const release_small = self.option(bool, "release-small", "Size optimizations on and safety off") orelse false;
const mode = if (release_safe and !release_fast and !release_small)
builtin.Mode.ReleaseSafe
std.builtin.Mode.ReleaseSafe
else if (release_fast and !release_safe and !release_small)
builtin.Mode.ReleaseFast
std.builtin.Mode.ReleaseFast
else if (release_small and !release_fast and !release_safe)
builtin.Mode.ReleaseSmall
std.builtin.Mode.ReleaseSmall
else if (!release_fast and !release_safe and !release_small)
builtin.Mode.Debug
std.builtin.Mode.Debug
else x: {
warn("Multiple release modes (of -Drelease-safe, -Drelease-fast and -Drelease-small)\n\n", .{});
self.markInvalidUserInput();
break :x builtin.Mode.Debug;
break :x std.builtin.Mode.Debug;
};
self.is_release = mode != .Debug;
self.release_mode = mode;
@ -1290,7 +1290,7 @@ test "builder.findProgram compiles" {
}
/// Deprecated. Use `std.builtin.Version`.
pub const Version = builtin.Version;
pub const Version = std.builtin.Version;
/// Deprecated. Use `std.zig.CrossTarget`.
pub const Target = std.zig.CrossTarget;
@ -1417,8 +1417,8 @@ pub const LibExeObjStep = struct {
version_script: ?[]const u8 = null,
out_filename: []const u8,
linkage: ?Linkage = null,
version: ?Version,
build_mode: builtin.Mode,
version: ?std.builtin.Version,
build_mode: std.builtin.Mode,
kind: Kind,
major_only_filename: ?[]const u8,
name_only_filename: ?[]const u8,
@ -1447,8 +1447,8 @@ pub const LibExeObjStep = struct {
filter: ?[]const u8,
single_threaded: bool,
test_evented_io: bool = false,
code_model: builtin.CodeModel = .default,
wasi_exec_model: ?builtin.WasiExecModel = null,
code_model: std.builtin.CodeModel = .default,
wasi_exec_model: ?std.builtin.WasiExecModel = null,
root_src: ?FileSource,
out_h_filename: []const u8,
@ -1550,7 +1550,7 @@ pub const LibExeObjStep = struct {
};
const SharedLibKind = union(enum) {
versioned: Version,
versioned: std.builtin.Version,
unversioned: void,
};
@ -1585,7 +1585,7 @@ pub const LibExeObjStep = struct {
root_src_raw: ?FileSource,
kind: Kind,
linkage: ?Linkage,
ver: ?Version,
ver: ?std.builtin.Version,
) *LibExeObjStep {
const name = builder.dupe(name_raw);
const root_src: ?FileSource = if (root_src_raw) |rsrc| rsrc.dupe(builder) else null;
@ -1599,7 +1599,7 @@ pub const LibExeObjStep = struct {
.builder = builder,
.verbose_link = false,
.verbose_cc = false,
.build_mode = builtin.Mode.Debug,
.build_mode = std.builtin.Mode.Debug,
.linkage = linkage,
.kind = kind,
.root_src = root_src,
@ -1988,7 +1988,7 @@ pub const LibExeObjStep = struct {
self.verbose_cc = value;
}
pub fn setBuildMode(self: *LibExeObjStep, mode: builtin.Mode) void {
pub fn setBuildMode(self: *LibExeObjStep, mode: std.builtin.Mode) void {
self.build_mode = mode;
}
@ -2553,7 +2553,7 @@ pub const LibExeObjStep = struct {
const resolved_include_path = self.builder.pathFromRoot(include_path);
const common_include_path = if (std.Target.current.os.tag == .windows and builder.sysroot != null and fs.path.isAbsolute(resolved_include_path)) blk: {
const common_include_path = if (builtin.os.tag == .windows and builder.sysroot != null and fs.path.isAbsolute(resolved_include_path)) blk: {
// We need to check for disk designator and strip it out from dir path so
// that zig/clang can concat resolved_include_path with sysroot.
const disk_designator = fs.path.diskDesignatorWindows(resolved_include_path);
@ -3237,7 +3237,7 @@ test "LibExeObjStep.addPackage" {
test {
// The only purpose of this test is to get all these untested functions
// to be referenced to avoid regression so it is okay to skip some targets.
if (comptime std.Target.current.cpu.arch.ptrBitWidth() == 64) {
if (comptime builtin.cpu.arch.ptrBitWidth() == 64) {
std.testing.refAllDecls(@This());
std.testing.refAllDecls(Builder);


@ -1,4 +1,5 @@
const std = @import("../std.zig");
const builtin = @import("builtin");
const build = std.build;
const fs = std.fs;
const Step = build.Step;
@ -219,7 +220,7 @@ const OptionFileSourceArg = struct {
};
test "OptionsStep" {
if (std.builtin.os.tag == .wasi) return error.SkipZigTest;
if (builtin.os.tag == .wasi) return error.SkipZigTest;
var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
defer arena.deinit();


@ -1,5 +1,5 @@
const std = @import("../std.zig");
const builtin = std.builtin;
const builtin = @import("builtin");
const build = std.build;
const Step = build.Step;
const Builder = build.Builder;


@ -1,26 +1,27 @@
const builtin = @import("builtin");
// These are all deprecated.
pub const zig_version = builtin.zig_version;
pub const zig_is_stage2 = builtin.zig_is_stage2;
pub const output_mode = builtin.output_mode;
pub const link_mode = builtin.link_mode;
pub const is_test = builtin.is_test;
pub const single_threaded = builtin.single_threaded;
pub const abi = builtin.abi;
pub const cpu = builtin.cpu;
pub const os = builtin.os;
pub const target = builtin.target;
pub const object_format = builtin.object_format;
pub const mode = builtin.mode;
pub const link_libc = builtin.link_libc;
pub const link_libcpp = builtin.link_libcpp;
pub const have_error_return_tracing = builtin.have_error_return_tracing;
pub const valgrind_support = builtin.valgrind_support;
pub const position_independent_code = builtin.position_independent_code;
pub const position_independent_executable = builtin.position_independent_executable;
pub const strip_debug_info = builtin.strip_debug_info;
pub const code_model = builtin.code_model;
// TODO delete these after releasing 0.9.0
pub const zig_version = @compileError("get this from @import(\"builtin\") instead of std.builtin");
pub const zig_is_stage2 = @compileError("get this from @import(\"builtin\") instead of std.builtin");
pub const output_mode = @compileError("get this from @import(\"builtin\") instead of std.builtin");
pub const link_mode = @compileError("get this from @import(\"builtin\") instead of std.builtin");
pub const is_test = @compileError("get this from @import(\"builtin\") instead of std.builtin");
pub const single_threaded = @compileError("get this from @import(\"builtin\") instead of std.builtin");
pub const abi = @compileError("get this from @import(\"builtin\") instead of std.builtin");
pub const cpu = @compileError("get this from @import(\"builtin\") instead of std.builtin");
pub const os = @compileError("get this from @import(\"builtin\") instead of std.builtin");
pub const target = @compileError("get this from @import(\"builtin\") instead of std.builtin");
pub const object_format = @compileError("get this from @import(\"builtin\") instead of std.builtin");
pub const mode = @compileError("get this from @import(\"builtin\") instead of std.builtin");
pub const link_libc = @compileError("get this from @import(\"builtin\") instead of std.builtin");
pub const link_libcpp = @compileError("get this from @import(\"builtin\") instead of std.builtin");
pub const have_error_return_tracing = @compileError("get this from @import(\"builtin\") instead of std.builtin");
pub const valgrind_support = @compileError("get this from @import(\"builtin\") instead of std.builtin");
pub const position_independent_code = @compileError("get this from @import(\"builtin\") instead of std.builtin");
pub const position_independent_executable = @compileError("get this from @import(\"builtin\") instead of std.builtin");
pub const strip_debug_info = @compileError("get this from @import(\"builtin\") instead of std.builtin");
pub const code_model = @compileError("get this from @import(\"builtin\") instead of std.builtin");
/// `explicit_subsystem` is missing when the subsystem is automatically detected,
/// so Zig standard library has the subsystem detection logic here. This should generally be
@ -694,7 +695,7 @@ pub fn default_panic(msg: []const u8, error_return_trace: ?*StackTrace) noreturn
@breakpoint();
}
}
switch (os.tag) {
switch (builtin.os.tag) {
.freestanding => {
while (true) {
@breakpoint();

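Illustrative sketch (not part of this commit) of what the std.builtin deprecation above means for downstream code: build-configuration values such as mode, os, cpu, single_threaded and link_libc must now be read from @import("builtin"), while type definitions such as std.builtin.Mode, std.builtin.Endian, std.builtin.Version and std.builtin.StackTrace remain in std.builtin, as the surrounding hunks show.

// Illustrative sketch only; not part of the diff.
const std = @import("std");
const builtin = @import("builtin");

// Values describing the current build come from the compiler-generated module:
const optimize: std.builtin.Mode = builtin.mode;
const is_windows: bool = builtin.os.tag == .windows;
const single_threaded: bool = builtin.single_threaded;

// Type definitions stay in std.builtin:
const native_endian: std.builtin.Endian = builtin.cpu.arch.endian();

// Referencing the old re-exports, e.g. std.builtin.mode, now fails with:
//   "get this from @import("builtin") instead of std.builtin"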

@ -1,5 +1,5 @@
const std = @import("std");
const builtin = std.builtin;
const builtin = @import("builtin");
const c = @This();
const page_size = std.mem.page_size;
const iovec = std.os.iovec;
@ -20,7 +20,7 @@ pub const Tokenizer = tokenizer.Tokenizer;
/// If linking gnu libc (glibc), the `ok` value will be true if the target
/// version is greater than or equal to `glibc_version`.
/// If linking a libc other than these, returns `false`.
pub fn versionCheck(glibc_version: builtin.Version) type {
pub fn versionCheck(glibc_version: std.builtin.Version) type {
return struct {
pub const ok = blk: {
if (!builtin.link_libc) break :blk false;


@ -1,6 +1,6 @@
const std = @import("../std.zig");
const assert = std.debug.assert;
const builtin = @import("builtin");
const assert = std.debug.assert;
const macho = std.macho;
const native_arch = builtin.target.cpu.arch;
const maxInt = std.math.maxInt;
@ -72,7 +72,7 @@ const mach_hdr = if (@sizeOf(usize) == 8) mach_header_64 else mach_header;
var dummy_execute_header: mach_hdr = undefined;
pub extern var _mh_execute_header: mach_hdr;
comptime {
if (std.Target.current.isDarwin()) {
if (builtin.target.isDarwin()) {
@export(dummy_execute_header, .{ .name = "_mh_execute_header", .linkage = .Weak });
}
}


@ -1,4 +1,5 @@
const std = @import("std.zig");
const builtin = @import("builtin");
const cstr = std.cstr;
const unicode = std.unicode;
const io = std.io;
@ -12,8 +13,7 @@ const mem = std.mem;
const math = std.math;
const debug = std.debug;
const BufMap = std.BufMap;
const builtin = std.builtin;
const Os = builtin.Os;
const Os = std.builtin.Os;
const TailQueue = std.TailQueue;
const maxInt = std.math.maxInt;
const assert = std.debug.assert;
@ -561,9 +561,9 @@ pub const ChildProcess = struct {
if (self.env_map) |env_map| {
const envp_buf = try createNullDelimitedEnvMap(arena, env_map);
break :m envp_buf.ptr;
} else if (std.builtin.link_libc) {
} else if (builtin.link_libc) {
break :m std.c.environ;
} else if (std.builtin.output_mode == .Exe) {
} else if (builtin.output_mode == .Exe) {
// Then we have Zig start code and this works.
// TODO type-safety for null-termination of `os.environ`.
break :m @ptrCast([*:null]?[*:0]u8, os.environ.ptr);


@ -1,4 +1,3 @@
const builtin = std.builtin;
const std = @import("std.zig");
const io = std.io;
const mem = std.mem;


@ -163,7 +163,7 @@ const std = @import("std.zig");
pub const errors = @import("crypto/errors.zig");
test "crypto" {
const please_windows_dont_oom = std.Target.current.os.tag == .windows;
const please_windows_dont_oom = @import("builtin").os.tag == .windows;
if (please_windows_dont_oom) return error.SkipZigTest;
inline for (std.meta.declarations(@This())) |decl| {


@ -1,13 +1,13 @@
const std = @import("../std.zig");
const builtin = @import("builtin");
const testing = std.testing;
const builtin = std.builtin;
const has_aesni = std.Target.x86.featureSetHas(std.Target.current.cpu.features, .aes);
const has_avx = std.Target.x86.featureSetHas(std.Target.current.cpu.features, .avx);
const has_armaes = std.Target.aarch64.featureSetHas(std.Target.current.cpu.features, .aes);
const impl = if (std.Target.current.cpu.arch == .x86_64 and has_aesni and has_avx) impl: {
const has_aesni = std.Target.x86.featureSetHas(builtin.cpu.features, .aes);
const has_avx = std.Target.x86.featureSetHas(builtin.cpu.features, .avx);
const has_armaes = std.Target.aarch64.featureSetHas(builtin.cpu.features, .aes);
const impl = if (builtin.cpu.arch == .x86_64 and has_aesni and has_avx) impl: {
break :impl @import("aes/aesni.zig");
} else if (std.Target.current.cpu.arch == .aarch64 and has_armaes)
} else if (builtin.cpu.arch == .aarch64 and has_armaes)
impl: {
break :impl @import("aes/armcrypto.zig");
} else impl: {
@ -41,7 +41,7 @@ test "ctr" {
var out: [exp_out.len]u8 = undefined;
var ctx = Aes128.initEnc(key);
ctr(AesEncryptCtx(Aes128), ctx, out[0..], in[0..], iv, builtin.Endian.Big);
ctr(AesEncryptCtx(Aes128), ctx, out[0..], in[0..], iv, std.builtin.Endian.Big);
try testing.expectEqualSlices(u8, exp_out[0..], out[0..]);
}


@ -1,4 +1,5 @@
const std = @import("../../std.zig");
const builtin = @import("builtin");
const mem = std.mem;
const debug = std.debug;
const Vector = std.meta.Vector;
@ -97,7 +98,7 @@ pub const Block = struct {
const cpu = std.Target.x86.cpu;
/// The recommended number of AES encryption/decryption to perform in parallel for the chosen implementation.
pub const optimal_parallel_blocks = switch (std.Target.current.cpu.model) {
pub const optimal_parallel_blocks = switch (builtin.cpu.model) {
&cpu.westmere => 6,
&cpu.sandybridge, &cpu.ivybridge => 8,
&cpu.haswell, &cpu.broadwell => 7,


@ -1,6 +1,5 @@
const std = @import("std");
const assert = std.debug.assert;
const builtin = std.builtin;
const crypto = std.crypto;
const debug = std.debug;
const Ghash = std.crypto.onetimeauth.Ghash;
@ -40,7 +39,7 @@ fn AesGcm(comptime Aes: anytype) type {
mac.pad();
mem.writeIntBig(u32, j[nonce_length..][0..4], 2);
modes.ctr(@TypeOf(aes), aes, c, m, j, builtin.Endian.Big);
modes.ctr(@TypeOf(aes), aes, c, m, j, std.builtin.Endian.Big);
mac.update(c[0..m.len][0..]);
mac.pad();
@ -94,7 +93,7 @@ fn AesGcm(comptime Aes: anytype) type {
}
mem.writeIntBig(u32, j[nonce_length..][0..4], 2);
modes.ctr(@TypeOf(aes), aes, m, c, j, builtin.Endian.Big);
modes.ctr(@TypeOf(aes), aes, m, c, j, std.builtin.Endian.Big);
}
};
}


@ -1,4 +1,5 @@
const std = @import("std");
const builtin = @import("builtin");
const crypto = std.crypto;
const aes = crypto.core.aes;
const assert = std.debug.assert;
@ -100,9 +101,9 @@ fn AesOcb(comptime Aes: anytype) type {
return offset;
}
const has_aesni = std.Target.x86.featureSetHas(std.Target.current.cpu.features, .aes);
const has_armaes = std.Target.aarch64.featureSetHas(std.Target.current.cpu.features, .aes);
const wb: usize = if ((std.Target.current.cpu.arch == .x86_64 and has_aesni) or (std.Target.current.cpu.arch == .aarch64 and has_armaes)) 4 else 0;
const has_aesni = std.Target.x86.featureSetHas(builtin.cpu.features, .aes);
const has_armaes = std.Target.aarch64.featureSetHas(builtin.cpu.features, .aes);
const wb: usize = if ((builtin.cpu.arch == .x86_64 and has_aesni) or (builtin.cpu.arch == .aarch64 and has_armaes)) 4 else 0;
/// c: ciphertext: output buffer should be of size m.len
/// tag: authentication tag: output MAC


@ -1,7 +1,7 @@
// zig run -O ReleaseFast --zig-lib-dir ../.. benchmark.zig
const std = @import("../std.zig");
const builtin = std.builtin;
const builtin = @import("builtin");
const mem = std.mem;
const time = std.time;
const Timer = time.Timer;


@ -2,6 +2,7 @@
// Source: https://github.com/BLAKE3-team/BLAKE3
const std = @import("../std.zig");
const builtin = @import("builtin");
const fmt = std.fmt;
const math = std.math;
const mem = std.mem;
@ -200,7 +201,7 @@ const CompressGeneric = struct {
}
};
const compress = if (std.Target.current.cpu.arch == .x86_64) CompressVectorized.compress else CompressGeneric.compress;
const compress = if (builtin.cpu.arch == .x86_64) CompressVectorized.compress else CompressGeneric.compress;
fn first8Words(words: [16]u32) [8]u32 {
return @ptrCast(*const [8]u32, &words).*;


@ -1,6 +1,7 @@
// Based on public domain Supercop by Daniel J. Bernstein
const std = @import("../std.zig");
const builtin = @import("builtin");
const math = std.math;
const mem = std.mem;
const assert = std.debug.assert;
@ -359,7 +360,7 @@ fn ChaChaNonVecImpl(comptime rounds_nb: usize) type {
}
fn ChaChaImpl(comptime rounds_nb: usize) type {
return if (std.Target.current.cpu.arch == .x86_64) ChaChaVecImpl(rounds_nb) else ChaChaNonVecImpl(rounds_nb);
return if (builtin.cpu.arch == .x86_64) ChaChaVecImpl(rounds_nb) else ChaChaNonVecImpl(rounds_nb);
}
fn keyToWords(key: [32]u8) [8]u32 {


@ -2,6 +2,7 @@
// Adapted from BearSSL's ctmul64 implementation originally written by Thomas Pornin <pornin@bolet.org>
const std = @import("../std.zig");
const builtin = @import("builtin");
const assert = std.debug.assert;
const math = std.math;
const mem = std.mem;
@ -45,7 +46,7 @@ pub const Ghash = struct {
const h2 = h0 ^ h1;
const h2r = h0r ^ h1r;
if (std.builtin.mode == .ReleaseSmall) {
if (builtin.mode == .ReleaseSmall) {
return Ghash{
.h0 = h0,
.h1 = h1,
@ -132,12 +133,12 @@ pub const Ghash = struct {
return z0 | z1 | z2 | z3;
}
const has_pclmul = std.Target.x86.featureSetHas(std.Target.current.cpu.features, .pclmul);
const has_avx = std.Target.x86.featureSetHas(std.Target.current.cpu.features, .avx);
const has_armaes = std.Target.aarch64.featureSetHas(std.Target.current.cpu.features, .aes);
const clmul = if (std.Target.current.cpu.arch == .x86_64 and has_pclmul and has_avx) impl: {
const has_pclmul = std.Target.x86.featureSetHas(builtin.cpu.features, .pclmul);
const has_avx = std.Target.x86.featureSetHas(builtin.cpu.features, .avx);
const has_armaes = std.Target.aarch64.featureSetHas(builtin.cpu.features, .aes);
const clmul = if (builtin.cpu.arch == .x86_64 and has_pclmul and has_avx) impl: {
break :impl clmul_pclmul;
} else if (std.Target.current.cpu.arch == .aarch64 and has_armaes) impl: {
} else if (builtin.cpu.arch == .aarch64 and has_armaes) impl: {
break :impl clmul_pmull;
} else impl: {
break :impl clmul_soft;
@ -151,7 +152,7 @@ pub const Ghash = struct {
var i: usize = 0;
// 2-blocks aggregated reduction
if (std.builtin.mode != .ReleaseSmall) {
if (builtin.mode != .ReleaseSmall) {
while (i + 32 <= msg.len) : (i += 32) {
// B0 * H^2 unreduced
y1 ^= mem.readIntBig(u64, msg[i..][0..8]);


@ -1,13 +1,14 @@
// Gimli is a 384-bit permutation designed to achieve high security with high
// performance across a broad range of platforms, including 64-bit Intel/AMD
// server CPUs, 64-bit and 32-bit ARM smartphone CPUs, 32-bit ARM
// microcontrollers, 8-bit AVR microcontrollers, FPGAs, ASICs without
// side-channel protection, and ASICs with side-channel protection.
//
// https://gimli.cr.yp.to/
// https://csrc.nist.gov/CSRC/media/Projects/Lightweight-Cryptography/documents/round-1/spec-doc/gimli-spec.pdf
//! Gimli is a 384-bit permutation designed to achieve high security with high
//! performance across a broad range of platforms, including 64-bit Intel/AMD
//! server CPUs, 64-bit and 32-bit ARM smartphone CPUs, 32-bit ARM
//! microcontrollers, 8-bit AVR microcontrollers, FPGAs, ASICs without
//! side-channel protection, and ASICs with side-channel protection.
//!
//! https://gimli.cr.yp.to/
//! https://csrc.nist.gov/CSRC/media/Projects/Lightweight-Cryptography/documents/round-1/spec-doc/gimli-spec.pdf
const std = @import("../std.zig");
const builtin = @import("builtin");
const mem = std.mem;
const math = std.math;
const debug = std.debug;
@ -152,9 +153,9 @@ pub const State = struct {
self.endianSwap();
}
pub const permute = if (std.Target.current.cpu.arch == .x86_64) impl: {
pub const permute = if (builtin.cpu.arch == .x86_64) impl: {
break :impl permute_vectorized;
} else if (std.builtin.mode == .ReleaseSmall) impl: {
} else if (builtin.mode == .ReleaseSmall) impl: {
break :impl permute_small;
} else impl: {
break :impl permute_unrolled;


@ -1,7 +1,6 @@
// Based on Go stdlib implementation
const std = @import("../std.zig");
const builtin = std.builtin;
const mem = std.mem;
const debug = std.debug;
@ -11,7 +10,7 @@ const debug = std.debug;
///
/// Important: the counter mode doesn't provide authenticated encryption: the ciphertext can be trivially modified without this being detected.
/// As a result, applications should generally never use it directly, but only in a construction that includes a MAC.
pub fn ctr(comptime BlockCipher: anytype, block_cipher: BlockCipher, dst: []u8, src: []const u8, iv: [BlockCipher.block_length]u8, endian: builtin.Endian) void {
pub fn ctr(comptime BlockCipher: anytype, block_cipher: BlockCipher, dst: []u8, src: []const u8, iv: [BlockCipher.block_length]u8, endian: std.builtin.Endian) void {
debug.assert(dst.len >= src.len);
const block_length = BlockCipher.block_length;
var counter: [BlockCipher.block_length]u8 = undefined;


@ -1,5 +1,4 @@
const std = @import("std");
const builtin = std.builtin;
const crypto = std.crypto;
const debug = std.debug;
const mem = std.mem;
@ -51,7 +50,7 @@ pub fn Field(comptime params: FieldParams) type {
};
/// Reject non-canonical encodings of an element.
pub fn rejectNonCanonical(s_: [encoded_length]u8, endian: builtin.Endian) NonCanonicalError!void {
pub fn rejectNonCanonical(s_: [encoded_length]u8, endian: std.builtin.Endian) NonCanonicalError!void {
var s = if (endian == .Little) s_ else orderSwap(s_);
const field_order_s = comptime fos: {
var fos: [encoded_length]u8 = undefined;
@ -71,7 +70,7 @@ pub fn Field(comptime params: FieldParams) type {
}
/// Unpack a field element.
pub fn fromBytes(s_: [encoded_length]u8, endian: builtin.Endian) NonCanonicalError!Fe {
pub fn fromBytes(s_: [encoded_length]u8, endian: std.builtin.Endian) NonCanonicalError!Fe {
var s = if (endian == .Little) s_ else orderSwap(s_);
try rejectNonCanonical(s, .Little);
var limbs_z: NonMontgomeryDomainFieldElement = undefined;
@ -82,7 +81,7 @@ pub fn Field(comptime params: FieldParams) type {
}
/// Pack a field element.
pub fn toBytes(fe: Fe, endian: builtin.Endian) [encoded_length]u8 {
pub fn toBytes(fe: Fe, endian: std.builtin.Endian) [encoded_length]u8 {
var limbs_z: NonMontgomeryDomainFieldElement = undefined;
fiat.fromMontgomery(&limbs_z, fe.limbs);
var s: [encoded_length]u8 = undefined;


@ -1,5 +1,4 @@
const std = @import("std");
const builtin = std.builtin;
const crypto = std.crypto;
const mem = std.mem;
const meta = std.meta;
@ -59,7 +58,7 @@ pub const P256 = struct {
}
/// Create a point from serialized affine coordinates.
pub fn fromSerializedAffineCoordinates(xs: [32]u8, ys: [32]u8, endian: builtin.Endian) (NonCanonicalError || EncodingError)!P256 {
pub fn fromSerializedAffineCoordinates(xs: [32]u8, ys: [32]u8, endian: std.builtin.Endian) (NonCanonicalError || EncodingError)!P256 {
const x = try Fe.fromBytes(xs, endian);
const y = try Fe.fromBytes(ys, endian);
return fromAffineCoordinates(.{ .x = x, .y = y });
@ -396,7 +395,7 @@ pub const P256 = struct {
/// Multiply an elliptic curve point by a scalar.
/// Return error.IdentityElement if the result is the identity element.
pub fn mul(p: P256, s_: [32]u8, endian: builtin.Endian) IdentityElementError!P256 {
pub fn mul(p: P256, s_: [32]u8, endian: std.builtin.Endian) IdentityElementError!P256 {
const s = if (endian == .Little) s_ else Fe.orderSwap(s_);
if (p.is_base) {
return pcMul16(&basePointPc, s, false);
@ -408,7 +407,7 @@ pub const P256 = struct {
/// Multiply an elliptic curve point by a *PUBLIC* scalar *IN VARIABLE TIME*
/// This can be used for signature verification.
pub fn mulPublic(p: P256, s_: [32]u8, endian: builtin.Endian) IdentityElementError!P256 {
pub fn mulPublic(p: P256, s_: [32]u8, endian: std.builtin.Endian) IdentityElementError!P256 {
const s = if (endian == .Little) s_ else Fe.orderSwap(s_);
if (p.is_base) {
return pcMul16(&basePointPc, s, true);
@ -420,7 +419,7 @@ pub const P256 = struct {
/// Double-base multiplication of public parameters - Compute (p1*s1)+(p2*s2) *IN VARIABLE TIME*
/// This can be used for signature verification.
pub fn mulDoubleBasePublic(p1: P256, s1_: [32]u8, p2: P256, s2_: [32]u8, endian: builtin.Endian) IdentityElementError!P256 {
pub fn mulDoubleBasePublic(p1: P256, s1_: [32]u8, p2: P256, s2_: [32]u8, endian: std.builtin.Endian) IdentityElementError!P256 {
const s1 = if (endian == .Little) s1_ else Fe.orderSwap(s1_);
const s2 = if (endian == .Little) s2_ else Fe.orderSwap(s2_);
try p1.rejectIdentity();


@ -18,7 +18,7 @@
// if x1 & (2^256-1) < 2^255 then x1 & (2^256-1) else (x1 & (2^256-1)) - 2^256
const std = @import("std");
const mode = std.builtin.mode; // Checked arithmetic is disabled in non-debug modes to avoid side channels
const mode = @import("builtin").mode; // Checked arithmetic is disabled in non-debug modes to avoid side channels
// The type MontgomeryDomainFieldElement is a field element in the Montgomery domain.
// Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]


@ -18,7 +18,7 @@
// if x1 & (2^256-1) < 2^255 then x1 & (2^256-1) else (x1 & (2^256-1)) - 2^256
const std = @import("std");
const mode = std.builtin.mode; // Checked arithmetic is disabled in non-debug modes to avoid side channels
const mode = @import("builtin").mode; // Checked arithmetic is disabled in non-debug modes to avoid side channels
// The type MontgomeryDomainFieldElement is a field element in the Montgomery domain.
// Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]


@ -1,5 +1,4 @@
const std = @import("std");
const builtin = std.builtin;
const common = @import("../common.zig");
const crypto = std.crypto;
const debug = std.debug;
@ -26,47 +25,47 @@ const Fe = Field(.{
});
/// Reject a scalar whose encoding is not canonical.
pub fn rejectNonCanonical(s: CompressedScalar, endian: builtin.Endian) NonCanonicalError!void {
pub fn rejectNonCanonical(s: CompressedScalar, endian: std.builtin.Endian) NonCanonicalError!void {
return Fe.rejectNonCanonical(s, endian);
}
/// Reduce a 48-bytes scalar to the field size.
pub fn reduce48(s: [48]u8, endian: builtin.Endian) CompressedScalar {
pub fn reduce48(s: [48]u8, endian: std.builtin.Endian) CompressedScalar {
return Scalar.fromBytes48(s, endian).toBytes(endian);
}
/// Reduce a 64-bytes scalar to the field size.
pub fn reduce64(s: [64]u8, endian: builtin.Endian) CompressedScalar {
pub fn reduce64(s: [64]u8, endian: std.builtin.Endian) CompressedScalar {
return ScalarDouble.fromBytes64(s, endian).toBytes(endian);
}
/// Return a*b (mod L)
pub fn mul(a: CompressedScalar, b: CompressedScalar, endian: builtin.Endian) NonCanonicalError!CompressedScalar {
pub fn mul(a: CompressedScalar, b: CompressedScalar, endian: std.builtin.Endian) NonCanonicalError!CompressedScalar {
return (try Scalar.fromBytes(a, endian)).mul(try Scalar.fromBytes(b, endian)).toBytes(endian);
}
/// Return a*b+c (mod L)
pub fn mulAdd(a: CompressedScalar, b: CompressedScalar, c: CompressedScalar, endian: builtin.Endian) NonCanonicalError!CompressedScalar {
pub fn mulAdd(a: CompressedScalar, b: CompressedScalar, c: CompressedScalar, endian: std.builtin.Endian) NonCanonicalError!CompressedScalar {
return (try Scalar.fromBytes(a, endian)).mul(try Scalar.fromBytes(b, endian)).add(try Scalar.fromBytes(c, endian)).toBytes(endian);
}
/// Return a+b (mod L)
pub fn add(a: CompressedScalar, b: CompressedScalar, endian: builtin.Endian) NonCanonicalError!CompressedScalar {
pub fn add(a: CompressedScalar, b: CompressedScalar, endian: std.builtin.Endian) NonCanonicalError!CompressedScalar {
return (try Scalar.fromBytes(a, endian)).add(try Scalar.fromBytes(b, endian)).toBytes(endian);
}
/// Return -s (mod L)
pub fn neg(s: CompressedScalar, endian: builtin.Endian) NonCanonicalError!CompressedScalar {
pub fn neg(s: CompressedScalar, endian: std.builtin.Endian) NonCanonicalError!CompressedScalar {
return (try Scalar.fromBytes(s, endian)).neg().toBytes(endian);
}
/// Return (a-b) (mod L)
pub fn sub(a: CompressedScalar, b: CompressedScalar, endian: builtin.Endian) NonCanonicalError!CompressedScalar {
pub fn sub(a: CompressedScalar, b: CompressedScalar, endian: std.builtin.Endian) NonCanonicalError!CompressedScalar {
return (try Scalar.fromBytes(a, endian)).sub(try Scalar.fromBytes(b.endian)).toBytes(endian);
}
/// Return a random scalar
pub fn random(endian: builtin.Endian) CompressedScalar {
pub fn random(endian: std.builtin.Endian) CompressedScalar {
return Scalar.random().toBytes(endian);
}
@ -81,24 +80,24 @@ pub const Scalar = struct {
pub const one = Scalar{ .fe = Fe.one };
/// Unpack a serialized representation of a scalar.
pub fn fromBytes(s: CompressedScalar, endian: builtin.Endian) NonCanonicalError!Scalar {
pub fn fromBytes(s: CompressedScalar, endian: std.builtin.Endian) NonCanonicalError!Scalar {
return Scalar{ .fe = try Fe.fromBytes(s, endian) };
}
/// Reduce a 384 bit input to the field size.
pub fn fromBytes48(s: [48]u8, endian: builtin.Endian) Scalar {
pub fn fromBytes48(s: [48]u8, endian: std.builtin.Endian) Scalar {
const t = ScalarDouble.fromBytes(384, s, endian);
return t.reduce(384);
}
/// Reduce a 512 bit input to the field size.
pub fn fromBytes64(s: [64]u8, endian: builtin.Endian) Scalar {
pub fn fromBytes64(s: [64]u8, endian: std.builtin.Endian) Scalar {
const t = ScalarDouble.fromBytes(512, s, endian);
return t.reduce(512);
}
/// Pack a scalar into bytes.
pub fn toBytes(n: Scalar, endian: builtin.Endian) CompressedScalar {
pub fn toBytes(n: Scalar, endian: std.builtin.Endian) CompressedScalar {
return n.fe.toBytes(endian);
}
@ -180,7 +179,7 @@ const ScalarDouble = struct {
x2: Fe,
x3: Fe,
fn fromBytes(comptime bits: usize, s_: [bits / 8]u8, endian: builtin.Endian) ScalarDouble {
fn fromBytes(comptime bits: usize, s_: [bits / 8]u8, endian: std.builtin.Endian) ScalarDouble {
debug.assert(bits > 0 and bits <= 512 and bits >= Fe.saturated_bits and bits <= Fe.saturated_bits * 3);
var s = s_;


@ -1,4 +1,5 @@
const std = @import("std");
const builtin = @import("builtin");
const crypto = std.crypto;
const debug = std.debug;
const math = std.math;
@ -304,7 +305,7 @@ const Salsa20NonVecImpl = struct {
}
};
const Salsa20Impl = if (std.Target.current.cpu.arch == .x86_64) Salsa20VecImpl else Salsa20NonVecImpl;
const Salsa20Impl = if (builtin.cpu.arch == .x86_64) Salsa20VecImpl else Salsa20NonVecImpl;
fn keyToWords(key: [32]u8) [8]u32 {
var k: [8]u32 = undefined;


@ -4,6 +4,7 @@
//! directly to standard library users.
const std = @import("std");
const builtin = @import("builtin");
const root = @import("root");
const mem = std.mem;
const os = std.os;
@ -12,7 +13,7 @@ const os = std.os;
/// point to thread-local variables.
pub var interface = std.rand.Random{ .fillFn = tlsCsprngFill };
const os_has_fork = switch (std.Target.current.os.tag) {
const os_has_fork = switch (builtin.os.tag) {
.dragonfly,
.freebsd,
.ios,
@ -29,10 +30,10 @@ const os_has_fork = switch (std.Target.current.os.tag) {
else => false,
};
const os_has_arc4random = std.builtin.link_libc and @hasDecl(std.c, "arc4random_buf");
const os_has_arc4random = builtin.link_libc and @hasDecl(std.c, "arc4random_buf");
const want_fork_safety = os_has_fork and !os_has_arc4random and
(std.meta.globalOption("crypto_fork_safety", bool) orelse true);
const maybe_have_wipe_on_fork = std.Target.current.os.isAtLeast(.linux, .{
const maybe_have_wipe_on_fork = builtin.os.isAtLeast(.linux, .{
.major = 4,
.minor = 14,
}) orelse true;
@ -55,7 +56,7 @@ var install_atfork_handler = std.once(struct {
threadlocal var wipe_mem: []align(mem.page_size) u8 = &[_]u8{};
fn tlsCsprngFill(_: *const std.rand.Random, buffer: []u8) void {
if (std.builtin.link_libc and @hasDecl(std.c, "arc4random_buf")) {
if (builtin.link_libc and @hasDecl(std.c, "arc4random_buf")) {
// arc4random is already a thread-local CSPRNG.
return std.c.arc4random_buf(buffer.ptr, buffer.len);
}


@ -1,5 +1,5 @@
const std = @import("std.zig");
const builtin = std.builtin;
const builtin = @import("builtin");
const debug = std.debug;
const mem = std.mem;
const testing = std.testing;


@ -1,5 +1,5 @@
const std = @import("std.zig");
const builtin = std.builtin;
const builtin = @import("builtin");
const math = std.math;
const mem = std.mem;
const io = std.io;
@ -16,8 +16,8 @@ const root = @import("root");
const maxInt = std.math.maxInt;
const File = std.fs.File;
const windows = std.os.windows;
const native_arch = std.Target.current.cpu.arch;
const native_os = std.Target.current.os.tag;
const native_arch = builtin.cpu.arch;
const native_os = builtin.os.tag;
const native_endian = native_arch.endian();
pub const runtime_safety = switch (builtin.mode) {
@ -150,7 +150,7 @@ pub fn dumpStackTraceFromBase(bp: usize, ip: usize) void {
/// capture that many stack frames exactly, and then look for the first address,
/// chopping off the irrelevant frames and shifting so that the returned addresses pointer
/// equals the passed in addresses pointer.
pub fn captureStackTrace(first_address: ?usize, stack_trace: *builtin.StackTrace) void {
pub fn captureStackTrace(first_address: ?usize, stack_trace: *std.builtin.StackTrace) void {
if (native_os == .windows) {
const addrs = stack_trace.instruction_addresses;
const u32_addrs_len = @intCast(u32, addrs.len);
@ -194,7 +194,7 @@ pub fn captureStackTrace(first_address: ?usize, stack_trace: *builtin.StackTrace
/// Tries to print a stack trace to stderr, unbuffered, and ignores any error returned.
/// TODO multithreaded awareness
pub fn dumpStackTrace(stack_trace: builtin.StackTrace) void {
pub fn dumpStackTrace(stack_trace: std.builtin.StackTrace) void {
nosuspend {
const stderr = io.getStdErr().writer();
if (builtin.strip_debug_info) {
@ -235,7 +235,7 @@ pub fn panic(comptime format: []const u8, args: anytype) noreturn {
/// `panicExtra` is useful when you want to print out an `@errorReturnTrace`
/// and also print out some values.
pub fn panicExtra(
trace: ?*builtin.StackTrace,
trace: ?*std.builtin.StackTrace,
comptime format: []const u8,
args: anytype,
) noreturn {
@ -253,7 +253,7 @@ pub fn panicExtra(
break :blk &buf;
},
};
builtin.panic(msg, trace);
std.builtin.panic(msg, trace);
}
/// Non-zero whenever the program triggered a panic.
@ -269,7 +269,7 @@ threadlocal var panic_stage: usize = 0;
// `panicImpl` could be useful in implementing a custom panic handler which
// calls the default handler (on supported platforms)
pub fn panicImpl(trace: ?*const builtin.StackTrace, first_trace_addr: ?usize, msg: []const u8) noreturn {
pub fn panicImpl(trace: ?*const std.builtin.StackTrace, first_trace_addr: ?usize, msg: []const u8) noreturn {
@setCold(true);
if (enable_segfault_handler) {
@ -339,7 +339,7 @@ const DIM = "\x1b[2m";
const RESET = "\x1b[0m";
pub fn writeStackTrace(
stack_trace: builtin.StackTrace,
stack_trace: std.builtin.StackTrace,
out_stream: anytype,
allocator: *mem.Allocator,
debug_info: *DebugInfo,
@ -764,7 +764,7 @@ pub fn readElfDebugInfo(allocator: *mem.Allocator, elf_file: File) !ModuleDebugI
if (!mem.eql(u8, hdr.e_ident[0..4], "\x7fELF")) return error.InvalidElfMagic;
if (hdr.e_ident[elf.EI_VERSION] != 1) return error.InvalidElfVersion;
const endian: builtin.Endian = switch (hdr.e_ident[elf.EI_DATA]) {
const endian: std.builtin.Endian = switch (hdr.e_ident[elf.EI_DATA]) {
elf.ELFDATA2LSB => .Little,
elf.ELFDATA2MSB => .Big,
else => return error.InvalidElfEndian,
@ -1002,7 +1002,7 @@ pub const DebugInfo = struct {
}
pub fn getModuleForAddress(self: *DebugInfo, address: usize) !*ModuleDebugInfo {
if (comptime std.Target.current.isDarwin()) {
if (comptime builtin.target.isDarwin()) {
return self.lookupModuleDyld(address);
} else if (native_os == .windows) {
return self.lookupModuleWin32(address);


@ -1,5 +1,4 @@
const std = @import("std.zig");
const builtin = std.builtin;
const debug = std.debug;
const fs = std.fs;
const io = std.io;
@ -454,7 +453,7 @@ const LineNumberProgram = struct {
}
};
fn readUnitLength(in_stream: anytype, endian: builtin.Endian, is_64: *bool) !u64 {
fn readUnitLength(in_stream: anytype, endian: std.builtin.Endian, is_64: *bool) !u64 {
const first_32_bits = try in_stream.readInt(u32, endian);
is_64.* = (first_32_bits == 0xffffffff);
if (is_64.*) {
@ -475,7 +474,7 @@ fn readAllocBytes(allocator: *mem.Allocator, in_stream: anytype, size: usize) ![
}
// TODO the nosuspends here are workarounds
fn readAddress(in_stream: anytype, endian: builtin.Endian, is_64: bool) !u64 {
fn readAddress(in_stream: anytype, endian: std.builtin.Endian, is_64: bool) !u64 {
return nosuspend if (is_64)
try in_stream.readInt(u64, endian)
else
@ -488,12 +487,12 @@ fn parseFormValueBlockLen(allocator: *mem.Allocator, in_stream: anytype, size: u
}
// TODO the nosuspends here are workarounds
fn parseFormValueBlock(allocator: *mem.Allocator, in_stream: anytype, endian: builtin.Endian, size: usize) !FormValue {
fn parseFormValueBlock(allocator: *mem.Allocator, in_stream: anytype, endian: std.builtin.Endian, size: usize) !FormValue {
const block_len = try nosuspend in_stream.readVarInt(usize, endian, size);
return parseFormValueBlockLen(allocator, in_stream, block_len);
}
fn parseFormValueConstant(allocator: *mem.Allocator, in_stream: anytype, signed: bool, endian: builtin.Endian, comptime size: i32) !FormValue {
fn parseFormValueConstant(allocator: *mem.Allocator, in_stream: anytype, signed: bool, endian: std.builtin.Endian, comptime size: i32) !FormValue {
_ = allocator;
// TODO: Please forgive me, I've worked around zig not properly spilling some intermediate values here.
// `nosuspend` should be removed from all the function calls once it is fixed.
@ -521,7 +520,7 @@ fn parseFormValueConstant(allocator: *mem.Allocator, in_stream: anytype, signed:
}
// TODO the nosuspends here are workarounds
fn parseFormValueRef(allocator: *mem.Allocator, in_stream: anytype, endian: builtin.Endian, size: i32) !FormValue {
fn parseFormValueRef(allocator: *mem.Allocator, in_stream: anytype, endian: std.builtin.Endian, size: i32) !FormValue {
_ = allocator;
return FormValue{
.Ref = switch (size) {
@ -536,7 +535,7 @@ fn parseFormValueRef(allocator: *mem.Allocator, in_stream: anytype, endian: buil
}
// TODO the nosuspends here are workarounds
fn parseFormValue(allocator: *mem.Allocator, in_stream: anytype, form_id: u64, endian: builtin.Endian, is_64: bool) anyerror!FormValue {
fn parseFormValue(allocator: *mem.Allocator, in_stream: anytype, form_id: u64, endian: std.builtin.Endian, is_64: bool) anyerror!FormValue {
return switch (form_id) {
FORM.addr => FormValue{ .Address = try readAddress(in_stream, endian, @sizeOf(usize) == 8) },
FORM.block1 => parseFormValueBlock(allocator, in_stream, endian, 1),
@ -593,7 +592,7 @@ fn getAbbrevTableEntry(abbrev_table: *const AbbrevTable, abbrev_code: u64) ?*con
}
pub const DwarfInfo = struct {
endian: builtin.Endian,
endian: std.builtin.Endian,
// No memory is owned by the DwarfInfo
debug_info: []const u8,
debug_abbrev: []const u8,


@ -1,6 +1,5 @@
const builtin = std.builtin;
const std = @import("std.zig");
const builtin = @import("builtin");
const mem = std.mem;
const os = std.os;
const assert = std.debug.assert;


@ -1,5 +1,5 @@
const std = @import("../std.zig");
const builtin = std.builtin;
const builtin = @import("builtin");
const assert = std.debug.assert;
const testing = std.testing;
const Loop = std.event.Loop;


@ -1,7 +1,7 @@
const std = @import("../std.zig");
const builtin = @import("builtin");
const assert = std.debug.assert;
const testing = std.testing;
const builtin = std.builtin;
const Lock = std.event.Lock;
/// This is a value that starts out unavailable, until resolve() is called


@ -1,5 +1,5 @@
const std = @import("../std.zig");
const builtin = std.builtin;
const builtin = @import("builtin");
const Lock = std.event.Lock;
const testing = std.testing;
const Allocator = std.mem.Allocator;


@ -1,5 +1,5 @@
const std = @import("../std.zig");
const builtin = std.builtin;
const builtin = @import("builtin");
const assert = std.debug.assert;
const testing = std.testing;
const mem = std.mem;


@ -1,5 +1,5 @@
const std = @import("../std.zig");
const builtin = std.builtin;
const builtin = @import("builtin");
const root = @import("root");
const assert = std.debug.assert;
const testing = std.testing;
@ -9,7 +9,7 @@ const windows = os.windows;
const maxInt = std.math.maxInt;
const Thread = std.Thread;
const is_windows = std.Target.current.os.tag == .windows;
const is_windows = builtin.os.tag == .windows;
pub const Loop = struct {
next_tick_queue: std.atomic.Queue(anyframe),
@ -191,7 +191,7 @@ pub const Loop = struct {
self.fs_thread.join();
};
if (!std.builtin.single_threaded)
if (!builtin.single_threaded)
try self.delay_queue.init();
}
@ -825,7 +825,7 @@ pub const Loop = struct {
}
pub fn sleep(self: *Loop, nanoseconds: u64) void {
if (std.builtin.single_threaded)
if (builtin.single_threaded)
@compileError("TODO: integrate timers with epoll/kevent/iocp for single-threaded");
suspend {


@ -1,5 +1,5 @@
const std = @import("../std.zig");
const builtin = std.builtin;
const builtin = @import("builtin");
const assert = std.debug.assert;
const testing = std.testing;
const mem = std.mem;


@ -1,5 +1,5 @@
const std = @import("../std.zig");
const builtin = std.builtin;
const builtin = @import("builtin");
const Loop = std.event.Loop;
/// A WaitGroup keeps track and waits for a group of async tasks to finish.


@ -1,6 +1,6 @@
const root = @import("root");
const builtin = std.builtin;
const std = @import("std.zig");
const builtin = @import("builtin");
const root = @import("root");
const os = std.os;
const mem = std.mem;
const base64 = std.base64;
@ -9,7 +9,7 @@ const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const math = std.math;
const is_darwin = std.Target.current.os.tag.isDarwin();
const is_darwin = builtin.os.tag.isDarwin();
pub const path = @import("fs/path.zig");
pub const File = @import("fs/file.zig").File;
@ -2607,7 +2607,7 @@ const CopyFileError = error{SystemResources} || os.CopyFileRangeError || os.Send
// The copy starts at offset 0, the initial offsets are preserved.
// No metadata is transferred over.
fn copy_file(fd_in: os.fd_t, fd_out: os.fd_t) CopyFileError!void {
if (comptime std.Target.current.isDarwin()) {
if (comptime builtin.target.isDarwin()) {
const rc = os.system.fcopyfile(fd_in, fd_out, null, os.system.COPYFILE_DATA);
switch (os.errno(rc)) {
.SUCCESS => return,
@ -2620,7 +2620,7 @@ fn copy_file(fd_in: os.fd_t, fd_out: os.fd_t) CopyFileError!void {
}
}
if (std.Target.current.os.tag == .linux) {
if (builtin.os.tag == .linux) {
// Try copy_file_range first as that works at the FS level and is the
// most efficient method (if available).
var offset: u64 = 0;


@ -1,14 +1,14 @@
const std = @import("../std.zig");
const builtin = std.builtin;
const builtin = @import("builtin");
const os = std.os;
const io = std.io;
const mem = std.mem;
const math = std.math;
const assert = std.debug.assert;
const windows = os.windows;
const Os = builtin.Os;
const Os = std.builtin.Os;
const maxInt = std.math.maxInt;
const is_windows = std.Target.current.os.tag == .windows;
const is_windows = builtin.os.tag == .windows;
pub const File = struct {
/// The OS-specific file descriptor or file handle.


@ -1,5 +1,5 @@
const std = @import("../std.zig");
const builtin = std.builtin;
const builtin = @import("builtin");
const unicode = std.unicode;
const mem = std.mem;
const fs = std.fs;


@ -1,6 +1,6 @@
const std = @import("../std.zig");
const builtin = @import("builtin");
const testing = std.testing;
const builtin = std.builtin;
const fs = std.fs;
const mem = std.mem;
const wasi = std.os.wasi;


@ -1,4 +1,5 @@
const std = @import("std");
const builtin = @import("builtin");
const os = std.os;
const mem = std.mem;
const math = std.math;
@ -165,7 +166,7 @@ pub const PreopenList = struct {
};
test "extracting WASI preopens" {
if (std.builtin.os.tag != .wasi or std.builtin.link_libc) return error.SkipZigTest;
if (builtin.os.tag != .wasi or builtin.link_libc) return error.SkipZigTest;
var preopens = PreopenList.init(std.testing.allocator);
defer preopens.deinit();


@ -1,5 +1,5 @@
const std = @import("std");
const builtin = std.builtin;
const builtin = @import("builtin");
const event = std.event;
const assert = std.debug.assert;
const testing = std.testing;
@ -250,7 +250,7 @@ pub fn Watch(comptime V: type) type {
};
// @TODO Can I close this fd and get an error from bsdWaitKev?
const flags = if (comptime std.Target.current.isDarwin()) os.O.SYMLINK | os.O.EVTONLY else 0;
const flags = if (comptime builtin.target.isDarwin()) os.O.SYMLINK | os.O.EVTONLY else 0;
const fd = try os.open(realpath, flags, 0);
gop.value_ptr.putter_frame = async self.kqPutEvents(fd, gop.key_ptr.*, gop.value_ptr.*);
return null;


@ -2,7 +2,6 @@ const std = @import("std");
const assert = std.debug.assert;
const mem = std.mem;
const meta = std.meta;
const builtin = std.builtin;
/// Describes how pointer types should be hashed.
pub const HashStrategy = enum {
@ -233,7 +232,7 @@ fn testHashDeepRecursive(key: anytype) u64 {
test "typeContainsSlice" {
comptime {
try testing.expect(!typeContainsSlice(meta.Tag(builtin.TypeInfo)));
try testing.expect(!typeContainsSlice(meta.Tag(std.builtin.TypeInfo)));
try testing.expect(typeContainsSlice([]const u8));
try testing.expect(!typeContainsSlice(u8));


@ -1,7 +1,7 @@
// zig run -O ReleaseFast --zig-lib-dir ../.. benchmark.zig
const builtin = std.builtin;
const std = @import("std");
const builtin = @import("builtin");
const time = std.time;
const Timer = time.Timer;
const hash = std.hash;


@ -1,5 +1,4 @@
const std = @import("std");
const builtin = std.builtin;
inline fn offsetPtr(ptr: [*]const u8, offset: usize) [*]const u8 {
// ptr + offset doesn't work at comptime so we need this instead.


@ -6,6 +6,7 @@
// still moderately fast just slow relative to the slicing approach.
const std = @import("../std.zig");
const builtin = @import("builtin");
const debug = std.debug;
const testing = std.testing;
@ -97,7 +98,7 @@ pub fn Crc32WithPoly(comptime poly: Polynomial) type {
};
}
const please_windows_dont_oom = std.Target.current.os.tag == .windows;
const please_windows_dont_oom = builtin.os.tag == .windows;
test "crc32 ieee" {
if (please_windows_dont_oom) return error.SkipZigTest;
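
Aside, not part of this diff: a minimal sketch of exercising the table-driven CRC32 above through the public std.hash.Crc32 wrapper. 0xcbf43926 is the standard CRC-32/IEEE check value for "123456789".

const std = @import("std");

test "sketch: crc32 check value" {
    // Hypothetical example, not from this commit.
    try std.testing.expectEqual(@as(u32, 0xcbf43926), std.hash.Crc32.hash("123456789"));
}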


@ -1,11 +1,11 @@
const std = @import("std.zig");
const builtin = @import("builtin");
const root = @import("root");
const debug = std.debug;
const assert = debug.assert;
const testing = std.testing;
const mem = std.mem;
const os = std.os;
const builtin = std.builtin;
const c = std.c;
const maxInt = std.math.maxInt;
@ -209,9 +209,9 @@ fn rawCResize(
/// This allocator makes a syscall directly for every allocation and free.
/// Thread-safe and lock-free.
pub const page_allocator = if (std.Target.current.isWasm())
pub const page_allocator = if (builtin.target.isWasm())
&wasm_page_allocator_state
else if (std.Target.current.os.tag == .freestanding)
else if (builtin.target.os.tag == .freestanding)
root.os.heap.page_allocator
else
&page_allocator_state;
@ -402,7 +402,7 @@ const PageAllocator = struct {
const WasmPageAllocator = struct {
comptime {
if (!std.Target.current.isWasm()) {
if (!builtin.target.isWasm()) {
@compileError("WasmPageAllocator is only available for wasm32 arch");
}
}
@ -608,11 +608,11 @@ pub const HeapAllocator = switch (builtin.os.tag) {
const self = @fieldParentPtr(HeapAllocator, "allocator", allocator);
const amt = n + ptr_align - 1 + @sizeOf(usize);
const optional_heap_handle = @atomicLoad(?HeapHandle, &self.heap_handle, builtin.AtomicOrder.SeqCst);
const optional_heap_handle = @atomicLoad(?HeapHandle, &self.heap_handle, .SeqCst);
const heap_handle = optional_heap_handle orelse blk: {
const options = if (builtin.single_threaded) os.windows.HEAP_NO_SERIALIZE else 0;
const hh = os.windows.kernel32.HeapCreate(options, amt, 0) orelse return error.OutOfMemory;
const other_hh = @cmpxchgStrong(?HeapHandle, &self.heap_handle, null, hh, builtin.AtomicOrder.SeqCst, builtin.AtomicOrder.SeqCst) orelse break :blk hh;
const other_hh = @cmpxchgStrong(?HeapHandle, &self.heap_handle, null, hh, .SeqCst, .SeqCst) orelse break :blk hh;
os.windows.HeapDestroy(hh);
break :blk other_hh.?; // can't be null because of the cmpxchg
};
@ -792,7 +792,7 @@ pub const ThreadSafeFixedBufferAllocator = blk: {
_ = len_align;
_ = ra;
const self = @fieldParentPtr(ThreadSafeFixedBufferAllocator, "allocator", allocator);
var end_index = @atomicLoad(usize, &self.end_index, builtin.AtomicOrder.SeqCst);
var end_index = @atomicLoad(usize, &self.end_index, .SeqCst);
while (true) {
const adjust_off = mem.alignPointerOffset(self.buffer.ptr + end_index, ptr_align) orelse
return error.OutOfMemory;
@ -801,7 +801,7 @@ pub const ThreadSafeFixedBufferAllocator = blk: {
if (new_end_index > self.buffer.len) {
return error.OutOfMemory;
}
end_index = @cmpxchgWeak(usize, &self.end_index, end_index, new_end_index, builtin.AtomicOrder.SeqCst, builtin.AtomicOrder.SeqCst) orelse return self.buffer[adjusted_index..new_end_index];
end_index = @cmpxchgWeak(usize, &self.end_index, end_index, new_end_index, .SeqCst, .SeqCst) orelse return self.buffer[adjusted_index..new_end_index];
}
}
@ -884,7 +884,7 @@ test "raw_c_allocator" {
}
test "WasmPageAllocator internals" {
if (comptime std.Target.current.isWasm()) {
if (comptime builtin.target.isWasm()) {
const conventional_memsize = WasmPageAllocator.conventional.totalPages() * mem.page_size;
const initial = try page_allocator.alloc(u8, mem.page_size);
try testing.expect(@ptrToInt(initial.ptr) < conventional_memsize); // If this isn't conventional, the rest of these tests don't make sense. Also we have a serious memory leak in the test suite.
@ -924,7 +924,7 @@ test "PageAllocator" {
const allocator = page_allocator;
try testAllocator(allocator);
try testAllocatorAligned(allocator);
if (!std.Target.current.isWasm()) {
if (!builtin.target.isWasm()) {
try testAllocatorLargeAlignment(allocator);
try testAllocatorAlignedShrink(allocator);
}
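
For context rather than as part of the commit: a sketch of what the comptime selection above means for callers. By the time this hypothetical test compiles, std.heap.page_allocator is already bound to the wasm, freestanding, or OS-backed implementation.

const std = @import("std");

test "sketch: page_allocator round trip" {
    // Hypothetical usage; the backend was chosen at compile time above.
    const buf = try std.heap.page_allocator.alloc(u8, 128);
    defer std.heap.page_allocator.free(buf);
    try std.testing.expect(buf.len == 128);
}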


@ -93,6 +93,7 @@
//! in a `std.HashMap` using the backing allocator.
const std = @import("std");
const builtin = @import("builtin");
const log = std.log.scoped(.gpa);
const math = std.math;
const assert = std.debug.assert;
@ -104,7 +105,7 @@ const StackTrace = std.builtin.StackTrace;
/// Integer type for pointing to slots in a small allocation
const SlotIndex = std.meta.Int(.unsigned, math.log2(page_size) + 1);
const sys_can_stack_trace = switch (std.Target.current.cpu.arch) {
const sys_can_stack_trace = switch (builtin.cpu.arch) {
// Observed to go into an infinite loop.
// TODO: Make this work.
.mips,
@ -115,13 +116,13 @@ const sys_can_stack_trace = switch (std.Target.current.cpu.arch) {
// "Non-Emscripten WebAssembly hasn't implemented __builtin_return_address".
.wasm32,
.wasm64,
=> std.Target.current.os.tag == .emscripten,
=> builtin.os.tag == .emscripten,
else => true,
};
const default_test_stack_trace_frames: usize = if (std.builtin.is_test) 8 else 4;
const default_test_stack_trace_frames: usize = if (builtin.is_test) 8 else 4;
const default_sys_stack_trace_frames: usize = if (sys_can_stack_trace) default_test_stack_trace_frames else 0;
const default_stack_trace_frames: usize = switch (std.builtin.mode) {
const default_stack_trace_frames: usize = switch (builtin.mode) {
.Debug => default_sys_stack_trace_frames,
else => 0,
};
@ -141,7 +142,7 @@ pub const Config = struct {
safety: bool = std.debug.runtime_safety,
/// Whether the allocator may be used simultaneously from multiple threads.
thread_safe: bool = !std.builtin.single_threaded,
thread_safe: bool = !builtin.single_threaded,
/// What type of mutex you'd like to use, for thread safety.
/// when specified, the mutex type must have the same shape as `std.Thread.Mutex` and
@ -988,7 +989,7 @@ test "shrink large object to large object with larger alignment" {
var slice = try allocator.alignedAlloc(u8, 16, alloc_size);
defer allocator.free(slice);
const big_alignment: usize = switch (std.Target.current.os.tag) {
const big_alignment: usize = switch (builtin.os.tag) {
.windows => page_size * 32, // Windows aligns to 64K.
else => page_size * 2,
};
@ -1058,7 +1059,7 @@ test "realloc large object to larger alignment" {
var slice = try allocator.alignedAlloc(u8, 16, page_size * 2 + 50);
defer allocator.free(slice);
const big_alignment: usize = switch (std.Target.current.os.tag) {
const big_alignment: usize = switch (builtin.os.tag) {
.windows => page_size * 32, // Windows aligns to 64K.
else => page_size * 2,
};
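
Not from the diff: a hedged sketch of the Config knobs referenced above in use. The &gpa.allocator field access is an assumption about the field-style Allocator interface of this vintage.

const std = @import("std");

test "sketch: GeneralPurposeAllocator with an explicit config" {
    // Hypothetical example: opt into thread safety regardless of build mode.
    var gpa = std.heap.GeneralPurposeAllocator(.{ .thread_safe = true }){};
    defer _ = gpa.deinit(); // reports whether leaks were detected
    const allocator = &gpa.allocator;
    const buf = try allocator.alloc(u8, 64);
    defer allocator.free(buf);
}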


@ -1,5 +1,5 @@
const std = @import("std.zig");
const builtin = std.builtin;
const builtin = @import("builtin");
const root = @import("root");
const c = std.c;


@ -1,5 +1,4 @@
const std = @import("../std.zig");
const builtin = std.builtin;
const io = std.io;
const assert = std.debug.assert;
const testing = std.testing;
@ -8,7 +7,7 @@ const meta = std.meta;
const math = std.math;
/// Creates a stream which allows for reading bit fields from another stream
pub fn BitReader(endian: builtin.Endian, comptime ReaderType: type) type {
pub fn BitReader(endian: std.builtin.Endian, comptime ReaderType: type) type {
return struct {
forward_reader: ReaderType,
bit_buffer: u7,
@ -162,7 +161,7 @@ pub fn BitReader(endian: builtin.Endian, comptime ReaderType: type) type {
}
pub fn bitReader(
comptime endian: builtin.Endian,
comptime endian: std.builtin.Endian,
underlying_stream: anytype,
) BitReader(endian, @TypeOf(underlying_stream)) {
return BitReader(endian, @TypeOf(underlying_stream)).init(underlying_stream);
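
Illustrative only, not part of the commit: one way the bitReader constructor above is typically driven, assuming the readBitsNoEof method on the returned reader.

const std = @import("std");

test "sketch: read two bits big-endian" {
    // Hypothetical usage; .Big is an std.builtin.Endian literal.
    const bytes = [_]u8{0b1010_0000};
    var fbs = std.io.fixedBufferStream(&bytes);
    var bit_reader = std.io.bitReader(.Big, fbs.reader());
    try std.testing.expectEqual(@as(u2, 0b10), try bit_reader.readBitsNoEof(u2, 2));
}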


@ -1,5 +1,4 @@
const std = @import("../std.zig");
const builtin = std.builtin;
const io = std.io;
const testing = std.testing;
const assert = std.debug.assert;
@ -8,7 +7,7 @@ const meta = std.meta;
const math = std.math;
/// Creates a stream which allows for writing bit fields to another stream
pub fn BitWriter(endian: builtin.Endian, comptime WriterType: type) type {
pub fn BitWriter(endian: std.builtin.Endian, comptime WriterType: type) type {
return struct {
forward_writer: WriterType,
bit_buffer: u8,
@ -138,7 +137,7 @@ pub fn BitWriter(endian: builtin.Endian, comptime WriterType: type) type {
}
pub fn bitWriter(
comptime endian: builtin.Endian,
comptime endian: std.builtin.Endian,
underlying_stream: anytype,
) BitWriter(endian, @TypeOf(underlying_stream)) {
return BitWriter(endian, @TypeOf(underlying_stream)).init(underlying_stream);


@ -1,5 +1,5 @@
const std = @import("../std.zig");
const builtin = std.builtin;
const builtin = @import("builtin");
const io = std.io;
const testing = std.testing;
const os = std.os;


@ -1,5 +1,4 @@
const std = @import("../std.zig");
const builtin = std.builtin;
const math = std.math;
const assert = std.debug.assert;
const mem = std.mem;
@ -265,12 +264,12 @@ pub fn Reader(
return mem.readIntBig(T, &bytes);
}
pub fn readInt(self: Self, comptime T: type, endian: builtin.Endian) !T {
pub fn readInt(self: Self, comptime T: type, endian: std.builtin.Endian) !T {
const bytes = try self.readBytesNoEof((@typeInfo(T).Int.bits + 7) / 8);
return mem.readInt(T, &bytes, endian);
}
pub fn readVarInt(self: Self, comptime ReturnType: type, endian: builtin.Endian, size: usize) !ReturnType {
pub fn readVarInt(self: Self, comptime ReturnType: type, endian: std.builtin.Endian, size: usize) !ReturnType {
assert(size <= @sizeOf(ReturnType));
var bytes_buf: [@sizeOf(ReturnType)]u8 = undefined;
const bytes = bytes_buf[0..size];
@ -310,7 +309,7 @@ pub fn Reader(
pub fn readStruct(self: Self, comptime T: type) !T {
// Only extern and packed structs have defined in-memory layout.
comptime assert(@typeInfo(T).Struct.layout != builtin.TypeInfo.ContainerLayout.Auto);
comptime assert(@typeInfo(T).Struct.layout != std.builtin.TypeInfo.ContainerLayout.Auto);
var res: [1]T = undefined;
try self.readNoEof(mem.sliceAsBytes(res[0..]));
return res[0];
@ -319,7 +318,7 @@ pub fn Reader(
/// Reads an integer with the same size as the given enum's tag type. If the integer matches
/// an enum tag, casts the integer to the enum tag and returns it. Otherwise, returns an error.
/// TODO optimization taking advantage of most fields being in order
pub fn readEnum(self: Self, comptime Enum: type, endian: builtin.Endian) !Enum {
pub fn readEnum(self: Self, comptime Enum: type, endian: std.builtin.Endian) !Enum {
const E = error{
/// An integer was read, but it did not match any of the tags in the supplied enum.
InvalidValue,
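
A small usage sketch, not in the diff, of the explicit std.builtin.Endian argument these reader methods now take; the fixed buffer stream is just a convenient byte source.

const std = @import("std");

test "sketch: readInt with an explicit endianness" {
    // Hypothetical example.
    const bytes = [_]u8{ 0x12, 0x34 };
    var fbs = std.io.fixedBufferStream(&bytes);
    try std.testing.expectEqual(@as(u16, 0x1234), try fbs.reader().readInt(u16, .Big));
}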

View File

@ -1,4 +1,5 @@
const std = @import("../std.zig");
const builtin = @import("builtin");
const io = std.io;
/// Provides `io.Reader`, `io.Writer`, and `io.SeekableStream` for in-memory buffers as
@ -6,7 +7,7 @@ const io = std.io;
/// For memory sources, if the supplied byte buffer is const, then `io.Writer` is not available.
/// The error set of the stream functions is the error set of the corresponding file functions.
pub const StreamSource = union(enum) {
const has_file = (std.builtin.os.tag != .freestanding);
const has_file = (builtin.os.tag != .freestanding);
/// The stream access is redirected to this buffer.
buffer: io.FixedBufferStream([]u8),


@ -1,6 +1,5 @@
const std = @import("../std.zig");
const assert = std.debug.assert;
const builtin = std.builtin;
const mem = std.mem;
pub fn Writer(
@ -77,7 +76,7 @@ pub fn Writer(
}
/// TODO audit non-power-of-two int sizes
pub fn writeInt(self: Self, comptime T: type, value: T, endian: builtin.Endian) Error!void {
pub fn writeInt(self: Self, comptime T: type, value: T, endian: std.builtin.Endian) Error!void {
var bytes: [(@typeInfo(T).Int.bits + 7) / 8]u8 = undefined;
mem.writeInt(T, &bytes, value, endian);
return self.writeAll(&bytes);
@ -85,7 +84,7 @@ pub fn Writer(
pub fn writeStruct(self: Self, value: anytype) Error!void {
// Only extern and packed structs have defined in-memory layout.
comptime assert(@typeInfo(@TypeOf(value)).Struct.layout != builtin.TypeInfo.ContainerLayout.Auto);
comptime assert(@typeInfo(@TypeOf(value)).Struct.layout != std.builtin.TypeInfo.ContainerLayout.Auto);
return self.writeAll(mem.asBytes(&value));
}
};


@ -69,7 +69,7 @@
//! ```
const std = @import("std.zig");
const builtin = std.builtin;
const builtin = @import("builtin");
const root = @import("root");
pub const Level = enum {
@ -170,7 +170,7 @@ pub fn defaultLog(
comptime format: []const u8,
args: anytype,
) void {
if (std.Target.current.os.tag == .freestanding) {
if (builtin.os.tag == .freestanding) {
// On freestanding one must provide a log function; we do not have
// any I/O configured.
return;
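
For orientation only, not from the commit: a sketch of the scoped logging front end this hunk guards. The .example scope name is made up, and on freestanding targets the default handler above is a no-op unless the root file overrides log.

const std = @import("std");
const example_log = std.log.scoped(.example);

pub fn main() void {
    // Hypothetical usage of the default logging pipeline.
    example_log.info("running on {s}", .{@tagName(@import("builtin").os.tag)});
}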


@ -1,4 +1,5 @@
const std = @import("std.zig");
const builtin = @import("builtin");
const debug = std.debug;
const assert = debug.assert;
const math = std.math;
@ -7,13 +8,13 @@ const meta = std.meta;
const trait = meta.trait;
const testing = std.testing;
const Endian = std.builtin.Endian;
const native_endian = std.Target.current.cpu.arch.endian();
const native_endian = builtin.cpu.arch.endian();
/// Compile time known minimum page size.
/// https://github.com/ziglang/zig/issues/4082
pub const page_size = switch (std.Target.current.cpu.arch) {
pub const page_size = switch (builtin.cpu.arch) {
.wasm32, .wasm64 => 64 * 1024,
.aarch64 => switch (std.Target.current.os.tag) {
.aarch64 => switch (builtin.os.tag) {
.macos, .ios, .watchos, .tvos => 16 * 1024,
else => 4 * 1024,
},
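
Not part of the change: a short sketch showing that @import("builtin") exposes the same comptime-known target facts that std.Target.current used to.

const std = @import("std");
const builtin = @import("builtin");

test "sketch: target facts are comptime-known" {
    // Hypothetical assertions; both values resolve at compile time.
    const endian = comptime builtin.cpu.arch.endian();
    try std.testing.expect(endian == .Little or endian == .Big);
    try std.testing.expect(std.mem.page_size >= 4 * 1024);
}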


@ -1,5 +1,4 @@
const std = @import("std.zig");
const builtin = std.builtin;
const debug = std.debug;
const mem = std.mem;
const math = std.math;
@ -9,7 +8,7 @@ const root = @import("root");
pub const trait = @import("meta/trait.zig");
pub const TrailerFlags = @import("meta/trailer_flags.zig").TrailerFlags;
const TypeInfo = builtin.TypeInfo;
const TypeInfo = std.builtin.TypeInfo;
pub fn tagName(v: anytype) []const u8 {
const T = @TypeOf(v);
@ -858,7 +857,7 @@ pub fn declList(comptime Namespace: type, comptime Decl: type) []const *const De
pub const IntType = @compileError("replaced by std.meta.Int");
pub fn Int(comptime signedness: builtin.Signedness, comptime bit_count: u16) type {
pub fn Int(comptime signedness: std.builtin.Signedness, comptime bit_count: u16) type {
return @Type(TypeInfo{
.Int = .{
.signedness = signedness,


@ -1,5 +1,4 @@
const std = @import("../std.zig");
const builtin = std.builtin;
const mem = std.mem;
const debug = std.debug;
const testing = std.testing;
@ -98,7 +97,7 @@ test "std.meta.trait.hasField" {
try testing.expect(!hasField("value")(u8));
}
pub fn is(comptime id: builtin.TypeId) TraitFn {
pub fn is(comptime id: std.builtin.TypeId) TraitFn {
const Closure = struct {
pub fn trait(comptime T: type) bool {
return id == @typeInfo(T);
@ -115,7 +114,7 @@ test "std.meta.trait.is" {
try testing.expect(!is(.Optional)(anyerror));
}
pub fn isPtrTo(comptime id: builtin.TypeId) TraitFn {
pub fn isPtrTo(comptime id: std.builtin.TypeId) TraitFn {
const Closure = struct {
pub fn trait(comptime T: type) bool {
if (!comptime isSingleItemPtr(T)) return false;
@ -131,7 +130,7 @@ test "std.meta.trait.isPtrTo" {
try testing.expect(!isPtrTo(.Struct)(**struct {}));
}
pub fn isSliceOf(comptime id: builtin.TypeId) TraitFn {
pub fn isSliceOf(comptime id: std.builtin.TypeId) TraitFn {
const Closure = struct {
pub fn trait(comptime T: type) bool {
if (!comptime isSlice(T)) return false;


@ -12,7 +12,7 @@ const native_endian = builtin.target.cpu.arch.endian();
// first release to support them.
pub const has_unix_sockets = @hasDecl(os.sockaddr, "un") and
(builtin.target.os.tag != .windows or
std.Target.current.os.version_range.windows.isAtLeast(.win10_rs4) orelse false);
builtin.os.version_range.windows.isAtLeast(.win10_rs4) orelse false);
pub const Address = extern union {
any: os.sockaddr,
@ -1623,7 +1623,7 @@ pub const Stream = struct {
}
pub fn read(self: Stream, buffer: []u8) ReadError!usize {
if (std.Target.current.os.tag == .windows) {
if (builtin.os.tag == .windows) {
return os.windows.ReadFile(self.handle, buffer, null, io.default_mode);
}
@ -1638,7 +1638,7 @@ pub const Stream = struct {
/// file system thread instead of non-blocking. It needs to be reworked to properly
/// use non-blocking I/O.
pub fn write(self: Stream, buffer: []const u8) WriteError!usize {
if (std.Target.current.os.tag == .windows) {
if (builtin.os.tag == .windows) {
return os.windows.WriteFile(self.handle, buffer, null, io.default_mode);
}


@ -1,5 +1,5 @@
const std = @import("../std.zig");
const builtin = std.builtin;
const builtin = @import("builtin");
const net = std.net;
const mem = std.mem;
const testing = std.testing;
@ -35,7 +35,7 @@ test "parse and render IPv6 addresses" {
var newIp = std.fmt.bufPrint(buffer[0..], "{}", .{addr}) catch unreachable;
try std.testing.expect(std.mem.eql(u8, printed[i], newIp[1 .. newIp.len - 3]));
if (std.builtin.os.tag == .linux) {
if (builtin.os.tag == .linux) {
var addr_via_resolve = net.Address.resolveIp6(ip, 0) catch unreachable;
var newResolvedIp = std.fmt.bufPrint(buffer[0..], "{}", .{addr_via_resolve}) catch unreachable;
try std.testing.expect(std.mem.eql(u8, printed[i], newResolvedIp[1 .. newResolvedIp.len - 3]));
@ -49,7 +49,7 @@ test "parse and render IPv6 addresses" {
try testing.expectError(error.Incomplete, net.Address.parseIp6("FF01:", 0));
try testing.expectError(error.InvalidIpv4Mapping, net.Address.parseIp6("::123.123.123.123", 0));
// TODO Make this test pass on other operating systems.
if (std.builtin.os.tag == .linux) {
if (builtin.os.tag == .linux) {
try testing.expectError(error.Incomplete, net.Address.resolveIp6("ff01::fb%", 0));
try testing.expectError(error.Overflow, net.Address.resolveIp6("ff01::fb%wlp3s0s0s0s0s0s0s0s0", 0));
try testing.expectError(error.Overflow, net.Address.resolveIp6("ff01::fb%12345678901234", 0));
@ -57,7 +57,7 @@ test "parse and render IPv6 addresses" {
}
test "invalid but parseable IPv6 scope ids" {
if (std.builtin.os.tag != .linux) {
if (builtin.os.tag != .linux) {
// Currently, resolveIp6 with alphanumerical scope IDs only works on Linux.
// TODO Make this test pass on other operating systems.
return error.SkipZigTest;
@ -106,11 +106,11 @@ test "parse and render UNIX addresses" {
test "resolve DNS" {
if (builtin.os.tag == .wasi) return error.SkipZigTest;
if (std.builtin.os.tag == .windows) {
if (builtin.os.tag == .windows) {
_ = try std.os.windows.WSAStartup(2, 2);
}
defer {
if (std.builtin.os.tag == .windows) {
if (builtin.os.tag == .windows) {
std.os.windows.WSACleanup() catch unreachable;
}
}
@ -143,11 +143,11 @@ test "listen on a port, send bytes, receive bytes" {
if (builtin.single_threaded) return error.SkipZigTest;
if (builtin.os.tag == .wasi) return error.SkipZigTest;
if (std.builtin.os.tag == .windows) {
if (builtin.os.tag == .windows) {
_ = try std.os.windows.WSAStartup(2, 2);
}
defer {
if (std.builtin.os.tag == .windows) {
if (builtin.os.tag == .windows) {
std.os.windows.WSACleanup() catch unreachable;
}
}
@ -185,7 +185,7 @@ test "listen on a port, send bytes, receive bytes" {
test "listen on a port, send bytes, receive bytes" {
if (!std.io.is_async) return error.SkipZigTest;
if (std.builtin.os.tag != .linux and !std.builtin.os.tag.isDarwin()) {
if (builtin.os.tag != .linux and !builtin.os.tag.isDarwin()) {
// TODO build abstractions for other operating systems
return error.SkipZigTest;
}
@ -207,7 +207,7 @@ test "listen on a port, send bytes, receive bytes" {
test "listen on ipv4 try connect on ipv6 then ipv4" {
if (!std.io.is_async) return error.SkipZigTest;
if (std.builtin.os.tag != .linux and !std.builtin.os.tag.isDarwin()) {
if (builtin.os.tag != .linux and !builtin.os.tag.isDarwin()) {
// TODO build abstractions for other operating systems
return error.SkipZigTest;
}
@ -267,11 +267,11 @@ test "listen on a unix socket, send bytes, receive bytes" {
if (builtin.single_threaded) return error.SkipZigTest;
if (!net.has_unix_sockets) return error.SkipZigTest;
if (std.builtin.os.tag == .windows) {
if (builtin.os.tag == .windows) {
_ = try std.os.windows.WSAStartup(2, 2);
}
defer {
if (std.builtin.os.tag == .windows) {
if (builtin.os.tag == .windows) {
std.os.windows.WSACleanup() catch unreachable;
}
}


@ -1,5 +1,5 @@
const std = @import("std.zig");
const builtin = std.builtin;
const builtin = @import("builtin");
const testing = std.testing;
pub fn once(comptime f: fn () void) Once(f) {
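
As a usage sketch rather than part of the diff: once wraps a function so that repeated call()s execute it exactly one time; the counter below is hypothetical.

const std = @import("std");

var counter: u32 = 0;
fn incr() void {
    counter += 1;
}
var incr_once = std.once(incr);

test "sketch: once runs the wrapped function a single time" {
    incr_once.call();
    incr_once.call();
    try std.testing.expectEqual(@as(u32, 1), counter);
}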


@ -240,7 +240,7 @@ pub fn close(fd: fd_t) void {
_ = wasi.fd_close(fd);
return;
}
if (comptime std.Target.current.isDarwin()) {
if (comptime builtin.target.isDarwin()) {
// This avoids the EINTR problem.
switch (darwin.getErrno(darwin.@"close$NOCANCEL"(fd))) {
.BADF => unreachable, // Always a race condition.
@ -487,7 +487,7 @@ pub fn read(fd: fd_t, buf: []u8) ReadError!usize {
}
// Prevents EINVAL.
const max_count = switch (std.Target.current.os.tag) {
const max_count = switch (builtin.os.tag) {
.linux => 0x7ffff000,
.macos, .ios, .watchos, .tvos => math.maxInt(i32),
else => math.maxInt(isize),
@ -525,7 +525,7 @@ pub fn read(fd: fd_t, buf: []u8) ReadError!usize {
/// * Windows
/// On these systems, the read races with concurrent writes to the same file descriptor.
pub fn readv(fd: fd_t, iov: []const iovec) ReadError!usize {
if (std.Target.current.os.tag == .windows) {
if (builtin.os.tag == .windows) {
// TODO improve this to use ReadFileScatter
if (iov.len == 0) return @as(usize, 0);
const first = iov[0];
@ -616,7 +616,7 @@ pub fn pread(fd: fd_t, buf: []u8, offset: u64) PReadError!usize {
}
// Prevent EINVAL.
const max_count = switch (std.Target.current.os.tag) {
const max_count = switch (builtin.os.tag) {
.linux => 0x7ffff000,
.macos, .ios, .watchos, .tvos => math.maxInt(i32),
else => math.maxInt(isize),
@ -662,7 +662,7 @@ pub const TruncateError = error{
} || UnexpectedError;
pub fn ftruncate(fd: fd_t, length: u64) TruncateError!void {
if (std.Target.current.os.tag == .windows) {
if (builtin.os.tag == .windows) {
var io_status_block: windows.IO_STATUS_BLOCK = undefined;
var eof_info = windows.FILE_END_OF_FILE_INFORMATION{
.EndOfFile = @bitCast(windows.LARGE_INTEGER, length),
@ -683,7 +683,7 @@ pub fn ftruncate(fd: fd_t, length: u64) TruncateError!void {
else => return windows.unexpectedStatus(rc),
}
}
if (std.Target.current.os.tag == .wasi and !builtin.link_libc) {
if (builtin.os.tag == .wasi and !builtin.link_libc) {
switch (wasi.fd_filestat_set_size(fd, length)) {
.SUCCESS => return,
.INTR => unreachable,
@ -733,7 +733,7 @@ pub fn ftruncate(fd: fd_t, length: u64) TruncateError!void {
/// * Windows
/// On these systems, the read races with concurrent writes to the same file descriptor.
pub fn preadv(fd: fd_t, iov: []const iovec, offset: u64) PReadError!usize {
const have_pread_but_not_preadv = switch (std.Target.current.os.tag) {
const have_pread_but_not_preadv = switch (builtin.os.tag) {
.windows, .macos, .ios, .watchos, .tvos, .haiku => true,
else => false,
};
@ -868,7 +868,7 @@ pub fn write(fd: fd_t, bytes: []const u8) WriteError!usize {
}
}
const max_count = switch (std.Target.current.os.tag) {
const max_count = switch (builtin.os.tag) {
.linux => 0x7ffff000,
.macos, .ios, .watchos, .tvos => math.maxInt(i32),
else => math.maxInt(isize),
@ -916,7 +916,7 @@ pub fn write(fd: fd_t, bytes: []const u8) WriteError!usize {
///
/// If `iov.len` is larger than `IOV_MAX`, a partial write will occur.
pub fn writev(fd: fd_t, iov: []const iovec_const) WriteError!usize {
if (std.Target.current.os.tag == .windows) {
if (builtin.os.tag == .windows) {
// TODO improve this to use WriteFileScatter
if (iov.len == 0) return @as(usize, 0);
const first = iov[0];
@ -991,7 +991,7 @@ pub const PWriteError = WriteError || error{Unseekable};
/// The limit on Darwin is `0x7fffffff`, trying to write more than that returns EINVAL.
/// The corresponding POSIX limit is `math.maxInt(isize)`.
pub fn pwrite(fd: fd_t, bytes: []const u8, offset: u64) PWriteError!usize {
if (std.Target.current.os.tag == .windows) {
if (builtin.os.tag == .windows) {
return windows.WriteFile(fd, bytes, offset, std.io.default_mode);
}
if (builtin.os.tag == .wasi and !builtin.link_libc) {
@ -1024,7 +1024,7 @@ pub fn pwrite(fd: fd_t, bytes: []const u8, offset: u64) PWriteError!usize {
}
// Prevent EINVAL.
const max_count = switch (std.Target.current.os.tag) {
const max_count = switch (builtin.os.tag) {
.linux => 0x7ffff000,
.macos, .ios, .watchos, .tvos => math.maxInt(i32),
else => math.maxInt(isize),
@ -1083,7 +1083,7 @@ pub fn pwrite(fd: fd_t, bytes: []const u8, offset: u64) PWriteError!usize {
///
/// If `iov.len` is larger than `IOV_MAX`, a partial write will occur.
pub fn pwritev(fd: fd_t, iov: []const iovec_const, offset: u64) PWriteError!usize {
const have_pwrite_but_not_pwritev = switch (std.Target.current.os.tag) {
const have_pwrite_but_not_pwritev = switch (builtin.os.tag) {
.windows, .macos, .ios, .watchos, .tvos, .haiku => true,
else => false,
};
@ -1199,7 +1199,7 @@ pub const OpenError = error{
/// Open and possibly create a file. Keeps trying if it gets interrupted.
/// See also `openZ`.
pub fn open(file_path: []const u8, flags: u32, perm: mode_t) OpenError!fd_t {
if (std.Target.current.os.tag == .windows) {
if (builtin.os.tag == .windows) {
const file_path_w = try windows.sliceToPrefixedFileW(file_path);
return openW(file_path_w.span(), flags, perm);
}
@ -1212,7 +1212,7 @@ pub const openC = @compileError("deprecated: renamed to openZ");
/// Open and possibly create a file. Keeps trying if it gets interrupted.
/// See also `open`.
pub fn openZ(file_path: [*:0]const u8, flags: u32, perm: mode_t) OpenError!fd_t {
if (std.Target.current.os.tag == .windows) {
if (builtin.os.tag == .windows) {
const file_path_w = try windows.cStrToPrefixedFileW(file_path);
return openW(file_path_w.span(), flags, perm);
}
@ -2900,7 +2900,7 @@ pub fn socket(domain: u32, socket_type: u32, protocol: u32) SocketError!socket_t
return rc;
}
const have_sock_flags = comptime !std.Target.current.isDarwin();
const have_sock_flags = comptime !builtin.target.isDarwin();
const filtered_sock_type = if (!have_sock_flags)
socket_type & ~@as(u32, SOCK.NONBLOCK | SOCK.CLOEXEC)
else
@ -3199,7 +3199,7 @@ pub fn accept(
/// description of the `O.CLOEXEC` flag in `open` for reasons why this may be useful.
flags: u32,
) AcceptError!socket_t {
const have_accept4 = comptime !(std.Target.current.isDarwin() or builtin.os.tag == .windows);
const have_accept4 = comptime !(builtin.target.isDarwin() or builtin.os.tag == .windows);
assert(0 == (flags & ~@as(u32, SOCK.NONBLOCK | SOCK.CLOEXEC))); // Unsupported flag(s)
const accepted_sock = while (true) {
@ -4807,7 +4807,7 @@ pub const ClockGetTimeError = error{UnsupportedClock} || UnexpectedError;
/// TODO: change this to return the timespec as a return value
/// TODO: look into making clk_id an enum
pub fn clock_gettime(clk_id: i32, tp: *timespec) ClockGetTimeError!void {
if (std.Target.current.os.tag == .wasi and !builtin.link_libc) {
if (builtin.os.tag == .wasi and !builtin.link_libc) {
var ts: timestamp_t = undefined;
switch (system.clock_time_get(@bitCast(u32, clk_id), 1, &ts)) {
.SUCCESS => {
@ -4821,7 +4821,7 @@ pub fn clock_gettime(clk_id: i32, tp: *timespec) ClockGetTimeError!void {
}
return;
}
if (std.Target.current.os.tag == .windows) {
if (builtin.os.tag == .windows) {
if (clk_id == CLOCK.REALTIME) {
var ft: windows.FILETIME = undefined;
windows.kernel32.GetSystemTimeAsFileTime(&ft);
@ -4848,7 +4848,7 @@ pub fn clock_gettime(clk_id: i32, tp: *timespec) ClockGetTimeError!void {
}
pub fn clock_getres(clk_id: i32, res: *timespec) ClockGetTimeError!void {
if (std.Target.current.os.tag == .wasi and !builtin.link_libc) {
if (builtin.os.tag == .wasi and !builtin.link_libc) {
var ts: timestamp_t = undefined;
switch (system.clock_res_get(@bitCast(u32, clk_id), &ts)) {
.SUCCESS => res.* = .{
@ -5416,19 +5416,19 @@ pub fn sendfile(
// Prevents EOVERFLOW.
const size_t = std.meta.Int(.unsigned, @typeInfo(usize).Int.bits - 1);
const max_count = switch (std.Target.current.os.tag) {
const max_count = switch (builtin.os.tag) {
.linux => 0x7ffff000,
.macos, .ios, .watchos, .tvos => math.maxInt(i32),
else => math.maxInt(size_t),
};
switch (std.Target.current.os.tag) {
switch (builtin.os.tag) {
.linux => sf: {
// sendfile() first appeared in Linux 2.2, glibc 2.1.
const call_sf = comptime if (builtin.link_libc)
std.c.versionCheck(.{ .major = 2, .minor = 1 }).ok
else
std.Target.current.os.version_range.linux.range.max.order(.{ .major = 2, .minor = 2 }) != .lt;
builtin.os.version_range.linux.range.max.order(.{ .major = 2, .minor = 2 }) != .lt;
if (!call_sf) break :sf;
if (headers.len != 0) {
@ -5719,13 +5719,13 @@ var has_copy_file_range_syscall = std.atomic.Atomic(bool).init(true);
///
/// Maximum offsets on Linux are `math.maxInt(i64)`.
pub fn copy_file_range(fd_in: fd_t, off_in: u64, fd_out: fd_t, off_out: u64, len: usize, flags: u32) CopyFileRangeError!usize {
const call_cfr = comptime if (std.Target.current.os.tag == .wasi)
const call_cfr = comptime if (builtin.os.tag == .wasi)
// WASI-libc doesn't have copy_file_range.
false
else if (builtin.link_libc)
std.c.versionCheck(.{ .major = 2, .minor = 27, .patch = 0 }).ok
else
std.Target.current.os.isAtLeast(.linux, .{ .major = 4, .minor = 5 }) orelse true;
builtin.os.isAtLeast(.linux, .{ .major = 4, .minor = 5 }) orelse true;
if (call_cfr and has_copy_file_range_syscall.load(.Monotonic)) {
var off_in_copy = @bitCast(i64, off_in);
@ -6179,7 +6179,7 @@ pub fn syncfs(fd: fd_t) SyncError!void {
/// Write all pending file contents and metadata modifications for the specified file descriptor to the underlying filesystem.
pub fn fsync(fd: fd_t) SyncError!void {
if (std.Target.current.os.tag == .windows) {
if (builtin.os.tag == .windows) {
if (windows.kernel32.FlushFileBuffers(fd) != 0)
return;
switch (windows.kernel32.GetLastError()) {
@ -6203,7 +6203,7 @@ pub fn fsync(fd: fd_t) SyncError!void {
/// Write all pending file contents for the specified file descriptor to the underlying filesystem, but not necessarily the metadata.
pub fn fdatasync(fd: fd_t) SyncError!void {
if (std.Target.current.os.tag == .windows) {
if (builtin.os.tag == .windows) {
return fsync(fd) catch |err| switch (err) {
SyncError.AccessDenied => return, // fdatasync doesn't promise that the access time was synced
else => return err,
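
A hedged sketch, not from the commit, of one wrapper touched above; it assumes std.os.CLOCK.MONOTONIC is exposed on the platforms where the test actually runs.

const std = @import("std");
const builtin = @import("builtin");

test "sketch: clock_gettime smoke test" {
    // Hypothetical example; skip targets the wrapper above treats specially.
    if (builtin.os.tag == .windows or builtin.os.tag == .wasi) return error.SkipZigTest;
    var ts: std.os.timespec = undefined;
    try std.os.clock_gettime(std.os.CLOCK.MONOTONIC, &ts);
    try std.testing.expect(ts.tv_nsec >= 0);
}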


@ -6,12 +6,13 @@
//! provide `rename` when only the `renameat` syscall exists.
//! * Does not support POSIX thread cancellation.
const std = @import("../std.zig");
const builtin = @import("builtin");
const assert = std.debug.assert;
const maxInt = std.math.maxInt;
const elf = std.elf;
const vdso = @import("linux/vdso.zig");
const dl = @import("../dynamic_library.zig");
const native_arch = std.Target.current.cpu.arch;
const native_arch = builtin.cpu.arch;
const native_endian = native_arch.endian();
const is_mips = native_arch.isMIPS();
const is_ppc = native_arch.isPPC();
@ -21,7 +22,7 @@ const iovec = std.os.iovec;
const iovec_const = std.os.iovec_const;
test {
if (std.Target.current.os.tag == .linux) {
if (builtin.os.tag == .linux) {
_ = @import("linux/test.zig");
}
}
@ -150,10 +151,10 @@ pub fn getauxval(index: usize) usize {
// Some architectures (and some syscalls) require 64bit parameters to be passed
// in an even-aligned register pair.
const require_aligned_register_pair =
std.Target.current.cpu.arch.isPPC() or
std.Target.current.cpu.arch.isMIPS() or
std.Target.current.cpu.arch.isARM() or
std.Target.current.cpu.arch.isThumb();
builtin.cpu.arch.isPPC() or
builtin.cpu.arch.isMIPS() or
builtin.cpu.arch.isARM() or
builtin.cpu.arch.isThumb();
// Split a 64bit value into a {LSB,MSB} pair.
// The LE/BE variants specify the endianness to assume.
@ -1579,7 +1580,7 @@ pub fn process_vm_writev(pid: pid_t, local: [*]const iovec, local_count: usize,
}
pub fn fadvise(fd: fd_t, offset: i64, len: i64, advice: usize) usize {
if (comptime std.Target.current.cpu.arch.isMIPS()) {
if (comptime builtin.cpu.arch.isMIPS()) {
// MIPS requires a 7 argument syscall
const offset_halves = splitValue64(offset);
@ -1595,7 +1596,7 @@ pub fn fadvise(fd: fd_t, offset: i64, len: i64, advice: usize) usize {
length_halves[1],
advice,
);
} else if (comptime std.Target.current.cpu.arch.isARM()) {
} else if (comptime builtin.cpu.arch.isARM()) {
// ARM reorders the arguments
const offset_halves = splitValue64(offset);


@ -1,6 +1,7 @@
const std = @import("../../../std.zig");
const builtin = @import("builtin");
const in_bpf_program = switch (std.builtin.cpu.arch) {
const in_bpf_program = switch (builtin.cpu.arch) {
.bpfel, .bpfeb => true,
else => false,
};


@ -1,6 +1,6 @@
const std = @import("../../std.zig");
const builtin = @import("builtin");
const assert = std.debug.assert;
const builtin = std.builtin;
const mem = std.mem;
const net = std.net;
const os = std.os;


@ -1,6 +1,6 @@
const std = @import("std");
const builtin = @import("builtin");
const elf = std.elf;
const builtin = std.builtin;
const assert = std.debug.assert;
const R_AMD64_RELATIVE = 8;


@ -1,5 +1,5 @@
const std = @import("../../std.zig");
const builtin = std.builtin;
const builtin = @import("builtin");
const linux = std.os.linux;
const mem = std.mem;
const elf = std.elf;


@ -4,7 +4,7 @@ const mem = std.mem;
const elf = std.elf;
const math = std.math;
const assert = std.debug.assert;
const native_arch = std.Target.current.cpu.arch;
const native_arch = @import("builtin").cpu.arch;
// This file implements the two TLS variants [1] used by ELF-based systems.
//


@ -1,4 +1,5 @@
const std = @import("../../std.zig");
const builtin = @import("builtin");
const assert = std.debug.assert;
const windows = std.os.windows;
@ -29,7 +30,7 @@ const HBRUSH = windows.HBRUSH;
fn selectSymbol(comptime function_static: anytype, function_dynamic: @TypeOf(function_static), comptime os: std.Target.Os.WindowsVersion) @TypeOf(function_static) {
comptime {
const sym_ok = std.Target.current.os.isAtLeast(.windows, os);
const sym_ok = builtin.os.isAtLeast(.windows, os);
if (sym_ok == true) return function_static;
if (sym_ok == null) return function_dynamic;
if (sym_ok == false) @compileError("Target OS range does not support function, at least " ++ @tagName(os) ++ " is required");


@ -1,4 +1,3 @@
const builtin = std.builtin;
const std = @import("std.zig");
const io = std.io;
const math = std.math;


@ -1,5 +1,5 @@
const std = @import("std.zig");
const builtin = std.builtin;
const builtin = @import("builtin");
const os = std.os;
const fs = std.fs;
const BufMap = std.BufMap;
@ -863,9 +863,9 @@ pub fn execve(
if (env_map) |m| {
const envp_buf = try child_process.createNullDelimitedEnvMap(arena, m);
break :m envp_buf.ptr;
} else if (std.builtin.link_libc) {
} else if (builtin.link_libc) {
break :m std.c.environ;
} else if (std.builtin.output_mode == .Exe) {
} else if (builtin.output_mode == .Exe) {
// Then we have Zig start code and this works.
// TODO type-safety for null-termination of `os.environ`.
break :m @ptrCast([*:null]?[*:0]u8, os.environ.ptr);


@ -7,7 +7,6 @@
//! TODO(tiehuis): Benchmark these against other reference implementations.
const std = @import("std.zig");
const builtin = std.builtin;
const assert = std.debug.assert;
const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;


@ -1,14 +1,15 @@
// Implements ZIGNOR [1].
//
// [1]: Jurgen A. Doornik (2005). [*An Improved Ziggurat Method to Generate Normal Random Samples*]
// (https://www.doornik.com/research/ziggurat.pdf). Nuffield College, Oxford.
//
// rust/rand used as a reference;
//
// NOTE: This seems interesting but reference code is a bit hard to grok:
// https://sbarral.github.io/etf.
//! Implements ZIGNOR [1].
//!
//! [1]: Jurgen A. Doornik (2005). [*An Improved Ziggurat Method to Generate Normal Random Samples*]
//! (https://www.doornik.com/research/ziggurat.pdf). Nuffield College, Oxford.
//!
//! rust/rand used as a reference;
//!
//! NOTE: This seems interesting but reference code is a bit hard to grok:
//! https://sbarral.github.io/etf.
const std = @import("../std.zig");
const builtin = @import("builtin");
const math = std.math;
const Random = std.rand.Random;
@ -126,7 +127,7 @@ fn norm_zero_case(random: *Random, u: f64) f64 {
}
}
const please_windows_dont_oom = std.Target.current.os.tag == .windows;
const please_windows_dont_oom = builtin.os.tag == .windows;
test "normal dist sanity" {
if (please_windows_dont_oom) return error.SkipZigTest;


@ -3,7 +3,6 @@ const assert = std.debug.assert;
const testing = std.testing;
const mem = std.mem;
const math = std.math;
const builtin = std.builtin;
pub fn binarySearch(
comptime T: type,


@ -1,9 +1,9 @@
const std = @import("std");
const builtin = @import("builtin");
const is_test = builtin.is_test;
const os_tag = std.Target.current.os.tag;
const os_tag = builtin.os.tag;
const arch = builtin.stage2_arch;
const abi = std.Target.current.abi;
const abi = builtin.abi;
const is_gnu = abi.isGnu();
const is_mingw = os_tag == .windows and is_gnu;


@ -1,8 +1,8 @@
const std = @import("std");
const builtin = std.builtin;
const arch = std.Target.current.cpu.arch;
const builtin = @import("builtin");
const arch = builtin.cpu.arch;
const linkage: builtin.GlobalLinkage = if (builtin.is_test) .Internal else .Weak;
const linkage: std.builtin.GlobalLinkage = if (builtin.is_test) .Internal else .Weak;
// This parameter is true iff the target architecture supports the bare minimum
// to implement the atomic load/store intrinsics.
@ -16,7 +16,7 @@ const supports_atomic_ops = switch (arch) {
// operations (unless we're targeting Linux, the kernel provides a way to
// perform CAS operations).
// XXX: The Linux code path is not implemented yet.
!std.Target.arm.featureSetHas(std.Target.current.cpu.features, .has_v6m),
!std.Target.arm.featureSetHas(builtin.cpu.features, .has_v6m),
else => true,
};
@ -257,7 +257,7 @@ comptime {
}
}
fn fetchFn(comptime T: type, comptime op: builtin.AtomicRmwOp) fn (*T, T, i32) callconv(.C) T {
fn fetchFn(comptime T: type, comptime op: std.builtin.AtomicRmwOp) fn (*T, T, i32) callconv(.C) T {
return struct {
pub fn fetch_op_N(ptr: *T, val: T, model: i32) callconv(.C) T {
_ = model;


@ -1,6 +1,7 @@
const std = @import("std");
const arch = std.builtin.cpu.arch;
const os = std.builtin.os.tag;
const builtin = @import("builtin");
const arch = builtin.cpu.arch;
const os = builtin.os.tag;
// Ported from llvm-project d32170dbd5b0d54436537b6b75beaf44324e0c28
@ -156,7 +157,7 @@ pub fn clear_cache(start: usize, end: usize) callconv(.C) void {
}
}
const linkage = if (std.builtin.is_test) std.builtin.GlobalLinkage.Internal else std.builtin.GlobalLinkage.Weak;
const linkage = if (builtin.is_test) std.builtin.GlobalLinkage.Internal else std.builtin.GlobalLinkage.Weak;
fn exportIt() void {
@export(clear_cache, .{ .name = "__clear_cache", .linkage = linkage });


@ -1,5 +1,5 @@
const std = @import("std");
const builtin = std.builtin;
const builtin = @import("builtin");
// clz - count leading zeroes
// - clzXi2_generic for little endian
@ -118,18 +118,18 @@ fn __clzsi2_arm32() callconv(.Naked) void {
}
pub const __clzsi2 = impl: {
switch (std.Target.current.cpu.arch) {
switch (builtin.cpu.arch) {
.arm, .armeb, .thumb, .thumbeb => {
const use_thumb1 =
(std.Target.current.cpu.arch.isThumb() or
std.Target.arm.featureSetHas(std.Target.current.cpu.features, .noarm)) and
!std.Target.arm.featureSetHas(std.Target.current.cpu.features, .thumb2);
(builtin.cpu.arch.isThumb() or
std.Target.arm.featureSetHas(builtin.cpu.features, .noarm)) and
!std.Target.arm.featureSetHas(builtin.cpu.features, .thumb2);
if (use_thumb1) {
break :impl __clzsi2_thumb1;
}
// From here on we're either targeting Thumb2 or ARM.
else if (!std.Target.current.cpu.arch.isThumb()) {
else if (!builtin.cpu.arch.isThumb()) {
break :impl __clzsi2_arm32;
}
// Use the generic implementation otherwise.
@ -140,14 +140,14 @@ pub const __clzsi2 = impl: {
};
pub const __clzdi2 = impl: {
switch (std.Target.current.cpu.arch) {
switch (builtin.cpu.arch) {
// TODO architecture optimised versions
else => break :impl clzXi2_generic(i64),
}
};
pub const __clzti2 = impl: {
switch (std.Target.current.cpu.arch) {
switch (builtin.cpu.arch) {
// TODO architecture optimised versions
else => break :impl clzXi2_generic(i128),
}
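
For comparison only, not in the diff: the same computation is available through the language builtin; __clzsi2 and friends above serve targets and calls that lower to a libcall instead.

const std = @import("std");

test "sketch: count leading zeroes via the language builtin" {
    // Hypothetical equivalence check with the compiler-rt routines above.
    try std.testing.expectEqual(@as(u32, 24), @clz(u32, @as(u32, 0xff)));
}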


@ -5,6 +5,7 @@
//
const std = @import("std");
const builtin = @import("builtin");
const abort = std.os.abort;
const assert = std.debug.assert;
@ -15,7 +16,7 @@ const expect = std.testing.expect;
const gcc_word = usize;
comptime {
assert(std.builtin.link_libc);
assert(builtin.link_libc);
}
/// public entrypoint for generated code using EmulatedTLS


@ -1,6 +1,7 @@
const std = @import("std");
const is_test = std.builtin.is_test;
const native_endian = std.Target.current.cpu.arch.endian();
const builtin = @import("builtin");
const is_test = builtin.is_test;
const native_endian = builtin.cpu.arch.endian();
// Ported from
// https://github.com/llvm/llvm-project/blob/llvmorg-9.0.0/compiler-rt/lib/builtins/muldi3.c


@ -1,7 +1,8 @@
const compiler_rt = @import("../compiler_rt.zig");
const std = @import("std");
const is_test = std.builtin.is_test;
const native_endian = std.Target.current.cpu.arch.endian();
const builtin = @import("builtin");
const is_test = builtin.is_test;
const native_endian = builtin.cpu.arch.endian();
// Ported from git@github.com:llvm-project/llvm-project-20170507.git
// ae684fad6d34858c014c94da69c15e7774a633c3


@ -1,6 +1,6 @@
const std = @import("std");
const Log2Int = std.math.Log2Int;
const native_endian = std.Target.current.cpu.arch.endian();
const native_endian = @import("builtin").cpu.arch.endian();
fn Dwords(comptime T: type, comptime signed_half: bool) type {
return extern union {


@ -1,4 +1,4 @@
const native_arch = @import("std").Target.current.cpu.arch;
const native_arch = @import("builtin").cpu.arch;
// Zig's own stack-probe routine (available only on x86 and x86_64)
pub fn zig_probe_stack() callconv(.Naked) void {


@ -1,6 +1,6 @@
const builtin = @import("builtin");
const is_test = builtin.is_test;
const native_endian = @import("std").Target.current.cpu.arch.endian();
const native_endian = builtin.cpu.arch.endian();
const low = switch (native_endian) {
.Big => 1,

Some files were not shown because too many files have changed in this diff.