Merge pull request #13101 from alichraghi/o4

commit 1d68045919
Andrew Kelley, 2022-11-05 02:34:24 -04:00 (committed via GitHub)
103 changed files with 204 additions and 204 deletions

View File

@ -5126,7 +5126,7 @@ const builtin = @import("builtin");
const native_arch = builtin.cpu.arch;
const expect = std.testing.expect;
const WINAPI: std.builtin.CallingConvention = if (native_arch == .i386) .Stdcall else .C;
const WINAPI: std.builtin.CallingConvention = if (native_arch == .x86) .Stdcall else .C;
extern "kernel32" fn ExitProcess(exit_code: c_uint) callconv(WINAPI) noreturn;
test "foo" {
@ -5165,7 +5165,7 @@ export fn sub(a: i8, b: i8) i8 { return a - b; }
// at link time, when linking statically, or at runtime, when linking
// dynamically.
// The callconv specifier changes the calling convention of the function.
const WINAPI: std.builtin.CallingConvention = if (native_arch == .i386) .Stdcall else .C;
const WINAPI: std.builtin.CallingConvention = if (native_arch == .x86) .Stdcall else .C;
extern "kernel32" fn ExitProcess(exit_code: u32) callconv(WINAPI) noreturn;
extern "c" fn atan2(a: f64, b: f64) f64;
@ -7487,7 +7487,7 @@ volatile (
: "rcx", "r11"
);{#end_syntax_block#}
<p>
For i386 and x86_64 targets, the syntax is AT&amp;T syntax, rather than the more
For x86 and x86_64 targets, the syntax is AT&amp;T syntax, rather than the more
popular Intel syntax. This is due to technical constraints; assembly parsing is
provided by LLVM and its support for Intel syntax is buggy and not well tested.
</p>
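
(For context, a minimal sketch of Zig inline assembly written in that AT&amp;T syntax, assuming an x86_64 target; it is illustrative only.)

    // AT&T operand order is source first, destination second; immediates are
    // prefixed with `$` and registers with `%`. x86_64 only.
    fn fortyTwo() u64 {
        return asm volatile ("movq $42, %[ret]"
            : [ret] "=r" (-> u64),
        );
    }

The same instruction in Intel syntax would be written with the operands reversed, e.g. `mov rax, 42`.
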
@ -11608,7 +11608,7 @@ Architectures:
v5
v5te
v4t
i386
x86
x86_64 (native)
xcore
nvptx
@ -11687,8 +11687,8 @@ Available libcs:
arm-linux-gnueabihf
arm-linux-musleabi
arm-linux-musleabihf
i386-linux-gnu
i386-linux-musl
x86-linux-gnu
x86-linux-musl
mips64el-linux-gnuabi64
mips64el-linux-gnuabin32
mips64el-linux-musl

View File

@ -190,7 +190,7 @@ test "strncmp" {
// across .o file boundaries. fix comptime @ptrCast of nakedcc functions.
fn clone() callconv(.Naked) void {
switch (native_arch) {
.i386 => {
.x86 => {
// __clone(func, stack, flags, arg, ptid, tls, ctid)
// +8, +12, +16, +20, +24, +28, +32
// syscall(SYS_clone, flags, stack, ptid, tls, ctid)

View File

@ -7,7 +7,7 @@ const common = @import("common.zig");
pub const panic = common.panic;
comptime {
if (arch == .i386 and abi == .msvc) {
if (arch == .x86 and abi == .msvc) {
// Don't let LLVM apply the stdcall name mangling on those MSVC builtins
@export(_alldiv, .{ .name = "\x01__alldiv", .linkage = common.linkage });
@export(_aulldiv, .{ .name = "\x01__aulldiv", .linkage = common.linkage });

View File

@ -7,7 +7,7 @@ const common = @import("common.zig");
pub const panic = common.panic;
comptime {
if (arch == .i386 and abi == .msvc) {
if (arch == .x86 and abi == .msvc) {
// Don't let LLVM apply the stdcall name mangling on those MSVC builtins
@export(_allrem, .{ .name = "\x01__allrem", .linkage = common.linkage });
@export(_aullrem, .{ .name = "\x01__aullrem", .linkage = common.linkage });

View File

@ -17,7 +17,7 @@ comptime {
fn clear_cache(start: usize, end: usize) callconv(.C) void {
const x86 = switch (arch) {
.i386, .x86_64 => true,
.x86, .x86_64 => true,
else => false,
};
const arm32 = switch (arch) {

View File

@ -48,7 +48,7 @@ pub const gnu_f16_abi = switch (builtin.cpu.arch) {
.x86_64,
=> false,
.i386 => true,
.x86 => true,
.arm, .armeb, .thumb, .thumbeb => switch (builtin.abi) {
.eabi, .eabihf => false,
@ -79,7 +79,7 @@ pub fn panic(msg: []const u8, error_return_trace: ?*std.builtin.StackTrace, _: ?
pub const F16T = switch (builtin.cpu.arch) {
.aarch64, .aarch64_be, .aarch64_32 => f16,
.riscv64 => if (builtin.zig_backend == .stage1) u16 else f16,
.i386, .x86_64 => f16,
.x86, .x86_64 => f16,
else => u16,
};

View File

@ -9,7 +9,7 @@ pub const panic = common.panic;
comptime {
if (builtin.os.tag == .windows) {
switch (arch) {
.i386 => {
.x86 => {
@export(__divti3, .{ .name = "__divti3", .linkage = common.linkage });
},
.x86_64 => {

View File

@ -140,7 +140,7 @@ test "extendhfsf2" {
try test__extendhfsf2(0x7f00, 0x7fe00000); // sNaN
// On x86 the NaN becomes quiet because the return is pushed on the x87
// stack due to ABI requirements
if (builtin.target.cpu.arch != .i386 and builtin.target.os.tag == .windows)
if (builtin.target.cpu.arch != .x86 and builtin.target.os.tag == .windows)
try test__extendhfsf2(0x7c01, 0x7f802000); // sNaN
try test__extendhfsf2(0, 0); // 0

View File

@ -30,7 +30,7 @@ comptime {
}
switch (arch) {
.i386,
.x86,
.x86_64,
=> {
@export(zig_probe_stack, .{ .name = "__zig_probe_stack", .linkage = linkage });
@ -69,7 +69,7 @@ pub fn zig_probe_stack() callconv(.Naked) void {
\\ ret
);
},
.i386 => {
.x86 => {
// %eax = probe length, %esp = stack pointer
asm volatile (
\\ push %%ecx
@ -121,7 +121,7 @@ fn win_probe_stack_only() void {
\\ ret
);
},
.i386 => {
.x86 => {
asm volatile (
\\ push %%ecx
\\ push %%eax
@ -191,7 +191,7 @@ fn win_probe_stack_adjust_sp() void {
\\ ret
);
},
.i386 => {
.x86 => {
asm volatile (
\\ push %%ecx
\\ cmp $0x1000,%%eax
@ -243,7 +243,7 @@ pub fn __chkstk() callconv(.Naked) void {
if (comptime arch.isAARCH64()) {
@call(.{ .modifier = .always_inline }, win_probe_stack_only, .{});
} else switch (arch) {
.i386 => @call(.{ .modifier = .always_inline }, win_probe_stack_adjust_sp, .{}),
.x86 => @call(.{ .modifier = .always_inline }, win_probe_stack_adjust_sp, .{}),
.x86_64 => @call(.{ .modifier = .always_inline }, win_probe_stack_only, .{}),
else => unreachable,
}

Binary file not shown.

View File

@ -35,7 +35,7 @@ export fn __chk_fail() callconv(.C) noreturn {
@panic("buffer overflow detected");
}
// Emitted when targeting some architectures (eg. i386)
// Emitted when targeting some architectures (eg. x86)
// XXX: This symbol should be hidden
export fn __stack_chk_fail_local() callconv(.C) noreturn {
__stack_chk_fail();

View File

@ -753,7 +753,7 @@ const LinuxThreadImpl = struct {
/// https://github.com/ifduyue/musl/search?q=__unmapself
fn freeAndExit(self: *ThreadCompletion) noreturn {
switch (target.cpu.arch) {
.i386 => asm volatile (
.x86 => asm volatile (
\\ movl $91, %%eax
\\ movl %[ptr], %%ebx
\\ movl %[len], %%ecx
@ -959,10 +959,10 @@ const LinuxThreadImpl = struct {
else => |e| return e,
};
// Prepare the TLS segment and prepare a user_desc struct when needed on i386
// Prepare the TLS segment and prepare a user_desc struct when needed on x86
var tls_ptr = os.linux.tls.prepareTLS(mapped[tls_offset..]);
var user_desc: if (target.cpu.arch == .i386) os.linux.user_desc else void = undefined;
if (target.cpu.arch == .i386) {
var user_desc: if (target.cpu.arch == .x86) os.linux.user_desc else void = undefined;
if (target.cpu.arch == .x86) {
defer tls_ptr = @ptrToInt(&user_desc);
user_desc = .{
.entry_number = os.linux.tls.tls_image.gdt_entry_number,

View File

@ -44,7 +44,7 @@ pub inline fn spinLoopHint() void {
// No-op instruction that can hint to save (or share with a hardware-thread)
// pipelining/power resources
// https://software.intel.com/content/www/us/en/develop/articles/benefitting-power-and-performance-sleep-loops.html
.i386, .x86_64 => asm volatile ("pause" ::: "memory"),
.x86, .x86_64 => asm volatile ("pause" ::: "memory"),
// No-op instruction that serves as a hardware-thread resource yield hint.
// https://stackoverflow.com/a/7588941
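
(For context, a minimal sketch of how this hint is typically used from user code, assuming std.atomic.Atomic for the shared flag; it is illustrative only.)

    const std = @import("std");

    // Busy-wait until another thread sets `flag`, issuing the architecture's
    // spin-loop hint (e.g. `pause` on x86/x86_64) on every iteration.
    fn spinUntilSet(flag: *std.atomic.Atomic(bool)) void {
        while (!flag.load(.Acquire)) {
            std.atomic.spinLoopHint();
        }
    }
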

View File

@ -2978,12 +2978,12 @@ pub const LibExeObjStep = struct {
if (glibc_dir_arg) |dir| {
// TODO look into making this a call to `linuxTriple`. This
// needs the directory to be called "i686" rather than
// "i386" which is why we do it manually here.
// "x86" which is why we do it manually here.
const fmt_str = "{s}" ++ fs.path.sep_str ++ "{s}-{s}-{s}";
const cpu_arch = self.target.getCpuArch();
const os_tag = self.target.getOsTag();
const abi = self.target.getAbi();
const cpu_arch_name: []const u8 = if (cpu_arch == .i386)
const cpu_arch_name: []const u8 = if (cpu_arch == .x86)
"i686"
else
@tagName(cpu_arch);

View File

@ -95,12 +95,12 @@ fn make(step: *Step) !void {
if (glibc_dir_arg) |dir| {
// TODO look into making this a call to `linuxTriple`. This
// needs the directory to be called "i686" rather than
// "i386" which is why we do it manually here.
// "x86" which is why we do it manually here.
const fmt_str = "{s}" ++ fs.path.sep_str ++ "{s}-{s}-{s}";
const cpu_arch = self.exe.target.getCpuArch();
const os_tag = self.exe.target.getOsTag();
const abi = self.exe.target.getAbi();
const cpu_arch_name: []const u8 = if (cpu_arch == .i386)
const cpu_arch_name: []const u8 = if (cpu_arch == .x86)
"i686"
else
@tagName(cpu_arch);

View File

@ -1444,7 +1444,7 @@ pub const E = enum(u16) {
};
pub const MINSIGSTKSZ = switch (builtin.cpu.arch) {
.i386, .x86_64 => 2048,
.x86, .x86_64 => 2048,
.arm, .aarch64 => 4096,
else => @compileError("MINSIGSTKSZ not defined for this architecture"),
};

View File

@ -80,7 +80,7 @@ pub const pthread_cond_t = extern struct {
pub const pthread_rwlock_t = extern struct {
magic: c_uint = 0x99990009,
interlock: switch (builtin.cpu.arch) {
.aarch64, .sparc, .x86_64, .i386 => u8,
.aarch64, .sparc, .x86_64, .x86 => u8,
.arm, .powerpc => c_int,
else => unreachable,
} = 0,
@ -97,7 +97,7 @@ const pthread_spin_t = switch (builtin.cpu.arch) {
.aarch64, .aarch64_be, .aarch64_32 => u8,
.mips, .mipsel, .mips64, .mips64el => u32,
.powerpc, .powerpc64, .powerpc64le => i32,
.i386, .x86_64 => u8,
.x86, .x86_64 => u8,
.arm, .armeb, .thumb, .thumbeb => i32,
.sparc, .sparcel, .sparc64 => u8,
.riscv32, .riscv64 => u32,
@ -105,7 +105,7 @@ const pthread_spin_t = switch (builtin.cpu.arch) {
};
const padded_pthread_spin_t = switch (builtin.cpu.arch) {
.i386, .x86_64 => u32,
.x86, .x86_64 => u32,
.sparc, .sparcel, .sparc64 => u32,
else => pthread_spin_t,
};
@ -1067,7 +1067,7 @@ pub const ucontext_t = extern struct {
mcontext: mcontext_t,
__pad: [
switch (builtin.cpu.arch) {
.i386 => 4,
.x86 => 4,
.mips, .mipsel, .mips64, .mips64el => 14,
.arm, .armeb, .thumb, .thumbeb => 1,
.sparc, .sparcel, .sparc64 => if (@sizeOf(usize) == 4) 43 else 8,

View File

@ -1247,7 +1247,7 @@ pub const E = enum(u16) {
};
const _MAX_PAGE_SHIFT = switch (builtin.cpu.arch) {
.i386 => 12,
.x86 => 12,
.sparc64 => 13,
};
pub const MINSIGSTKSZ = 1 << _MAX_PAGE_SHIFT;

View File

@ -1025,7 +1025,7 @@ pub const MachineType = enum(u16) {
.powerpc => .POWERPC,
.riscv32 => .RISCV32,
.thumb => .Thumb,
.i386 => .I386,
.x86 => .I386,
.aarch64 => .ARM64,
.riscv64 => .RISCV64,
.x86_64 => .X64,
@ -1040,7 +1040,7 @@ pub const MachineType = enum(u16) {
.POWERPC => .powerpc,
.RISCV32 => .riscv32,
.Thumb => .thumb,
.I386 => .i386,
.I386 => .x86,
.ARM64 => .aarch64,
.RISCV64 => .riscv64,
.X64 => .x86_64,

View File

@ -182,7 +182,7 @@ pub fn dumpStackTraceFromBase(bp: usize, ip: usize) void {
// therefore, we do a check for `return_address == 0` before subtracting 1 from it to avoid
// an overflow. We do not need to signal `StackIterator` as it will correctly detect this
// condition on the subsequent iteration and return `null` thus terminating the loop.
// same behaviour for i386-windows-msvc
// same behaviour for x86-windows-msvc
const address = if (return_address == 0) return_address else return_address - 1;
printSourceAtAddress(debug_info, stderr, address, tty_config) catch return;
}
@ -568,7 +568,7 @@ pub fn writeCurrentStackTrace(
// therefore, we do a check for `return_address == 0` before subtracting 1 from it to avoid
// an overflow. We do not need to signal `StackIterator` as it will correctly detect this
// condition on the subsequent iteration and return `null` thus terminating the loop.
// same behaviour for i386-windows-msvc
// same behaviour for x86-windows-msvc
const address = if (return_address == 0) return_address else return_address - 1;
try printSourceAtAddress(debug_info, out_stream, address, tty_config);
}
@ -1922,7 +1922,7 @@ fn handleSegfaultPosix(sig: i32, info: *const os.siginfo_t, ctx_ptr: ?*const any
}
switch (native_arch) {
.i386 => {
.x86 => {
const ctx = @ptrCast(*const os.ucontext_t, @alignCast(@alignOf(os.ucontext_t), ctx_ptr));
const ip = @intCast(usize, ctx.mcontext.gregs[os.REG.EIP]);
const bp = @intCast(usize, ctx.mcontext.gregs[os.REG.EBP]);

View File

@ -1502,7 +1502,7 @@ pub const EM = enum(u16) {
.MIPS_RS3_LE => .mipsel,
.PPC => .powerpc,
.SPARC => .sparc,
.@"386" => .i386,
.@"386" => .x86,
.XCORE => .xcore,
.CSR_KALIMBA => .kalimba,
.LANAI => .lanai,

View File

@ -1201,7 +1201,7 @@ pub const MH_DEAD_STRIPPABLE_DYLIB = 0x400000;
/// Contains a section of type S_THREAD_LOCAL_VARIABLES
pub const MH_HAS_TLV_DESCRIPTORS = 0x800000;
/// When this bit is set, the OS will run the main executable with a non-executable heap even on platforms (e.g. i386) that don't require it. Only used in MH_EXECUTE filetypes.
/// When this bit is set, the OS will run the main executable with a non-executable heap even on platforms (e.g. x86) that don't require it. Only used in MH_EXECUTE filetypes.
pub const MH_NO_HEAP_EXECUTION = 0x1000000;
/// The code was linked for use in an application extension.
@ -1444,7 +1444,7 @@ pub const S_ATTR_NO_DEAD_STRIP = 0x10000000;
/// blocks are live if they reference live blocks
pub const S_ATTR_LIVE_SUPPORT = 0x8000000;
/// used with i386 code stubs written on by dyld
/// used with x86 code stubs written on by dyld
pub const S_ATTR_SELF_MODIFYING_CODE = 0x4000000;
/// section contains some machine instructions

View File

@ -33,7 +33,7 @@ const syscall_bits = switch (native_arch) {
};
const arch_bits = switch (native_arch) {
.i386 => @import("linux/i386.zig"),
.x86 => @import("linux/x86.zig"),
.x86_64 => @import("linux/x86_64.zig"),
.aarch64 => @import("linux/arm64.zig"),
.arm, .thumb => @import("linux/arm-eabi.zig"),
@ -94,7 +94,7 @@ pub const SECCOMP = @import("linux/seccomp.zig");
pub const syscalls = @import("linux/syscalls.zig");
pub const SYS = switch (@import("builtin").cpu.arch) {
.i386 => syscalls.X86,
.x86 => syscalls.X86,
.x86_64 => syscalls.X64,
.aarch64 => syscalls.Arm64,
.arm, .thumb => syscalls.Arm,
@ -1198,42 +1198,42 @@ pub fn sigismember(set: *const sigset_t, sig: u6) bool {
}
pub fn getsockname(fd: i32, noalias addr: *sockaddr, noalias len: *socklen_t) usize {
if (native_arch == .i386) {
if (native_arch == .x86) {
return socketcall(SC.getsockname, &[3]usize{ @bitCast(usize, @as(isize, fd)), @ptrToInt(addr), @ptrToInt(len) });
}
return syscall3(.getsockname, @bitCast(usize, @as(isize, fd)), @ptrToInt(addr), @ptrToInt(len));
}
pub fn getpeername(fd: i32, noalias addr: *sockaddr, noalias len: *socklen_t) usize {
if (native_arch == .i386) {
if (native_arch == .x86) {
return socketcall(SC.getpeername, &[3]usize{ @bitCast(usize, @as(isize, fd)), @ptrToInt(addr), @ptrToInt(len) });
}
return syscall3(.getpeername, @bitCast(usize, @as(isize, fd)), @ptrToInt(addr), @ptrToInt(len));
}
pub fn socket(domain: u32, socket_type: u32, protocol: u32) usize {
if (native_arch == .i386) {
if (native_arch == .x86) {
return socketcall(SC.socket, &[3]usize{ domain, socket_type, protocol });
}
return syscall3(.socket, domain, socket_type, protocol);
}
pub fn setsockopt(fd: i32, level: u32, optname: u32, optval: [*]const u8, optlen: socklen_t) usize {
if (native_arch == .i386) {
if (native_arch == .x86) {
return socketcall(SC.setsockopt, &[5]usize{ @bitCast(usize, @as(isize, fd)), level, optname, @ptrToInt(optval), @intCast(usize, optlen) });
}
return syscall5(.setsockopt, @bitCast(usize, @as(isize, fd)), level, optname, @ptrToInt(optval), @intCast(usize, optlen));
}
pub fn getsockopt(fd: i32, level: u32, optname: u32, noalias optval: [*]u8, noalias optlen: *socklen_t) usize {
if (native_arch == .i386) {
if (native_arch == .x86) {
return socketcall(SC.getsockopt, &[5]usize{ @bitCast(usize, @as(isize, fd)), level, optname, @ptrToInt(optval), @ptrToInt(optlen) });
}
return syscall5(.getsockopt, @bitCast(usize, @as(isize, fd)), level, optname, @ptrToInt(optval), @ptrToInt(optlen));
}
pub fn sendmsg(fd: i32, msg: *const std.x.os.Socket.Message, flags: c_int) usize {
if (native_arch == .i386) {
if (native_arch == .x86) {
return socketcall(SC.sendmsg, &[3]usize{ @bitCast(usize, @as(isize, fd)), @ptrToInt(msg), @bitCast(usize, @as(isize, flags)) });
}
return syscall3(.sendmsg, @bitCast(usize, @as(isize, fd)), @ptrToInt(msg), @bitCast(usize, @as(isize, flags)));
@ -1280,49 +1280,49 @@ pub fn sendmmsg(fd: i32, msgvec: [*]mmsghdr_const, vlen: u32, flags: u32) usize
}
pub fn connect(fd: i32, addr: *const anyopaque, len: socklen_t) usize {
if (native_arch == .i386) {
if (native_arch == .x86) {
return socketcall(SC.connect, &[3]usize{ @bitCast(usize, @as(isize, fd)), @ptrToInt(addr), len });
}
return syscall3(.connect, @bitCast(usize, @as(isize, fd)), @ptrToInt(addr), len);
}
pub fn recvmsg(fd: i32, msg: *std.x.os.Socket.Message, flags: c_int) usize {
if (native_arch == .i386) {
if (native_arch == .x86) {
return socketcall(SC.recvmsg, &[3]usize{ @bitCast(usize, @as(isize, fd)), @ptrToInt(msg), @bitCast(usize, @as(isize, flags)) });
}
return syscall3(.recvmsg, @bitCast(usize, @as(isize, fd)), @ptrToInt(msg), @bitCast(usize, @as(isize, flags)));
}
pub fn recvfrom(fd: i32, noalias buf: [*]u8, len: usize, flags: u32, noalias addr: ?*sockaddr, noalias alen: ?*socklen_t) usize {
if (native_arch == .i386) {
if (native_arch == .x86) {
return socketcall(SC.recvfrom, &[6]usize{ @bitCast(usize, @as(isize, fd)), @ptrToInt(buf), len, flags, @ptrToInt(addr), @ptrToInt(alen) });
}
return syscall6(.recvfrom, @bitCast(usize, @as(isize, fd)), @ptrToInt(buf), len, flags, @ptrToInt(addr), @ptrToInt(alen));
}
pub fn shutdown(fd: i32, how: i32) usize {
if (native_arch == .i386) {
if (native_arch == .x86) {
return socketcall(SC.shutdown, &[2]usize{ @bitCast(usize, @as(isize, fd)), @bitCast(usize, @as(isize, how)) });
}
return syscall2(.shutdown, @bitCast(usize, @as(isize, fd)), @bitCast(usize, @as(isize, how)));
}
pub fn bind(fd: i32, addr: *const sockaddr, len: socklen_t) usize {
if (native_arch == .i386) {
if (native_arch == .x86) {
return socketcall(SC.bind, &[3]usize{ @bitCast(usize, @as(isize, fd)), @ptrToInt(addr), @intCast(usize, len) });
}
return syscall3(.bind, @bitCast(usize, @as(isize, fd)), @ptrToInt(addr), @intCast(usize, len));
}
pub fn listen(fd: i32, backlog: u32) usize {
if (native_arch == .i386) {
if (native_arch == .x86) {
return socketcall(SC.listen, &[2]usize{ @bitCast(usize, @as(isize, fd)), backlog });
}
return syscall2(.listen, @bitCast(usize, @as(isize, fd)), backlog);
}
pub fn sendto(fd: i32, buf: [*]const u8, len: usize, flags: u32, addr: ?*const sockaddr, alen: socklen_t) usize {
if (native_arch == .i386) {
if (native_arch == .x86) {
return socketcall(SC.sendto, &[6]usize{ @bitCast(usize, @as(isize, fd)), @ptrToInt(buf), len, flags, @ptrToInt(addr), @intCast(usize, alen) });
}
return syscall6(.sendto, @bitCast(usize, @as(isize, fd)), @ptrToInt(buf), len, flags, @ptrToInt(addr), @intCast(usize, alen));
@ -1349,21 +1349,21 @@ pub fn sendfile(outfd: i32, infd: i32, offset: ?*i64, count: usize) usize {
}
pub fn socketpair(domain: i32, socket_type: i32, protocol: i32, fd: *[2]i32) usize {
if (native_arch == .i386) {
if (native_arch == .x86) {
return socketcall(SC.socketpair, &[4]usize{ @intCast(usize, domain), @intCast(usize, socket_type), @intCast(usize, protocol), @ptrToInt(fd) });
}
return syscall4(.socketpair, @intCast(usize, domain), @intCast(usize, socket_type), @intCast(usize, protocol), @ptrToInt(fd));
}
pub fn accept(fd: i32, noalias addr: ?*sockaddr, noalias len: ?*socklen_t) usize {
if (native_arch == .i386) {
if (native_arch == .x86) {
return socketcall(SC.accept, &[4]usize{ fd, addr, len, 0 });
}
return accept4(fd, addr, len, 0);
}
pub fn accept4(fd: i32, noalias addr: ?*sockaddr, noalias len: ?*socklen_t, flags: u32) usize {
if (native_arch == .i386) {
if (native_arch == .x86) {
return socketcall(SC.accept4, &[4]usize{ @bitCast(usize, @as(isize, fd)), @ptrToInt(addr), @ptrToInt(len), flags });
}
return syscall4(.accept4, @bitCast(usize, @as(isize, fd)), @ptrToInt(addr), @ptrToInt(len), flags);
@ -3459,12 +3459,12 @@ pub fn CPU_COUNT(set: cpu_set_t) cpu_count_t {
}
pub const MINSIGSTKSZ = switch (native_arch) {
.i386, .x86_64, .arm, .mipsel => 2048,
.x86, .x86_64, .arm, .mipsel => 2048,
.aarch64 => 5120,
else => @compileError("MINSIGSTKSZ not defined for this architecture"),
};
pub const SIGSTKSZ = switch (native_arch) {
.i386, .x86_64, .arm, .mipsel => 8192,
.x86, .x86_64, .arm, .mipsel => 8192,
.aarch64 => 16384,
else => @compileError("SIGSTKSZ not defined for this architecture"),
};
@ -5631,7 +5631,7 @@ pub const AUDIT = struct {
const LE = 0x40000000;
pub const current: AUDIT.ARCH = switch (native_arch) {
.i386 => .I386,
.x86 => .X86,
.x86_64 => .X86_64,
.aarch64 => .AARCH64,
.arm, .thumb => .ARM,
@ -5650,7 +5650,7 @@ pub const AUDIT = struct {
ARMEB = toAudit(.armeb),
CSKY = toAudit(.csky),
HEXAGON = @enumToInt(std.elf.EM.HEXAGON),
I386 = toAudit(.i386),
X86 = toAudit(.x86),
M68K = toAudit(.m68k),
MIPS = toAudit(.mips),
MIPSEL = toAudit(.mips) | LE,

View File

@ -11,7 +11,7 @@ const R_RISCV_RELATIVE = 3;
const R_SPARC_RELATIVE = 22;
const R_RELATIVE = switch (builtin.cpu.arch) {
.i386 => R_386_RELATIVE,
.x86 => R_386_RELATIVE,
.x86_64 => R_AMD64_RELATIVE,
.arm => R_ARM_RELATIVE,
.aarch64 => R_AARCH64_RELATIVE,
@ -24,7 +24,7 @@ const R_RELATIVE = switch (builtin.cpu.arch) {
// relocation that, at this point, is not yet applied.
fn getDynamicSymbol() [*]elf.Dyn {
return switch (builtin.cpu.arch) {
.i386 => asm volatile (
.x86 => asm volatile (
\\ .weak _DYNAMIC
\\ .hidden _DYNAMIC
\\ call 1f

View File

@ -30,7 +30,7 @@ const native_arch = @import("builtin").cpu.arch;
// `-- The thread pointer register points here
//
// The structure of the TCB is not defined by the ABI so we reserve enough space
// for a single pointer as some architectures such as i386 and x86_64 need a
// for a single pointer as some architectures such as x86 and x86_64 need a
// pointer to the TCB block itself at the address pointed by the tp.
//
// In this case the control structure and DTV are placed one after another right
@ -49,7 +49,7 @@ const TLSVariant = enum {
const tls_variant = switch (native_arch) {
.arm, .armeb, .thumb, .aarch64, .aarch64_be, .riscv32, .riscv64, .mips, .mipsel, .powerpc, .powerpc64, .powerpc64le => TLSVariant.VariantI,
.x86_64, .i386, .sparc64 => TLSVariant.VariantII,
.x86_64, .x86, .sparc64 => TLSVariant.VariantII,
else => @compileError("undefined tls_variant for this architecture"),
};
@ -102,7 +102,7 @@ const TLSImage = struct {
dtv_offset: usize,
data_offset: usize,
data_size: usize,
// Only used on the i386 architecture
// Only used on the x86 architecture
gdt_entry_number: usize,
};
@ -110,7 +110,7 @@ pub var tls_image: TLSImage = undefined;
pub fn setThreadPointer(addr: usize) void {
switch (native_arch) {
.i386 => {
.x86 => {
var user_desc = std.os.linux.user_desc{
.entry_number = tls_image.gdt_entry_number,
.base_addr = addr,

View File

@ -746,7 +746,7 @@ test "sigaction" {
return error.SkipZigTest;
// https://github.com/ziglang/zig/issues/7427
if (native_os == .linux and builtin.target.cpu.arch == .i386)
if (native_os == .linux and builtin.target.cpu.arch == .x86)
return error.SkipZigTest;
const S = struct {

View File

@ -1750,7 +1750,7 @@ pub fn UnlockFile(
pub fn teb() *TEB {
return switch (native_arch) {
.i386 => asm volatile (
.x86 => asm volatile (
\\ movl %%fs:0x18, %[ptr]
: [ptr] "=r" (-> *TEB),
),
@ -2053,7 +2053,7 @@ pub const STD_OUTPUT_HANDLE = maxInt(DWORD) - 11 + 1;
/// The standard error device. Initially, this is the active console screen buffer, CONOUT$.
pub const STD_ERROR_HANDLE = maxInt(DWORD) - 12 + 1;
pub const WINAPI: std.builtin.CallingConvention = if (native_arch == .i386)
pub const WINAPI: std.builtin.CallingConvention = if (native_arch == .x86)
.Stdcall
else
.C;
@ -3019,7 +3019,7 @@ pub const EXCEPTION_RECORD = extern struct {
};
pub usingnamespace switch (native_arch) {
.i386 => struct {
.x86 => struct {
pub const FLOATING_SAVE_AREA = extern struct {
ControlWord: DWORD,
StatusWord: DWORD,

View File

@ -275,7 +275,7 @@ fn _start() callconv(.Naked) noreturn {
\\ andq $-16, %%rsp
\\ call _posixCallMainAndExit
),
.i386 => asm volatile (
.x86 => asm volatile (
\\ xorl %%ebp, %%ebp
\\ movl %%esp, argc_argv_ptr
\\ andl $-16, %%esp
@ -307,7 +307,7 @@ fn _start() callconv(.Naked) noreturn {
: [argc] "={rsp}" (-> [*]usize),
);
},
.i386 => {
.x86 => {
argc_argv_ptr = asm volatile (
\\ xor %%ebp, %%ebp
: [argc] "={esp}" (-> [*]usize),

View File

@ -8,7 +8,7 @@ export var __xl_a: std.os.windows.PIMAGE_TLS_CALLBACK linksection(".CRT$XLA") =
export var __xl_z: std.os.windows.PIMAGE_TLS_CALLBACK linksection(".CRT$XLZ") = null;
comptime {
if (builtin.target.cpu.arch == .i386) {
if (builtin.target.cpu.arch == .x86) {
// The __tls_array is the offset of the ThreadLocalStoragePointer field
// in the TEB block whose base address held in the %fs segment.
asm (

View File

@ -850,7 +850,7 @@ pub const Target = struct {
tcele,
thumb,
thumbeb,
i386,
x86,
x86_64,
xcore,
nvptx,
@ -879,7 +879,7 @@ pub const Target = struct {
pub fn isX86(arch: Arch) bool {
return switch (arch) {
.i386, .x86_64 => true,
.x86, .x86_64 => true,
else => false,
};
}
@ -999,7 +999,7 @@ pub const Target = struct {
.tcele => .NONE,
.thumb => .ARM,
.thumbeb => .ARM,
.i386 => .@"386",
.x86 => .@"386",
.xcore => .XCORE,
.nvptx => .NONE,
.amdil => .NONE,
@ -1063,7 +1063,7 @@ pub const Target = struct {
.tcele => .Unknown,
.thumb => .Thumb,
.thumbeb => .Thumb,
.i386 => .I386,
.x86 => .I386,
.xcore => .Unknown,
.nvptx => .Unknown,
.amdil => .Unknown,
@ -1134,7 +1134,7 @@ pub const Target = struct {
.r600,
.riscv32,
.riscv64,
.i386,
.x86,
.x86_64,
.wasm32,
.wasm64,
@ -1179,7 +1179,7 @@ pub const Target = struct {
const is_nvptx = arch == .nvptx or arch == .nvptx64;
return switch (address_space) {
.generic => true,
.fs, .gs, .ss => arch == .x86_64 or arch == .i386,
.fs, .gs, .ss => arch == .x86_64 or arch == .x86,
.global, .constant, .local, .shared => arch == .amdgcn or is_nvptx,
.param => is_nvptx,
};
@ -1211,7 +1211,7 @@ pub const Target = struct {
.tcele,
.thumb,
.thumbeb,
.i386,
.x86,
.xcore,
.nvptx,
.amdil,
@ -1267,7 +1267,7 @@ pub const Target = struct {
.riscv32, .riscv64 => "riscv",
.sparc, .sparc64, .sparcel => "sparc",
.s390x => "s390x",
.i386, .x86_64 => "x86",
.x86, .x86_64 => "x86",
.nvptx, .nvptx64 => "nvptx",
.wasm32, .wasm64 => "wasm",
.spirv32, .spirv64 => "spir-v",
@ -1291,7 +1291,7 @@ pub const Target = struct {
.sparc, .sparc64, .sparcel => &sparc.all_features,
.spirv32, .spirv64 => &spirv.all_features,
.s390x => &s390x.all_features,
.i386, .x86_64 => &x86.all_features,
.x86, .x86_64 => &x86.all_features,
.nvptx, .nvptx64 => &nvptx.all_features,
.ve => &ve.all_features,
.wasm32, .wasm64 => &wasm.all_features,
@ -1315,7 +1315,7 @@ pub const Target = struct {
.riscv32, .riscv64 => comptime allCpusFromDecls(riscv.cpu),
.sparc, .sparc64, .sparcel => comptime allCpusFromDecls(sparc.cpu),
.s390x => comptime allCpusFromDecls(s390x.cpu),
.i386, .x86_64 => comptime allCpusFromDecls(x86.cpu),
.x86, .x86_64 => comptime allCpusFromDecls(x86.cpu),
.nvptx, .nvptx64 => comptime allCpusFromDecls(nvptx.cpu),
.ve => comptime allCpusFromDecls(ve.cpu),
.wasm32, .wasm64 => comptime allCpusFromDecls(wasm.cpu),
@ -1377,7 +1377,7 @@ pub const Target = struct {
.sparc, .sparcel => &sparc.cpu.generic,
.sparc64 => &sparc.cpu.v9, // 64-bit SPARC needs v9 as the baseline
.s390x => &s390x.cpu.generic,
.i386 => &x86.cpu.i386,
.x86 => &x86.cpu.x86,
.x86_64 => &x86.cpu.x86_64,
.nvptx, .nvptx64 => &nvptx.cpu.sm_20,
.ve => &ve.cpu.generic,
@ -1392,7 +1392,7 @@ pub const Target = struct {
.arm, .armeb, .thumb, .thumbeb => &arm.cpu.baseline,
.riscv32 => &riscv.cpu.baseline_rv32,
.riscv64 => &riscv.cpu.baseline_rv64,
.i386 => &x86.cpu.pentium4,
.x86 => &x86.cpu.pentium4,
.nvptx, .nvptx64 => &nvptx.cpu.sm_20,
.sparc, .sparcel => &sparc.cpu.v8,
@ -1622,7 +1622,7 @@ pub const Target = struct {
.dragonfly => return copy(&result, "/libexec/ld-elf.so.2"),
.solaris => return copy(&result, "/lib/64/ld.so.1"),
.linux => switch (self.cpu.arch) {
.i386,
.x86,
.sparc,
.sparcel,
=> return copy(&result, "/lib/ld-linux.so.2"),
@ -1771,7 +1771,7 @@ pub const Target = struct {
/// 5c arm little-endian ARM
/// 6c amd64 AMD64 and compatibles (e.g., Intel EM64T)
/// 7c arm64 ARM64 (ARMv8)
/// 8c 386 Intel i386, i486, Pentium, etc.
/// 8c 386 Intel x86, i486, Pentium, etc.
/// kc sparc Sun SPARC
/// qc power Power PC
/// vc mips big-endian MIPS 3000 family
@ -1780,7 +1780,7 @@ pub const Target = struct {
.arm => ".5",
.x86_64 => ".6",
.aarch64 => ".7",
.i386 => ".8",
.x86 => ".8",
.sparc => ".k",
.powerpc, .powerpcle => ".q",
.mips, .mipsel => ".v",
@ -1815,7 +1815,7 @@ pub const Target = struct {
.wasm64,
=> 8,
.i386 => return switch (target.os.tag) {
.x86 => return switch (target.os.tag) {
.windows, .uefi => 8,
else => 4,
},

View File

@ -2040,8 +2040,8 @@ pub const cpu = struct {
.xsaveopt,
}),
};
pub const @"i386" = CpuModel{
.name = "i386",
pub const x86 = CpuModel{
.name = "x86",
.llvm_name = "i386",
.features = featureSet(&[_]Feature{
.slow_unaligned_mem_16,

View File

@ -8,7 +8,7 @@ pub fn doClientRequest(default: usize, request: usize, a1: usize, a2: usize, a3:
}
switch (builtin.target.cpu.arch) {
.i386 => {
.x86 => {
return asm volatile (
\\ roll $3, %%edi ; roll $13, %%edi
\\ roll $29, %%edi ; roll $19, %%edi

View File

@ -592,7 +592,7 @@ pub const VcpkgLinkage = std.builtin.LinkMode;
/// Returned slice must be freed by the caller.
pub fn vcpkgTriplet(self: CrossTarget, allocator: mem.Allocator, linkage: VcpkgLinkage) ![]u8 {
const arch = switch (self.getCpuArch()) {
.i386 => "x86",
.x86 => "x86",
.x86_64 => "x64",
.arm,

View File

@ -199,11 +199,11 @@ pub fn detect(cross_target: CrossTarget) DetectError!NativeTargetInfo {
// For x86, we need to populate some CPU feature flags depending on architecture
// and mode:
// * 16bit_mode => if the abi is code16
// * 32bit_mode => if the arch is i386
// * 32bit_mode => if the arch is x86
// However, the "mode" flags can be used as overrides, so if the user explicitly
// sets one of them, that takes precedence.
switch (cpu_arch) {
.i386 => {
.x86 => {
if (!std.Target.x86.featureSetHasAny(cross_target.cpu_features_add, .{
.@"16bit_mode", .@"32bit_mode",
})) {
@ -969,7 +969,7 @@ fn detectNativeCpuAndFeatures(cpu_arch: Target.Cpu.Arch, os: Target.Os, cross_ta
// although it is a runtime value, is guaranteed to be one of the architectures in the set
// of the respective switch prong.
switch (builtin.cpu.arch) {
.x86_64, .i386 => {
.x86_64, .x86 => {
return @import("x86.zig").detectNativeCpuAndFeatures(cpu_arch, os, cross_target);
},
else => {},
@ -1019,7 +1019,7 @@ pub fn getExternalExecutor(
if (host.target.cpu.arch == candidate.target.cpu.arch)
break :cpu_ok true;
if (host.target.cpu.arch == .x86_64 and candidate.target.cpu.arch == .i386)
if (host.target.cpu.arch == .x86_64 and candidate.target.cpu.arch == .x86)
break :cpu_ok true;
if (host.target.cpu.arch == .aarch64 and candidate.target.cpu.arch == .arm)
@ -1068,7 +1068,7 @@ pub fn getExternalExecutor(
.arm => Executor{ .qemu = "qemu-arm" },
.armeb => Executor{ .qemu = "qemu-armeb" },
.hexagon => Executor{ .qemu = "qemu-hexagon" },
.i386 => Executor{ .qemu = "qemu-i386" },
.x86 => Executor{ .qemu = "qemu-i386" },
.m68k => Executor{ .qemu = "qemu-m68k" },
.mips => Executor{ .qemu = "qemu-mips" },
.mipsel => Executor{ .qemu = "qemu-mipsel" },

View File

@ -80,7 +80,7 @@ fn detectIntelProcessor(cpu: *Target.Cpu, family: u32, model: u32, brand_id: u32
}
switch (family) {
3 => {
cpu.model = &Target.x86.cpu.i386;
cpu.model = &Target.x86.cpu.x86;
return;
},
4 => {

View File

@ -4281,7 +4281,7 @@ pub fn addCCArgs(
},
.ios, .tvos, .watchos => switch (target.cpu.arch) {
// Pass the proper -m<os>-version-min argument for darwin.
.i386, .x86_64 => {
.x86, .x86_64 => {
const ver = target.os.version_range.semver.min;
try argv.append(try std.fmt.allocPrint(
arena,
@ -4969,7 +4969,7 @@ pub fn getZigBackend(comp: Compilation) std.builtin.CompilerBackend {
.wasm32, .wasm64 => std.builtin.CompilerBackend.stage2_wasm,
.arm, .armeb, .thumb, .thumbeb => .stage2_arm,
.x86_64 => .stage2_x86_64,
.i386 => .stage2_x86,
.x86 => .stage2_x86,
.aarch64, .aarch64_be, .aarch64_32 => .stage2_aarch64,
.riscv64 => .stage2_riscv64,
.sparc64 => .stage2_sparc64,

View File

@ -8415,20 +8415,20 @@ fn funcCommon(
if (switch (cc_workaround) {
.Unspecified, .C, .Naked, .Async, .Inline => null,
.Interrupt => switch (arch) {
.i386, .x86_64, .avr, .msp430 => null,
else => @as([]const u8, "i386, x86_64, AVR, and MSP430"),
.x86, .x86_64, .avr, .msp430 => null,
else => @as([]const u8, "x86, x86_64, AVR, and MSP430"),
},
.Signal => switch (arch) {
.avr => null,
else => @as([]const u8, "AVR"),
},
.Stdcall, .Fastcall, .Thiscall => switch (arch) {
.i386 => null,
else => @as([]const u8, "i386"),
.x86 => null,
else => @as([]const u8, "x86"),
},
.Vectorcall => switch (arch) {
.i386, .aarch64, .aarch64_be, .aarch64_32 => null,
else => @as([]const u8, "i386 and AArch64"),
.x86, .aarch64, .aarch64_be, .aarch64_32 => null,
else => @as([]const u8, "x86 and AArch64"),
},
.APCS, .AAPCS, .AAPCSVFP => switch (arch) {
.arm, .armeb, .aarch64, .aarch64_be, .aarch64_32, .thumb, .thumbeb => null,
@ -30813,7 +30813,7 @@ pub fn analyzeAddressSpace(
const supported = switch (address_space) {
.generic => true,
.gs, .fs, .ss => (arch == .i386 or arch == .x86_64) and ctx == .pointer,
.gs, .fs, .ss => (arch == .x86 or arch == .x86_64) and ctx == .pointer,
// TODO: check that .shared and .local are left uninitialized
.param => is_nv,
.global, .shared, .local => is_gpu,

View File

@ -110,7 +110,7 @@ pub fn generateFunction(
//.tcele => return Function(.tcele).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
//.thumb => return Function(.thumb).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
//.thumbeb => return Function(.thumbeb).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
//.i386 => return Function(.i386).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
//.x86 => return Function(.x86).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
.x86_64 => return @import("arch/x86_64/CodeGen.zig").generate(bin_file, src_loc, func, air, liveness, code, debug_output),
//.xcore => return Function(.xcore).generate(bin_file, src_loc, func, air, liveness, code, debug_output),
//.nvptx => return Function(.nvptx).generate(bin_file, src_loc, func, air, liveness, code, debug_output),

View File

@ -70,7 +70,7 @@ pub fn targetTriple(allocator: Allocator, target: std.Target) ![:0]u8 {
.tcele => "tcele",
.thumb => "thumb",
.thumbeb => "thumbeb",
.i386 => "i386",
.x86 => "i386",
.x86_64 => "x86_64",
.xcore => "xcore",
.nvptx => "nvptx",
@ -282,7 +282,7 @@ pub fn targetArch(arch_tag: std.Target.Cpu.Arch) llvm.ArchType {
.tcele => .tcele,
.thumb => .thumb,
.thumbeb => .thumbeb,
.i386 => .x86,
.x86 => .x86,
.x86_64 => .x86_64,
.xcore => .xcore,
.nvptx => .nvptx,
@ -6195,7 +6195,7 @@ pub const FuncGen = struct {
// here then we may risk tripping LLVM bugs since anything not used by Clang tends
// to be buggy and regress often.
switch (target.cpu.arch) {
.x86_64, .i386 => {
.x86_64, .x86 => {
if (total_i != 0) try llvm_constraints.append(self.gpa, ',');
try llvm_constraints.appendSlice(self.gpa, "~{dirflag},~{fpsr},~{flags}");
total_i += 3;
@ -9275,7 +9275,7 @@ pub const FuncGen = struct {
switch (prefetch.cache) {
.instruction => switch (target.cpu.arch) {
.x86_64,
.i386,
.x86,
.powerpc,
.powerpcle,
.powerpc64,
@ -9856,7 +9856,7 @@ fn initializeLLVMTarget(arch: std.Target.Cpu.Arch) void {
llvm.LLVMInitializeWebAssemblyAsmPrinter();
llvm.LLVMInitializeWebAssemblyAsmParser();
},
.i386, .x86_64 => {
.x86, .x86_64 => {
llvm.LLVMInitializeX86Target();
llvm.LLVMInitializeX86TargetInfo();
llvm.LLVMInitializeX86TargetMC();
@ -9968,7 +9968,7 @@ fn toLlvmCallConv(cc: std.builtin.CallingConvention, target: std.Target) llvm.Ca
.Stdcall => .X86_StdCall,
.Fastcall => .X86_FastCall,
.Vectorcall => return switch (target.cpu.arch) {
.i386, .x86_64 => .X86_VectorCall,
.x86, .x86_64 => .X86_VectorCall,
.aarch64, .aarch64_be, .aarch64_32 => .AArch64_VectorCall,
else => unreachable,
},
@ -9977,7 +9977,7 @@ fn toLlvmCallConv(cc: std.builtin.CallingConvention, target: std.Target) llvm.Ca
.AAPCS => .ARM_AAPCS,
.AAPCSVFP => .ARM_AAPCS_VFP,
.Interrupt => return switch (target.cpu.arch) {
.i386, .x86_64 => .X86_INTR,
.x86, .x86_64 => .X86_INTR,
.avr => .AVR_INTR,
.msp430 => .MSP430_INTR,
else => unreachable,
@ -9999,7 +9999,7 @@ fn toLlvmCallConv(cc: std.builtin.CallingConvention, target: std.Target) llvm.Ca
/// Convert a zig-address space to an llvm address space.
fn toLlvmAddressSpace(address_space: std.builtin.AddressSpace, target: std.Target) c_uint {
return switch (target.cpu.arch) {
.i386, .x86_64 => switch (address_space) {
.x86, .x86_64 => switch (address_space) {
.generic => llvm.address_space.default,
.gs => llvm.address_space.x86.gs,
.fs => llvm.address_space.x86.fs,
@ -10714,7 +10714,7 @@ fn isScalar(ty: Type) bool {
/// and false if we expect LLVM to crash if it counters an x86_fp80 type.
fn backendSupportsF80(target: std.Target) bool {
return switch (target.cpu.arch) {
.x86_64, .i386 => !std.Target.x86.featureSetHas(target.cpu.features, .soft_float),
.x86_64, .x86 => !std.Target.x86.featureSetHas(target.cpu.features, .soft_float),
else => false,
};
}

View File

@ -205,7 +205,7 @@ fn handleSegfaultPosix(sig: i32, info: *const os.siginfo_t, ctx_ptr: ?*const any
};
const stack_ctx: StackContext = switch (builtin.cpu.arch) {
.i386 => ctx: {
.x86 => ctx: {
const ctx = @ptrCast(*const os.ucontext_t, @alignCast(@alignOf(os.ucontext_t), ctx_ptr));
const ip = @intCast(usize, ctx.mcontext.gregs[os.REG.EIP]);
const bp = @intCast(usize, ctx.mcontext.gregs[os.REG.EBP]);

View File

@ -327,7 +327,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
});
try add_include_dirs(comp, arena, &args);
if (target.cpu.arch == .i386) {
if (target.cpu.arch == .x86) {
// This prevents i386/sysdep.h from trying to do some
// silly and unnecessary inline asm hack that uses weird
// syntax that clang does not support.
@ -406,7 +406,7 @@ fn start_asm_path(comp: *Compilation, arena: Allocator, basename: []const u8) ![
}
} else if (arch == .x86_64) {
try result.appendSlice("x86_64");
} else if (arch == .i386) {
} else if (arch == .x86) {
try result.appendSlice("i386");
} else if (is_aarch64) {
try result.appendSlice("aarch64");
@ -504,7 +504,7 @@ fn add_include_dirs_arch(
opt_nptl: ?[]const u8,
dir: []const u8,
) error{OutOfMemory}!void {
const is_x86 = arch == .i386 or arch == .x86_64;
const is_x86 = arch == .x86 or arch == .x86_64;
const is_aarch64 = arch == .aarch64 or arch == .aarch64_be;
const is_ppc = arch == .powerpc or arch == .powerpc64 or arch == .powerpc64le;
const is_sparc = arch == .sparc or arch == .sparcel or arch == .sparc64;
@ -521,7 +521,7 @@ fn add_include_dirs_arch(
try args.append("-I");
try args.append(try path.join(arena, &[_][]const u8{ dir, "x86_64" }));
}
} else if (arch == .i386) {
} else if (arch == .x86) {
if (opt_nptl) |nptl| {
try args.append("-I");
try args.append(try path.join(arena, &[_][]const u8{ dir, "i386", nptl }));

View File

@ -408,7 +408,7 @@ pub const LibCInstallation = struct {
defer result_buf.deinit();
const arch_sub_dir = switch (builtin.target.cpu.arch) {
.i386 => "x86",
.x86 => "x86",
.x86_64 => "x64",
.arm, .armeb => "arm",
.aarch64 => "arm64",
@ -472,7 +472,7 @@ pub const LibCInstallation = struct {
defer result_buf.deinit();
const arch_sub_dir = switch (builtin.target.cpu.arch) {
.i386 => "x86",
.x86 => "x86",
.x86_64 => "x64",
.arm, .armeb => "arm",
.aarch64 => "arm64",

View File

@ -1294,7 +1294,7 @@ pub fn updateDeclExports(
const exported_decl = module.declPtr(exp.exported_decl);
if (exported_decl.getFunction() == null) continue;
const winapi_cc = switch (self.base.options.target.cpu.arch) {
.i386 => std.builtin.CallingConvention.Stdcall,
.x86 => std.builtin.CallingConvention.Stdcall,
else => std.builtin.CallingConvention.C,
};
const decl_cc = exported_decl.ty.fnCallingConvention();

View File

@ -197,7 +197,7 @@ pub fn linkWithLLD(self: *Coff, comp: *Compilation, prog_node: *std.Progress.Nod
try argv.append(try std.fmt.allocPrint(arena, "-BASE:{d}", .{image_base}));
}
if (target.cpu.arch == .i386) {
if (target.cpu.arch == .x86) {
try argv.append("-MACHINE:X86");
} else if (target.cpu.arch == .x86_64) {
try argv.append("-MACHINE:X64");
@ -380,7 +380,7 @@ pub fn linkWithLLD(self: *Coff, comp: *Compilation, prog_node: *std.Progress.Nod
if (target.abi.isGnu()) {
try argv.append("-lldmingw");
if (target.cpu.arch == .i386) {
if (target.cpu.arch == .x86) {
try argv.append("-ALTERNATENAME:__image_base__=___ImageBase");
} else {
try argv.append("-ALTERNATENAME:__image_base__=__ImageBase");
@ -388,7 +388,7 @@ pub fn linkWithLLD(self: *Coff, comp: *Compilation, prog_node: *std.Progress.Nod
if (is_dyn_lib) {
try argv.append(try comp.get_libc_crt_file(arena, "dllcrt2.obj"));
if (target.cpu.arch == .i386) {
if (target.cpu.arch == .x86) {
try argv.append("-ALTERNATENAME:__DllMainCRTStartup@12=_DllMainCRTStartup@12");
} else {
try argv.append("-ALTERNATENAME:_DllMainCRTStartup=DllMainCRTStartup");

View File

@ -3004,7 +3004,7 @@ fn sectHeaderTo32(shdr: elf.Elf64_Shdr) elf.Elf32_Shdr {
fn getLDMOption(target: std.Target) ?[]const u8 {
switch (target.cpu.arch) {
.i386 => return "elf_i386",
.x86 => return "elf_i386",
.aarch64 => return "aarch64linux",
.aarch64_be => return "aarch64_be_linux",
.arm, .thumb => return "armelf_linux_eabi",

View File

@ -150,7 +150,7 @@ pub fn defaultBaseAddrs(arch: std.Target.Cpu.Arch) Bases {
.text = 0x200028,
.data = 0x400000,
},
.i386 => .{
.x86 => .{
// header size => 32 => 0x20
.text = 0x200020,
.data = 0x400000,

View File

@ -109,7 +109,7 @@ pub const R_MAGIC = _MAGIC(HDR_MAGIC, 28); // arm64
pub fn magicFromArch(arch: std.Target.Cpu.Arch) !u32 {
return switch (arch) {
.i386 => I_MAGIC,
.x86 => I_MAGIC,
.sparc => K_MAGIC, // TODO should sparc64 and sparcel go here?
.mips => V_MAGIC,
.arm => E_MAGIC,
@ -124,7 +124,7 @@ pub fn magicFromArch(arch: std.Target.Cpu.Arch) !u32 {
/// gets the quantization of pc for the arch
pub fn getPCQuant(arch: std.Target.Cpu.Arch) !u8 {
return switch (arch) {
.i386, .x86_64 => 1,
.x86, .x86_64 => 1,
.powerpc, .powerpc64, .mips, .sparc, .arm, .aarch64 => 4,
else => error.ArchNotSupportedByPlan9,
};

View File

@ -128,7 +128,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
.extra_flags = extra_flags,
};
}
if (comp.getTarget().cpu.arch == .i386) {
if (comp.getTarget().cpu.arch == .x86) {
for (msvcrt_i386_src) |dep| {
(try c_source_files.addOne()).* = .{
.src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
@ -180,7 +180,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
};
}
const target = comp.getTarget();
if (target.cpu.arch == .i386 or target.cpu.arch == .x86_64) {
if (target.cpu.arch == .x86 or target.cpu.arch == .x86_64) {
for (mingwex_x86_src) |dep| {
(try c_source_files.addOne()).* = .{
.src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
@ -338,7 +338,7 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
});
const target_def_arg = switch (target.cpu.arch) {
.i386 => "-DDEF_I386",
.x86 => "-DDEF_I386",
.x86_64 => "-DDEF_X64",
.arm, .armeb, .thumb, .thumbeb, .aarch64_32 => "-DDEF_ARM32",
.aarch64, .aarch64_be => "-DDEF_ARM64",
@ -434,7 +434,7 @@ fn findDef(comp: *Compilation, allocator: Allocator, lib_name: []const u8) ![]u8
const target = comp.getTarget();
const lib_path = switch (target.cpu.arch) {
.i386 => "lib32",
.x86 => "lib32",
.x86_64 => "lib64",
.arm, .armeb, .thumb, .thumbeb, .aarch64_32 => "libarm32",
.aarch64, .aarch64_be => "libarm64",

View File

@ -262,7 +262,7 @@ pub fn archName(arch: std.Target.Cpu.Arch) [:0]const u8 {
switch (arch) {
.aarch64, .aarch64_be => return "aarch64",
.arm, .armeb, .thumb, .thumbeb => return "arm",
.i386 => return "i386",
.x86 => return "i386",
.mips, .mipsel => return "mips",
.mips64el, .mips64 => return "mips64",
.powerpc => return "powerpc",

View File

@ -35,9 +35,9 @@ pub const available_libcs = [_]ArchOsAbi{
.{ .arch = .arm, .os = .windows, .abi = .gnu },
.{ .arch = .csky, .os = .linux, .abi = .gnueabi },
.{ .arch = .csky, .os = .linux, .abi = .gnueabihf },
.{ .arch = .i386, .os = .linux, .abi = .gnu },
.{ .arch = .i386, .os = .linux, .abi = .musl },
.{ .arch = .i386, .os = .windows, .abi = .gnu },
.{ .arch = .x86, .os = .linux, .abi = .gnu },
.{ .arch = .x86, .os = .linux, .abi = .musl },
.{ .arch = .x86, .os = .windows, .abi = .gnu },
.{ .arch = .m68k, .os = .linux, .abi = .gnu },
.{ .arch = .m68k, .os = .linux, .abi = .musl },
.{ .arch = .mips64el, .os = .linux, .abi = .gnuabi64 },
@ -137,7 +137,7 @@ pub fn osArchName(target: std.Target) [:0]const u8 {
.powerpc, .powerpcle, .powerpc64, .powerpc64le => "powerpc",
.riscv32, .riscv64 => "riscv",
.sparc, .sparcel, .sparc64 => "sparc",
.i386, .x86_64 => "x86",
.x86, .x86_64 => "x86",
else => @tagName(target.cpu.arch),
},
else => @tagName(target.cpu.arch),
@ -278,7 +278,7 @@ pub fn hasLlvmSupport(target: std.Target, ofmt: std.Target.ObjectFormat) bool {
.tcele,
.thumb,
.thumbeb,
.i386,
.x86,
.x86_64,
.xcore,
.nvptx,
@ -319,7 +319,7 @@ pub fn selfHostedBackendIsAsRobustAsLlvm(target: std.Target) bool {
pub fn supportsStackProbing(target: std.Target) bool {
return target.os.tag != .windows and target.os.tag != .uefi and
(target.cpu.arch == .i386 or target.cpu.arch == .x86_64);
(target.cpu.arch == .x86 or target.cpu.arch == .x86_64);
}
pub fn supportsStackProtector(target: std.Target) bool {
@ -431,7 +431,7 @@ pub fn defaultCompilerRtOptimizeMode(target: std.Target) std.builtin.Mode {
pub fn hasRedZone(target: std.Target) bool {
return switch (target.cpu.arch) {
.x86_64,
.i386,
.x86,
.powerpc,
.powerpc64,
.powerpc64le,
@ -550,7 +550,7 @@ pub fn atomicPtrAlignment(
.tcele,
.thumb,
.thumbeb,
.i386,
.x86,
.xcore,
.amdil,
.hsail,
@ -655,7 +655,7 @@ pub fn addrSpaceCastIsValid(
) bool {
const arch = target.cpu.arch;
switch (arch) {
.x86_64, .i386 => return arch.supportsAddressSpace(from) and arch.supportsAddressSpace(to),
.x86_64, .x86 => return arch.supportsAddressSpace(from) and arch.supportsAddressSpace(to),
.nvptx64, .nvptx, .amdgcn => {
const to_generic = arch.supportsAddressSpace(from) and to == .generic;
const from_generic = arch.supportsAddressSpace(to) and from == .generic;

View File

@ -6650,7 +6650,7 @@ pub const CType = enum {
.long, .ulong => return target.cpu.arch.ptrBitWidth(),
.longlong, .ulonglong, .double => return 64,
.longdouble => switch (target.cpu.arch) {
.i386 => switch (target.abi) {
.x86 => switch (target.abi) {
.android => return 64,
else => return 80,
},
@ -6738,7 +6738,7 @@ pub const CType = enum {
.long, .ulong => return target.cpu.arch.ptrBitWidth(),
.longlong, .ulonglong, .double => return 64,
.longdouble => switch (target.cpu.arch) {
.i386 => switch (target.abi) {
.x86 => switch (target.abi) {
.android => return 64,
else => return 80,
},
@ -6792,7 +6792,7 @@ pub const CType = enum {
},
.windows, .uefi => switch (target.cpu.arch) {
.i386 => switch (self) {
.x86 => switch (self) {
.short, .ushort => return 16,
.int, .uint, .float => return 32,
.long, .ulong => return 32,
@ -6828,7 +6828,7 @@ pub const CType = enum {
.short, .ushort => return 16,
.int, .uint, .float => return 32,
.long, .ulong => switch (target.cpu.arch) {
.i386, .arm, .aarch64_32 => return 32,
.x86, .arm, .aarch64_32 => return 32,
.x86_64 => switch (target.abi) {
.gnux32, .muslx32 => return 32,
else => return 64,
@ -6837,7 +6837,7 @@ pub const CType = enum {
},
.longlong, .ulonglong, .double => return 64,
.longdouble => switch (target.cpu.arch) {
.i386 => switch (target.abi) {
.x86 => switch (target.abi) {
.android => return 64,
else => return 80,
},
@ -6896,7 +6896,7 @@ pub const CType = enum {
.short, .ushort => return 2,
else => return 1,
},
.i386 => switch (target.os.tag) {
.x86 => switch (target.os.tag) {
.windows, .uefi => switch (self) {
.longlong, .ulonglong, .double => return 8,
.longdouble => switch (target.abi) {
@ -6937,7 +6937,7 @@ pub const CType = enum {
.arc,
.csky,
.i386,
.x86,
.xcore,
.dxil,
.loongarch32,
@ -7034,7 +7034,7 @@ pub const CType = enum {
.double => return 4,
.longlong, .ulonglong => return 8,
},
.i386 => switch (target.os.tag) {
.x86 => switch (target.os.tag) {
.windows, .uefi => switch (self) {
.longdouble => switch (target.abi) {
.gnu, .gnuilp32, .cygnus => return 4,
@ -7087,7 +7087,7 @@ pub const CType = enum {
.bpfeb,
.hexagon,
.hsail64,
.i386,
.x86,
.loongarch64,
.m68k,
.mips,

View File

@ -21,7 +21,7 @@ struct ZigWindowsSDKPrivate {
enum NativeArch {
NativeArchArm,
NativeArchi386,
NativeArchx86,
NativeArchx86_64,
NativeArchAarch64,
};
@ -29,7 +29,7 @@ enum NativeArch {
#if defined(_M_ARM) || defined(__arm_)
static const NativeArch native_arch = NativeArchArm;
#elif defined(_M_IX86) || defined(__i386__)
static const NativeArch native_arch = NativeArchi386;
static const NativeArch native_arch = NativeArchx86;
#elif defined(_M_X64) || defined(__x86_64__)
static const NativeArch native_arch = NativeArchx86_64;
#elif defined(_M_ARM64) || defined(__aarch64__)
@ -110,7 +110,7 @@ static ZigFindWindowsSdkError find_msvc_lib_dir(ZigWindowsSDKPrivate *priv) {
fclose(tools_file);
out_append_ptr += sprintf(out_append_ptr, "VC\\Tools\\MSVC\\%s\\lib\\", tmp_path);
switch (native_arch) {
case NativeArchi386:
case NativeArchx86:
out_append_ptr += sprintf(out_append_ptr, "x86\\");
break;
case NativeArchx86_64:
@ -158,7 +158,7 @@ com_done:;
char *tmp_buf_append_ptr = tmp_buf + (cb_data - 1);
tmp_buf_append_ptr += sprintf(tmp_buf_append_ptr, "VC\\Lib\\");
switch (native_arch) {
case NativeArchi386:
case NativeArchx86:
//x86 is in the root of the Lib folder
break;
case NativeArchx86_64:
@ -219,7 +219,7 @@ static ZigFindWindowsSdkError find_10_version(ZigWindowsSDKPrivate *priv) {
case NativeArchx86_64:
option_name = "OptionId.DesktopCPPx64";
break;
case NativeArchi386:
case NativeArchx86:
option_name = "OptionId.DesktopCPPx86";
break;
default:

View File

@ -107,7 +107,7 @@ test "alignment and size of structs with 128-bit fields" {
.u129_size = 24,
},
.i386 => switch (builtin.os.tag) {
.x86 => switch (builtin.os.tag) {
.windows => .{
.a_align = 8,
.a_size = 16,

View File

@ -5,7 +5,7 @@ const math = std.math;
const pi = std.math.pi;
const e = std.math.e;
const has_f80_rt = switch (builtin.cpu.arch) {
.x86_64, .i386 => true,
.x86_64, .x86 => true,
else => false,
};

View File

@ -150,9 +150,9 @@ test "extern struct with stdcallcc fn pointer" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const S = extern struct {
ptr: *const fn () callconv(if (builtin.target.cpu.arch == .i386) .Stdcall else .C) i32,
ptr: *const fn () callconv(if (builtin.target.cpu.arch == .x86) .Stdcall else .C) i32,
fn foo() callconv(if (builtin.target.cpu.arch == .i386) .Stdcall else .C) i32 {
fn foo() callconv(if (builtin.target.cpu.arch == .x86) .Stdcall else .C) i32 {
return 1234;
}
};

View File

@ -1109,7 +1109,7 @@ test "packed struct with undefined initializers" {
var p: P = undefined;
p = P{ .a = 2, .b = 4, .c = 6 };
// Make sure the compiler doesn't touch the unprefixed fields.
// Use expect since i386-linux doesn't like expectEqual
// Use expect since x86-linux doesn't like expectEqual
try expect(p.a == 2);
try expect(p.b == 4);
try expect(p.c == 6);

View File

@ -639,7 +639,7 @@ test "vector shift operators" {
};
switch (builtin.target.cpu.arch) {
.i386,
.x86,
.aarch64,
.aarch64_be,
.aarch64_32,

View File

@ -2,7 +2,7 @@ const std = @import("std");
const builtin = @import("builtin");
const print = std.debug.print;
const expect = std.testing.expect;
const has_i128 = builtin.cpu.arch != .i386 and !builtin.cpu.arch.isARM() and
const has_i128 = builtin.cpu.arch != .x86 and !builtin.cpu.arch.isARM() and
!builtin.cpu.arch.isMIPS() and !builtin.cpu.arch.isPPC();
extern fn run_c_tests() void;
@ -153,7 +153,7 @@ export fn zig_bool(x: bool) void {
// https://github.com/ziglang/zig/issues/8465
//
// For now, we have no way of referring to the _Complex C types from Zig,
// so our ABI is unavoidably broken on some platforms (such as i386)
// so our ABI is unavoidably broken on some platforms (such as x86)
const ComplexFloat = extern struct {
real: f32,
imag: f32,
@ -170,7 +170,7 @@ extern fn c_cmultd_comp(a_r: f64, a_i: f64, b_r: f64, b_i: f64) ComplexDouble;
extern fn c_cmultf(a: ComplexFloat, b: ComplexFloat) ComplexFloat;
extern fn c_cmultd(a: ComplexDouble, b: ComplexDouble) ComplexDouble;
const complex_abi_compatible = builtin.cpu.arch != .i386 and !builtin.cpu.arch.isMIPS() and
const complex_abi_compatible = builtin.cpu.arch != .x86 and !builtin.cpu.arch.isMIPS() and
!builtin.cpu.arch.isARM() and !builtin.cpu.arch.isPPC() and !builtin.cpu.arch.isRISCV();
test "C ABI complex float" {
@ -323,7 +323,7 @@ extern fn c_med_struct_mixed(MedStructMixed) void;
extern fn c_ret_med_struct_mixed() MedStructMixed;
test "C ABI medium struct of ints and floats" {
if (builtin.cpu.arch == .i386) return error.SkipZigTest;
if (builtin.cpu.arch == .x86) return error.SkipZigTest;
if (comptime builtin.cpu.arch.isMIPS()) return error.SkipZigTest;
if (comptime builtin.cpu.arch.isPPC()) return error.SkipZigTest;
if (comptime builtin.cpu.arch.isPPC64()) return error.SkipZigTest;
@ -356,7 +356,7 @@ extern fn c_small_struct_ints(SmallStructInts) void;
extern fn c_ret_small_struct_ints() SmallStructInts;
test "C ABI small struct of ints" {
if (builtin.cpu.arch == .i386) return error.SkipZigTest;
if (builtin.cpu.arch == .x86) return error.SkipZigTest;
if (comptime builtin.cpu.arch.isMIPS()) return error.SkipZigTest;
if (comptime builtin.cpu.arch.isPPC()) return error.SkipZigTest;
if (comptime builtin.cpu.arch.isPPC64()) return error.SkipZigTest;
@ -438,7 +438,7 @@ const SplitStructInt = extern struct {
extern fn c_split_struct_ints(SplitStructInt) void;
test "C ABI split struct of ints" {
if (builtin.cpu.arch == .i386) return error.SkipZigTest;
if (builtin.cpu.arch == .x86) return error.SkipZigTest;
if (comptime builtin.cpu.arch.isMIPS()) return error.SkipZigTest;
if (comptime builtin.cpu.arch.isPPC()) return error.SkipZigTest;
if (comptime builtin.cpu.arch.isPPC64()) return error.SkipZigTest;
@ -466,7 +466,7 @@ extern fn c_split_struct_mixed(SplitStructMixed) void;
extern fn c_ret_split_struct_mixed() SplitStructMixed;
test "C ABI split struct of ints and floats" {
if (builtin.cpu.arch == .i386) return error.SkipZigTest;
if (builtin.cpu.arch == .x86) return error.SkipZigTest;
if (comptime builtin.cpu.arch.isMIPS()) return error.SkipZigTest;
if (comptime builtin.cpu.arch.isPPC()) return error.SkipZigTest;
if (comptime builtin.cpu.arch.isPPC64()) return error.SkipZigTest;
@ -729,7 +729,7 @@ extern fn c_struct_with_array(StructWithArray) void;
extern fn c_ret_struct_with_array() StructWithArray;
test "Struct with array as padding." {
if (builtin.cpu.arch == .i386) return error.SkipZigTest;
if (builtin.cpu.arch == .x86) return error.SkipZigTest;
if (comptime builtin.cpu.arch.isMIPS()) return error.SkipZigTest;
if (comptime builtin.cpu.arch.isPPC()) return error.SkipZigTest;
if (comptime builtin.cpu.arch.isPPC64()) return error.SkipZigTest;
@ -783,7 +783,7 @@ extern fn c_small_vec(SmallVec) void;
extern fn c_ret_small_vec() SmallVec;
test "small simd vector" {
if (builtin.cpu.arch == .i386) return error.SkipZigTest;
if (builtin.cpu.arch == .x86) return error.SkipZigTest;
if (comptime builtin.cpu.arch.isPPC64()) return error.SkipZigTest;
c_small_vec(.{ 1, 2 });
@ -820,7 +820,7 @@ extern fn c_ptr_size_float_struct(Vector2) void;
extern fn c_ret_ptr_size_float_struct() Vector2;
test "C ABI pointer sized float struct" {
if (builtin.cpu.arch == .i386) return error.SkipZigTest;
if (builtin.cpu.arch == .x86) return error.SkipZigTest;
if (comptime builtin.cpu.arch.isMIPS()) return error.SkipZigTest;
if (comptime builtin.cpu.arch.isRISCV()) return error.SkipZigTest;
if (comptime builtin.cpu.arch.isPPC()) return error.SkipZigTest;

View File

@ -4,4 +4,4 @@ export fn entry() callconv(.Interrupt) void {}
// backend=stage2
// target=aarch64-linux-none
//
// :1:29: error: callconv 'Interrupt' is only available on i386, x86_64, AVR, and MSP430, not aarch64
// :1:29: error: callconv 'Interrupt' is only available on x86, x86_64, AVR, and MSP430, not aarch64

View File

@ -18,6 +18,6 @@ export fn entry3() void {
// backend=stage2
// target=x86_64-linux-none
//
// :1:28: error: callconv 'Stdcall' is only available on i386, not x86_64
// :2:28: error: callconv 'Fastcall' is only available on i386, not x86_64
// :3:28: error: callconv 'Thiscall' is only available on i386, not x86_64
// :1:28: error: callconv 'Stdcall' is only available on x86, not x86_64
// :2:28: error: callconv 'Fastcall' is only available on x86, not x86_64
// :3:28: error: callconv 'Thiscall' is only available on x86, not x86_64

View File

@ -4,4 +4,4 @@ export fn entry() callconv(.Vectorcall) void {}
// backend=stage2
// target=x86_64-linux-none
//
// :1:29: error: callconv 'Vectorcall' is only available on i386 and AArch64, not x86_64
// :1:29: error: callconv 'Vectorcall' is only available on x86 and AArch64, not x86_64

View File

@ -163,14 +163,14 @@ const test_targets = blk: {
.{
.target = .{
.cpu_arch = .i386,
.cpu_arch = .x86,
.os_tag = .linux,
.abi = .none,
},
},
.{
.target = .{
.cpu_arch = .i386,
.cpu_arch = .x86,
.os_tag = .linux,
.abi = .musl,
},
@ -178,7 +178,7 @@ const test_targets = blk: {
},
.{
.target = .{
.cpu_arch = .i386,
.cpu_arch = .x86,
.os_tag = .linux,
.abi = .gnu,
},
@ -387,7 +387,7 @@ const test_targets = blk: {
.{
.target = .{
.cpu_arch = .i386,
.cpu_arch = .x86,
.os_tag = .windows,
.abi = .msvc,
},
@ -403,7 +403,7 @@ const test_targets = blk: {
.{
.target = .{
.cpu_arch = .i386,
.cpu_arch = .x86,
.os_tag = .windows,
.abi = .gnu,
},
@ -1279,7 +1279,7 @@ const c_abi_targets = [_]CrossTarget{
.abi = .musl,
},
.{
.cpu_arch = .i386,
.cpu_arch = .x86,
.os_tag = .linux,
.abi = .musl,
},

Some files were not shown because too many files have changed in this diff.