Merge remote-tracking branch 'origin/master' into stage2-whole-file-astgen

Conflicts:
 * build.zig
 * src/Compilation.zig
 * src/codegen/spirv/spec.zig
 * src/link/SpirV.zig
 * test/stage2/darwin.zig
   - this one might be problematic; start.zig looks for `main` in the
     root source file, not `_main`. Not sure why there is an underscore
     there in master branch.
Commit 597082adf4 by Andrew Kelley, 2021-05-15 21:44:38 -07:00
48 changed files with 3679 additions and 918 deletions

View File

@ -228,6 +228,7 @@ pub fn build(b: *Builder) !void {
const is_wine_enabled = b.option(bool, "enable-wine", "Use Wine to run cross compiled Windows tests") orelse false;
const is_qemu_enabled = b.option(bool, "enable-qemu", "Use QEMU to run cross compiled foreign architecture tests") orelse false;
const is_wasmtime_enabled = b.option(bool, "enable-wasmtime", "Use Wasmtime to enable and run WASI libstd tests") orelse false;
const is_darling_enabled = b.option(bool, "enable-darling", "[Experimental] Use Darling to run cross compiled macOS tests") orelse false;
const glibc_multi_dir = b.option([]const u8, "enable-foreign-glibc", "Provide directory with glibc installations to run cross compiled tests that link glibc");
test_stage2.addBuildOption(bool, "skip_non_native", skip_non_native);
@ -238,6 +239,7 @@ pub fn build(b: *Builder) !void {
test_stage2.addBuildOption(bool, "enable_wine", is_wine_enabled);
test_stage2.addBuildOption(bool, "enable_wasmtime", is_wasmtime_enabled);
test_stage2.addBuildOption(u32, "mem_leak_frames", mem_leak_frames * 2);
test_stage2.addBuildOption(bool, "enable_darling", is_darling_enabled);
test_stage2.addBuildOption(?[]const u8, "glibc_multi_install_dir", glibc_multi_dir);
test_stage2.addBuildOption([]const u8, "version", version);
@ -272,11 +274,56 @@ pub fn build(b: *Builder) !void {
const fmt_step = b.step("test-fmt", "Run zig fmt against build.zig to make sure it works");
fmt_step.dependOn(&fmt_build_zig.step);
// TODO for the moment, skip wasm32-wasi until bugs are sorted out.
toolchain_step.dependOn(tests.addPkgTests(b, test_filter, "test/behavior.zig", "behavior", "Run the behavior tests", modes, false, skip_non_native, skip_libc, is_wine_enabled, is_qemu_enabled, is_wasmtime_enabled, glibc_multi_dir));
toolchain_step.dependOn(tests.addPkgTests(
b,
test_filter,
"test/behavior.zig",
"behavior",
"Run the behavior tests",
modes,
false,
skip_non_native,
skip_libc,
is_wine_enabled,
is_qemu_enabled,
is_wasmtime_enabled,
is_darling_enabled,
glibc_multi_dir,
));
toolchain_step.dependOn(tests.addPkgTests(b, test_filter, "lib/std/special/compiler_rt.zig", "compiler-rt", "Run the compiler_rt tests", modes, true, skip_non_native, true, is_wine_enabled, is_qemu_enabled, is_wasmtime_enabled, glibc_multi_dir));
toolchain_step.dependOn(tests.addPkgTests(b, test_filter, "lib/std/special/c.zig", "minilibc", "Run the mini libc tests", modes, true, skip_non_native, true, is_wine_enabled, is_qemu_enabled, is_wasmtime_enabled, glibc_multi_dir));
toolchain_step.dependOn(tests.addPkgTests(
b,
test_filter,
"lib/std/special/compiler_rt.zig",
"compiler-rt",
"Run the compiler_rt tests",
modes,
true,
skip_non_native,
true,
is_wine_enabled,
is_qemu_enabled,
is_wasmtime_enabled,
is_darling_enabled,
glibc_multi_dir,
));
toolchain_step.dependOn(tests.addPkgTests(
b,
test_filter,
"lib/std/special/c.zig",
"minilibc",
"Run the mini libc tests",
modes,
true,
skip_non_native,
true,
is_wine_enabled,
is_qemu_enabled,
is_wasmtime_enabled,
is_darling_enabled,
glibc_multi_dir,
));
toolchain_step.dependOn(tests.addCompareOutputTests(b, test_filter, modes));
toolchain_step.dependOn(tests.addStandaloneTests(b, test_filter, modes));
@ -294,7 +341,22 @@ pub fn build(b: *Builder) !void {
toolchain_step.dependOn(tests.addCompileErrorTests(b, test_filter, modes));
}
const std_step = tests.addPkgTests(b, test_filter, "lib/std/std.zig", "std", "Run the standard library tests", modes, false, skip_non_native, skip_libc, is_wine_enabled, is_qemu_enabled, is_wasmtime_enabled, glibc_multi_dir);
const std_step = tests.addPkgTests(
b,
test_filter,
"lib/std/std.zig",
"std",
"Run the standard library tests",
modes,
false,
skip_non_native,
skip_libc,
is_wine_enabled,
is_qemu_enabled,
is_wasmtime_enabled,
is_darling_enabled,
glibc_multi_dir,
);
const test_step = b.step("test", "Run all the tests");
test_step.dependOn(toolchain_step);
@ -346,8 +408,7 @@ fn addCmakeCfgOptionsToExe(
},
else => |e| return e,
};
exe.linkSystemLibrary("pthread");
exe.linkSystemLibrary("unwind");
} else if (exe.target.isFreeBSD()) {
try addCxxKnownPath(b, cfg, exe, "libc++.a", null, need_cpp_includes);
exe.linkSystemLibrary("pthread");
@ -355,20 +416,7 @@ fn addCmakeCfgOptionsToExe(
try addCxxKnownPath(b, cfg, exe, "libc++.a", null, need_cpp_includes);
try addCxxKnownPath(b, cfg, exe, "libc++abi.a", null, need_cpp_includes);
} else if (exe.target.isDarwin()) {
if (addCxxKnownPath(b, cfg, exe, "libgcc_eh.a", "", need_cpp_includes)) {
// Compiler is GCC.
try addCxxKnownPath(b, cfg, exe, "libstdc++.a", null, need_cpp_includes);
exe.linkSystemLibrary("pthread");
// TODO LLD cannot perform this link.
// Set ZIG_SYSTEM_LINKER_HACK env var to use system linker ld instead.
// See https://github.com/ziglang/zig/issues/1535
} else |err| switch (err) {
error.RequiredLibraryNotFound => {
// System compiler, not gcc.
exe.linkSystemLibrary("c++");
},
else => |e| return e,
}
exe.linkSystemLibrary("c++");
}
if (cfg.dia_guids_lib.len != 0) {

View File

@ -1,81 +0,0 @@
/* Machine-specific pthread type layouts. SPARC version.
Copyright (C) 2003-2019 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
#ifndef _BITS_PTHREADTYPES_ARCH_H
#define _BITS_PTHREADTYPES_ARCH_H 1
#include <bits/wordsize.h>
#if __WORDSIZE == 64
# define __SIZEOF_PTHREAD_ATTR_T 56
# define __SIZEOF_PTHREAD_MUTEX_T 40
# define __SIZEOF_PTHREAD_CONDATTR_T 4
# define __SIZEOF_PTHREAD_RWLOCK_T 56
# define __SIZEOF_PTHREAD_BARRIER_T 32
#else
# define __SIZEOF_PTHREAD_ATTR_T 36
# define __SIZEOF_PTHREAD_MUTEX_T 24
# define __SIZEOF_PTHREAD_CONDATTR_T 4
# define __SIZEOF_PTHREAD_RWLOCK_T 32
# define __SIZEOF_PTHREAD_BARRIER_T 20
#endif
#define __SIZEOF_PTHREAD_MUTEXATTR_T 4
#define __SIZEOF_PTHREAD_COND_T 48
#define __SIZEOF_PTHREAD_RWLOCKATTR_T 8
#define __SIZEOF_PTHREAD_BARRIERATTR_T 4
/* Definitions for internal mutex struct. */
#define __PTHREAD_COMPAT_PADDING_MID
#define __PTHREAD_COMPAT_PADDING_END
#define __PTHREAD_MUTEX_LOCK_ELISION 0
#define __PTHREAD_MUTEX_NUSERS_AFTER_KIND (__WORDSIZE != 64)
#define __PTHREAD_MUTEX_USE_UNION (__WORDSIZE != 64)
#define __LOCK_ALIGNMENT
#define __ONCE_ALIGNMENT
struct __pthread_rwlock_arch_t
{
unsigned int __readers;
unsigned int __writers;
unsigned int __wrphase_futex;
unsigned int __writers_futex;
unsigned int __pad3;
unsigned int __pad4;
#if __WORDSIZE == 64
int __cur_writer;
int __shared;
unsigned long int __pad1;
unsigned long int __pad2;
/* FLAGS must stay at this position in the structure to maintain
binary compatibility. */
unsigned int __flags;
#else
unsigned char __pad1;
unsigned char __pad2;
unsigned char __shared;
/* FLAGS must stay at this position in the structure to maintain
binary compatibility. */
unsigned char __flags;
int __cur_writer;
#endif
};
#define __PTHREAD_RWLOCK_ELISION_EXTRA 0
#endif /* bits/pthreadtypes.h */

View File

@ -1398,6 +1398,9 @@ pub const LibExeObjStep = struct {
/// Uses system Wasmtime installation to run cross compiled wasm/wasi build artifacts.
enable_wasmtime: bool = false,
/// Experimental. Uses system Darling installation to run cross compiled macOS build artifacts.
enable_darling: bool = false,
/// After following the steps in https://github.com/ziglang/zig/wiki/Updating-libc#glibc,
/// this will be the directory $glibc-build-dir/install/glibcs
/// Given the example of the aarch64 target, this is the directory
@ -2514,6 +2517,11 @@ pub const LibExeObjStep = struct {
try zig_args.append("--dir=.");
try zig_args.append("--test-cmd-bin");
},
.darling => |bin_name| if (self.enable_darling) {
try zig_args.append("--test-cmd");
try zig_args.append(bin_name);
try zig_args.append("--test-cmd-bin");
},
}
for (self.packages.items) |pkg| {

View File

@ -309,6 +309,7 @@ const RED = "\x1b[31;1m";
const GREEN = "\x1b[32;1m";
const CYAN = "\x1b[36;1m";
const WHITE = "\x1b[37;1m";
const BOLD = "\x1b[1m";
const DIM = "\x1b[2m";
const RESET = "\x1b[0m";
@ -479,8 +480,9 @@ pub const TTY = struct {
.Red => out_stream.writeAll(RED) catch return,
.Green => out_stream.writeAll(GREEN) catch return,
.Cyan => out_stream.writeAll(CYAN) catch return,
.White, .Bold => out_stream.writeAll(WHITE) catch return,
.White => out_stream.writeAll(WHITE) catch return,
.Dim => out_stream.writeAll(DIM) catch return,
.Bold => out_stream.writeAll(BOLD) catch return,
.Reset => out_stream.writeAll(RESET) catch return,
},
.windows_api => if (native_os == .windows) {
@ -632,7 +634,7 @@ fn printLineInfo(
comptime printLineFromFile: anytype,
) !void {
nosuspend {
tty_config.setColor(out_stream, .White);
tty_config.setColor(out_stream, .Bold);
if (line_info) |*li| {
try out_stream.print("{s}:{d}:{d}", .{ li.file_name, li.line, li.column });

View File

@ -501,7 +501,9 @@ pub const Dir = struct {
},
.linux => struct {
dir: Dir,
buf: [8192]u8, // TODO align(@alignOf(os.dirent64)),
// The if guard is solely there to prevent compile errors from missing `os.linux.dirent64`
// definition when compiling for other OSes. It doesn't do anything when compiling for Linux.
buf: [8192]u8 align(if (builtin.os.tag != .linux) 1 else @alignOf(os.linux.dirent64)),
index: usize,
end_index: usize,

View File

@ -303,29 +303,32 @@ pub fn HashMapUnmanaged(
/// Metadata for a slot. It can be in three states: empty, used or
/// tombstone. Tombstones indicate that an entry was previously used,
/// they are a simple way to handle removal.
/// To this state, we add 6 bits from the slot's key hash. These are
/// To this state, we add 7 bits from the slot's key hash. These are
/// used as a fast way to disambiguate between entries without
/// having to use the equality function. If two fingerprints are
/// different, we know that we don't have to compare the keys at all.
/// The 6 bits are the highest ones from a 64 bit hash. This way, not
/// The 7 bits are the highest ones from a 64 bit hash. This way, not
/// only we use the `log2(capacity)` lowest bits from the hash to determine
/// a slot index, but we use 6 more bits to quickly resolve collisions
/// when multiple elements with different hashes end up wanting to be in
/// the same slot.
/// a slot index, but we use 7 more bits to quickly resolve collisions
/// when multiple elements with different hashes end up wanting to be in the same slot.
/// Not using the equality function means we don't have to read into
/// the entries array, avoiding a likely cache miss.
/// the entries array, likely avoiding a cache miss and a potentially
/// costly function call.
const Metadata = packed struct {
const FingerPrint = u6;
const FingerPrint = u7;
const free: FingerPrint = 0;
const tombstone: FingerPrint = 1;
fingerprint: FingerPrint = free,
used: u1 = 0,
tombstone: u1 = 0,
fingerprint: FingerPrint = 0,
pub fn isUsed(self: Metadata) bool {
return self.used == 1;
}
pub fn isTombstone(self: Metadata) bool {
return self.tombstone == 1;
return !self.isUsed() and self.fingerprint == tombstone;
}
pub fn takeFingerprint(hash: Hash) FingerPrint {
@ -336,14 +339,12 @@ pub fn HashMapUnmanaged(
pub fn fill(self: *Metadata, fp: FingerPrint) void {
self.used = 1;
self.tombstone = 0;
self.fingerprint = fp;
}
pub fn remove(self: *Metadata) void {
self.used = 0;
self.tombstone = 1;
self.fingerprint = 0;
self.fingerprint = tombstone;
}
};
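
As an aside, here is a minimal sketch (not taken from this commit) of how a 7-bit fingerprint can be taken from the top of a 64-bit hash; `Hash` and `takeFingerprint` below are stand-ins, assuming a 64-bit hash type, since the real body is in the elided part of the hunk above.

```zig
const std = @import("std");
const expect = std.testing.expect;

const Hash = u64; // stand-in for the map's hash type
const FingerPrint = u7;

fn takeFingerprint(hash: Hash) FingerPrint {
    // Keep only the highest 7 bits; the lowest log2(capacity) bits already
    // pick the slot index, so the fingerprint adds independent information.
    return @truncate(FingerPrint, hash >> (@bitSizeOf(Hash) - @bitSizeOf(FingerPrint)));
}

test "fingerprints come from the top bits of the hash" {
    // Two hashes that agree in their low bits map to the same slot but
    // still get different fingerprints, so no key comparison is needed.
    try expect(takeFingerprint(0x8000_0000_0000_1234) != takeFingerprint(0x0000_0000_0000_1234));
}
```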

View File

@ -623,7 +623,7 @@ pub const StreamingParser = struct {
.ObjectSeparator => switch (c) {
':' => {
p.state = .ValueBegin;
p.state = .ValueBeginNoClosing;
p.after_string_state = .ValueEnd;
},
0x09, 0x0A, 0x0D, 0x20 => {
@ -1205,6 +1205,13 @@ test "json.token mismatched close" {
try testing.expectError(error.UnexpectedClosingBrace, p.next());
}
test "json.token premature object close" {
var p = TokenStream.init("{ \"key\": }");
try checkNext(&p, .ObjectBegin);
try checkNext(&p, .String);
try testing.expectError(error.InvalidValueBegin, p.next());
}
/// Validate a JSON string. This does not limit number precision so a decoder may not necessarily
/// be able to decode the string even if this returns true.
pub fn validate(s: []const u8) bool {
@ -1566,11 +1573,16 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
// .UseLast => {},
// }
if (options.duplicate_field_behavior == .UseFirst) {
// unconditionally ignore value. for comptime fields, this skips the check against default_value
parseFree(field.field_type, try parse(field.field_type, tokens, options), options);
found = true;
break;
} else if (options.duplicate_field_behavior == .Error) {
return error.DuplicateJSONField;
} else if (options.duplicate_field_behavior == .UseLast) {
parseFree(field.field_type, @field(r, field.name), options);
if (!field.is_comptime) {
parseFree(field.field_type, @field(r, field.name), options);
}
fields_seen[i] = false;
}
}
@ -1724,7 +1736,9 @@ pub fn parseFree(comptime T: type, value: T, options: ParseOptions) void {
},
.Struct => |structInfo| {
inline for (structInfo.fields) |field| {
parseFree(field.field_type, @field(value, field.name), options);
if (!field.is_comptime) {
parseFree(field.field_type, @field(value, field.name), options);
}
}
},
.Array => |arrayInfo| {
@ -1901,14 +1915,19 @@ test "parse with comptime field" {
},
};
const r = try std.json.parse(T, &std.json.TokenStream.init(
const options = ParseOptions{
.allocator = std.testing.allocator,
};
const r = try parse(T, &TokenStream.init(
\\{
\\ "kind": "float",
\\ "b": 1.0
\\}
), .{
.allocator = std.testing.allocator,
});
), options);
// check that parseFree doesn't try to free comptime fields
parseFree(T, r, options);
}
}
@ -1995,17 +2014,33 @@ test "parse into struct with duplicate field" {
const ballast = try testing.allocator.alloc(u64, 1);
defer testing.allocator.free(ballast);
const options = ParseOptions{
const options_first = ParseOptions{
.allocator = testing.allocator,
.duplicate_field_behavior = .UseFirst
};
const options_last = ParseOptions{
.allocator = testing.allocator,
.duplicate_field_behavior = .UseLast,
};
const str = "{ \"a\": 1, \"a\": 0.25 }";
const T1 = struct { a: *u64 };
try testing.expectError(error.UnexpectedToken, parse(T1, &TokenStream.init(str), options));
// both .UseFirst and .UseLast should fail because second "a" value isn't a u64
try testing.expectError(error.UnexpectedToken, parse(T1, &TokenStream.init(str), options_first));
try testing.expectError(error.UnexpectedToken, parse(T1, &TokenStream.init(str), options_last));
const T2 = struct { a: f64 };
try testing.expectEqual(T2{ .a = 0.25 }, try parse(T2, &TokenStream.init(str), options));
try testing.expectEqual(T2{ .a = 1.0 }, try parse(T2, &TokenStream.init(str), options_first));
try testing.expectEqual(T2{ .a = 0.25 }, try parse(T2, &TokenStream.init(str), options_last));
const T3 = struct { comptime a: f64 = 1.0 };
// .UseFirst should succeed because second "a" value is unconditionally ignored (even though != 1.0)
const t3 = T3{ .a = 1.0 };
try testing.expectEqual(t3, try parse(T3, &TokenStream.init(str), options_first));
// .UseLast should fail because second "a" value is 0.25 which is not equal to default value of 1.0
try testing.expectError(error.UnexpectedValue, parse(T3, &TokenStream.init(str), options_last));
}
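
As an aside, the `is_comptime` guards above exist because a comptime struct field has no runtime storage, so there is nothing for `parseFree` to free and nothing to overwrite. A small standalone illustration (not from this commit):

```zig
const std = @import("std");
const expect = std.testing.expect;

test "comptime fields carry no runtime state" {
    const T = struct {
        comptime kind: []const u8 = "float", // fixed at compile time
        b: f64 = 0,
    };
    const t = T{ .b = 1.0 };
    // Reading the comptime field yields the compile-time constant; it was
    // never allocated, so there is nothing to free.
    try expect(std.mem.eql(u8, t.kind, "float"));
    try expect(t.b == 1.0);
}
```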
/// A non-stream JSON parser which constructs a tree of Value's.

View File

@ -76,6 +76,12 @@ test "y_trailing_comma_after_empty" {
);
}
test "n_object_closed_missing_value" {
try err(
\\{"a":}
);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
test "y_array_arraysWithSpaces" {

View File

@ -14,16 +14,20 @@ pub fn isNormal(x: anytype) bool {
switch (T) {
f16 => {
const bits = @bitCast(u16, x);
return (bits + 1024) & 0x7FFF >= 2048;
return (bits + (1 << 10)) & (maxInt(u16) >> 1) >= (1 << 11);
},
f32 => {
const bits = @bitCast(u32, x);
return (bits + 0x00800000) & 0x7FFFFFFF >= 0x01000000;
return (bits + (1 << 23)) & (maxInt(u32) >> 1) >= (1 << 24);
},
f64 => {
const bits = @bitCast(u64, x);
return (bits + (1 << 52)) & (maxInt(u64) >> 1) >= (1 << 53);
},
f128 => {
const bits = @bitCast(u128, x);
return (bits + (1 << 112)) & (maxInt(u128) >> 1) >= (1 << 113);
},
else => {
@compileError("isNormal not implemented for " ++ @typeName(T));
},
@ -34,10 +38,13 @@ test "math.isNormal" {
try expect(!isNormal(math.nan(f16)));
try expect(!isNormal(math.nan(f32)));
try expect(!isNormal(math.nan(f64)));
try expect(!isNormal(math.nan(f128)));
try expect(!isNormal(@as(f16, 0)));
try expect(!isNormal(@as(f32, 0)));
try expect(!isNormal(@as(f64, 0)));
try expect(!isNormal(@as(f128, 0)));
try expect(isNormal(@as(f16, 1.0)));
try expect(isNormal(@as(f32, 1.0)));
try expect(isNormal(@as(f64, 1.0)));
try expect(isNormal(@as(f128, 1.0)));
}
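
As an aside on the bit trick above (an illustration, not part of the commit): for f32, adding 1 << 23 bumps the biased exponent field by one, masking with maxInt(u32) >> 1 drops the sign bit, and requiring at least 1 << 24 is the same as requiring the original exponent field to be in [1, 254], which excludes zeros/subnormals and inf/NaN. A standalone check of the three cases for positive inputs:

```zig
const std = @import("std");
const expect = std.testing.expect;
const math = std.math;

test "isNormal bit trick, worked out for f32" {
    const one = @bitCast(u32, @as(f32, 1.0)); // exponent field 127
    const tiny = @bitCast(u32, math.f32_true_min); // subnormal, exponent field 0
    const inf = @bitCast(u32, math.inf(f32)); // exponent field 255

    // Normal: 127 + 1 = 128, so the masked value is at least 1 << 24.
    try expect((one + (1 << 23)) & (math.maxInt(u32) >> 1) >= (1 << 24));
    // Zero/subnormal: 0 + 1 = 1, which stays below 1 << 24.
    try expect((tiny + (1 << 23)) & (math.maxInt(u32) >> 1) < (1 << 24));
    // Inf/NaN: 255 + 1 carries into the sign position, which the mask discards.
    try expect((inf + (1 << 23)) & (math.maxInt(u32) >> 1) < (1 << 24));
}
```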

View File

@ -9,89 +9,89 @@
// https://git.musl-libc.org/cgit/musl/tree/src/math/scalbnf.c
// https://git.musl-libc.org/cgit/musl/tree/src/math/scalbn.c
const std = @import("../std.zig");
const std = @import("std");
const math = std.math;
const assert = std.debug.assert;
const expect = std.testing.expect;
/// Returns x * 2^n.
pub fn scalbn(x: anytype, n: i32) @TypeOf(x) {
const T = @TypeOf(x);
return switch (T) {
f32 => scalbn32(x, n),
f64 => scalbn64(x, n),
else => @compileError("scalbn not implemented for " ++ @typeName(T)),
};
}
var base = x;
var shift = n;
fn scalbn32(x: f32, n_: i32) f32 {
var y = x;
var n = n_;
const T = @TypeOf(base);
const IntT = std.meta.Int(.unsigned, @bitSizeOf(T));
if (@typeInfo(T) != .Float) {
@compileError("scalbn not implemented for " ++ @typeName(T));
}
if (n > 127) {
y *= 0x1.0p127;
n -= 127;
if (n > 1023) {
y *= 0x1.0p127;
n -= 127;
if (n > 127) {
n = 127;
}
const mantissa_bits = math.floatMantissaBits(T);
const exponent_bits = math.floatExponentBits(T);
const exponent_bias = (1 << (exponent_bits - 1)) - 1;
const exponent_min = 1 - exponent_bias;
const exponent_max = exponent_bias;
// fix double rounding errors in subnormal ranges
// https://git.musl-libc.org/cgit/musl/commit/src/math/scalbn.c?id=8c44a060243f04283ca68dad199aab90336141db
const scale_min_expo = exponent_min + mantissa_bits + 1;
const scale_min = @bitCast(T, @as(IntT, scale_min_expo + exponent_bias) << mantissa_bits);
const scale_max = @bitCast(T, @intCast(IntT, exponent_max + exponent_bias) << mantissa_bits);
// scale `shift` within floating point limits, if possible
// second pass is possible due to subnormal range
// third pass always results in +/-0.0 or +/-inf
if (shift > exponent_max) {
base *= scale_max;
shift -= exponent_max;
if (shift > exponent_max) {
base *= scale_max;
shift -= exponent_max;
if (shift > exponent_max) shift = exponent_max;
}
} else if (n < -126) {
y *= 0x1.0p-126 * 0x1.0p24;
n += 126 - 24;
if (n < -126) {
y *= 0x1.0p-126 * 0x1.0p24;
n += 126 - 24;
if (n < -126) {
n = -126;
}
} else if (shift < exponent_min) {
base *= scale_min;
shift -= scale_min_expo;
if (shift < exponent_min) {
base *= scale_min;
shift -= scale_min_expo;
if (shift < exponent_min) shift = exponent_min;
}
}
const u = @intCast(u32, n +% 0x7F) << 23;
return y * @bitCast(f32, u);
}
fn scalbn64(x: f64, n_: i32) f64 {
var y = x;
var n = n_;
if (n > 1023) {
y *= 0x1.0p1023;
n -= 1023;
if (n > 1023) {
y *= 0x1.0p1023;
n -= 1023;
if (n > 1023) {
n = 1023;
}
}
} else if (n < -1022) {
y *= 0x1.0p-1022 * 0x1.0p53;
n += 1022 - 53;
if (n < -1022) {
y *= 0x1.0p-1022 * 0x1.0p53;
n += 1022 - 53;
if (n < -1022) {
n = -1022;
}
}
}
const u = @intCast(u64, n +% 0x3FF) << 52;
return y * @bitCast(f64, u);
return base * @bitCast(T, @intCast(IntT, shift + exponent_bias) << mantissa_bits);
}
test "math.scalbn" {
try expect(scalbn(@as(f32, 1.5), 4) == scalbn32(1.5, 4));
try expect(scalbn(@as(f64, 1.5), 4) == scalbn64(1.5, 4));
}
// basic usage
try expect(scalbn(@as(f16, 1.5), 4) == 24.0);
try expect(scalbn(@as(f32, 1.5), 4) == 24.0);
try expect(scalbn(@as(f64, 1.5), 4) == 24.0);
try expect(scalbn(@as(f128, 1.5), 4) == 24.0);
test "math.scalbn32" {
try expect(scalbn32(1.5, 4) == 24.0);
}
// subnormals
try expect(math.isNormal(scalbn(@as(f16, 1.0), -14)));
try expect(!math.isNormal(scalbn(@as(f16, 1.0), -15)));
try expect(math.isNormal(scalbn(@as(f32, 1.0), -126)));
try expect(!math.isNormal(scalbn(@as(f32, 1.0), -127)));
try expect(math.isNormal(scalbn(@as(f64, 1.0), -1022)));
try expect(!math.isNormal(scalbn(@as(f64, 1.0), -1023)));
try expect(math.isNormal(scalbn(@as(f128, 1.0), -16382)));
try expect(!math.isNormal(scalbn(@as(f128, 1.0), -16383)));
// unreliable due to lack of native f16 support, see talk on PR #8733
// try expect(scalbn(@as(f16, 0x1.1FFp-1), -14 - 9) == math.f16_true_min);
try expect(scalbn(@as(f32, 0x1.3FFFFFp-1), -126 - 22) == math.f32_true_min);
try expect(scalbn(@as(f64, 0x1.7FFFFFFFFFFFFp-1), -1022 - 51) == math.f64_true_min);
try expect(scalbn(@as(f128, 0x1.7FFFFFFFFFFFFFFFFFFFFFFFFFFFp-1), -16382 - 111) == math.f128_true_min);
test "math.scalbn64" {
try expect(scalbn64(1.5, 4) == 24.0);
// float limits
try expect(scalbn(@as(f32, math.f32_max), -128 - 149) > 0.0);
try expect(scalbn(@as(f32, math.f32_max), -128 - 149 - 1) == 0.0);
try expect(!math.isPositiveInf(scalbn(@as(f16, math.f16_true_min), 15 + 24)));
try expect(math.isPositiveInf(scalbn(@as(f16, math.f16_true_min), 15 + 24 + 1)));
try expect(!math.isPositiveInf(scalbn(@as(f32, math.f32_true_min), 127 + 149)));
try expect(math.isPositiveInf(scalbn(@as(f32, math.f32_true_min), 127 + 149 + 1)));
try expect(!math.isPositiveInf(scalbn(@as(f64, math.f64_true_min), 1023 + 1074)));
try expect(math.isPositiveInf(scalbn(@as(f64, math.f64_true_min), 1023 + 1074 + 1)));
try expect(!math.isPositiveInf(scalbn(@as(f128, math.f128_true_min), 16383 + 16494)));
try expect(math.isPositiveInf(scalbn(@as(f128, math.f128_true_min), 16383 + 16494 + 1)));
}
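
As an aside, the final line of the new generic scalbn builds 2^shift directly from its biased exponent and multiplies by it. A standalone illustration for f64 (not from this commit), assuming the exponent has already been clamped into the normal range:

```zig
const std = @import("std");
const expect = std.testing.expect;

test "building a power of two from a biased exponent (f64)" {
    const mantissa_bits = 52;
    const exponent_bias = 1023;
    const n: i32 = 4;
    // (n + bias) placed in the exponent field with a zero mantissa is exactly 2^n.
    const pow = @bitCast(f64, @intCast(u64, n + exponent_bias) << mantissa_bits);
    try expect(pow == 16.0);
    try expect(1.5 * pow == 24.0); // matches scalbn(@as(f64, 1.5), 4) above
}
```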

View File

@ -52,6 +52,7 @@ pub const ENOPROTOOPT = 42;
pub const EPROTONOSUPPORT = 43;
pub const ESOCKTNOSUPPORT = 44;
pub const EOPNOTSUPP = 45;
pub const ENOTSUP = EOPNOTSUPP;
pub const EPFNOSUPPORT = 46;
pub const EAFNOSUPPORT = 47;
pub const EADDRINUSE = 48;

View File

@ -297,7 +297,17 @@ comptime {
@export(@import("compiler_rt/sparc.zig")._Qp_fgt, .{ .name = "_Qp_fgt", .linkage = linkage });
@export(@import("compiler_rt/sparc.zig")._Qp_fge, .{ .name = "_Qp_fge", .linkage = linkage });
@export(@import("compiler_rt/sparc.zig")._Qp_itoq, .{ .name = "_Qp_itoq", .linkage = linkage });
@export(@import("compiler_rt/sparc.zig")._Qp_uitoq, .{ .name = "_Qp_uitoq", .linkage = linkage });
@export(@import("compiler_rt/sparc.zig")._Qp_xtoq, .{ .name = "_Qp_xtoq", .linkage = linkage });
@export(@import("compiler_rt/sparc.zig")._Qp_uxtoq, .{ .name = "_Qp_uxtoq", .linkage = linkage });
@export(@import("compiler_rt/sparc.zig")._Qp_stoq, .{ .name = "_Qp_stoq", .linkage = linkage });
@export(@import("compiler_rt/sparc.zig")._Qp_dtoq, .{ .name = "_Qp_dtoq", .linkage = linkage });
@export(@import("compiler_rt/sparc.zig")._Qp_qtoi, .{ .name = "_Qp_qtoi", .linkage = linkage });
@export(@import("compiler_rt/sparc.zig")._Qp_qtoui, .{ .name = "_Qp_qtoui", .linkage = linkage });
@export(@import("compiler_rt/sparc.zig")._Qp_qtox, .{ .name = "_Qp_qtox", .linkage = linkage });
@export(@import("compiler_rt/sparc.zig")._Qp_qtoux, .{ .name = "_Qp_qtoux", .linkage = linkage });
@export(@import("compiler_rt/sparc.zig")._Qp_qtos, .{ .name = "_Qp_qtos", .linkage = linkage });
@export(@import("compiler_rt/sparc.zig")._Qp_qtod, .{ .name = "_Qp_qtod", .linkage = linkage });
}

View File

@ -98,8 +98,8 @@ fn mulXf3(comptime T: type, a: T, b: T) T {
// one or both of a or b is denormal, the other (if applicable) is a
// normal number. Renormalize one or both of a and b, and set scale to
// include the necessary exponent adjustment.
if (aAbs < implicitBit) scale +%= normalize(T, &aSignificand);
if (bAbs < implicitBit) scale +%= normalize(T, &bSignificand);
if (aAbs < implicitBit) scale += normalize(T, &aSignificand);
if (bAbs < implicitBit) scale += normalize(T, &bSignificand);
}
// Or in the implicit significand bit. (If we fell through from the
@ -277,7 +277,7 @@ fn normalize(comptime T: type, significand: *std.meta.Int(.unsigned, @typeInfo(T
const shift = @clz(Z, significand.*) - @clz(Z, implicitBit);
significand.* <<= @intCast(std.math.Log2Int(Z), shift);
return 1 - shift;
return @as(i32, 1) - shift;
}
fn wideRightShiftWithSticky(comptime Z: type, hi: *Z, lo: *Z, count: u32) void {
@ -285,15 +285,15 @@ fn wideRightShiftWithSticky(comptime Z: type, hi: *Z, lo: *Z, count: u32) void {
const typeWidth = @typeInfo(Z).Int.bits;
const S = std.math.Log2Int(Z);
if (count < typeWidth) {
const sticky = @truncate(u8, lo.* << @intCast(S, typeWidth -% count));
const sticky = @boolToInt((lo.* << @intCast(S, typeWidth -% count)) != 0);
lo.* = (hi.* << @intCast(S, typeWidth -% count)) | (lo.* >> @intCast(S, count)) | sticky;
hi.* = hi.* >> @intCast(S, count);
} else if (count < 2 * typeWidth) {
const sticky = @truncate(u8, hi.* << @intCast(S, 2 * typeWidth -% count) | lo.*);
const sticky = @boolToInt((hi.* << @intCast(S, 2 * typeWidth -% count) | lo.*) != 0);
lo.* = hi.* >> @intCast(S, count -% typeWidth) | sticky;
hi.* = 0;
} else {
const sticky = @truncate(u8, hi.* | lo.*);
const sticky = @boolToInt((hi.* | lo.*) != 0);
lo.* = sticky;
hi.* = 0;
}
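
For context on the `sticky` change above (an illustration, not part of the commit): the sticky bit only needs to record whether any nonzero bit was shifted out, so that rounding can later tell an exact result from an inexact one; the new code collapses all shifted-out bits into a single 0/1 instead of truncating to a u8. A minimal single-word model:

```zig
const std = @import("std");
const expect = std.testing.expect;

// Shift right by `count` (0 < count < 64), ORing a sticky bit into bit 0 if
// any of the discarded bits were set.
fn shiftRightSticky(x: u64, count: u6) u64 {
    const discarded = x & ((@as(u64, 1) << count) - 1);
    const sticky: u64 = @boolToInt(discarded != 0);
    return (x >> count) | sticky;
}

test "sticky right shift remembers that bits were lost" {
    try expect(shiftRightSticky(0b1001, 2) == 0b11); // a set bit was dropped
    try expect(shiftRightSticky(0b1000, 2) == 0b10); // only zeros were dropped
}
```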

View File

@ -88,4 +88,18 @@ test "multf3" {
);
try test__multf3(0x1.23456734245345p-10000, 0x1.edcba524498724p-6497, 0x0, 0x0);
// Denormal operands.
try test__multf3(
0x0.0000000000000000000000000001p-16382,
0x1.p16383,
0x3f90000000000000,
0x0,
);
try test__multf3(
0x1.p16383,
0x0.0000000000000000000000000001p-16382,
0x3f90000000000000,
0x0,
);
}

View File

@ -68,12 +68,52 @@ pub fn _Qp_fge(a: *f128, b: *f128) callconv(.C) bool {
return cmp == @enumToInt(FCMP.Greater) or cmp == @enumToInt(FCMP.Equal);
}
// Casting
// Conversion
pub fn _Qp_itoq(c: *f128, a: i32) callconv(.C) void {
c.* = @import("floatsiXf.zig").__floatsitf(a);
}
pub fn _Qp_uitoq(c: *f128, a: u32) callconv(.C) void {
c.* = @import("floatunsitf.zig").__floatunsitf(a);
}
pub fn _Qp_xtoq(c: *f128, a: i64) callconv(.C) void {
c.* = @import("floatditf.zig").__floatditf(a);
}
pub fn _Qp_uxtoq(c: *f128, a: u64) callconv(.C) void {
c.* = @import("floatunditf.zig").__floatunditf(a);
}
pub fn _Qp_stoq(c: *f128, a: f32) callconv(.C) void {
c.* = @import("extendXfYf2.zig").__extendsftf2(a);
}
pub fn _Qp_dtoq(c: *f128, a: f64) callconv(.C) void {
c.* = @import("extendXfYf2.zig").__extenddftf2(a);
}
pub fn _Qp_qtoi(a: *f128) callconv(.C) i32 {
return @import("fixtfsi.zig").__fixtfsi(a.*);
}
pub fn _Qp_qtoui(a: *f128) callconv(.C) u32 {
return @import("fixunstfsi.zig").__fixunstfsi(a.*);
}
pub fn _Qp_qtox(a: *f128) callconv(.C) i64 {
return @import("fixtfdi.zig").__fixtfdi(a.*);
}
pub fn _Qp_qtoux(a: *f128) callconv(.C) u64 {
return @import("fixunstfdi.zig").__fixunstfdi(a.*);
}
pub fn _Qp_qtos(a: *f128) callconv(.C) f32 {
return @import("truncXfYf2.zig").__trunctfsf2(a.*);
}
pub fn _Qp_qtod(a: *f128) callconv(.C) f64 {
return @import("truncXfYf2.zig").__trunctfdf2(a.*);
}

View File

@ -46,7 +46,7 @@ comptime {
} else if (builtin.output_mode == .Exe or @hasDecl(root, "main")) {
if (builtin.link_libc and @hasDecl(root, "main")) {
if (@typeInfo(@TypeOf(root.main)).Fn.calling_convention != .C) {
@export(main, .{ .name = "main", .linkage = .Weak });
@export(main, .{ .name = "main" });
}
} else if (native_os == .windows) {
if (!@hasDecl(root, "WinMain") and !@hasDecl(root, "WinMainCRTStartup") and

View File

@ -431,6 +431,7 @@ pub const Target = struct {
pub const powerpc = @import("target/powerpc.zig");
pub const riscv = @import("target/riscv.zig");
pub const sparc = @import("target/sparc.zig");
pub const spirv = @import("target/spirv.zig");
pub const systemz = @import("target/systemz.zig");
pub const ve = @import("target/ve.zig");
pub const wasm = @import("target/wasm.zig");
@ -594,7 +595,7 @@ pub const Target = struct {
pub const Set = struct {
ints: [usize_count]usize,
pub const needed_bit_count = 172;
pub const needed_bit_count = 288;
pub const byte_count = (needed_bit_count + 7) / 8;
pub const usize_count = (byte_count + (@sizeOf(usize) - 1)) / @sizeOf(usize);
pub const Index = std.math.Log2Int(std.meta.Int(.unsigned, usize_count * @bitSizeOf(usize)));
@ -822,6 +823,13 @@ pub const Target = struct {
};
}
pub fn isSPIRV(arch: Arch) bool {
return switch (arch) {
.spirv32, .spirv64 => true,
else => false,
};
}
pub fn parseCpuModel(arch: Arch, cpu_name: []const u8) !*const Cpu.Model {
for (arch.allCpuModels()) |cpu| {
if (mem.eql(u8, cpu_name, cpu.name)) {
@ -1116,6 +1124,7 @@ pub const Target = struct {
.amdgcn => &amdgpu.all_features,
.riscv32, .riscv64 => &riscv.all_features,
.sparc, .sparcv9, .sparcel => &sparc.all_features,
.spirv32, .spirv64 => &spirv.all_features,
.s390x => &systemz.all_features,
.i386, .x86_64 => &x86.all_features,
.nvptx, .nvptx64 => &nvptx.all_features,
@ -1320,6 +1329,9 @@ pub const Target = struct {
if (cpu_arch.isWasm()) {
return .wasm;
}
if (cpu_arch.isSPIRV()) {
return .spirv;
}
return .elf;
}

lib/std/target/spirv.zig (new file, 2135 lines)
File diff suppressed because it is too large.

View File

@ -29,11 +29,11 @@ pub var zig_exe_path: []const u8 = undefined;
/// and then aborts when actual_error_union is not expected_error.
pub fn expectError(expected_error: anyerror, actual_error_union: anytype) !void {
if (actual_error_union) |actual_payload| {
std.debug.print("expected error.{s}, found {any}", .{ @errorName(expected_error), actual_payload });
std.debug.print("expected error.{s}, found {any}\n", .{ @errorName(expected_error), actual_payload });
return error.TestUnexpectedError;
} else |actual_error| {
if (expected_error != actual_error) {
std.debug.print("expected error.{s}, found error.{s}", .{
std.debug.print("expected error.{s}, found error.{s}\n", .{
@errorName(expected_error),
@errorName(actual_error),
});
@ -62,7 +62,7 @@ pub fn expectEqual(expected: anytype, actual: @TypeOf(expected)) !void {
.Type => {
if (actual != expected) {
std.debug.print("expected type {s}, found type {s}", .{ @typeName(expected), @typeName(actual) });
std.debug.print("expected type {s}, found type {s}\n", .{ @typeName(expected), @typeName(actual) });
return error.TestExpectedEqual;
}
},
@ -78,7 +78,7 @@ pub fn expectEqual(expected: anytype, actual: @TypeOf(expected)) !void {
.ErrorSet,
=> {
if (actual != expected) {
std.debug.print("expected {}, found {}", .{ expected, actual });
std.debug.print("expected {}, found {}\n", .{ expected, actual });
return error.TestExpectedEqual;
}
},
@ -87,17 +87,17 @@ pub fn expectEqual(expected: anytype, actual: @TypeOf(expected)) !void {
switch (pointer.size) {
.One, .Many, .C => {
if (actual != expected) {
std.debug.print("expected {*}, found {*}", .{ expected, actual });
std.debug.print("expected {*}, found {*}\n", .{ expected, actual });
return error.TestExpectedEqual;
}
},
.Slice => {
if (actual.ptr != expected.ptr) {
std.debug.print("expected slice ptr {*}, found {*}", .{ expected.ptr, actual.ptr });
std.debug.print("expected slice ptr {*}, found {*}\n", .{ expected.ptr, actual.ptr });
return error.TestExpectedEqual;
}
if (actual.len != expected.len) {
std.debug.print("expected slice len {}, found {}", .{ expected.len, actual.len });
std.debug.print("expected slice len {}, found {}\n", .{ expected.len, actual.len });
return error.TestExpectedEqual;
}
},
@ -110,7 +110,7 @@ pub fn expectEqual(expected: anytype, actual: @TypeOf(expected)) !void {
var i: usize = 0;
while (i < vectorType.len) : (i += 1) {
if (!std.meta.eql(expected[i], actual[i])) {
std.debug.print("index {} incorrect. expected {}, found {}", .{ i, expected[i], actual[i] });
std.debug.print("index {} incorrect. expected {}, found {}\n", .{ i, expected[i], actual[i] });
return error.TestExpectedEqual;
}
}
@ -153,12 +153,12 @@ pub fn expectEqual(expected: anytype, actual: @TypeOf(expected)) !void {
if (actual) |actual_payload| {
try expectEqual(expected_payload, actual_payload);
} else {
std.debug.print("expected {any}, found null", .{expected_payload});
std.debug.print("expected {any}, found null\n", .{expected_payload});
return error.TestExpectedEqual;
}
} else {
if (actual) |actual_payload| {
std.debug.print("expected null, found {any}", .{actual_payload});
std.debug.print("expected null, found {any}\n", .{actual_payload});
return error.TestExpectedEqual;
}
}
@ -169,12 +169,12 @@ pub fn expectEqual(expected: anytype, actual: @TypeOf(expected)) !void {
if (actual) |actual_payload| {
try expectEqual(expected_payload, actual_payload);
} else |actual_err| {
std.debug.print("expected {any}, found {}", .{ expected_payload, actual_err });
std.debug.print("expected {any}, found {}\n", .{ expected_payload, actual_err });
return error.TestExpectedEqual;
}
} else |expected_err| {
if (actual) |actual_payload| {
std.debug.print("expected {}, found {any}", .{ expected_err, actual_payload });
std.debug.print("expected {}, found {any}\n", .{ expected_err, actual_payload });
return error.TestExpectedEqual;
} else |actual_err| {
try expectEqual(expected_err, actual_err);
@ -225,7 +225,7 @@ pub fn expectApproxEqAbs(expected: anytype, actual: @TypeOf(expected), tolerance
switch (@typeInfo(T)) {
.Float => if (!math.approxEqAbs(T, expected, actual, tolerance)) {
std.debug.print("actual {}, not within absolute tolerance {} of expected {}", .{ actual, tolerance, expected });
std.debug.print("actual {}, not within absolute tolerance {} of expected {}\n", .{ actual, tolerance, expected });
return error.TestExpectedApproxEqAbs;
},
@ -257,7 +257,7 @@ pub fn expectApproxEqRel(expected: anytype, actual: @TypeOf(expected), tolerance
switch (@typeInfo(T)) {
.Float => if (!math.approxEqRel(T, expected, actual, tolerance)) {
std.debug.print("actual {}, not within relative tolerance {} of expected {}", .{ actual, tolerance, expected });
std.debug.print("actual {}, not within relative tolerance {} of expected {}\n", .{ actual, tolerance, expected });
return error.TestExpectedApproxEqRel;
},
@ -292,13 +292,13 @@ pub fn expectEqualSlices(comptime T: type, expected: []const T, actual: []const
// If the child type is u8 and no weird bytes, we could print it as strings
// Even for the length difference, it would be useful to see the values of the slices probably.
if (expected.len != actual.len) {
std.debug.print("slice lengths differ. expected {d}, found {d}", .{ expected.len, actual.len });
std.debug.print("slice lengths differ. expected {d}, found {d}\n", .{ expected.len, actual.len });
return error.TestExpectedEqual;
}
var i: usize = 0;
while (i < expected.len) : (i += 1) {
if (!std.meta.eql(expected[i], actual[i])) {
std.debug.print("index {} incorrect. expected {any}, found {any}", .{ i, expected[i], actual[i] });
std.debug.print("index {} incorrect. expected {any}, found {any}\n", .{ i, expected[i], actual[i] });
return error.TestExpectedEqual;
}
}

View File

@ -606,6 +606,7 @@ pub const CrossTarget = struct {
qemu: []const u8,
wine: []const u8,
wasmtime: []const u8,
darling: []const u8,
unavailable,
};
@ -667,6 +668,15 @@ pub const CrossTarget = struct {
32 => return Executor{ .wasmtime = "wasmtime" },
else => return .unavailable,
},
.macos => {
// TODO loosen this check once upstream adds QEMU-based emulation
// layer for non-host architectures:
// https://github.com/darlinghq/darling/issues/863
if (cpu_arch != Target.current.cpu.arch) {
return .unavailable;
}
return Executor{ .darling = "darling" };
},
else => return .unavailable,
}
}

View File

@ -1309,9 +1309,8 @@ const Parser = struct {
return expr;
}
/// Expr <- BoolOrExpr
fn parseExpr(p: *Parser) Error!Node.Index {
return p.parseBoolOrExpr();
return p.parseExprPrecedence(0);
}
fn expectExpr(p: *Parser) Error!Node.Index {
@ -1323,263 +1322,100 @@ const Parser = struct {
}
}
/// BoolOrExpr <- BoolAndExpr (KEYWORD_or BoolAndExpr)*
fn parseBoolOrExpr(p: *Parser) Error!Node.Index {
var res = try p.parseBoolAndExpr();
if (res == 0) return null_node;
const Assoc = enum {
left,
none,
};
while (true) {
switch (p.token_tags[p.tok_i]) {
.keyword_or => {
const or_token = p.nextToken();
const rhs = try p.parseBoolAndExpr();
if (rhs == 0) {
return p.fail(.invalid_token);
}
res = try p.addNode(.{
.tag = .bool_or,
.main_token = or_token,
.data = .{
.lhs = res,
.rhs = rhs,
},
});
},
else => return res,
}
const OperInfo = struct {
prec: i8,
tag: Node.Tag,
assoc: Assoc = Assoc.left,
};
// A table of binary operator information. Higher precedence numbers are
// stickier. All operators at the same precedence level should have the same
// associativity.
const operTable = std.enums.directEnumArrayDefault(Token.Tag, OperInfo, .{ .prec = -1, .tag = Node.Tag.root }, 0, .{
.keyword_or = .{ .prec = 10, .tag = .bool_or },
.keyword_and = .{ .prec = 20, .tag = .bool_and },
.invalid_ampersands = .{ .prec = 20, .tag = .bool_and },
.equal_equal = .{ .prec = 30, .tag = .equal_equal, .assoc = Assoc.none },
.bang_equal = .{ .prec = 30, .tag = .bang_equal, .assoc = Assoc.none },
.angle_bracket_left = .{ .prec = 30, .tag = .less_than, .assoc = Assoc.none },
.angle_bracket_right = .{ .prec = 30, .tag = .greater_than, .assoc = Assoc.none },
.angle_bracket_left_equal = .{ .prec = 30, .tag = .less_or_equal, .assoc = Assoc.none },
.angle_bracket_right_equal = .{ .prec = 30, .tag = .greater_or_equal, .assoc = Assoc.none },
.ampersand = .{ .prec = 40, .tag = .bit_and },
.caret = .{ .prec = 40, .tag = .bit_xor },
.pipe = .{ .prec = 40, .tag = .bit_or },
.keyword_orelse = .{ .prec = 40, .tag = .@"orelse" },
.keyword_catch = .{ .prec = 40, .tag = .@"catch" },
.angle_bracket_angle_bracket_left = .{ .prec = 50, .tag = .bit_shift_left },
.angle_bracket_angle_bracket_right = .{ .prec = 50, .tag = .bit_shift_right },
.plus = .{ .prec = 60, .tag = .add },
.minus = .{ .prec = 60, .tag = .sub },
.plus_plus = .{ .prec = 60, .tag = .array_cat },
.plus_percent = .{ .prec = 60, .tag = .add_wrap },
.minus_percent = .{ .prec = 60, .tag = .sub_wrap },
.pipe_pipe = .{ .prec = 70, .tag = .merge_error_sets },
.asterisk = .{ .prec = 70, .tag = .mul },
.slash = .{ .prec = 70, .tag = .div },
.percent = .{ .prec = 70, .tag = .mod },
.asterisk_asterisk = .{ .prec = 70, .tag = .array_mult },
.asterisk_percent = .{ .prec = 70, .tag = .mul_wrap },
});
fn parseExprPrecedence(p: *Parser, min_prec: i32) Error!Node.Index {
var node = try p.parsePrefixExpr();
if (node == 0) {
return null_node;
}
}
/// BoolAndExpr <- CompareExpr (KEYWORD_and CompareExpr)*
fn parseBoolAndExpr(p: *Parser) !Node.Index {
var res = try p.parseCompareExpr();
if (res == 0) return null_node;
var banned_prec: i8 = -1;
while (true) {
switch (p.token_tags[p.tok_i]) {
.keyword_and => {
const and_token = p.nextToken();
const rhs = try p.parseCompareExpr();
if (rhs == 0) {
return p.fail(.invalid_token);
}
res = try p.addNode(.{
.tag = .bool_and,
.main_token = and_token,
.data = .{
.lhs = res,
.rhs = rhs,
},
});
const tok_tag = p.token_tags[p.tok_i];
const info = operTable[@intCast(usize, @enumToInt(tok_tag))];
if (info.prec < min_prec or info.prec == banned_prec) {
break;
}
const oper_token = p.nextToken();
// Special-case handling for "catch" and "&&".
switch (tok_tag) {
.keyword_catch => {
_ = try p.parsePayload();
},
.invalid_ampersands => {
try p.warn(.invalid_and);
p.tok_i += 1;
return p.parseCompareExpr();
},
else => return res,
else => {},
}
const rhs = try p.parseExprPrecedence(info.prec + 1);
if (rhs == 0) {
return p.fail(.invalid_token);
}
node = try p.addNode(.{
.tag = info.tag,
.main_token = oper_token,
.data = .{
.lhs = node,
.rhs = rhs,
},
});
if (info.assoc == Assoc.none) {
banned_prec = info.prec;
}
}
}
/// CompareExpr <- BitwiseExpr (CompareOp BitwiseExpr)?
/// CompareOp
/// <- EQUALEQUAL
/// / EXCLAMATIONMARKEQUAL
/// / LARROW
/// / RARROW
/// / LARROWEQUAL
/// / RARROWEQUAL
fn parseCompareExpr(p: *Parser) !Node.Index {
const expr = try p.parseBitwiseExpr();
if (expr == 0) return null_node;
const tag: Node.Tag = switch (p.token_tags[p.tok_i]) {
.equal_equal => .equal_equal,
.bang_equal => .bang_equal,
.angle_bracket_left => .less_than,
.angle_bracket_right => .greater_than,
.angle_bracket_left_equal => .less_or_equal,
.angle_bracket_right_equal => .greater_or_equal,
else => return expr,
};
return p.addNode(.{
.tag = tag,
.main_token = p.nextToken(),
.data = .{
.lhs = expr,
.rhs = try p.expectBitwiseExpr(),
},
});
}
/// BitwiseExpr <- BitShiftExpr (BitwiseOp BitShiftExpr)*
/// BitwiseOp
/// <- AMPERSAND
/// / CARET
/// / PIPE
/// / KEYWORD_orelse
/// / KEYWORD_catch Payload?
fn parseBitwiseExpr(p: *Parser) !Node.Index {
var res = try p.parseBitShiftExpr();
if (res == 0) return null_node;
while (true) {
const tag: Node.Tag = switch (p.token_tags[p.tok_i]) {
.ampersand => .bit_and,
.caret => .bit_xor,
.pipe => .bit_or,
.keyword_orelse => .@"orelse",
.keyword_catch => {
const catch_token = p.nextToken();
_ = try p.parsePayload();
const rhs = try p.parseBitShiftExpr();
if (rhs == 0) {
return p.fail(.invalid_token);
}
res = try p.addNode(.{
.tag = .@"catch",
.main_token = catch_token,
.data = .{
.lhs = res,
.rhs = rhs,
},
});
continue;
},
else => return res,
};
res = try p.addNode(.{
.tag = tag,
.main_token = p.nextToken(),
.data = .{
.lhs = res,
.rhs = try p.expectBitShiftExpr(),
},
});
}
}
fn expectBitwiseExpr(p: *Parser) Error!Node.Index {
const node = try p.parseBitwiseExpr();
if (node == 0) {
return p.fail(.invalid_token);
} else {
return node;
}
}
/// BitShiftExpr <- AdditionExpr (BitShiftOp AdditionExpr)*
/// BitShiftOp
/// <- LARROW2
/// / RARROW2
fn parseBitShiftExpr(p: *Parser) Error!Node.Index {
var res = try p.parseAdditionExpr();
if (res == 0) return null_node;
while (true) {
const tag: Node.Tag = switch (p.token_tags[p.tok_i]) {
.angle_bracket_angle_bracket_left => .bit_shift_left,
.angle_bracket_angle_bracket_right => .bit_shift_right,
else => return res,
};
res = try p.addNode(.{
.tag = tag,
.main_token = p.nextToken(),
.data = .{
.lhs = res,
.rhs = try p.expectAdditionExpr(),
},
});
}
}
fn expectBitShiftExpr(p: *Parser) Error!Node.Index {
const node = try p.parseBitShiftExpr();
if (node == 0) {
return p.fail(.invalid_token);
} else {
return node;
}
}
/// AdditionExpr <- MultiplyExpr (AdditionOp MultiplyExpr)*
/// AdditionOp
/// <- PLUS
/// / MINUS
/// / PLUS2
/// / PLUSPERCENT
/// / MINUSPERCENT
fn parseAdditionExpr(p: *Parser) Error!Node.Index {
var res = try p.parseMultiplyExpr();
if (res == 0) return null_node;
while (true) {
const tag: Node.Tag = switch (p.token_tags[p.tok_i]) {
.plus => .add,
.minus => .sub,
.plus_plus => .array_cat,
.plus_percent => .add_wrap,
.minus_percent => .sub_wrap,
else => return res,
};
res = try p.addNode(.{
.tag = tag,
.main_token = p.nextToken(),
.data = .{
.lhs = res,
.rhs = try p.expectMultiplyExpr(),
},
});
}
}
fn expectAdditionExpr(p: *Parser) Error!Node.Index {
const node = try p.parseAdditionExpr();
if (node == 0) {
return p.fail(.invalid_token);
}
return node;
}
/// MultiplyExpr <- PrefixExpr (MultiplyOp PrefixExpr)*
/// MultiplyOp
/// <- PIPE2
/// / ASTERISK
/// / SLASH
/// / PERCENT
/// / ASTERISK2
/// / ASTERISKPERCENT
fn parseMultiplyExpr(p: *Parser) Error!Node.Index {
var res = try p.parsePrefixExpr();
if (res == 0) return null_node;
while (true) {
const tag: Node.Tag = switch (p.token_tags[p.tok_i]) {
.pipe_pipe => .merge_error_sets,
.asterisk => .mul,
.slash => .div,
.percent => .mod,
.asterisk_asterisk => .array_mult,
.asterisk_percent => .mul_wrap,
else => return res,
};
res = try p.addNode(.{
.tag = tag,
.main_token = p.nextToken(),
.data = .{
.lhs = res,
.rhs = try p.expectPrefixExpr(),
},
});
}
}
fn expectMultiplyExpr(p: *Parser) Error!Node.Index {
const node = try p.parseMultiplyExpr();
if (node == 0) {
return p.fail(.invalid_token);
}
return node;
}
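
As an aside, the new parseExprPrecedence above replaces the per-level recursive functions with precedence climbing driven by operTable, with banned_prec stopping non-associative operators from chaining. The names below are hypothetical; this trimmed-down evaluator (not from this commit) shows the same control flow:

```zig
const std = @import("std");
const expect = std.testing.expect;

const Op = enum { add, mul, eq };

fn prec(op: Op) i8 {
    return switch (op) {
        .eq => 30, // non-associative, like == in the table above
        .add => 60,
        .mul => 70, // higher numbers are stickier
    };
}

const Tok = union(enum) { num: i64, op: Op };

// Assumes a well-formed token stream that alternates numbers and operators.
fn evalPrecedence(toks: []const Tok, i: *usize, min_prec: i8) i64 {
    var lhs = toks[i.*].num;
    i.* += 1;
    var banned_prec: i8 = -1;
    while (i.* < toks.len) {
        const op = toks[i.*].op;
        const p = prec(op);
        if (p < min_prec or p == banned_prec) break;
        i.* += 1;
        const rhs = evalPrecedence(toks, i, p + 1);
        lhs = switch (op) {
            .add => lhs + rhs,
            .mul => lhs * rhs,
            .eq => @boolToInt(lhs == rhs),
        };
        // A non-associative operator bans its own level for the rest of the chain.
        if (op == .eq) banned_prec = p;
    }
    return lhs;
}

test "precedence climbing" {
    // 1 + 2 * 3 groups as 1 + (2 * 3) because mul is stickier than add.
    var i: usize = 0;
    const a = [_]Tok{ Tok{ .num = 1 }, Tok{ .op = .add }, Tok{ .num = 2 }, Tok{ .op = .mul }, Tok{ .num = 3 } };
    try expect(evalPrecedence(&a, &i, 0) == 7);

    // 1 == 1 == 2: the second == hits its banned precedence level and is left
    // unconsumed, which the real parser then reports as an error.
    i = 0;
    const b = [_]Tok{ Tok{ .num = 1 }, Tok{ .op = .eq }, Tok{ .num = 1 }, Tok{ .op = .eq }, Tok{ .num = 2 } };
    _ = evalPrecedence(&b, &i, 0);
    try expect(i == 3);
}
```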

View File

@ -2828,6 +2828,7 @@ test "zig fmt: precedence" {
\\ a or b and c;
\\ (a or b) and c;
\\ (a or b) and c;
\\ a == b and c == d;
\\}
\\
);
@ -4892,6 +4893,16 @@ test "recovery: missing comma" {
});
}
test "recovery: non-associative operators" {
try testError(
\\const x = a == b == c;
\\const x = a == b != c;
, &[_]Error{
.expected_token,
.expected_token,
});
}
test "recovery: extra qualifier" {
try testError(
\\const a: *const const u8;

View File

@ -638,6 +638,7 @@ pub const InitOptions = struct {
system_libs: []const []const u8 = &[0][]const u8{},
link_libc: bool = false,
link_libcpp: bool = false,
link_libunwind: bool = false,
want_pic: ?bool = null,
/// This means that if the output mode is an executable it will be a
/// Position Independent Executable. If the output mode is not an
@ -885,8 +886,13 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
};
const tsan = options.want_tsan orelse false;
// TSAN is implemented in C++ so it requires linking libc++.
const link_libcpp = options.link_libcpp or tsan;
const link_libc = link_libcpp or options.link_libc or
target_util.osRequiresLibC(options.target);
const link_libc = options.link_libc or target_util.osRequiresLibC(options.target) or tsan;
const link_libunwind = options.link_libunwind or
(link_libcpp and target_util.libcNeedsLibUnwind(options.target));
const must_dynamic_link = dl: {
if (target_util.cannotDynamicLink(options.target))
@ -972,9 +978,6 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
break :pic explicit;
} else pie or must_pic;
// TSAN is implemented in C++ so it requires linking libc++.
const link_libcpp = options.link_libcpp or tsan;
// Make a decision on whether to use Clang for translate-c and compiling C files.
const use_clang = if (options.use_clang) |explicit| explicit else blk: {
if (build_options.have_llvm) {
@ -1067,6 +1070,7 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
cache.hash.add(strip);
cache.hash.add(link_libc);
cache.hash.add(link_libcpp);
cache.hash.add(link_libunwind);
cache.hash.add(options.output_mode);
cache.hash.add(options.machine_code_model);
cache.hash.addOptionalEmitLoc(options.emit_bin);
@ -1262,6 +1266,7 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
.system_linker_hack = darwin_options.system_linker_hack,
.link_libc = link_libc,
.link_libcpp = link_libcpp,
.link_libunwind = link_libunwind,
.objects = options.link_objects,
.frameworks = options.frameworks,
.framework_dirs = options.framework_dirs,
@ -2943,6 +2948,10 @@ pub fn addCCArgs(
try argv.appendSlice(&[_][]const u8{ "-MD", "-MV", "-MF", p });
}
if (target_util.clangMightShellOutForAssembly(target)) {
try argv.append("-integrated-as");
}
if (target.os.tag == .freestanding) {
try argv.append("-ffreestanding");
}
@ -3139,7 +3148,7 @@ fn detectLibCIncludeDirs(
if (is_native_abi) {
const libc = try arena.create(LibCInstallation);
libc.* = try LibCInstallation.findNative(.{ .allocator = arena });
libc.* = try LibCInstallation.findNative(.{ .allocator = arena, .verbose = true });
return detectLibCFromLibCInstallation(arena, target, libc);
}
@ -3276,9 +3285,8 @@ fn wantBuildLibUnwindFromSource(comp: *Compilation) bool {
.Lib => comp.bin_file.options.link_mode == .Dynamic,
.Exe => true,
};
return comp.bin_file.options.link_libc and is_exe_or_dyn_lib and
comp.bin_file.options.object_format != .c and
target_util.libcNeedsLibUnwind(comp.getTarget());
return is_exe_or_dyn_lib and comp.bin_file.options.link_libunwind and
comp.bin_file.options.object_format != .c;
}
fn updateBuiltinZigFile(comp: *Compilation, mod: *Module) Allocator.Error!void {

View File

@ -905,6 +905,11 @@ pub const TypedefNameDecl = opaque {
extern fn ZigClangTypedefNameDecl_getLocation(*const TypedefNameDecl) SourceLocation;
};
pub const FileScopeAsmDecl = opaque {
pub const getAsmString = ZigClangFileScopeAsmDecl_getAsmString;
extern fn ZigClangFileScopeAsmDecl_getAsmString(*const FileScopeAsmDecl) *const StringLiteral;
};
pub const TypedefType = opaque {
pub const getDecl = ZigClangTypedefType_getDecl;
extern fn ZigClangTypedefType_getDecl(*const TypedefType) *const TypedefNameDecl;

View File

@ -960,7 +960,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
/// allocated. A second call to `copyToTmpRegister` may return the same register.
/// This can have a side effect of spilling instructions to the stack to free up a register.
fn copyToTmpRegister(self: *Self, src: LazySrcLoc, ty: Type, mcv: MCValue) !Register {
const reg = try self.register_manager.allocRegWithoutTracking(&.{});
const reg = try self.register_manager.allocReg(null, &.{});
try self.genSetReg(src, ty, reg, mcv);
return reg;
}
@ -2231,7 +2231,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
switch (mc_arg) {
.none => continue,
.register => |reg| {
try self.register_manager.getRegWithoutTracking(reg);
try self.register_manager.getReg(reg, null);
try self.genSetReg(arg.src, arg.ty, reg, arg_mcv);
},
.stack_offset => {
@ -2327,7 +2327,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.compare_flags_signed => unreachable,
.compare_flags_unsigned => unreachable,
.register => |reg| {
try self.register_manager.getRegWithoutTracking(reg);
try self.register_manager.getReg(reg, null);
try self.genSetReg(arg.src, arg.ty, reg, arg_mcv);
},
.stack_offset => {
@ -2390,7 +2390,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.compare_flags_signed => unreachable,
.compare_flags_unsigned => unreachable,
.register => |reg| {
try self.register_manager.getRegWithoutTracking(reg);
try self.register_manager.getReg(reg, null);
try self.genSetReg(arg.src, arg.ty, reg, arg_mcv);
},
.stack_offset => {
@ -2443,7 +2443,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.register => |reg| {
// TODO prevent this macho if block from being generated for all archs
switch (arch) {
.x86_64, .aarch64 => try self.register_manager.getRegWithoutTracking(reg),
.x86_64, .aarch64 => try self.register_manager.getReg(reg, null),
else => unreachable,
}
try self.genSetReg(arg.src, arg.ty, reg, arg_mcv);
@ -3134,7 +3134,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
const arg = inst.args[i];
const arg_mcv = try self.resolveInst(arg);
try self.register_manager.getRegWithoutTracking(reg);
try self.register_manager.getReg(reg, null);
try self.genSetReg(inst.base.src, arg.ty, reg, arg_mcv);
}
@ -3167,7 +3167,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
const arg = inst.args[i];
const arg_mcv = try self.resolveInst(arg);
try self.register_manager.getRegWithoutTracking(reg);
try self.register_manager.getReg(reg, null);
try self.genSetReg(inst.base.src, arg.ty, reg, arg_mcv);
}
@ -3202,7 +3202,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
const arg = inst.args[i];
const arg_mcv = try self.resolveInst(arg);
try self.register_manager.getRegWithoutTracking(reg);
try self.register_manager.getReg(reg, null);
try self.genSetReg(inst.base.src, arg.ty, reg, arg_mcv);
}
@ -3235,7 +3235,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
const arg = inst.args[i];
const arg_mcv = try self.resolveInst(arg);
try self.register_manager.getRegWithoutTracking(reg);
try self.register_manager.getReg(reg, null);
try self.genSetReg(inst.base.src, arg.ty, reg, arg_mcv);
}

View File

@ -1,9 +1,13 @@
const std = @import("std");
const Allocator = std.mem.Allocator;
const log = std.log.scoped(.codegen);
const spec = @import("spirv/spec.zig");
const Module = @import("../Module.zig");
const Decl = Module.Decl;
const Type = @import("../type.zig").Type;
pub const TypeMap = std.HashMap(Type, u32, Type.hash, Type.eql, std.hash_map.default_max_load_percentage);
pub fn writeInstruction(code: *std.ArrayList(u32), instr: spec.Opcode, args: []const u32) !void {
const word_count = @intCast(u32, args.len + 1);
@ -12,38 +16,89 @@ pub fn writeInstruction(code: *std.ArrayList(u32), instr: spec.Opcode, args: []c
}
pub const SPIRVModule = struct {
next_id: u32 = 0,
free_id_list: std.ArrayList(u32),
next_result_id: u32 = 0,
pub fn init(allocator: *Allocator) SPIRVModule {
target: std.Target,
types: TypeMap,
types_and_globals: std.ArrayList(u32),
fn_decls: std.ArrayList(u32),
pub fn init(target: std.Target, allocator: *Allocator) SPIRVModule {
return .{
.free_id_list = std.ArrayList(u32).init(allocator),
.target = target,
.types = TypeMap.init(allocator),
.types_and_globals = std.ArrayList(u32).init(allocator),
.fn_decls = std.ArrayList(u32).init(allocator),
};
}
pub fn deinit(self: *SPIRVModule) void {
self.free_id_list.deinit();
self.fn_decls.deinit();
self.types_and_globals.deinit();
self.types.deinit();
self.* = undefined;
}
pub fn allocId(self: *SPIRVModule) u32 {
if (self.free_id_list.popOrNull()) |id| return id;
defer self.next_id += 1;
return self.next_id;
pub fn allocResultId(self: *SPIRVModule) u32 {
defer self.next_result_id += 1;
return self.next_result_id;
}
pub fn freeId(self: *SPIRVModule, id: u32) void {
if (id + 1 == self.next_id) {
self.next_id -= 1;
} else {
// If no more memory to append the id to the free list, just ignore it.
self.free_id_list.append(id) catch {};
pub fn resultIdBound(self: *SPIRVModule) u32 {
return self.next_result_id;
}
pub fn getOrGenType(self: *SPIRVModule, t: Type) !u32 {
// We can't use getOrPut here so we can recursively generate types.
if (self.types.get(t)) |already_generated| {
return already_generated;
}
const result = self.allocResultId();
switch (t.zigTypeTag()) {
.Void => try writeInstruction(&self.types_and_globals, .OpTypeVoid, &[_]u32{ result }),
.Bool => try writeInstruction(&self.types_and_globals, .OpTypeBool, &[_]u32{ result }),
.Int => {
const int_info = t.intInfo(self.target);
try writeInstruction(&self.types_and_globals, .OpTypeInt, &[_]u32{
result,
int_info.bits,
switch (int_info.signedness) {
.unsigned => 0,
.signed => 1,
},
});
},
// TODO: Verify that floatBits() will be correct.
.Float => try writeInstruction(&self.types_and_globals, .OpTypeFloat, &[_]u32{ result, t.floatBits(self.target) }),
.Null,
.Undefined,
.EnumLiteral,
.ComptimeFloat,
.ComptimeInt,
.Type,
=> unreachable, // Must be const or comptime.
.BoundFn => unreachable, // this type will be deleted from the language.
else => return error.TODO,
}
try self.types.put(t, result);
return result;
}
pub fn gen(self: *SPIRVModule, decl: *Decl) !void {
switch (decl.ty.zigTypeTag()) {
.Fn => {
log.debug("Generating code for function '{s}'", .{ std.mem.spanZ(decl.name) });
_ = try self.getOrGenType(decl.ty.fnReturnType());
},
else => return error.TODO,
}
}
pub fn idBound(self: *SPIRVModule) u32 {
return self.next_id;
}
pub fn genDecl(self: SPIRVModule, id: u32, code: *std.ArrayList(u32), decl: *Decl) !void {}
};
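
For background on writeInstruction above (a hedged sketch, not taken from the commit): it presumably emits the standard SPIR-V physical encoding, where each instruction begins with a word holding the total word count in its high 16 bits and the opcode in its low 16 bits. A hypothetical helper showing just that header word:

```zig
const std = @import("std");
const expect = std.testing.expect;

// Hypothetical helper: the leading word of a SPIR-V instruction packs the
// word count (including this word) and the opcode.
fn instructionHeader(opcode: u16, operand_words: u32) u32 {
    const word_count = operand_words + 1;
    return (word_count << 16) | opcode;
}

test "SPIR-V instruction header word" {
    // OpTypeVoid (opcode 19) with a single result-id operand is 2 words long.
    try expect(instructionHeader(19, 1) == (2 << 16) | 19);
}
```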

View File

@ -1,26 +1,5 @@
// Copyright (c) 2014-2020 The Khronos Group Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and/or associated documentation files (the "Materials"),
// to deal in the Materials without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Materials, and to permit persons to whom the
// Materials are furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Materials.
//
// MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS
// STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND
// HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/
//
// THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS
// IN THE MATERIALS.
//! This file is auto-generated by tools/gen_spirv_spec.zig.
const Version = @import("std").builtin.Version;
pub const version = Version{ .major = 1, .minor = 5, .patch = 4 };
pub const magic_number: u32 = 0x07230203;
@ -443,8 +422,15 @@ pub const Opcode = extern enum(u16) {
OpUSubSatINTEL = 5596,
OpIMul32x16INTEL = 5597,
OpUMul32x16INTEL = 5598,
OpFunctionPointerINTEL = 5600,
OpConstFunctionPointerINTEL = 5600,
OpFunctionPointerCallINTEL = 5601,
OpAsmTargetINTEL = 5609,
OpAsmINTEL = 5610,
OpAsmCallINTEL = 5611,
OpAtomicFMinEXT = 5614,
OpAtomicFMaxEXT = 5615,
OpAssumeTrueKHR = 5630,
OpExpectKHR = 5631,
OpDecorateString = 5632,
OpDecorateStringGOOGLE = 5632,
OpMemberDecorateString = 5633,
@ -567,7 +553,12 @@ pub const Opcode = extern enum(u16) {
OpSubgroupAvcSicGetPackedSkcLumaCountThresholdINTEL = 5814,
OpSubgroupAvcSicGetPackedSkcLumaSumThresholdINTEL = 5815,
OpSubgroupAvcSicGetInterRawSadsINTEL = 5816,
OpVariableLengthArrayINTEL = 5818,
OpSaveMemoryINTEL = 5819,
OpRestoreMemoryINTEL = 5820,
OpLoopControlINTEL = 5887,
OpPtrCastToCrossWorkgroupINTEL = 5934,
OpCrossWorkgroupCastToPtrINTEL = 5938,
OpReadPipeBlockingINTEL = 5946,
OpWritePipeBlockingINTEL = 5947,
OpFPGARegINTEL = 5949,
@ -589,6 +580,10 @@ pub const Opcode = extern enum(u16) {
OpRayQueryGetIntersectionObjectToWorldKHR = 6031,
OpRayQueryGetIntersectionWorldToObjectKHR = 6032,
OpAtomicFAddEXT = 6035,
OpTypeBufferSurfaceINTEL = 6086,
OpTypeStructContinuedINTEL = 6090,
OpConstantCompositeContinuedINTEL = 6091,
OpSpecConstantCompositeContinuedINTEL = 6092,
_,
};
pub const ImageOperands = packed struct {
@ -642,8 +637,8 @@ pub const FPFastMathMode = packed struct {
_reserved_bit_13: bool = false,
_reserved_bit_14: bool = false,
_reserved_bit_15: bool = false,
_reserved_bit_16: bool = false,
_reserved_bit_17: bool = false,
AllowContractFastINTEL: bool = false,
AllowReassocINTEL: bool = false,
_reserved_bit_18: bool = false,
_reserved_bit_19: bool = false,
_reserved_bit_20: bool = false,
@ -717,7 +712,7 @@ pub const LoopControl = packed struct {
LoopCoalesceINTEL: bool = false,
MaxInterleavingINTEL: bool = false,
SpeculatedIterationsINTEL: bool = false,
_reserved_bit_23: bool = false,
NoFusionINTEL: bool = false,
_reserved_bit_24: bool = false,
_reserved_bit_25: bool = false,
_reserved_bit_26: bool = false,
@ -1037,10 +1032,16 @@ pub const ExecutionMode = extern enum(u32) {
SampleInterlockUnorderedEXT = 5369,
ShadingRateInterlockOrderedEXT = 5370,
ShadingRateInterlockUnorderedEXT = 5371,
SharedLocalMemorySizeINTEL = 5618,
RoundingModeRTPINTEL = 5620,
RoundingModeRTNINTEL = 5621,
FloatingPointModeALTINTEL = 5622,
FloatingPointModeIEEEINTEL = 5623,
MaxWorkgroupSizeINTEL = 5893,
MaxWorkDimINTEL = 5894,
NoGlobalOffsetINTEL = 5895,
NumSIMDWorkitemsINTEL = 5896,
SchedulerTargetFmaxMhzINTEL = 5903,
_,
};
pub const StorageClass = extern enum(u32) {
@ -1072,6 +1073,8 @@ pub const StorageClass = extern enum(u32) {
PhysicalStorageBuffer = 5349,
PhysicalStorageBufferEXT = 5349,
CodeSectionINTEL = 5605,
DeviceOnlyINTEL = 5936,
HostOnlyINTEL = 5937,
_,
};
pub const Dim = extern enum(u32) {
@ -1192,9 +1195,20 @@ pub const FPRoundingMode = extern enum(u32) {
RTN = 3,
_,
};
pub const FPDenormMode = extern enum(u32) {
Preserve = 0,
FlushToZero = 1,
_,
};
pub const FPOperationMode = extern enum(u32) {
IEEE = 0,
ALT = 1,
_,
};
pub const LinkageType = extern enum(u32) {
Export = 0,
Import = 1,
LinkOnceODR = 2,
_,
};
pub const AccessQualifier = extern enum(u32) {
@ -1279,12 +1293,22 @@ pub const Decoration = extern enum(u32) {
RestrictPointerEXT = 5355,
AliasedPointer = 5356,
AliasedPointerEXT = 5356,
SIMTCallINTEL = 5599,
ReferencedIndirectlyINTEL = 5602,
ClobberINTEL = 5607,
SideEffectsINTEL = 5608,
VectorComputeVariableINTEL = 5624,
FuncParamIOKindINTEL = 5625,
VectorComputeFunctionINTEL = 5626,
StackCallINTEL = 5627,
GlobalVariableOffsetINTEL = 5628,
CounterBuffer = 5634,
HlslCounterBufferGOOGLE = 5634,
UserSemantic = 5635,
HlslSemanticGOOGLE = 5635,
UserTypeGOOGLE = 5636,
FunctionRoundingModeINTEL = 5822,
FunctionDenormModeINTEL = 5823,
RegisterINTEL = 5825,
MemoryINTEL = 5826,
NumbanksINTEL = 5827,
@ -1297,6 +1321,17 @@ pub const Decoration = extern enum(u32) {
MergeINTEL = 5834,
BankBitsINTEL = 5835,
ForcePow2DepthINTEL = 5836,
BurstCoalesceINTEL = 5899,
CacheSizeINTEL = 5900,
DontStaticallyCoalesceINTEL = 5901,
PrefetchINTEL = 5902,
StallEnableINTEL = 5905,
FuseLoopsInFunctionINTEL = 5907,
BufferLocationINTEL = 5921,
IOPipeStorageINTEL = 5944,
FunctionFloatingPointModeINTEL = 6080,
SingleElementVectorINTEL = 6085,
VectorComputeCallableFunctionINTEL = 6087,
_,
};
pub const BuiltIn = extern enum(u32) {
@ -1342,14 +1377,14 @@ pub const BuiltIn = extern enum(u32) {
VertexIndex = 42,
InstanceIndex = 43,
SubgroupEqMask = 4416,
SubgroupGeMask = 4417,
SubgroupGtMask = 4418,
SubgroupLeMask = 4419,
SubgroupLtMask = 4420,
SubgroupEqMaskKHR = 4416,
SubgroupGeMask = 4417,
SubgroupGeMaskKHR = 4417,
SubgroupGtMask = 4418,
SubgroupGtMaskKHR = 4418,
SubgroupLeMask = 4419,
SubgroupLeMaskKHR = 4419,
SubgroupLtMask = 4420,
SubgroupLtMaskKHR = 4420,
BaseVertex = 4424,
BaseInstance = 4425,
@ -1520,6 +1555,9 @@ pub const Capability = extern enum(u32) {
FragmentShadingRateKHR = 4422,
SubgroupBallotKHR = 4423,
DrawParameters = 4427,
WorkgroupMemoryExplicitLayoutKHR = 4428,
WorkgroupMemoryExplicitLayout8BitAccessKHR = 4429,
WorkgroupMemoryExplicitLayout16BitAccessKHR = 4430,
SubgroupVoteKHR = 4431,
StorageBuffer16BitAccess = 4433,
StorageUniformBufferBlock16 = 4433,
@ -1610,21 +1648,41 @@ pub const Capability = extern enum(u32) {
SubgroupBufferBlockIOINTEL = 5569,
SubgroupImageBlockIOINTEL = 5570,
SubgroupImageMediaBlockIOINTEL = 5579,
RoundToInfinityINTEL = 5582,
FloatingPointModeINTEL = 5583,
IntegerFunctions2INTEL = 5584,
FunctionPointersINTEL = 5603,
IndirectReferencesINTEL = 5604,
AsmINTEL = 5606,
AtomicFloat32MinMaxEXT = 5612,
AtomicFloat64MinMaxEXT = 5613,
AtomicFloat16MinMaxEXT = 5616,
VectorComputeINTEL = 5617,
VectorAnyINTEL = 5619,
ExpectAssumeKHR = 5629,
SubgroupAvcMotionEstimationINTEL = 5696,
SubgroupAvcMotionEstimationIntraINTEL = 5697,
SubgroupAvcMotionEstimationChromaINTEL = 5698,
VariableLengthArrayINTEL = 5817,
FunctionFloatControlINTEL = 5821,
FPGAMemoryAttributesINTEL = 5824,
FPFastMathModeINTEL = 5837,
ArbitraryPrecisionIntegersINTEL = 5844,
UnstructuredLoopControlsINTEL = 5886,
FPGALoopControlsINTEL = 5888,
KernelAttributesINTEL = 5892,
FPGAKernelAttributesINTEL = 5897,
FPGAMemoryAccessesINTEL = 5898,
FPGAClusterAttributesINTEL = 5904,
LoopFuseINTEL = 5906,
FPGABufferLocationINTEL = 5920,
USMStorageClassesINTEL = 5935,
IOPipesINTEL = 5943,
BlockingPipesINTEL = 5945,
FPGARegINTEL = 5948,
AtomicFloat32AddEXT = 6033,
AtomicFloat64AddEXT = 6034,
LongConstantCompositeINTEL = 6089,
_,
};
pub const RayQueryIntersection = extern enum(u32) {

View File

@ -446,10 +446,14 @@ fn start_asm_path(comp: *Compilation, arena: *Allocator, basename: []const u8) !
try result.appendSlice(comp.zig_lib_directory.path.?);
try result.appendSlice(s ++ "libc" ++ s ++ "glibc" ++ s ++ "sysdeps" ++ s);
if (is_sparc) {
if (is_64) {
try result.appendSlice("sparc" ++ s ++ "sparc64");
if (mem.eql(u8, basename, "crti.S") or mem.eql(u8, basename, "crtn.S")) {
try result.appendSlice("sparc");
} else {
try result.appendSlice("sparc" ++ s ++ "sparc32");
if (is_64) {
try result.appendSlice("sparc" ++ s ++ "sparc64");
} else {
try result.appendSlice("sparc" ++ s ++ "sparc32");
}
}
} else if (arch.isARM()) {
try result.appendSlice("arm");

View File

@ -63,6 +63,7 @@ pub const Options = struct {
system_linker_hack: bool,
link_libc: bool,
link_libcpp: bool,
link_libunwind: bool,
function_sections: bool,
eh_frame_hdr: bool,
emit_relocs: bool,

View File

@ -1645,6 +1645,11 @@ fn linkWithLLD(self: *Elf, comp: *Compilation) !void {
try argv.append(comp.libcxx_static_lib.?.full_object_path);
}
// libunwind dep
if (self.base.options.link_libunwind) {
try argv.append(comp.libunwind_static_lib.?.full_object_path);
}
// libc dep
if (self.base.options.link_libc) {
if (self.base.options.libc_installation != null) {
@ -1653,18 +1658,9 @@ fn linkWithLLD(self: *Elf, comp: *Compilation) !void {
}
const needs_grouping = self.base.options.link_mode == .Static;
if (needs_grouping) try argv.append("--start-group");
// This matches the order of glibc.libs
try argv.appendSlice(&[_][]const u8{
"-lm",
"-lpthread",
"-lc",
"-ldl",
"-lrt",
"-lutil",
});
try argv.appendSlice(target_util.libcFullLinkFlags(target));
if (needs_grouping) try argv.append("--end-group");
} else if (target.isGnuLibC()) {
try argv.append(comp.libunwind_static_lib.?.full_object_path);
for (glibc.libs) |lib| {
const lib_path = try std.fmt.allocPrint(arena, "{s}{c}lib{s}.so.{d}", .{
comp.glibc_so_files.?.dir_path, fs.path.sep, lib.name, lib.sover,
@ -1673,13 +1669,10 @@ fn linkWithLLD(self: *Elf, comp: *Compilation) !void {
}
try argv.append(try comp.get_libc_crt_file(arena, "libc_nonshared.a"));
} else if (target.isMusl()) {
try argv.append(comp.libunwind_static_lib.?.full_object_path);
try argv.append(try comp.get_libc_crt_file(arena, switch (self.base.options.link_mode) {
.Static => "libc.a",
.Dynamic => "libc.so",
}));
} else if (self.base.options.link_libcpp) {
try argv.append(comp.libunwind_static_lib.?.full_object_path);
} else {
unreachable; // Compiler was supposed to emit an error for not being able to provide libc.
}

View File

@ -363,18 +363,30 @@ pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Optio
self.base.file = file;
// Create dSYM bundle.
const d_sym_path = try fmt.allocPrint(allocator, "{s}.dSYM/Contents/Resources/DWARF/", .{sub_path});
defer allocator.free(d_sym_path);
var d_sym_bundle = try options.emit.?.directory.handle.makeOpenPath(d_sym_path, .{});
defer d_sym_bundle.close();
const d_sym_file = try d_sym_bundle.createFile(sub_path, .{
.truncate = false,
.read = true,
});
self.d_sym = .{
.base = self,
.file = d_sym_file,
};
if (!options.strip and options.module != null) {
const dir = options.module.?.zig_cache_artifact_directory;
log.debug("creating {s}.dSYM bundle in {s}", .{ sub_path, dir.path });
const d_sym_path = try fmt.allocPrint(
allocator,
"{s}.dSYM" ++ fs.path.sep_str ++ "Contents" ++ fs.path.sep_str ++ "Resources" ++ fs.path.sep_str ++ "DWARF",
.{sub_path},
);
defer allocator.free(d_sym_path);
var d_sym_bundle = try dir.handle.makeOpenPath(d_sym_path, .{});
defer d_sym_bundle.close();
const d_sym_file = try d_sym_bundle.createFile(sub_path, .{
.truncate = false,
.read = true,
});
self.d_sym = .{
.base = self,
.file = d_sym_file,
};
}
// Index 0 is always a null symbol.
try self.locals.append(allocator, .{
@ -1198,7 +1210,9 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void {
const need_realloc = code.len > capacity or !mem.isAlignedGeneric(u64, symbol.n_value, required_alignment);
if (need_realloc) {
const vaddr = try self.growTextBlock(&decl.link.macho, code.len, required_alignment);
log.debug("growing {s} from 0x{x} to 0x{x}", .{ decl.name, symbol.n_value, vaddr });
log.debug("growing {s} and moving from 0x{x} to 0x{x}", .{ decl.name, symbol.n_value, vaddr });
if (vaddr != symbol.n_value) {
log.debug(" (writing new offset table entry)", .{});
self.offset_table.items[decl.link.macho.offset_table_index] = .{
@ -1208,6 +1222,8 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void {
};
try self.writeOffsetTableEntry(decl.link.macho.offset_table_index);
}
symbol.n_value = vaddr;
} else if (code.len < decl.link.macho.size) {
self.shrinkTextBlock(&decl.link.macho, code.len);
}
@ -1224,7 +1240,9 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void {
const decl_name = mem.spanZ(decl.name);
const name_str_index = try self.makeString(decl_name);
const addr = try self.allocateTextBlock(&decl.link.macho, code.len, required_alignment);
log.debug("allocated text block for {s} at 0x{x}", .{ decl_name, addr });
errdefer self.freeTextBlock(&decl.link.macho);
symbol.* = .{
@ -1368,15 +1386,32 @@ pub fn updateDeclExports(
continue;
}
}
const n_desc = switch (exp.options.linkage) {
.Internal => macho.REFERENCE_FLAG_PRIVATE_DEFINED,
.Strong => blk: {
if (mem.eql(u8, exp.options.name, "_start")) {
var n_type: u8 = macho.N_SECT | macho.N_EXT;
var n_desc: u16 = 0;
switch (exp.options.linkage) {
.Internal => {
// Symbol should be hidden, or in MachO lingo, private extern.
// We should also mark the symbol as Weak: n_desc == N_WEAK_DEF.
// TODO work out when to add N_WEAK_REF.
n_type |= macho.N_PEXT;
n_desc |= macho.N_WEAK_DEF;
},
.Strong => {
// Check if the export is _main, and note it if so.
// Otherwise, don't do anything since we already have all the flags
// set that we need for global (strong) linkage.
// n_type == N_SECT | N_EXT
if (mem.eql(u8, exp.options.name, "_main")) {
self.entry_addr = decl_sym.n_value;
}
break :blk macho.REFERENCE_FLAG_DEFINED;
},
.Weak => macho.N_WEAK_REF,
.Weak => {
// Weak linkage is specified as part of n_desc field.
// Symbol's n_type is like for a symbol with strong linkage.
n_desc |= macho.N_WEAK_DEF;
},
.LinkOnce => {
try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.items().len + 1);
module.failed_exports.putAssumeCapacityNoClobber(
@ -1385,8 +1420,8 @@ pub fn updateDeclExports(
);
continue;
},
};
const n_type = decl_sym.n_type | macho.N_EXT;
}
if (exp.link.macho.sym_index) |i| {
const sym = &self.globals.items[i];
sym.* = .{

View File

@ -16,11 +16,16 @@
//! All function declarations without a body (extern functions presumably).
//! All regular functions.
// Because SPIR-V requires re-compilation anyway, hot swapping will not work;
// we simply generate all the code in flushModule. This keeps
// things considerably simpler.
const SpirV = @This();
const std = @import("std");
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const log = std.log.scoped(.link);
const Module = @import("../Module.zig");
const Compilation = @import("../Compilation.zig");
@ -30,16 +35,15 @@ const trace = @import("../tracy.zig").trace;
const build_options = @import("build_options");
const spec = @import("../codegen/spirv/spec.zig");
// TODO: Should this struct be used at all rather than just a hashmap of aux data for every decl?
pub const FnData = struct {
id: ?u32 = null,
code: std.ArrayListUnmanaged(u32) = .{},
// We're going to fill this in flushModule, unconditionally, so just set it
// to undefined.
id: u32 = undefined,
};
base: link.File,
/// TODO: Does this file need to support multiple independent modules?
spirv_module: codegen.SPIRVModule,
/// This linker backend does not try to incrementally link output SPIR-V code.
/// Instead, it tracks all declarations in this table, and iterates over it
/// in the flush function.
@ -54,7 +58,6 @@ pub fn createEmpty(gpa: *Allocator, options: link.Options) !*SpirV {
.file = null,
.allocator = gpa,
},
.spirv_module = codegen.SPIRVModule.init(gpa),
};
// TODO: Figure out where to put all of these
@ -94,29 +97,11 @@ pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Optio
pub fn deinit(self: *SpirV) void {
self.decl_table.deinit(self.base.allocator);
self.spirv_module.deinit();
}
pub fn updateDecl(self: *SpirV, module: *Module, decl: *Module.Decl) !void {
const tracy = trace(@src());
defer tracy.end();
// Keep track of all decls so we can iterate over them on flush().
_ = try self.decl_table.getOrPut(self.base.allocator, decl);
const fn_data = &decl.fn_link.spirv;
if (fn_data.id == null) {
fn_data.id = self.spirv_module.allocId();
}
var managed_code = fn_data.code.toManaged(self.base.allocator);
managed_code.items.len = 0;
try self.spirv_module.genDecl(fn_data.id.?, &managed_code, decl);
fn_data.code = managed_code.toUnmanaged();
// Free excess allocated memory for this Decl.
fn_data.code.shrinkAndFree(self.base.allocator, fn_data.code.items.len);
}
pub fn updateDeclExports(
@ -128,10 +113,6 @@ pub fn updateDeclExports(
pub fn freeDecl(self: *SpirV, decl: *Module.Decl) void {
self.decl_table.removeAssertDiscard(decl);
var fn_data = decl.fn_link.spirv;
fn_data.code.deinit(self.base.allocator);
if (fn_data.id) |id| self.spirv_module.freeId(id);
decl.fn_link.spirv = undefined;
}
pub fn flush(self: *SpirV, comp: *Compilation) !void {
@ -149,51 +130,67 @@ pub fn flushModule(self: *SpirV, comp: *Compilation) !void {
const module = self.base.options.module.?;
const target = comp.getTarget();
var spirv_module = codegen.SPIRVModule.init(target, self.base.allocator);
defer spirv_module.deinit();
// Allocate an ID for every declaration before generating code,
// so that we can access them before processing them.
// TODO: We're allocating an ID unconditionally now; are there
// declarations which don't generate a result?
// TODO: fn_link is used here, but that's probably not the right field. It will work anyway though.
{
for (self.decl_table.items()) |entry| {
const decl = entry.key;
if (!decl.has_tv) continue;
decl.fn_link.spirv.id = spirv_module.allocResultId();
log.debug("Allocating id {} to '{s}'", .{ decl.fn_link.spirv.id, std.mem.spanZ(decl.name) });
}
}
// Now, actually generate the code for all declarations.
{
for (self.decl_table.items()) |entry| {
const decl = entry.key;
if (!decl.has_tv) continue;
try spirv_module.gen(decl);
}
}
var binary = std.ArrayList(u32).init(self.base.allocator);
defer binary.deinit();
// Note: The order of adding sections to the final binary
// follows the SPIR-V logical module format!
try binary.appendSlice(&[_]u32{
spec.magic_number,
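// Version word: major version in bits 16-23, minor version in bits 8-15; the remaining bytes are zero.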
(spec.version.major << 16) | (spec.version.minor << 8),
0, // TODO: Register Zig compiler magic number.
self.spirv_module.idBound(),
spirv_module.resultIdBound(), // ID bound.
0, // Schema (currently reserved for future use in the SPIR-V spec).
});
try writeCapabilities(&binary, target);
try writeMemoryModel(&binary, target);
// Collect list of buffers to write.
// SPIR-V files support both little- and big-endian words. The actual format is
// disambiguated by the magic number, so theoretically we don't need to worry
// about endianness when writing the final binary.
var all_buffers = std.ArrayList(std.os.iovec_const).init(self.base.allocator);
defer all_buffers.deinit();
// Note: The order of adding sections to the final binary
// follows the SPIR-V logical module format!
var all_buffers = [_]std.os.iovec_const{
wordsToIovConst(binary.items),
wordsToIovConst(spirv_module.types_and_globals.items),
wordsToIovConst(spirv_module.fn_decls.items),
};
// Pre-allocate enough for the binary info + all functions
try all_buffers.ensureCapacity(self.decl_table.count() + 1);
all_buffers.appendAssumeCapacity(wordsToIovConst(binary.items));
for (self.decl_table.items()) |entry| {
const decl = entry.key;
if (!decl.has_tv) continue;
const fn_data = &decl.fn_link.spirv;
all_buffers.appendAssumeCapacity(wordsToIovConst(fn_data.code.items));
}
const file = self.base.file.?;
const bytes = std.mem.sliceAsBytes(binary.items);
var file_size: u64 = 0;
for (all_buffers.items) |iov| {
for (all_buffers) |iov| {
file_size += iov.iov_len;
}
const file = self.base.file.?;
try file.seekTo(0);
try file.setEndPos(file_size);
try file.pwritevAll(all_buffers.items, 0);
try file.pwritevAll(&all_buffers, 0);
}
fn writeCapabilities(binary: *std.ArrayList(u32), target: std.Target) !void {

View File

@ -544,6 +544,7 @@ fn buildOutputType(
var ensure_libcpp_on_non_freestanding = false;
var link_libc = false;
var link_libcpp = false;
var link_libunwind = false;
var want_native_include_dirs = false;
var enable_cache: ?bool = null;
var want_pic: ?bool = null;
@ -1556,6 +1557,11 @@ fn buildOutputType(
_ = system_libs.orderedRemove(i);
continue;
}
if (mem.eql(u8, lib_name, "unwind")) {
link_libunwind = true;
_ = system_libs.orderedRemove(i);
continue;
}
if (std.fs.path.isAbsolute(lib_name)) {
fatal("cannot use absolute path as a system library: {s}", .{lib_name});
}
@ -1871,6 +1877,7 @@ fn buildOutputType(
.system_libs = system_libs.items,
.link_libc = link_libc,
.link_libcpp = link_libcpp,
.link_libunwind = link_libunwind,
.want_pic = want_pic,
.want_pie = want_pie,
.want_lto = want_lto,

View File

@ -7,6 +7,9 @@ const ir = @import("ir.zig");
const Type = @import("type.zig").Type;
const Module = @import("Module.zig");
const LazySrcLoc = Module.LazySrcLoc;
const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;
const expectEqualSlices = std.testing.expectEqualSlices;
const log = std.log.scoped(.register_manager);
@ -66,77 +69,14 @@ pub fn RegisterManager(
return self.allocated_registers & @as(FreeRegInt, 1) << shift != 0;
}
/// Returns `null` if all registers are allocated.
/// Allocates a specified number of registers, optionally
/// tracking them. Returns `null` if not enough registers are
/// free.
pub fn tryAllocRegs(
self: *Self,
comptime count: comptime_int,
insts: [count]*ir.Inst,
exceptions: []Register,
) ?[count]Register {
if (self.tryAllocRegsWithoutTracking(count, exceptions)) |regs| {
for (regs) |reg, i| {
const index = reg.allocIndex().?; // allocIndex() on a callee-preserved reg should never return null
self.registers[index] = insts[i];
self.markRegUsed(reg);
}
return regs;
} else {
return null;
}
}
/// Returns `null` if all registers are allocated.
pub fn tryAllocReg(self: *Self, inst: *ir.Inst, exceptions: []Register) ?Register {
return if (tryAllocRegs(self, 1, .{inst}, exceptions)) |regs| regs[0] else null;
}
pub fn allocRegs(
self: *Self,
comptime count: comptime_int,
insts: [count]*ir.Inst,
exceptions: []Register,
) ![count]Register {
comptime assert(count > 0 and count <= callee_preserved_regs.len);
assert(count + exceptions.len <= callee_preserved_regs.len);
return self.tryAllocRegs(count, insts, exceptions) orelse blk: {
// We'll take over the first count registers. Spill
// the instructions that were previously there to
// stack allocations.
var regs: [count]Register = undefined;
var i: usize = 0;
for (callee_preserved_regs) |reg| {
if (i >= count) break;
if (mem.indexOfScalar(Register, exceptions, reg) != null) continue;
regs[i] = reg;
const index = reg.allocIndex().?; // allocIndex() on a callee-preserved reg should never return null
if (self.isRegFree(reg)) {
self.markRegUsed(reg);
} else {
const spilled_inst = self.registers[index].?;
try self.getFunction().spillInstruction(spilled_inst.src, reg, spilled_inst);
}
self.registers[index] = insts[i];
i += 1;
}
break :blk regs;
};
}
pub fn allocReg(self: *Self, inst: *ir.Inst, exceptions: []Register) !Register {
return (try self.allocRegs(1, .{inst}, exceptions))[0];
}
/// Does not track the registers.
/// Returns `null` if not enough registers are free.
pub fn tryAllocRegsWithoutTracking(
self: *Self,
comptime count: comptime_int,
exceptions: []Register,
insts: [count]?*ir.Inst,
exceptions: []const Register,
) ?[count]Register {
comptime if (callee_preserved_regs.len == 0) return null;
comptime assert(count > 0 and count <= callee_preserved_regs.len);
@ -156,18 +96,40 @@ pub fn RegisterManager(
}
}
return if (i < count) null else regs;
if (i == count) {
for (regs) |reg, j| {
if (insts[j]) |inst| {
// Track the register
const index = reg.allocIndex().?; // allocIndex() on a callee-preserved reg should never return null
self.registers[index] = inst;
self.markRegUsed(reg);
}
}
return regs;
} else return null;
}
/// Does not track the register.
/// Returns `null` if all registers are allocated.
pub fn tryAllocRegWithoutTracking(self: *Self, exceptions: []Register) ?Register {
return if (self.tryAllocRegsWithoutTracking(1, exceptions)) |regs| regs[0] else null;
/// Allocates a register and optionally tracks it with a
/// corresponding instruction. Returns `null` if all registers
/// are allocated.
pub fn tryAllocReg(self: *Self, inst: ?*ir.Inst, exceptions: []const Register) ?Register {
return if (tryAllocRegs(self, 1, .{inst}, exceptions)) |regs| regs[0] else null;
}
/// Does not track the registers
pub fn allocRegsWithoutTracking(self: *Self, comptime count: comptime_int, exceptions: []Register) ![count]Register {
return self.tryAllocRegsWithoutTracking(count, exceptions) orelse blk: {
/// Allocates a specified number of registers, optionally
/// tracking them. Asserts that count + exceptions.len is not
/// larger than the total number of registers available.
pub fn allocRegs(
self: *Self,
comptime count: comptime_int,
insts: [count]?*ir.Inst,
exceptions: []const Register,
) ![count]Register {
comptime assert(count > 0 and count <= callee_preserved_regs.len);
assert(count + exceptions.len <= callee_preserved_regs.len);
return self.tryAllocRegs(count, insts, exceptions) orelse blk: {
// We'll take over the first count registers. Spill
// the instructions that were previously there to
// stack allocations.
@ -179,11 +141,22 @@ pub fn RegisterManager(
regs[i] = reg;
const index = reg.allocIndex().?; // allocIndex() on a callee-preserved reg should never return null
if (!self.isRegFree(reg)) {
const spilled_inst = self.registers[index].?;
try self.getFunction().spillInstruction(spilled_inst.src, reg, spilled_inst);
self.registers[index] = null;
self.markRegFree(reg);
if (insts[i]) |inst| {
// Track the register
if (self.isRegFree(reg)) {
self.markRegUsed(reg);
} else {
const spilled_inst = self.registers[index].?;
try self.getFunction().spillInstruction(spilled_inst.src, reg, spilled_inst);
}
self.registers[index] = inst;
} else {
// Don't track the register
if (!self.isRegFree(reg)) {
const spilled_inst = self.registers[index].?;
try self.getFunction().spillInstruction(spilled_inst.src, reg, spilled_inst);
self.freeReg(reg);
}
}
i += 1;
@ -193,39 +166,36 @@ pub fn RegisterManager(
};
}
/// Does not track the register.
pub fn allocRegWithoutTracking(self: *Self, exceptions: []Register) !Register {
return (try self.allocRegsWithoutTracking(1, exceptions))[0];
/// Allocates a register and optionally tracks it with a
/// corresponding instruction.
pub fn allocReg(self: *Self, inst: ?*ir.Inst, exceptions: []const Register) !Register {
return (try self.allocRegs(1, .{inst}, exceptions))[0];
}
/// Allocates the specified register with the specified
/// instruction. Spills the register if it is currently
/// allocated.
pub fn getReg(self: *Self, reg: Register, inst: *ir.Inst) !void {
/// Spills the register if it is currently allocated. If a
/// corresponding instruction is passed, will also track this
/// register.
pub fn getReg(self: *Self, reg: Register, inst: ?*ir.Inst) !void {
const index = reg.allocIndex() orelse return;
if (!self.isRegFree(reg)) {
// Move the instruction that was previously there to a
// stack allocation.
const spilled_inst = self.registers[index].?;
self.registers[index] = inst;
try self.getFunction().spillInstruction(spilled_inst.src, reg, spilled_inst);
} else {
self.getRegAssumeFree(reg, inst);
}
}
/// Spills the register if it is currently allocated.
/// Does not track the register.
pub fn getRegWithoutTracking(self: *Self, reg: Register) !void {
const index = reg.allocIndex() orelse return;
if (!self.isRegFree(reg)) {
// Move the instruction that was previously there to a
// stack allocation.
const spilled_inst = self.registers[index].?;
try self.getFunction().spillInstruction(spilled_inst.src, reg, spilled_inst);
self.markRegFree(reg);
if (inst) |tracked_inst|
if (!self.isRegFree(reg)) {
// Move the instruction that was previously there to a
// stack allocation.
const spilled_inst = self.registers[index].?;
self.registers[index] = tracked_inst;
try self.getFunction().spillInstruction(spilled_inst.src, reg, spilled_inst);
} else {
self.getRegAssumeFree(reg, tracked_inst);
}
else {
if (!self.isRegFree(reg)) {
// Move the instruction that was previously there to a
// stack allocation.
const spilled_inst = self.registers[index].?;
try self.getFunction().spillInstruction(spilled_inst.src, reg, spilled_inst);
self.freeReg(reg);
}
}
}
@ -250,42 +220,63 @@ pub fn RegisterManager(
};
}
const MockRegister = enum(u2) {
const MockRegister1 = enum(u2) {
r0,
r1,
r2,
r3,
pub fn allocIndex(self: MockRegister) ?u2 {
inline for (mock_callee_preserved_regs) |cpreg, i| {
pub fn allocIndex(self: MockRegister1) ?u2 {
inline for (callee_preserved_regs) |cpreg, i| {
if (self == cpreg) return i;
}
return null;
}
const callee_preserved_regs = [_]MockRegister1{ .r2, .r3 };
};
const mock_callee_preserved_regs = [_]MockRegister{ .r2, .r3 };
const MockRegister2 = enum(u2) {
r0,
r1,
r2,
r3,
const MockFunction = struct {
allocator: *Allocator,
register_manager: RegisterManager(Self, MockRegister, &mock_callee_preserved_regs) = .{},
spilled: std.ArrayListUnmanaged(MockRegister) = .{},
const Self = @This();
pub fn deinit(self: *Self) void {
self.spilled.deinit(self.allocator);
pub fn allocIndex(self: MockRegister2) ?u2 {
inline for (callee_preserved_regs) |cpreg, i| {
if (self == cpreg) return i;
}
return null;
}
pub fn spillInstruction(self: *Self, src: LazySrcLoc, reg: MockRegister, inst: *ir.Inst) !void {
try self.spilled.append(self.allocator, reg);
}
const callee_preserved_regs = [_]MockRegister2{ .r0, .r1, .r2, .r3 };
};
test "tryAllocReg: no spilling" {
fn MockFunction(comptime Register: type) type {
return struct {
allocator: *Allocator,
register_manager: RegisterManager(Self, Register, &Register.callee_preserved_regs) = .{},
spilled: std.ArrayListUnmanaged(Register) = .{},
const Self = @This();
pub fn deinit(self: *Self) void {
self.spilled.deinit(self.allocator);
}
pub fn spillInstruction(self: *Self, src: LazySrcLoc, reg: Register, inst: *ir.Inst) !void {
try self.spilled.append(self.allocator, reg);
}
};
}
const MockFunction1 = MockFunction(MockRegister1);
const MockFunction2 = MockFunction(MockRegister2);
test "default state" {
const allocator = std.testing.allocator;
var function = MockFunction{
var function = MockFunction1{
.allocator = allocator,
};
defer function.deinit();
@ -296,27 +287,48 @@ test "tryAllocReg: no spilling" {
.src = .unneeded,
};
try std.testing.expect(!function.register_manager.isRegAllocated(.r2));
try std.testing.expect(!function.register_manager.isRegAllocated(.r3));
try expect(!function.register_manager.isRegAllocated(.r2));
try expect(!function.register_manager.isRegAllocated(.r3));
try expect(function.register_manager.isRegFree(.r2));
try expect(function.register_manager.isRegFree(.r3));
}
try std.testing.expectEqual(@as(?MockRegister, .r2), function.register_manager.tryAllocReg(&mock_instruction, &.{}));
try std.testing.expectEqual(@as(?MockRegister, .r3), function.register_manager.tryAllocReg(&mock_instruction, &.{}));
try std.testing.expectEqual(@as(?MockRegister, null), function.register_manager.tryAllocReg(&mock_instruction, &.{}));
test "tryAllocReg: no spilling" {
const allocator = std.testing.allocator;
try std.testing.expect(function.register_manager.isRegAllocated(.r2));
try std.testing.expect(function.register_manager.isRegAllocated(.r3));
var function = MockFunction1{
.allocator = allocator,
};
defer function.deinit();
var mock_instruction = ir.Inst{
.tag = .breakpoint,
.ty = Type.initTag(.void),
.src = .unneeded,
};
try expectEqual(@as(?MockRegister1, .r2), function.register_manager.tryAllocReg(&mock_instruction, &.{}));
try expectEqual(@as(?MockRegister1, .r3), function.register_manager.tryAllocReg(&mock_instruction, &.{}));
try expectEqual(@as(?MockRegister1, null), function.register_manager.tryAllocReg(&mock_instruction, &.{}));
try expect(function.register_manager.isRegAllocated(.r2));
try expect(function.register_manager.isRegAllocated(.r3));
try expect(!function.register_manager.isRegFree(.r2));
try expect(!function.register_manager.isRegFree(.r3));
function.register_manager.freeReg(.r2);
function.register_manager.freeReg(.r3);
try std.testing.expect(function.register_manager.isRegAllocated(.r2));
try std.testing.expect(function.register_manager.isRegAllocated(.r3));
try expect(function.register_manager.isRegAllocated(.r2));
try expect(function.register_manager.isRegAllocated(.r3));
try expect(function.register_manager.isRegFree(.r2));
try expect(function.register_manager.isRegFree(.r3));
}
test "allocReg: spilling" {
const allocator = std.testing.allocator;
var function = MockFunction{
var function = MockFunction1{
.allocator = allocator,
};
defer function.deinit();
@ -327,26 +339,76 @@ test "allocReg: spilling" {
.src = .unneeded,
};
try std.testing.expect(!function.register_manager.isRegAllocated(.r2));
try std.testing.expect(!function.register_manager.isRegAllocated(.r3));
try std.testing.expectEqual(@as(?MockRegister, .r2), try function.register_manager.allocReg(&mock_instruction, &.{}));
try std.testing.expectEqual(@as(?MockRegister, .r3), try function.register_manager.allocReg(&mock_instruction, &.{}));
try expectEqual(@as(?MockRegister1, .r2), try function.register_manager.allocReg(&mock_instruction, &.{}));
try expectEqual(@as(?MockRegister1, .r3), try function.register_manager.allocReg(&mock_instruction, &.{}));
// Spill a register
try std.testing.expectEqual(@as(?MockRegister, .r2), try function.register_manager.allocReg(&mock_instruction, &.{}));
try std.testing.expectEqualSlices(MockRegister, &[_]MockRegister{.r2}, function.spilled.items);
try expectEqual(@as(?MockRegister1, .r2), try function.register_manager.allocReg(&mock_instruction, &.{}));
try expectEqualSlices(MockRegister1, &[_]MockRegister1{.r2}, function.spilled.items);
// No spilling necessary
function.register_manager.freeReg(.r3);
try std.testing.expectEqual(@as(?MockRegister, .r3), try function.register_manager.allocReg(&mock_instruction, &.{}));
try std.testing.expectEqualSlices(MockRegister, &[_]MockRegister{.r2}, function.spilled.items);
try expectEqual(@as(?MockRegister1, .r3), try function.register_manager.allocReg(&mock_instruction, &.{}));
try expectEqualSlices(MockRegister1, &[_]MockRegister1{.r2}, function.spilled.items);
// Exceptions
function.register_manager.freeReg(.r2);
function.register_manager.freeReg(.r3);
try expectEqual(@as(?MockRegister1, .r3), try function.register_manager.allocReg(&mock_instruction, &.{.r2}));
}
test "tryAllocRegs" {
const allocator = std.testing.allocator;
var function = MockFunction2{
.allocator = allocator,
};
defer function.deinit();
var mock_instruction = ir.Inst{
.tag = .breakpoint,
.ty = Type.initTag(.void),
.src = .unneeded,
};
try expectEqual([_]MockRegister2{ .r0, .r1, .r2 }, function.register_manager.tryAllocRegs(3, .{ null, null, null }, &.{}).?);
// Exceptions
function.register_manager.freeReg(.r0);
function.register_manager.freeReg(.r1);
function.register_manager.freeReg(.r2);
try expectEqual([_]MockRegister2{ .r0, .r2, .r3 }, function.register_manager.tryAllocRegs(3, .{ null, null, null }, &.{.r1}).?);
}
test "allocRegs" {
const allocator = std.testing.allocator;
var function = MockFunction2{
.allocator = allocator,
};
defer function.deinit();
var mock_instruction = ir.Inst{
.tag = .breakpoint,
.ty = Type.initTag(.void),
.src = .unneeded,
};
try expectEqual([_]MockRegister2{ .r0, .r1, .r2 }, try function.register_manager.allocRegs(3, .{
&mock_instruction,
&mock_instruction,
&mock_instruction,
}, &.{}));
// Exceptions
try expectEqual([_]MockRegister2{ .r0, .r2, .r3 }, try function.register_manager.allocRegs(3, .{ null, null, null }, &.{.r1}));
try expectEqualSlices(MockRegister2, &[_]MockRegister2{ .r0, .r2 }, function.spilled.items);
}
test "getReg" {
const allocator = std.testing.allocator;
var function = MockFunction{
var function = MockFunction1{
.allocator = allocator,
};
defer function.deinit();
@ -357,18 +419,19 @@ test "getReg" {
.src = .unneeded,
};
try std.testing.expect(!function.register_manager.isRegAllocated(.r2));
try std.testing.expect(!function.register_manager.isRegAllocated(.r3));
try function.register_manager.getReg(.r3, &mock_instruction);
try std.testing.expect(!function.register_manager.isRegAllocated(.r2));
try std.testing.expect(function.register_manager.isRegAllocated(.r3));
try expect(!function.register_manager.isRegAllocated(.r2));
try expect(function.register_manager.isRegAllocated(.r3));
try expect(function.register_manager.isRegFree(.r2));
try expect(!function.register_manager.isRegFree(.r3));
// Spill r3
try function.register_manager.getReg(.r3, &mock_instruction);
try std.testing.expect(!function.register_manager.isRegAllocated(.r2));
try std.testing.expect(function.register_manager.isRegAllocated(.r3));
try std.testing.expectEqualSlices(MockRegister, &[_]MockRegister{.r3}, function.spilled.items);
try expect(!function.register_manager.isRegAllocated(.r2));
try expect(function.register_manager.isRegAllocated(.r3));
try expect(function.register_manager.isRegFree(.r2));
try expect(!function.register_manager.isRegFree(.r3));
try expectEqualSlices(MockRegister1, &[_]MockRegister1{.r3}, function.spilled.items);
}

View File

@ -5485,8 +5485,8 @@ static enum ZigLLVM_AtomicRMWBinOp to_ZigLLVMAtomicRMWBinOp(AtomicRmwOp op, bool
}
static LLVMTypeRef get_atomic_abi_type(CodeGen *g, IrInstGen *instruction) {
// If the operand type of an atomic operation is not a power of two sized
// we need to widen it before using it and then truncate the result.
// If the operand type of an atomic operation is not byte sized we need to
// widen it before using it and then truncate the result.
ir_assert(instruction->value->type->id == ZigTypeIdPointer, instruction);
ZigType *operand_type = instruction->value->type->data.pointer.child_type;
@ -5498,7 +5498,7 @@ static LLVMTypeRef get_atomic_abi_type(CodeGen *g, IrInstGen *instruction) {
bool is_signed = operand_type->data.integral.is_signed;
ir_assert(bit_count != 0, instruction);
if (bit_count == 1 || !is_power_of_2(bit_count)) {
if (!is_power_of_2(bit_count) || bit_count % 8) {
return get_llvm_type(g, get_int_type(g, is_signed, operand_type->abi_size * 8));
} else {
return nullptr;

View File

@ -374,3 +374,30 @@ pub fn hasRedZone(target: std.Target) bool {
else => false,
};
}
pub fn libcFullLinkFlags(target: std.Target) []const []const u8 {
// The linking order of these is significant and should match the order other
// C compilers such as gcc or clang use.
return switch (target.os.tag) {
.netbsd, .openbsd => &[_][]const u8{
"-lm",
"-lpthread",
"-lc",
"-lutil",
},
else => &[_][]const u8{
"-lm",
"-lpthread",
"-lc",
"-ldl",
"-lrt",
"-lutil",
},
};
}
pub fn clangMightShellOutForAssembly(target: std.Target) bool {
// Clang defaults to using the system assembler over the internal one
// when targeting a non-BSD OS.
return target.cpu.arch.isSPARC();
}

View File

@ -8,6 +8,7 @@ const build_options = @import("build_options");
const enable_qemu: bool = build_options.enable_qemu;
const enable_wine: bool = build_options.enable_wine;
const enable_wasmtime: bool = build_options.enable_wasmtime;
const enable_darling: bool = build_options.enable_darling;
const glibc_multi_install_dir: ?[]const u8 = build_options.glibc_multi_install_dir;
const ThreadPool = @import("ThreadPool.zig");
const CrossTarget = std.zig.CrossTarget;
@ -901,6 +902,16 @@ pub const TestContext = struct {
} else {
return; // wasmtime not available; pass test.
},
.darling => |darling_bin_name| if (enable_darling) {
try argv.append(darling_bin_name);
// Since the executable path is relative to the cwd here, we invoke
// darling with the "shell" subcommand, i.e. `darling shell <exe_path>`.
try argv.append("shell");
try argv.append(exe_path);
} else {
return; // Darling not available; pass test.
},
}
try comp.makeBinFileExecutable();

View File

@ -480,6 +480,9 @@ fn declVisitor(c: *Context, decl: *const clang.Decl) Error!void {
.Empty => {
// Do nothing
},
.FileScopeAsm => {
try transFileScopeAsm(c, &c.global_scope.base, @ptrCast(*const clang.FileScopeAsmDecl, decl));
},
else => {
const decl_name = try c.str(decl.getDeclKindName());
try warn(c, &c.global_scope.base, decl.getLocation(), "ignoring {s} declaration", .{decl_name});
@ -487,6 +490,21 @@ fn declVisitor(c: *Context, decl: *const clang.Decl) Error!void {
}
}
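// Translates a C file-scope `__asm__("...")` declaration into a Zig
// `comptime { asm ("...") }` block appended to the given scope.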
fn transFileScopeAsm(c: *Context, scope: *Scope, file_scope_asm: *const clang.FileScopeAsmDecl) Error!void {
const asm_string = file_scope_asm.getAsmString();
var len: usize = undefined;
const bytes_ptr = asm_string.getString_bytes_begin_size(&len);
const str = try std.fmt.allocPrint(c.arena, "\"{}\"", .{std.zig.fmtEscapes(bytes_ptr[0..len])});
const str_node = try Tag.string_literal.create(c.arena, str);
const asm_node = try Tag.asm_simple.create(c.arena, str_node);
const block = try Tag.block_single.create(c.arena, asm_node);
const comptime_node = try Tag.@"comptime".create(c.arena, block);
try scope.appendNode(comptime_node);
}
fn visitFnDecl(c: *Context, fn_decl: *const clang.FunctionDecl) Error!void {
const fn_name = try c.str(@ptrCast(*const clang.NamedDecl, fn_decl).getName_bytes_begin());
if (c.global_scope.sym_table.contains(fn_name))

View File

@ -161,6 +161,8 @@ pub const Node = extern union {
/// @shuffle(type, a, b, mask)
shuffle,
asm_simple,
negate,
negate_wrap,
bit_not,
@ -245,6 +247,7 @@ pub const Node = extern union {
.std_mem_zeroes,
.@"return",
.@"comptime",
.asm_simple,
.discard,
.std_math_Log2Int,
.negate,
@ -1017,6 +1020,19 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex {
},
});
},
.asm_simple => {
const payload = node.castTag(.asm_simple).?.data;
const asm_token = try c.addToken(.keyword_asm, "asm");
_ = try c.addToken(.l_paren, "(");
return c.addNode(.{
.tag = .asm_simple,
.main_token = asm_token,
.data = .{
.lhs = try renderNode(c, payload),
.rhs = try c.addToken(.r_paren, ")"),
},
});
},
.type => {
const payload = node.castTag(.type).?.data;
return c.addNode(.{
@ -2257,6 +2273,7 @@ fn renderNodeGrouped(c: *Context, node: Node) !NodeIndex {
.@"continue",
.@"return",
.@"comptime",
.asm_simple,
.usingnamespace_builtins,
.while_true,
.if_not_break,

View File

@ -1820,6 +1820,11 @@ const ZigClangEnumDecl *ZigClangEnumDecl_getDefinition(const ZigClangEnumDecl *z
return reinterpret_cast<const ZigClangEnumDecl *>(definition);
}
const ZigClangStringLiteral *ZigClangFileScopeAsmDecl_getAsmString(const ZigClangFileScopeAsmDecl *self) {
const clang::StringLiteral *result = reinterpret_cast<const clang::FileScopeAsmDecl*>(self)->getAsmString();
return reinterpret_cast<const ZigClangStringLiteral *>(result);
}
bool ZigClangRecordDecl_isUnion(const ZigClangRecordDecl *record_decl) {
return reinterpret_cast<const clang::RecordDecl*>(record_decl)->isUnion();
}

View File

@ -124,6 +124,7 @@ struct ZigClangEnumType;
struct ZigClangExpr;
struct ZigClangFieldDecl;
struct ZigClangFileID;
struct ZigClangFileScopeAsmDecl;
struct ZigClangFloatingLiteral;
struct ZigClangForStmt;
struct ZigClangFullSourceLoc;
@ -1000,6 +1001,8 @@ ZIG_EXTERN_C unsigned ZigClangVarDecl_getAlignedAttribute(const struct ZigClangV
ZIG_EXTERN_C unsigned ZigClangFunctionDecl_getAlignedAttribute(const struct ZigClangFunctionDecl *self, const ZigClangASTContext* ctx);
ZIG_EXTERN_C unsigned ZigClangFieldDecl_getAlignedAttribute(const struct ZigClangFieldDecl *self, const ZigClangASTContext* ctx);
ZIG_EXTERN_C const struct ZigClangStringLiteral *ZigClangFileScopeAsmDecl_getAsmString(const struct ZigClangFileScopeAsmDecl *self);
ZIG_EXTERN_C struct ZigClangQualType ZigClangParmVarDecl_getOriginalType(const struct ZigClangParmVarDecl *self);
ZIG_EXTERN_C bool ZigClangRecordDecl_getPackedAttribute(const struct ZigClangRecordDecl *);

View File

@ -199,7 +199,7 @@ fn testAtomicRmwInt() !void {
test "atomics with different types" {
try testAtomicsWithType(bool, true, false);
inline for (.{ u1, i5, u15 }) |T| {
inline for (.{ u1, i4, u5, i15, u24 }) |T| {
var x: T = 0;
try testAtomicsWithType(T, 0, 1);
}

View File

@ -19,7 +19,7 @@ pub fn addCases(ctx: *TestContext) !void {
// Incorrect return type
case.addError(
\\pub export fn _start() noreturn {
\\pub export fn main() noreturn {
\\}
, &[_][]const u8{
":2:1: error: expected noreturn, found void",
@ -30,7 +30,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\extern "c" fn write(usize, usize, usize) usize;
\\extern "c" fn exit(usize) noreturn;
\\
\\pub export fn _start() noreturn {
\\pub export fn main() noreturn {
\\ print();
\\
\\ exit(0);
@ -45,12 +45,39 @@ pub fn addCases(ctx: *TestContext) !void {
"Hello, World!\n",
);
// Now change the message only
// Print it 4 times and force growth and realloc.
case.addCompareOutput(
\\extern "c" fn write(usize, usize, usize) usize;
\\extern "c" fn exit(usize) noreturn;
\\
\\pub export fn _start() noreturn {
\\pub export fn main() noreturn {
\\ print();
\\ print();
\\ print();
\\ print();
\\
\\ exit(0);
\\}
\\
\\fn print() void {
\\ const msg = @ptrToInt("Hello, World!\n");
\\ const len = 14;
\\ _ = write(1, msg, len);
\\}
,
\\Hello, World!
\\Hello, World!
\\Hello, World!
\\Hello, World!
\\
);
// Print it once, and change the message.
case.addCompareOutput(
\\extern "c" fn write(usize, usize, usize) usize;
\\extern "c" fn exit(usize) noreturn;
\\
\\export fn _main() noreturn {
\\ print();
\\
\\ exit(0);
@ -70,7 +97,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\extern "c" fn write(usize, usize, usize) usize;
\\extern "c" fn exit(usize) noreturn;
\\
\\pub export fn _start() noreturn {
\\pub export fn main() noreturn {
\\ print();
\\ print();
\\
@ -96,7 +123,7 @@ pub fn addCases(ctx: *TestContext) !void {
case.addCompareOutput(
\\extern "c" fn exit(usize) noreturn;
\\
\\pub export fn _start() noreturn {
\\pub export fn main() noreturn {
\\ exit(0);
\\}
,
@ -107,7 +134,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\extern "c" fn exit(usize) noreturn;
\\extern "c" fn write(usize, usize, usize) usize;
\\
\\pub export fn _start() noreturn {
\\pub export fn main() noreturn {
\\ _ = write(1, @ptrToInt("Hey!\n"), 5);
\\ exit(0);
\\}

View File

@ -503,6 +503,7 @@ pub fn addPkgTests(
is_wine_enabled: bool,
is_qemu_enabled: bool,
is_wasmtime_enabled: bool,
is_darling_enabled: bool,
glibc_dir: ?[]const u8,
) *build.Step {
const step = b.step(b.fmt("test-{s}", .{name}), desc);
@ -564,6 +565,7 @@ pub fn addPkgTests(
these_tests.enable_wine = is_wine_enabled;
these_tests.enable_qemu = is_qemu_enabled;
these_tests.enable_wasmtime = is_wasmtime_enabled;
these_tests.enable_darling = is_darling_enabled;
these_tests.glibc_multi_install_dir = glibc_dir;
these_tests.addIncludeDir("test");

View File

@ -3499,4 +3499,18 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\ '\u{1f4af}',
\\};
});
cases.add("global assembly",
\\__asm__(".globl func\n\t"
\\ ".type func, @function\n\t"
\\ "func:\n\t"
\\ ".cfi_startproc\n\t"
\\ "movl $42, %eax\n\t"
\\ "ret\n\t"
\\ ".cfi_endproc");
, &[_][]const u8{
\\comptime {
\\ asm (".globl func\n\t.type func, @function\n\tfunc:\n\t.cfi_startproc\n\tmovl $42, %eax\n\tret\n\t.cfi_endproc");
\\}
});
}

View File

@ -1,96 +1,5 @@
const std = @import("std");
const Writer = std.ArrayList(u8).Writer;
//! See https://www.khronos.org/registry/spir-v/specs/unified1/MachineReadableGrammar.html
//! and the files in https://github.com/KhronosGroup/SPIRV-Headers/blob/master/include/spirv/unified1/
//! Note: Non-canonical casing in these structs used to match SPIR-V spec json.
const Registry = union(enum) {
core: CoreRegistry,
extension: ExtensionRegistry,
};
const CoreRegistry = struct {
copyright: [][]const u8,
/// Hexadecimal representation of the magic number
magic_number: []const u8,
major_version: u32,
minor_version: u32,
revision: u32,
instruction_printing_class: []InstructionPrintingClass,
instructions: []Instruction,
operand_kinds: []OperandKind,
};
const ExtensionRegistry = struct {
copyright: [][]const u8,
version: u32,
revision: u32,
instructions: []Instruction,
operand_kinds: []OperandKind = &[_]OperandKind{},
};
const InstructionPrintingClass = struct {
tag: []const u8,
heading: ?[]const u8 = null,
};
const Instruction = struct {
opname: []const u8,
class: ?[]const u8 = null, // Note: Only available in the core registry.
opcode: u32,
operands: []Operand = &[_]Operand{},
capabilities: [][]const u8 = &[_][]const u8{},
extensions: [][]const u8 = &[_][]const u8{},
version: ?[]const u8 = null,
lastVersion: ?[]const u8 = null,
};
const Operand = struct {
kind: []const u8,
/// If this field is 'null', the operand is only expected once.
quantifier: ?Quantifier = null,
name: []const u8 = "",
};
const Quantifier = enum {
/// zero or once
@"?",
/// zero or more
@"*",
};
const OperandCategory = enum {
BitEnum,
ValueEnum,
Id,
Literal,
Composite,
};
const OperandKind = struct {
category: OperandCategory,
/// The name
kind: []const u8,
doc: ?[]const u8 = null,
enumerants: ?[]Enumerant = null,
bases: ?[]const []const u8 = null,
};
const Enumerant = struct {
enumerant: []const u8,
value: union(enum) {
bitflag: []const u8, // Hexadecimal representation of the value
int: u31,
},
capabilities: [][]const u8 = &[_][]const u8{},
/// Valid for .ValueEnum and .BitEnum
extensions: [][]const u8 = &[_][]const u8{},
/// `quantifier` will always be `null`.
parameters: []Operand = &[_]Operand{},
version: ?[]const u8 = null,
lastVersion: ?[]const u8 = null,
};
const g = @import("spirv/grammar.zig");
pub fn main() !void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
@ -106,24 +15,25 @@ pub fn main() !void {
const spec = try std.fs.cwd().readFileAlloc(allocator, spec_path, std.math.maxInt(usize));
var tokens = std.json.TokenStream.init(spec);
var registry = try std.json.parse(Registry, &tokens, .{.allocator = allocator});
var registry = try std.json.parse(g.Registry, &tokens, .{.allocator = allocator});
var buf = std.ArrayList(u8).init(allocator);
defer buf.deinit();
try render(buf.writer(), registry);
const tree = try std.zig.parse(allocator, buf.items);
_ = try std.zig.render(allocator, std.io.getStdOut().writer(), tree);
var bw = std.io.bufferedWriter(std.io.getStdOut().writer());
try render(bw.writer(), registry);
try bw.flush();
}
fn render(writer: Writer, registry: Registry) !void {
fn render(writer: anytype, registry: g.Registry) !void {
try writer.writeAll(
\\//! This file is auto-generated by tools/gen_spirv_spec.zig.
\\
\\const Version = @import("std").builtin.Version;
\\
);
switch (registry) {
.core => |core_reg| {
try renderCopyRight(writer, core_reg.copyright);
try writer.print(
\\const Version = @import("builtin").Version;
\\pub const version = Version{{.major = {}, .minor = {}, .patch = {}}};
\\pub const version = Version{{ .major = {}, .minor = {}, .patch = {} }};
\\pub const magic_number: u32 = {s};
\\
, .{ core_reg.major_version, core_reg.minor_version, core_reg.revision, core_reg.magic_number },
@ -132,10 +42,8 @@ fn render(writer: Writer, registry: Registry) !void {
try renderOperandKinds(writer, core_reg.operand_kinds);
},
.extension => |ext_reg| {
try renderCopyRight(writer, ext_reg.copyright);
try writer.print(
\\const Version = @import("builtin").Version;
\\pub const version = Version{{.major = {}, .minor = 0, .patch = {}}};
\\pub const version = Version{{ .major = {}, .minor = 0, .patch = {} }};
\\
, .{ ext_reg.version, ext_reg.revision },
);
@ -145,21 +53,15 @@ fn render(writer: Writer, registry: Registry) !void {
}
}
fn renderCopyRight(writer: Writer, copyright: []const []const u8) !void {
for (copyright) |line| {
try writer.print("// {s}\n", .{ line });
}
}
fn renderOpcodes(writer: Writer, instructions: []const Instruction) !void {
fn renderOpcodes(writer: anytype, instructions: []const g.Instruction) !void {
try writer.writeAll("pub const Opcode = extern enum(u16) {\n");
for (instructions) |instr| {
try writer.print("{} = {},\n", .{ std.zig.fmtId(instr.opname), instr.opcode });
try writer.print(" {} = {},\n", .{ std.zig.fmtId(instr.opname), instr.opcode });
}
try writer.writeAll("_,\n};\n");
try writer.writeAll(" _,\n};\n");
}
fn renderOperandKinds(writer: Writer, kinds: []const OperandKind) !void {
fn renderOperandKinds(writer: anytype, kinds: []const g.OperandKind) !void {
for (kinds) |kind| {
switch (kind.category) {
.ValueEnum => try renderValueEnum(writer, kind),
@ -169,20 +71,20 @@ fn renderOperandKinds(writer: Writer, kinds: []const OperandKind) !void {
}
}
fn renderValueEnum(writer: Writer, enumeration: OperandKind) !void {
fn renderValueEnum(writer: anytype, enumeration: g.OperandKind) !void {
try writer.print("pub const {s} = extern enum(u32) {{\n", .{ enumeration.kind });
const enumerants = enumeration.enumerants orelse return error.InvalidRegistry;
for (enumerants) |enumerant| {
if (enumerant.value != .int) return error.InvalidRegistry;
try writer.print("{} = {},\n", .{ std.zig.fmtId(enumerant.enumerant), enumerant.value.int });
try writer.print(" {} = {},\n", .{ std.zig.fmtId(enumerant.enumerant), enumerant.value.int });
}
try writer.writeAll("_,\n};\n");
try writer.writeAll(" _,\n};\n");
}
fn renderBitEnum(writer: Writer, enumeration: OperandKind) !void {
fn renderBitEnum(writer: anytype, enumeration: g.OperandKind) !void {
try writer.print("pub const {s} = packed struct {{\n", .{ enumeration.kind });
var flags_by_bitpos = [_]?[]const u8{null} ** 32;
@ -205,6 +107,7 @@ fn renderBitEnum(writer: Writer, enumeration: OperandKind) !void {
}
for (flags_by_bitpos) |maybe_flag_name, bitpos| {
try writer.writeAll(" ");
if (maybe_flag_name) |flag_name| {
try writer.writeAll(flag_name);
} else {
@ -215,7 +118,7 @@ fn renderBitEnum(writer: Writer, enumeration: OperandKind) !void {
if (bitpos == 0) { // Force alignment to integer boundaries
try writer.writeAll("align(@alignOf(u32)) ");
}
try writer.writeAll("= false, ");
try writer.writeAll("= false,\n");
}
try writer.writeAll("};\n");

tools/spirv/grammar.zig Normal file
View File

@ -0,0 +1,90 @@
//! See https://www.khronos.org/registry/spir-v/specs/unified1/MachineReadableGrammar.html
//! and the files in https://github.com/KhronosGroup/SPIRV-Headers/blob/master/include/spirv/unified1/
//! Note: Non-canonical casing in these structs used to match SPIR-V spec json.
pub const Registry = union(enum) {
core: CoreRegistry,
extension: ExtensionRegistry,
};
pub const CoreRegistry = struct {
copyright: [][]const u8,
/// Hexadecimal representation of the magic number
magic_number: []const u8,
major_version: u32,
minor_version: u32,
revision: u32,
instruction_printing_class: []InstructionPrintingClass,
instructions: []Instruction,
operand_kinds: []OperandKind,
};
pub const ExtensionRegistry = struct {
copyright: [][]const u8,
version: u32,
revision: u32,
instructions: []Instruction,
operand_kinds: []OperandKind = &[_]OperandKind{},
};
pub const InstructionPrintingClass = struct {
tag: []const u8,
heading: ?[]const u8 = null,
};
pub const Instruction = struct {
opname: []const u8,
class: ?[]const u8 = null, // Note: Only available in the core registry.
opcode: u32,
operands: []Operand = &[_]Operand{},
capabilities: [][]const u8 = &[_][]const u8{},
extensions: [][]const u8 = &[_][]const u8{},
version: ?[]const u8 = null,
lastVersion: ?[]const u8 = null,
};
pub const Operand = struct {
kind: []const u8,
/// If this field is 'null', the operand is only expected once.
quantifier: ?Quantifier = null,
name: []const u8 = "",
};
pub const Quantifier = enum {
/// zero or once
@"?",
/// zero or more
@"*",
};
pub const OperandCategory = enum {
BitEnum,
ValueEnum,
Id,
Literal,
Composite,
};
pub const OperandKind = struct {
category: OperandCategory,
/// The name
kind: []const u8,
doc: ?[]const u8 = null,
enumerants: ?[]Enumerant = null,
bases: ?[]const []const u8 = null,
};
pub const Enumerant = struct {
enumerant: []const u8,
value: union(enum) {
bitflag: []const u8, // Hexadecimal representation of the value
int: u31,
},
capabilities: [][]const u8 = &[_][]const u8{},
/// Valid for .ValueEnum and .BitEnum
extensions: [][]const u8 = &[_][]const u8{},
/// `quantifier` will always be `null`.
parameters: []Operand = &[_]Operand{},
version: ?[]const u8 = null,
lastVersion: ?[]const u8 = null,
};

View File

@ -0,0 +1,321 @@
const std = @import("std");
const fs = std.fs;
const Allocator = std.mem.Allocator;
const g = @import("spirv/grammar.zig");
//! This tool generates SPIR-V features from the grammar files in the SPIRV-Headers
//! (https://github.com/KhronosGroup/SPIRV-Headers/) and SPIRV-Registry (https://github.com/KhronosGroup/SPIRV-Registry/)
//! repositories. Currently it only generates a basic feature set definition consisting of versions, extensions and capabilities.
//! There is a lot left to be desired, as currently dependencies of capabilities on extensions and dependencies of extensions on other extensions aren't generated.
//! This is because there are some peculiarities in the SPIR-V registries:
//! - Capabilities may depend on multiple extensions, which cannot be modelled yet by std.Target.
//! - Extension dependencies are not documented in a machine-readable manner.
//! - Note that the grammar spec also contains definitions from extensions which aren't actually official. Most of these seem to be
//!   from an Intel project (https://github.com/intel/llvm/, https://github.com/intel/llvm/tree/sycl/sycl/doc/extensions/SPIRV),
//! and so ONLY extensions in the SPIRV-Registry should be included.
const Version = struct {
major: u32,
minor: u32,
fn parse(str: []const u8) !Version {
var it = std.mem.split(str, ".");
const major = it.next() orelse return error.InvalidVersion;
const minor = it.next() orelse return error.InvalidVersion;
if (it.next() != null) return error.InvalidVersion;
return Version{
.major = std.fmt.parseInt(u32, major, 10) catch return error.InvalidVersion,
.minor = std.fmt.parseInt(u32, minor, 10) catch return error.InvalidVersion,
};
}
fn eql(a: Version, b: Version) bool {
return a.major == b.major and a.minor == b.minor;
}
fn lessThan(ctx: void, a: Version, b: Version) bool {
return if (a.major == b.major)
a.minor < b.minor
else
a.major < b.major;
}
};
pub fn main() !void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
const allocator = &arena.allocator;
const args = try std.process.argsAlloc(allocator);
if (args.len <= 1) {
usageAndExit(std.io.getStdErr(), args[0], 1);
}
if (std.mem.eql(u8, args[1], "--help")) {
usageAndExit(std.io.getStdErr(), args[0], 0);
}
if (args.len != 3) {
usageAndExit(std.io.getStdErr(), args[0], 1);
}
const spirv_headers_root = args[1];
const spirv_registry_root = args[2];
if (std.mem.startsWith(u8, spirv_headers_root, "-") or std.mem.startsWith(u8, spirv_registry_root, "-")) {
usageAndExit(std.io.getStdErr(), args[0], 1);
}
const registry_path = try fs.path.join(allocator, &.{ spirv_headers_root, "include", "spirv", "unified1", "spirv.core.grammar.json" });
const registry_json = try std.fs.cwd().readFileAlloc(allocator, registry_path, std.math.maxInt(usize));
var tokens = std.json.TokenStream.init(registry_json);
const registry = try std.json.parse(g.CoreRegistry, &tokens, .{ .allocator = allocator });
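// Zig's 'for ... else': the else branch runs only if the loop finishes without 'break',
// so a registry without a "Capability" operand kind is rejected as invalid.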
const capabilities = for (registry.operand_kinds) |opkind| {
if (std.mem.eql(u8, opkind.kind, "Capability"))
break opkind.enumerants orelse return error.InvalidRegistry;
} else return error.InvalidRegistry;
const extensions = try gatherExtensions(allocator, spirv_registry_root);
const versions = try gatherVersions(allocator, registry);
var bw = std.io.bufferedWriter(std.io.getStdOut().writer());
const w = bw.writer();
try w.writeAll(
\\//! This file is auto-generated by tools/update_spirv_features.zig.
\\//! TODO: Dependencies of capabilities on extensions.
\\//! TODO: Dependencies of extensions on extensions.
\\//! TODO: Dependencies of extensions on versions.
\\
\\const std = @import("../std.zig");
\\const CpuFeature = std.Target.Cpu.Feature;
\\const CpuModel = std.Target.Cpu.Model;
\\
\\pub const Feature = enum {
\\
);
for (versions) |ver| {
try w.print(" v{}_{},\n", .{ ver.major, ver.minor });
}
for (extensions) |ext| {
try w.print(" {},\n", .{ std.zig.fmtId(ext) });
}
for (capabilities) |cap| {
try w.print(" {},\n", .{ std.zig.fmtId(cap.enumerant) });
}
try w.writeAll(
\\};
\\
\\pub usingnamespace CpuFeature.feature_set_fns(Feature);
\\
\\pub const all_features = blk: {
\\ @setEvalBranchQuota(2000);
\\ const len = @typeInfo(Feature).Enum.fields.len;
\\ std.debug.assert(len <= CpuFeature.Set.needed_bit_count);
\\ var result: [len]CpuFeature = undefined;
\\
);
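// Each version depends on the previous one, so enabling a later SPIR-V version transitively enables all earlier versions.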
for (versions) |ver, i| {
try w.print(
\\ result[@enumToInt(Feature.v{0}_{1})] = .{{
\\ .llvm_name = null,
\\ .description = "SPIR-V version {0}.{1}",
\\
, .{ ver.major, ver.minor }
);
if (i == 0) {
try w.writeAll(
\\ .dependencies = featureSet(&[_]Feature{}),
\\ };
\\
);
} else {
try w.print(
\\ .dependencies = featureSet(&[_]Feature{{
\\ .v{}_{},
\\ }}),
\\ }};
\\
, .{ versions[i - 1].major, versions[i - 1].minor }
);
}
}
// TODO: Extension dependencies.
for (extensions) |ext| {
try w.print(
\\ result[@enumToInt(Feature.{s})] = .{{
\\ .llvm_name = null,
\\ .description = "SPIR-V extension {s}",
\\ .dependencies = featureSet(&[_]Feature{{}}),
\\ }};
\\
, .{
std.zig.fmtId(ext),
ext,
}
);
}
// TODO: Capability extension dependencies.
for (capabilities) |cap| {
try w.print(
\\ result[@enumToInt(Feature.{s})] = .{{
\\ .llvm_name = null,
\\ .description = "Enable SPIR-V capability {s}",
\\ .dependencies = featureSet(&[_]Feature{{
\\
, .{
std.zig.fmtId(cap.enumerant),
cap.enumerant,
}
);
if (cap.version) |ver_str| {
if (!std.mem.eql(u8, ver_str, "None")) {
const ver = try Version.parse(ver_str);
try w.print(" .v{}_{},\n", .{ ver.major, ver.minor });
}
}
for (cap.capabilities) |cap_dep| {
try w.print(" .{},\n", .{ std.zig.fmtId(cap_dep) });
}
try w.writeAll(
\\ }),
\\ };
\\
);
}
try w.writeAll(
\\ const ti = @typeInfo(Feature);
\\ for (result) |*elem, i| {
\\ elem.index = i;
\\ elem.name = ti.Enum.fields[i].name;
\\ }
\\ break :blk result;
\\};
\\
);
try bw.flush();
}
/// SPIRV-Registry should hold all extensions currently registered for SPIR-V.
/// The *.grammar.json files in SPIRV-Headers contain most of these as well, but by going through the registry
/// we are sure to pick up only the officially registered ones.
/// TODO: Unfortunately, neither repository contains a machine-readable list of extension dependencies.
fn gatherExtensions(allocator: *Allocator, spirv_registry_root: []const u8) ![]const []const u8 {
const extensions_path = try fs.path.join(allocator, &.{ spirv_registry_root, "extensions" });
var extensions_dir = try fs.cwd().openDir(extensions_path, .{ .iterate = true });
defer extensions_dir.close();
var extensions = std.ArrayList([]const u8).init(allocator);
var vendor_it = extensions_dir.iterate();
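// SPIRV-Registry lays specs out as extensions/<vendor>/<extension>.{html,asciidoc}; iterate the vendor directories first.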
while (try vendor_it.next()) |vendor_entry| {
std.debug.assert(vendor_entry.kind == .Directory); // If this fails, the structure of SPIRV-Registry has changed.
const vendor_dir = try extensions_dir.openDir(vendor_entry.name, .{ .iterate = true });
var ext_it = vendor_dir.iterate();
while (try ext_it.next()) |ext_entry| {
// There are both an HTML and an asciidoc version of every spec (as well as some other directories).
// We only need the extension name, so to avoid duplicates we simply skip anything that is not asciidoc.
if (!std.mem.endsWith(u8, ext_entry.name, ".asciidoc"))
continue;
// Unfortunately, some extension filenames are incorrect, so we look for the name in the 'Name Strings' section instead.
// This has the following format:
// ```
// Name Strings
// ------------
//
// SPV_EXT_name
// ```
// OR
// ```
// == Name Strings
//
// SPV_EXT_name
// ```
const ext_spec = try vendor_dir.readFileAlloc(allocator, ext_entry.name, std.math.maxInt(usize));
const name_strings = "Name Strings";
const name_strings_offset = std.mem.indexOf(u8, ext_spec, name_strings) orelse return error.InvalidRegistry;
// The specs are inconsistent about this next part, so just skip any newlines and dashes.
var ext_start = name_strings_offset + name_strings.len + 1;
while (ext_spec[ext_start] == '\n' or ext_spec[ext_start] == '-') {
ext_start += 1;
}
const ext_end = std.mem.indexOfScalarPos(u8, ext_spec, ext_start, '\n') orelse return error.InvalidRegistry;
const ext = ext_spec[ext_start..ext_end];
std.debug.assert(std.mem.startsWith(u8, ext, "SPV_")); // Sanity check, all extensions should have a name like SPV_VENDOR_extension.
try extensions.append(try allocator.dupe(u8, ext));
}
}
return extensions.items;
}
fn insertVersion(versions: *std.ArrayList(Version), version: ?[]const u8) !void {
const ver_str = version orelse return;
if (std.mem.eql(u8, ver_str, "None"))
return;
const ver = try Version.parse(ver_str);
for (versions.items) |existing_ver| {
if (ver.eql(existing_ver)) return;
}
try versions.append(ver);
}
fn gatherVersions(allocator: *Allocator, registry: g.CoreRegistry) ![]const Version {
// The expected number of versions is small, so an ArrayList with a linear duplicate check (see insertVersion) is sufficient.
var versions = std.ArrayList(Version).init(allocator);
for (registry.instructions) |inst| {
try insertVersion(&versions, inst.version);
}
for (registry.operand_kinds) |opkind| {
const enumerants = opkind.enumerants orelse continue;
for (enumerants) |enumerant| {
try insertVersion(&versions, enumerant.version);
}
}
std.sort.sort(Version, versions.items, {}, Version.lessThan);
return versions.items;
}
fn usageAndExit(file: fs.File, arg0: []const u8, code: u8) noreturn {
file.writer().print(
\\Usage: {s} /path/git/SPIRV-Headers /path/git/SPIRV-Registry
\\
\\Prints to stdout Zig code which can be used to replace the file lib/std/target/spirv.zig.
\\
\\SPIRV-Headers can be cloned from https://github.com/KhronosGroup/SPIRV-Headers,
\\SPIRV-Registry can be cloned from https://github.com/KhronosGroup/SPIRV-Registry.
\\
, .{arg0}
) catch std.process.exit(1);
std.process.exit(code);
}
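For orientation, the file this tool prints to stdout roughly takes the shape below. This is a sketch assembled from the write templates in main() above; the actual set of versions, extensions and capabilities depends on the registries passed on the command line:

//! This file is auto-generated by tools/update_spirv_features.zig.

const std = @import("../std.zig");
const CpuFeature = std.Target.Cpu.Feature;
const CpuModel = std.Target.Cpu.Model;

pub const Feature = enum {
    v1_0,
    v1_1,
    // ... one tag per version, extension and capability ...
};

pub usingnamespace CpuFeature.feature_set_fns(Feature);

pub const all_features = blk: {
    @setEvalBranchQuota(2000);
    const len = @typeInfo(Feature).Enum.fields.len;
    std.debug.assert(len <= CpuFeature.Set.needed_bit_count);
    var result: [len]CpuFeature = undefined;
    result[@enumToInt(Feature.v1_0)] = .{
        .llvm_name = null,
        .description = "SPIR-V version 1.0",
        .dependencies = featureSet(&[_]Feature{}),
    };
    result[@enumToInt(Feature.v1_1)] = .{
        .llvm_name = null,
        .description = "SPIR-V version 1.1",
        .dependencies = featureSet(&[_]Feature{
            .v1_0,
        }),
    };
    // ... extension and capability entries follow the same pattern ...
    const ti = @typeInfo(Feature);
    for (result) |*elem, i| {
        elem.index = i;
        elem.name = ti.Enum.fields[i].name;
    }
    break :blk result;
};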