std: update std.builtin.Type fields to follow naming conventions

The compiler actually doesn't need any functional changes for this: Sema
does reification based on the tag indices of `std.builtin.Type` already!
So, no zig1.wasm update is necessary.

This change is necessary to disallow name clashes between fields and
decls on a type, which is a prerequisite of #9938.
mlugg 2024-08-28 02:35:53 +01:00
parent 1a178d4995
commit 0fe3fd01dd
336 changed files with 4105 additions and 4112 deletions
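To illustrate what the rename means for downstream code (an illustrative sketch, not part of this diff): the payload fields of `std.builtin.Type` move from TitleCase to the standard field naming convention, so `.Int`, `.Struct` and `.Fn` become `.int`, `.@"struct"` and `.@"fn"`.

const std = @import("std");

fn fieldCount(comptime T: type) usize {
    // Before this commit: @typeInfo(T).Struct.fields.len
    // After this commit the payload field follows the usual convention,
    // quoted because `struct` is a keyword:
    return @typeInfo(T).@"struct".fields.len;
}

test "reflection with renamed Type fields" {
    const Point = struct { x: i32, y: i32 };
    try std.testing.expect(fieldCount(Point) == 2);
}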


@ -4299,7 +4299,7 @@ comptime {
Asserts that {#syntax#}@sizeOf(@TypeOf(value)) == @sizeOf(DestType){#endsyntax#}.
</p>
<p>
Asserts that {#syntax#}@typeInfo(DestType) != .Pointer{#endsyntax#}. Use {#syntax#}@ptrCast{#endsyntax#} or {#syntax#}@ptrFromInt{#endsyntax#} if you need this.
Asserts that {#syntax#}@typeInfo(DestType) != .pointer{#endsyntax#}. Use {#syntax#}@ptrCast{#endsyntax#} or {#syntax#}@ptrFromInt{#endsyntax#} if you need this.
</p>
<p>
Can be used for these things for example:
@ -4517,7 +4517,7 @@ comptime {
{#syntax#}T{#endsyntax#} must be a pointer, a {#syntax#}bool{#endsyntax#}, a float,
an integer or an enum.
</p>
<p>{#syntax#}@typeInfo(@TypeOf(ptr)).Pointer.alignment{#endsyntax#} must be {#syntax#}>= @sizeOf(T).{#endsyntax#}</p>
<p>{#syntax#}@typeInfo(@TypeOf(ptr)).pointer.alignment{#endsyntax#} must be {#syntax#}>= @sizeOf(T).{#endsyntax#}</p>
<p>{#syntax#}AtomicOrder{#endsyntax#} can be found with {#syntax#}@import("std").builtin.AtomicOrder{#endsyntax#}.</p>
{#see_also|@atomicStore|@atomicLoad|@atomicRmw|@fence|@cmpxchgWeak#}
{#header_close#}
@ -4549,7 +4549,7 @@ fn cmpxchgWeakButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_val
{#syntax#}T{#endsyntax#} must be a pointer, a {#syntax#}bool{#endsyntax#}, a float,
an integer or an enum.
</p>
<p>{#syntax#}@typeInfo(@TypeOf(ptr)).Pointer.alignment{#endsyntax#} must be {#syntax#}>= @sizeOf(T).{#endsyntax#}</p>
<p>{#syntax#}@typeInfo(@TypeOf(ptr)).pointer.alignment{#endsyntax#} must be {#syntax#}>= @sizeOf(T).{#endsyntax#}</p>
<p>{#syntax#}AtomicOrder{#endsyntax#} can be found with {#syntax#}@import("std").builtin.AtomicOrder{#endsyntax#}.</p>
{#see_also|@atomicStore|@atomicLoad|@atomicRmw|@fence|@cmpxchgStrong#}
{#header_close#}
@ -4672,7 +4672,7 @@ fn cmpxchgWeakButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_val
<p>
Floored division. Rounds toward negative infinity. For unsigned integers it is
the same as {#syntax#}numerator / denominator{#endsyntax#}. Caller guarantees {#syntax#}denominator != 0{#endsyntax#} and
{#syntax#}!(@typeInfo(T) == .Int and T.is_signed and numerator == std.math.minInt(T) and denominator == -1){#endsyntax#}.
{#syntax#}!(@typeInfo(T) == .int and T.is_signed and numerator == std.math.minInt(T) and denominator == -1){#endsyntax#}.
</p>
<ul>
<li>{#syntax#}@divFloor(-5, 3) == -2{#endsyntax#}</li>
@ -4686,7 +4686,7 @@ fn cmpxchgWeakButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_val
<p>
Truncated division. Rounds toward zero. For unsigned integers it is
the same as {#syntax#}numerator / denominator{#endsyntax#}. Caller guarantees {#syntax#}denominator != 0{#endsyntax#} and
{#syntax#}!(@typeInfo(T) == .Int and T.is_signed and numerator == std.math.minInt(T) and denominator == -1){#endsyntax#}.
{#syntax#}!(@typeInfo(T) == .int and T.is_signed and numerator == std.math.minInt(T) and denominator == -1){#endsyntax#}.
</p>
<ul>
<li>{#syntax#}@divTrunc(-5, 3) == -1{#endsyntax#}</li>
@ -5320,8 +5320,8 @@ fn cmpxchgWeakButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_val
any bits that disagree with the resultant sign bit are shifted out.
</p>
<p>
The type of {#syntax#}shift_amt{#endsyntax#} is an unsigned integer with {#syntax#}log2(@typeInfo(T).Int.bits){#endsyntax#} bits.
This is because {#syntax#}shift_amt >= @typeInfo(T).Int.bits{#endsyntax#} is undefined behavior.
The type of {#syntax#}shift_amt{#endsyntax#} is an unsigned integer with {#syntax#}log2(@typeInfo(T).int.bits){#endsyntax#} bits.
This is because {#syntax#}shift_amt >= @typeInfo(T).int.bits{#endsyntax#} is undefined behavior.
</p>
<p>
{#syntax#}comptime_int{#endsyntax#} is modeled as an integer with an infinite number of bits,
@ -5337,8 +5337,8 @@ fn cmpxchgWeakButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_val
Performs {#syntax#}a << b{#endsyntax#} and returns a tuple with the result and a possible overflow bit.
</p>
<p>
The type of {#syntax#}shift_amt{#endsyntax#} is an unsigned integer with {#syntax#}log2(@typeInfo(@TypeOf(a)).Int.bits){#endsyntax#} bits.
This is because {#syntax#}shift_amt >= @typeInfo(@TypeOf(a)).Int.bits{#endsyntax#} is undefined behavior.
The type of {#syntax#}shift_amt{#endsyntax#} is an unsigned integer with {#syntax#}log2(@typeInfo(@TypeOf(a)).int.bits){#endsyntax#} bits.
This is because {#syntax#}shift_amt >= @typeInfo(@TypeOf(a)).int.bits{#endsyntax#} is undefined behavior.
</p>
{#see_also|@shlExact|@shrExact#}
{#header_close#}
@ -5350,8 +5350,8 @@ fn cmpxchgWeakButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_val
that the shift will not shift any 1 bits out.
</p>
<p>
The type of {#syntax#}shift_amt{#endsyntax#} is an unsigned integer with {#syntax#}log2(@typeInfo(T).Int.bits){#endsyntax#} bits.
This is because {#syntax#}shift_amt >= @typeInfo(T).Int.bits{#endsyntax#} is undefined behavior.
The type of {#syntax#}shift_amt{#endsyntax#} is an unsigned integer with {#syntax#}log2(@typeInfo(T).int.bits){#endsyntax#} bits.
This is because {#syntax#}shift_amt >= @typeInfo(T).int.bits{#endsyntax#} is undefined behavior.
</p>
{#see_also|@shlExact|@shlWithOverflow#}
{#header_close#}
@ -5405,7 +5405,7 @@ fn cmpxchgWeakButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_val
This size may contain padding bytes. If there were two consecutive T in memory, the padding would be the offset
in bytes between element at index 0 and the element at index 1. For {#link|integer|Integers#},
consider whether you want to use {#syntax#}@sizeOf(T){#endsyntax#} or
{#syntax#}@typeInfo(T).Int.bits{#endsyntax#}.
{#syntax#}@typeInfo(T).int.bits{#endsyntax#}.
</p>
<p>
This function measures the size at runtime. For types that are disallowed at runtime, such as


@ -1,7 +1,7 @@
fn isFieldOptional(comptime T: type, field_index: usize) !bool {
const fields = @typeInfo(T).Struct.fields;
const fields = @typeInfo(T).@"struct".fields;
return switch (field_index) {
inline 0...fields.len - 1 => |idx| @typeInfo(fields[idx].type) == .Optional,
inline 0...fields.len - 1 => |idx| @typeInfo(fields[idx].type) == .optional,
else => return error.IndexOutOfBounds,
};
}


@ -1,13 +1,13 @@
const Writer = struct {
pub fn printValue(self: *Writer, value: anytype) !void {
switch (@typeInfo(@TypeOf(value))) {
.Int => {
.int => {
return self.writeInt(value);
},
.Float => {
.float => {
return self.writeFloat(value);
},
.Pointer => {
.pointer => {
return self.write(value);
},
else => {


@ -95,13 +95,13 @@ const Small = enum {
four,
};
test "std.meta.Tag" {
try expect(@typeInfo(Small).Enum.tag_type == u2);
try expect(@typeInfo(Small).@"enum".tag_type == u2);
}
// @typeInfo tells us the field count and the fields names:
test "@typeInfo" {
try expect(@typeInfo(Small).Enum.fields.len == 4);
try expect(mem.eql(u8, @typeInfo(Small).Enum.fields[1].name, "two"));
try expect(@typeInfo(Small).@"enum".fields.len == 4);
try expect(mem.eql(u8, @typeInfo(Small).@"enum".fields[1].name, "two"));
}
// @tagName gives a [:0]const u8 representation of an enum value:


@ -10,10 +10,10 @@ test "error union" {
foo = error.SomeError;
// Use compile-time reflection to access the payload type of an error union:
try comptime expect(@typeInfo(@TypeOf(foo)).ErrorUnion.payload == i32);
try comptime expect(@typeInfo(@TypeOf(foo)).error_union.payload == i32);
// Use compile-time reflection to access the error set type of an error union:
try comptime expect(@typeInfo(@TypeOf(foo)).ErrorUnion.error_set == anyerror);
try comptime expect(@typeInfo(@TypeOf(foo)).error_union.error_set == anyerror);
}
// test


@ -3,10 +3,10 @@ const math = std.math;
const testing = std.testing;
test "fn reflection" {
try testing.expect(@typeInfo(@TypeOf(testing.expect)).Fn.params[0].type.? == bool);
try testing.expect(@typeInfo(@TypeOf(testing.tmpDir)).Fn.return_type.? == testing.TmpDir);
try testing.expect(@typeInfo(@TypeOf(testing.expect)).@"fn".params[0].type.? == bool);
try testing.expect(@typeInfo(@TypeOf(testing.tmpDir)).@"fn".return_type.? == testing.TmpDir);
try testing.expect(@typeInfo(@TypeOf(math.Log2Int)).Fn.is_generic);
try testing.expect(@typeInfo(@TypeOf(math.Log2Int)).@"fn".is_generic);
}
// test


@ -17,8 +17,8 @@ const AnySlice = union(enum) {
};
fn withFor(any: AnySlice) usize {
const Tag = @typeInfo(AnySlice).Union.tag_type.?;
inline for (@typeInfo(Tag).Enum.fields) |field| {
const Tag = @typeInfo(AnySlice).@"union".tag_type.?;
inline for (@typeInfo(Tag).@"enum".fields) |field| {
// With `inline for` the function gets generated as
// a series of `if` statements relying on the optimizer
// to convert it to a switch.


@ -3,11 +3,11 @@ const expect = std.testing.expect;
const expectError = std.testing.expectError;
fn isFieldOptional(comptime T: type, field_index: usize) !bool {
const fields = @typeInfo(T).Struct.fields;
const fields = @typeInfo(T).@"struct".fields;
return switch (field_index) {
// This prong is analyzed twice with `idx` being a
// comptime-known value each time.
inline 0, 1 => |idx| @typeInfo(fields[idx].type) == .Optional,
inline 0, 1 => |idx| @typeInfo(fields[idx].type) == .optional,
else => return error.IndexOutOfBounds,
};
}


@ -8,7 +8,7 @@ test "optional type" {
foo = 1234;
// Use compile-time reflection to access the child type of the optional:
try comptime expect(@typeInfo(@TypeOf(foo)).Optional.child == i32);
try comptime expect(@typeInfo(@TypeOf(foo)).optional.child == i32);
}
// test


@ -17,7 +17,7 @@ test "pointer casting" {
test "pointer child type" {
// pointer types have a `child` field which tells you the type they point to.
try expect(@typeInfo(*u32).Pointer.child == u32);
try expect(@typeInfo(*u32).pointer.child == u32);
}
// test


@ -8,7 +8,7 @@ test "variable alignment" {
try expect(@TypeOf(&x) == *i32);
try expect(*i32 == *align(align_of_i32) i32);
if (builtin.target.cpu.arch == .x86_64) {
try expect(@typeInfo(*i32).Pointer.alignment == 4);
try expect(@typeInfo(*i32).pointer.alignment == 4);
}
}


@ -3,7 +3,7 @@ const expect = @import("std").testing.expect;
var foo: u8 align(4) = 100;
test "global variable alignment" {
try expect(@typeInfo(@TypeOf(&foo)).Pointer.alignment == 4);
try expect(@typeInfo(@TypeOf(&foo)).pointer.alignment == 4);
try expect(@TypeOf(&foo) == *align(4) u8);
const as_pointer_to_array: *align(4) [1]u8 = &foo;
const as_slice: []align(4) u8 = as_pointer_to_array;


@ -5,7 +5,7 @@ pub extern "c" fn printf(format: [*:0]const u8, ...) c_int;
test "variadic function" {
try testing.expect(printf("Hello, world!\n") == 14);
try testing.expect(@typeInfo(@TypeOf(printf)).Fn.is_var_args);
try testing.expect(@typeInfo(@TypeOf(printf)).@"fn".is_var_args);
}
// test


@ -67,7 +67,7 @@ pub fn requiredArgCount(attr: Tag) u32 {
comptime {
const fields = std.meta.fields(@field(attributes, @tagName(tag)));
for (fields) |arg_field| {
if (!mem.eql(u8, arg_field.name, "__name_tok") and @typeInfo(arg_field.type) != .Optional) needed += 1;
if (!mem.eql(u8, arg_field.name, "__name_tok") and @typeInfo(arg_field.type) != .optional) needed += 1;
}
}
return needed;
@ -93,7 +93,7 @@ pub fn maxArgCount(attr: Tag) u32 {
fn UnwrapOptional(comptime T: type) type {
return switch (@typeInfo(T)) {
.Optional => |optional| optional.child,
.optional => |optional| optional.child,
else => T,
};
}
@ -110,7 +110,7 @@ pub const Formatting = struct {
if (fields.len == 0) unreachable;
const Unwrapped = UnwrapOptional(fields[0].type);
if (@typeInfo(Unwrapped) != .Enum) unreachable;
if (@typeInfo(Unwrapped) != .@"enum") unreachable;
return if (Unwrapped.opts.enum_kind == .identifier) "'" else "\"";
},
@ -127,9 +127,9 @@ pub const Formatting = struct {
if (fields.len == 0) unreachable;
const Unwrapped = UnwrapOptional(fields[0].type);
if (@typeInfo(Unwrapped) != .Enum) unreachable;
if (@typeInfo(Unwrapped) != .@"enum") unreachable;
const enum_fields = @typeInfo(Unwrapped).Enum.fields;
const enum_fields = @typeInfo(Unwrapped).@"enum".fields;
@setEvalBranchQuota(3000);
const quote = comptime quoteChar(@enumFromInt(@intFromEnum(tag)));
comptime var values: []const u8 = quote ++ enum_fields[0].name ++ quote;
@ -152,7 +152,7 @@ pub fn wantsIdentEnum(attr: Tag) bool {
if (fields.len == 0) return false;
const Unwrapped = UnwrapOptional(fields[0].type);
if (@typeInfo(Unwrapped) != .Enum) return false;
if (@typeInfo(Unwrapped) != .@"enum") return false;
return Unwrapped.opts.enum_kind == .identifier;
},
@ -165,7 +165,7 @@ pub fn diagnoseIdent(attr: Tag, arguments: *Arguments, ident: []const u8) ?Diagn
const fields = std.meta.fields(@field(attributes, @tagName(tag)));
if (fields.len == 0) unreachable;
const Unwrapped = UnwrapOptional(fields[0].type);
if (@typeInfo(Unwrapped) != .Enum) unreachable;
if (@typeInfo(Unwrapped) != .@"enum") unreachable;
if (std.meta.stringToEnum(Unwrapped, normalize(ident))) |enum_val| {
@field(@field(arguments, @tagName(tag)), fields[0].name) = enum_val;
return null;
@ -239,7 +239,7 @@ fn diagnoseField(
const key = p.comp.interner.get(res.val.ref());
switch (key) {
.int => {
if (@typeInfo(Wanted) == .Int) {
if (@typeInfo(Wanted) == .int) {
@field(@field(arguments, decl.name), field.name) = res.val.toInt(Wanted, p.comp) orelse return .{
.tag = .attribute_int_out_of_range,
.extra = .{ .str = try res.str(p) },
@ -258,7 +258,7 @@ fn diagnoseField(
}
@field(@field(arguments, decl.name), field.name) = try p.removeNull(res.val);
return null;
} else if (@typeInfo(Wanted) == .Enum and @hasDecl(Wanted, "opts") and Wanted.opts.enum_kind == .string) {
} else if (@typeInfo(Wanted) == .@"enum" and @hasDecl(Wanted, "opts") and Wanted.opts.enum_kind == .string) {
const str = bytes[0 .. bytes.len - 1];
if (std.meta.stringToEnum(Wanted, str)) |enum_val| {
@field(@field(arguments, decl.name), field.name) = enum_val;
@ -293,7 +293,7 @@ fn invalidArgMsg(comptime Expected: type, actual: ArgumentType) Diagnostics.Mess
Alignment => .alignment,
CallingConvention => .identifier,
else => switch (@typeInfo(Expected)) {
.Enum => if (Expected.opts.enum_kind == .string) .string else .identifier,
.@"enum" => if (Expected.opts.enum_kind == .string) .string else .identifier,
else => unreachable,
},
}, .actual = actual } },
@ -303,7 +303,7 @@ fn invalidArgMsg(comptime Expected: type, actual: ArgumentType) Diagnostics.Mess
pub fn diagnose(attr: Tag, arguments: *Arguments, arg_idx: u32, res: Parser.Result, node: Tree.Node, p: *Parser) !?Diagnostics.Message {
switch (attr) {
inline else => |tag| {
const decl = @typeInfo(attributes).Struct.decls[@intFromEnum(tag)];
const decl = @typeInfo(attributes).@"struct".decls[@intFromEnum(tag)];
const max_arg_count = comptime maxArgCount(tag);
if (arg_idx >= max_arg_count) return Diagnostics.Message{
.tag = .attribute_too_many_args,
@ -641,7 +641,7 @@ const attributes = struct {
pub const Tag = std.meta.DeclEnum(attributes);
pub const Arguments = blk: {
const decls = @typeInfo(attributes).Struct.decls;
const decls = @typeInfo(attributes).@"struct".decls;
var union_fields: [decls.len]ZigType.UnionField = undefined;
for (decls, &union_fields) |decl, *field| {
field.* = .{
@ -652,7 +652,7 @@ pub const Arguments = blk: {
}
break :blk @Type(.{
.Union = .{
.@"union" = .{
.layout = .auto,
.tag_type = null,
.fields = &union_fields,
@ -662,7 +662,7 @@ pub const Arguments = blk: {
};
pub fn ArgumentsForTag(comptime tag: Tag) type {
const decl = @typeInfo(attributes).Struct.decls[@intFromEnum(tag)];
const decl = @typeInfo(attributes).@"struct".decls[@intFromEnum(tag)];
return @field(attributes, decl.name);
}
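Reification follows the same convention: anonymous `std.builtin.Type` values passed to `@Type`, like the `.@"union"` initializer above, now use the lowercase field names. A minimal sketch, illustrative only and not part of this diff:

const std = @import("std");

// Reify an unsigned integer type of the requested width using the
// renamed `.int` field (previously `.Int`).
fn UnsignedWithBits(comptime bits: u16) type {
    return @Type(.{ .int = .{ .signedness = .unsigned, .bits = bits } });
}

test "reified integer type" {
    try std.testing.expect(UnsignedWithBits(24) == u24);
}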


@ -61,7 +61,7 @@ pub const Environment = struct {
var env: Environment = .{};
errdefer env.deinit(allocator);
inline for (@typeInfo(@TypeOf(env)).Struct.fields) |field| {
inline for (@typeInfo(@TypeOf(env)).@"struct".fields) |field| {
std.debug.assert(@field(env, field.name) == null);
var env_var_buf: [field.name.len]u8 = undefined;
@ -78,7 +78,7 @@ pub const Environment = struct {
/// Use this only if environment slices were allocated with `allocator` (such as via `loadAll`)
pub fn deinit(self: *Environment, allocator: std.mem.Allocator) void {
inline for (@typeInfo(@TypeOf(self.*)).Struct.fields) |field| {
inline for (@typeInfo(@TypeOf(self.*)).@"struct".fields) |field| {
if (@field(self, field.name)) |slice| {
allocator.free(slice);
}


@ -707,7 +707,7 @@ fn dumpAttribute(tree: *const Tree, attr: Attribute, writer: anytype) !void {
switch (attr.tag) {
inline else => |tag| {
const args = @field(attr.args, @tagName(tag));
const fields = @typeInfo(@TypeOf(args)).Struct.fields;
const fields = @typeInfo(@TypeOf(args)).@"struct".fields;
if (fields.len == 0) {
try writer.writeByte('\n');
return;
@ -724,7 +724,7 @@ fn dumpAttribute(tree: *const Tree, attr: Attribute, writer: anytype) !void {
Interner.Ref => try writer.print("\"{s}\"", .{tree.interner.get(@field(args, f.name)).bytes}),
?Interner.Ref => try writer.print("\"{?s}\"", .{if (@field(args, f.name)) |str| tree.interner.get(str).bytes else null}),
else => switch (@typeInfo(f.type)) {
.Enum => try writer.writeAll(@tagName(@field(args, f.name))),
.@"enum" => try writer.writeAll(@tagName(@field(args, f.name))),
else => try writer.print("{any}", .{@field(args, f.name)}),
},
}


@ -24,7 +24,7 @@ pub fn intern(comp: *Compilation, k: Interner.Key) !Value {
pub fn int(i: anytype, comp: *Compilation) !Value {
const info = @typeInfo(@TypeOf(i));
if (info == .ComptimeInt or info.Int.signedness == .unsigned) {
if (info == .comptime_int or info.int.signedness == .unsigned) {
return intern(comp, .{ .int = .{ .u64 = i } });
} else {
return intern(comp, .{ .int = .{ .i64 = i } });


@ -505,7 +505,7 @@ pub fn put(i: *Interner, gpa: Allocator, key: Key) !Ref {
});
},
.record_ty => |elems| {
try i.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.Record).Struct.fields.len +
try i.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.Record).@"struct".fields.len +
elems.len);
i.items.appendAssumeCapacity(.{
.tag = .record_ty,
@ -527,14 +527,14 @@ pub fn put(i: *Interner, gpa: Allocator, key: Key) !Ref {
}
fn addExtra(i: *Interner, gpa: Allocator, extra: anytype) Allocator.Error!u32 {
const fields = @typeInfo(@TypeOf(extra)).Struct.fields;
const fields = @typeInfo(@TypeOf(extra)).@"struct".fields;
try i.extra.ensureUnusedCapacity(gpa, fields.len);
return i.addExtraAssumeCapacity(extra);
}
fn addExtraAssumeCapacity(i: *Interner, extra: anytype) u32 {
const result = @as(u32, @intCast(i.extra.items.len));
inline for (@typeInfo(@TypeOf(extra)).Struct.fields) |field| {
inline for (@typeInfo(@TypeOf(extra)).@"struct".fields) |field| {
i.extra.appendAssumeCapacity(switch (field.type) {
Ref => @intFromEnum(@field(extra, field.name)),
u32 => @field(extra, field.name),
@ -631,7 +631,7 @@ fn extraData(i: *const Interner, comptime T: type, index: usize) T {
fn extraDataTrail(i: *const Interner, comptime T: type, index: usize) struct { data: T, end: u32 } {
var result: T = undefined;
const fields = @typeInfo(T).Struct.fields;
const fields = @typeInfo(T).@"struct".fields;
inline for (fields, 0..) |field, field_i| {
const int32 = i.extra.items[field_i + index];
@field(result, field.name) = switch (field.type) {


@ -168,7 +168,7 @@ pub fn translate(
context.pattern_list.deinit(gpa);
}
inline for (@typeInfo(std.zig.c_builtins).Struct.decls) |decl| {
inline for (@typeInfo(std.zig.c_builtins).@"struct".decls) |decl| {
const builtin_fn = try ZigTag.pub_var_simple.create(arena, .{
.name = decl.name,
.init = try ZigTag.import_c_builtin.create(arena, decl.name),


@ -224,7 +224,7 @@ pub const Compression = enum(u32) {
};
fn structFieldsLittleToNative(comptime T: type, x: *T) void {
inline for (@typeInfo(T).Struct.fields) |field| {
inline for (@typeInfo(T).@"struct".fields) |field| {
@field(x, field.name) = std.mem.littleToNative(field.type, @field(x, field.name));
}
}


@ -259,7 +259,7 @@ pub const CodePage = enum(u16) {
pub fn getByIdentifier(identifier: u16) !CodePage {
// There's probably a more efficient way to do this (e.g. ComptimeHashMap?) but
// this should be fine, especially since this function likely won't be called much.
inline for (@typeInfo(CodePage).Enum.fields) |enumField| {
inline for (@typeInfo(CodePage).@"enum".fields) |enumField| {
if (identifier == enumField.value) {
return @field(CodePage, enumField.name);
}


@ -250,7 +250,7 @@ pub const ErrorDetails = struct {
});
pub fn writeCommaSeparated(self: ExpectedTypes, writer: anytype) !void {
const struct_info = @typeInfo(ExpectedTypes).Struct;
const struct_info = @typeInfo(ExpectedTypes).@"struct";
const num_real_fields = struct_info.fields.len - 1;
const num_padding_bits = @bitSizeOf(ExpectedTypes) - num_real_fields;
const mask = std.math.maxInt(struct_info.backing_integer.?) >> num_padding_bits;


@ -14,7 +14,7 @@ pub fn read(allocator: std.mem.Allocator, reader: anytype, max_size: u64) ReadEr
// Some Reader implementations have an empty ReadError error set which would
// cause 'unreachable else' if we tried to use an else in the switch, so we
// need to detect this case and not try to translate to ReadError
const empty_reader_errorset = @typeInfo(@TypeOf(reader).Error).ErrorSet == null or @typeInfo(@TypeOf(reader).Error).ErrorSet.?.len == 0;
const empty_reader_errorset = @typeInfo(@TypeOf(reader).Error).error_set == null or @typeInfo(@TypeOf(reader).Error).error_set.?.len == 0;
if (empty_reader_errorset) {
return readAnyError(allocator, reader, max_size) catch |err| switch (err) {
error.EndOfStream => error.UnexpectedEOF,


@ -87,7 +87,7 @@ pub fn tagToId(tag: []const u8) error{InvalidLanguageTag}!?LanguageId {
if (parsed.multiple_suffixes) return null;
const longest_known_tag = comptime blk: {
var len = 0;
for (@typeInfo(LanguageId).Enum.fields) |field| {
for (@typeInfo(LanguageId).@"enum".fields) |field| {
if (field.name.len > len) len = field.name.len;
}
break :blk len;


@ -265,7 +265,7 @@ fn mainTerminal() void {
pub fn log(
comptime message_level: std.log.Level,
comptime scope: @Type(.EnumLiteral),
comptime scope: @Type(.enum_literal),
comptime format: []const u8,
args: anytype,
) void {


@ -7,7 +7,7 @@ const normalize = common.normalize;
///
/// https://github.com/llvm/llvm-project/blob/02d85149a05cb1f6dc49f0ba7a2ceca53718ae17/compiler-rt/lib/builtins/fp_add_impl.inc
pub inline fn addf3(comptime T: type, a: T, b: T) T {
const bits = @typeInfo(T).Float.bits;
const bits = @typeInfo(T).float.bits;
const Z = std.meta.Int(.unsigned, bits);
const typeWidth = bits;


@ -130,7 +130,7 @@ pub fn ceilq(x: f128) callconv(.C) f128 {
}
pub fn ceill(x: c_longdouble) callconv(.C) c_longdouble {
switch (@typeInfo(c_longdouble).Float.bits) {
switch (@typeInfo(c_longdouble).float.bits) {
16 => return __ceilh(x),
32 => return ceilf(x),
64 => return ceil(x),


@ -219,8 +219,8 @@ pub fn wideMultiply(comptime Z: type, a: Z, b: Z, hi: *Z, lo: *Z) void {
}
}
pub fn normalize(comptime T: type, significand: *std.meta.Int(.unsigned, @typeInfo(T).Float.bits)) i32 {
const Z = std.meta.Int(.unsigned, @typeInfo(T).Float.bits);
pub fn normalize(comptime T: type, significand: *std.meta.Int(.unsigned, @typeInfo(T).float.bits)) i32 {
const Z = std.meta.Int(.unsigned, @typeInfo(T).float.bits);
const integerBit = @as(Z, 1) << std.math.floatFractionalBits(T);
const shift = @clz(significand.*) - @clz(integerBit);
@ -230,8 +230,8 @@ pub fn normalize(comptime T: type, significand: *std.meta.Int(.unsigned, @typeIn
pub inline fn fneg(a: anytype) @TypeOf(a) {
const F = @TypeOf(a);
const bits = @typeInfo(F).Float.bits;
const U = @Type(.{ .Int = .{
const bits = @typeInfo(F).float.bits;
const U = @Type(.{ .int = .{
.signedness = .unsigned,
.bits = bits,
} });
@ -244,7 +244,7 @@ pub inline fn fneg(a: anytype) @TypeOf(a) {
/// signed or unsigned integers.
pub fn HalveInt(comptime T: type, comptime signed_half: bool) type {
return extern union {
pub const bits = @divExact(@typeInfo(T).Int.bits, 2);
pub const bits = @divExact(@typeInfo(T).int.bits, 2);
pub const HalfTU = std.meta.Int(.unsigned, bits);
pub const HalfTS = std.meta.Int(.signed, bits);
pub const HalfT = if (signed_half) HalfTS else HalfTU;


@ -17,7 +17,7 @@ pub const GE = enum(i32) {
};
pub inline fn cmpf2(comptime T: type, comptime RT: type, a: T, b: T) RT {
const bits = @typeInfo(T).Float.bits;
const bits = @typeInfo(T).float.bits;
const srep_t = std.meta.Int(.signed, bits);
const rep_t = std.meta.Int(.unsigned, bits);
@ -109,7 +109,7 @@ test "cmp_f80" {
}
pub inline fn unordcmp(comptime T: type, a: T, b: T) i32 {
const rep_t = std.meta.Int(.unsigned, @typeInfo(T).Float.bits);
const rep_t = std.meta.Int(.unsigned, @typeInfo(T).float.bits);
const significandBits = std.math.floatMantissaBits(T);
const exponentBits = std.math.floatExponentBits(T);


@ -124,7 +124,7 @@ pub fn cosq(a: f128) callconv(.C) f128 {
}
pub fn cosl(x: c_longdouble) callconv(.C) c_longdouble {
switch (@typeInfo(c_longdouble).Float.bits) {
switch (@typeInfo(c_longdouble).float.bits) {
16 => return __cosh(x),
32 => return cosf(x),
64 => return cos(x),


@ -203,7 +203,7 @@ pub fn __ctzti2(a: i128) callconv(.C) i32 {
}
inline fn ffsXi2(comptime T: type, a: T) i32 {
var x: std.meta.Int(.unsigned, @typeInfo(T).Int.bits) = @bitCast(a);
var x: std.meta.Int(.unsigned, @typeInfo(T).int.bits) = @bitCast(a);
var n: T = 1;
// adapted from Number of trailing zeroes (see ctzXi2)
var mask: @TypeOf(x) = std.math.maxInt(@TypeOf(x));


@ -201,7 +201,7 @@ pub fn expq(a: f128) callconv(.C) f128 {
}
pub fn expl(x: c_longdouble) callconv(.C) c_longdouble {
switch (@typeInfo(c_longdouble).Float.bits) {
switch (@typeInfo(c_longdouble).float.bits) {
16 => return __exph(x),
32 => return expf(x),
64 => return exp(x),


@ -168,7 +168,7 @@ pub fn exp2q(x: f128) callconv(.C) f128 {
}
pub fn exp2l(x: c_longdouble) callconv(.C) c_longdouble {
switch (@typeInfo(c_longdouble).Float.bits) {
switch (@typeInfo(c_longdouble).float.bits) {
16 => return __exp2h(x),
32 => return exp2f(x),
64 => return exp2(x),


@ -3,10 +3,10 @@ const std = @import("std");
pub inline fn extendf(
comptime dst_t: type,
comptime src_t: type,
a: std.meta.Int(.unsigned, @typeInfo(src_t).Float.bits),
a: std.meta.Int(.unsigned, @typeInfo(src_t).float.bits),
) dst_t {
const src_rep_t = std.meta.Int(.unsigned, @typeInfo(src_t).Float.bits);
const dst_rep_t = std.meta.Int(.unsigned, @typeInfo(dst_t).Float.bits);
const src_rep_t = std.meta.Int(.unsigned, @typeInfo(src_t).float.bits);
const dst_rep_t = std.meta.Int(.unsigned, @typeInfo(dst_t).float.bits);
const srcSigBits = std.math.floatMantissaBits(src_t);
const dstSigBits = std.math.floatMantissaBits(dst_t);
@ -70,8 +70,8 @@ pub inline fn extendf(
return @bitCast(result);
}
pub inline fn extend_f80(comptime src_t: type, a: std.meta.Int(.unsigned, @typeInfo(src_t).Float.bits)) f80 {
const src_rep_t = std.meta.Int(.unsigned, @typeInfo(src_t).Float.bits);
pub inline fn extend_f80(comptime src_t: type, a: std.meta.Int(.unsigned, @typeInfo(src_t).float.bits)) f80 {
const src_rep_t = std.meta.Int(.unsigned, @typeInfo(src_t).float.bits);
const src_sig_bits = std.math.floatMantissaBits(src_t);
const dst_int_bit = 0x8000000000000000;
const dst_sig_bits = std.math.floatMantissaBits(f80) - 1; // -1 for the integer bit


@ -38,7 +38,7 @@ pub fn fabsq(a: f128) callconv(.C) f128 {
}
pub fn fabsl(x: c_longdouble) callconv(.C) c_longdouble {
switch (@typeInfo(c_longdouble).Float.bits) {
switch (@typeInfo(c_longdouble).float.bits) {
16 => return __fabsh(x),
32 => return fabsf(x),
64 => return fabs(x),
@ -50,7 +50,7 @@ pub fn fabsl(x: c_longdouble) callconv(.C) c_longdouble {
inline fn generic_fabs(x: anytype) @TypeOf(x) {
const T = @TypeOf(x);
const TBits = std.meta.Int(.unsigned, @typeInfo(T).Float.bits);
const TBits = std.meta.Int(.unsigned, @typeInfo(T).float.bits);
const float_bits: TBits = @bitCast(x);
const remove_sign = ~@as(TBits, 0) >> 1;
return @bitCast(float_bits & remove_sign);


@ -18,7 +18,7 @@ pub fn floatFromInt(comptime T: type, x: anytype) T {
const max_exp = exp_bias;
// Sign
const abs_val = if (@TypeOf(x) == comptime_int or @typeInfo(@TypeOf(x)).Int.signedness == .signed) @abs(x) else x;
const abs_val = if (@TypeOf(x) == comptime_int or @typeInfo(@TypeOf(x)).int.signedness == .signed) @abs(x) else x;
const sign_bit = if (x < 0) @as(uT, 1) << (float_bits - 1) else 0;
var result: uT = sign_bit;


@ -160,7 +160,7 @@ pub fn floorq(x: f128) callconv(.C) f128 {
}
pub fn floorl(x: c_longdouble) callconv(.C) c_longdouble {
switch (@typeInfo(c_longdouble).Float.bits) {
switch (@typeInfo(c_longdouble).float.bits) {
16 => return __floorh(x),
32 => return floorf(x),
64 => return floor(x),


@ -151,7 +151,7 @@ pub fn fmaq(x: f128, y: f128, z: f128) callconv(.C) f128 {
}
pub fn fmal(x: c_longdouble, y: c_longdouble, z: c_longdouble) callconv(.C) c_longdouble {
switch (@typeInfo(c_longdouble).Float.bits) {
switch (@typeInfo(c_longdouble).float.bits) {
16 => return __fmah(x, y, z),
32 => return fmaf(x, y, z),
64 => return fma(x, y, z),


@ -39,7 +39,7 @@ pub fn fmaxq(x: f128, y: f128) callconv(.C) f128 {
}
pub fn fmaxl(x: c_longdouble, y: c_longdouble) callconv(.C) c_longdouble {
switch (@typeInfo(c_longdouble).Float.bits) {
switch (@typeInfo(c_longdouble).float.bits) {
16 => return __fmaxh(x, y),
32 => return fmaxf(x, y),
64 => return fmax(x, y),


@ -39,7 +39,7 @@ pub fn fminq(x: f128, y: f128) callconv(.C) f128 {
}
pub fn fminl(x: c_longdouble, y: c_longdouble) callconv(.C) c_longdouble {
switch (@typeInfo(c_longdouble).Float.bits) {
switch (@typeInfo(c_longdouble).float.bits) {
16 => return __fminh(x, y),
32 => return fminf(x, y),
64 => return fmin(x, y),


@ -251,7 +251,7 @@ pub fn fmodq(a: f128, b: f128) callconv(.C) f128 {
}
pub fn fmodl(a: c_longdouble, b: c_longdouble) callconv(.C) c_longdouble {
switch (@typeInfo(c_longdouble).Float.bits) {
switch (@typeInfo(c_longdouble).float.bits) {
16 => return __fmodh(a, b),
32 => return fmodf(a, b),
64 => return fmod(a, b),
@ -262,7 +262,7 @@ pub fn fmodl(a: c_longdouble, b: c_longdouble) callconv(.C) c_longdouble {
}
inline fn generic_fmod(comptime T: type, x: T, y: T) T {
const bits = @typeInfo(T).Float.bits;
const bits = @typeInfo(T).float.bits;
const uint = std.meta.Int(.unsigned, bits);
comptime assert(T == f32 or T == f64);
const digits = if (T == f32) 23 else 52;


@ -4,8 +4,8 @@ const Log2Int = math.Log2Int;
pub inline fn intFromFloat(comptime I: type, a: anytype) I {
const F = @TypeOf(a);
const float_bits = @typeInfo(F).Float.bits;
const int_bits = @typeInfo(I).Int.bits;
const float_bits = @typeInfo(F).float.bits;
const int_bits = @typeInfo(I).int.bits;
const rep_t = Int(.unsigned, float_bits);
const sig_bits = math.floatMantissaBits(F);
const exp_bits = math.floatExponentBits(F);
@ -26,7 +26,7 @@ pub inline fn intFromFloat(comptime I: type, a: anytype) I {
if (exponent < 0) return 0;
// If the value is too large for the integer type, saturate.
switch (@typeInfo(I).Int.signedness) {
switch (@typeInfo(I).int.signedness) {
.unsigned => {
if (negative) return 0;
if (@as(c_uint, @intCast(exponent)) >= @min(int_bits, max_exp)) return math.maxInt(I);
@ -45,7 +45,7 @@ pub inline fn intFromFloat(comptime I: type, a: anytype) I {
result = @as(I, @intCast(significand)) << @intCast(exponent - fractional_bits);
}
if ((@typeInfo(I).Int.signedness == .signed) and negative)
if ((@typeInfo(I).int.signedness == .signed) and negative)
return ~result +% 1;
return result;
}


@ -149,7 +149,7 @@ pub fn logq(a: f128) callconv(.C) f128 {
}
pub fn logl(x: c_longdouble) callconv(.C) c_longdouble {
switch (@typeInfo(c_longdouble).Float.bits) {
switch (@typeInfo(c_longdouble).float.bits) {
16 => return __logh(x),
32 => return logf(x),
64 => return log(x),


@ -177,7 +177,7 @@ pub fn log10q(a: f128) callconv(.C) f128 {
}
pub fn log10l(x: c_longdouble) callconv(.C) c_longdouble {
switch (@typeInfo(c_longdouble).Float.bits) {
switch (@typeInfo(c_longdouble).float.bits) {
16 => return __log10h(x),
32 => return log10f(x),
64 => return log10(x),


@ -169,7 +169,7 @@ pub fn log2q(a: f128) callconv(.C) f128 {
}
pub fn log2l(x: c_longdouble) callconv(.C) c_longdouble {
switch (@typeInfo(c_longdouble).Float.bits) {
switch (@typeInfo(c_longdouble).float.bits) {
16 => return __log2h(x),
32 => return log2f(x),
64 => return log2(x),


@ -7,7 +7,7 @@ const common = @import("./common.zig");
/// https://github.com/llvm/llvm-project/blob/2ffb1b0413efa9a24eb3c49e710e36f92e2cb50b/compiler-rt/lib/builtins/fp_mul_impl.inc
pub inline fn mulf3(comptime T: type, a: T, b: T) T {
@setRuntimeSafety(builtin.is_test);
const typeWidth = @typeInfo(T).Float.bits;
const typeWidth = @typeInfo(T).float.bits;
const significandBits = math.floatMantissaBits(T);
const fractionalBits = math.floatFractionalBits(T);
const exponentBits = math.floatExponentBits(T);
@ -16,7 +16,7 @@ pub inline fn mulf3(comptime T: type, a: T, b: T) T {
// ZSignificand is large enough to contain the significand, including an explicit integer bit
const ZSignificand = PowerOfTwoSignificandZ(T);
const ZSignificandBits = @typeInfo(ZSignificand).Int.bits;
const ZSignificandBits = @typeInfo(ZSignificand).int.bits;
const roundBit = (1 << (ZSignificandBits - 1));
const signBit = (@as(Z, 1) << (significandBits + exponentBits));
@ -164,7 +164,7 @@ pub inline fn mulf3(comptime T: type, a: T, b: T) T {
/// This is analogous to an shr version of `@shlWithOverflow`
fn wideShrWithTruncation(comptime Z: type, hi: *Z, lo: *Z, count: u32) bool {
@setRuntimeSafety(builtin.is_test);
const typeWidth = @typeInfo(Z).Int.bits;
const typeWidth = @typeInfo(Z).int.bits;
var inexact = false;
if (count < typeWidth) {
inexact = (lo.* << @intCast(typeWidth -% count)) != 0;


@ -26,7 +26,7 @@ pub fn __parityti2(a: i128) callconv(.C) i32 {
}
inline fn parityXi2(comptime T: type, a: T) i32 {
var x: std.meta.Int(.unsigned, @typeInfo(T).Int.bits) = @bitCast(a);
var x: std.meta.Int(.unsigned, @typeInfo(T).int.bits) = @bitCast(a);
// Bit Twiddling Hacks: Compute parity in parallel
comptime var shift: u8 = @bitSizeOf(T) / 2;
inline while (shift > 2) {


@ -142,7 +142,7 @@ pub fn roundq(x_: f128) callconv(.C) f128 {
}
pub fn roundl(x: c_longdouble) callconv(.C) c_longdouble {
switch (@typeInfo(c_longdouble).Float.bits) {
switch (@typeInfo(c_longdouble).float.bits) {
16 => return __roundh(x),
32 => return roundf(x),
64 => return round(x),


@ -130,7 +130,7 @@ pub fn sinq(x: f128) callconv(.C) f128 {
}
pub fn sinl(x: c_longdouble) callconv(.C) c_longdouble {
switch (@typeInfo(c_longdouble).Float.bits) {
switch (@typeInfo(c_longdouble).float.bits) {
16 => return __sinh(x),
32 => return sinf(x),
64 => return sin(x),


@ -198,7 +198,7 @@ pub fn sincosq(x: f128, r_sin: *f128, r_cos: *f128) callconv(.C) void {
}
pub fn sincosl(x: c_longdouble, r_sin: *c_longdouble, r_cos: *c_longdouble) callconv(.C) void {
switch (@typeInfo(c_longdouble).Float.bits) {
switch (@typeInfo(c_longdouble).float.bits) {
16 => return __sincosh(x, r_sin, r_cos),
32 => return sincosf(x, r_sin, r_cos),
64 => return sincos(x, r_sin, r_cos),
@ -216,7 +216,7 @@ pub const rem_pio2_generic = @compileError("TODO");
/// * trig.cos_generic ported from __cosl.c
inline fn sincos_generic(comptime F: type, x: F, r_sin: *F, r_cos: *F) void {
const sc1pio4: F = 1.0 * math.pi / 4.0;
const bits = @typeInfo(F).Float.bits;
const bits = @typeInfo(F).float.bits;
const I = std.meta.Int(.unsigned, bits);
const ix = @as(I, @bitCast(x)) & (math.maxInt(I) >> 1);
const se: u16 = @truncate(ix >> (bits - 16));


@ -243,7 +243,7 @@ pub fn sqrtq(x: f128) callconv(.C) f128 {
}
pub fn sqrtl(x: c_longdouble) callconv(.C) c_longdouble {
switch (@typeInfo(c_longdouble).Float.bits) {
switch (@typeInfo(c_longdouble).float.bits) {
16 => return __sqrth(x),
32 => return sqrtf(x),
64 => return sqrt(x),


@ -116,7 +116,7 @@ pub fn tanq(x: f128) callconv(.C) f128 {
}
pub fn tanl(x: c_longdouble) callconv(.C) c_longdouble {
switch (@typeInfo(c_longdouble).Float.bits) {
switch (@typeInfo(c_longdouble).float.bits) {
16 => return __tanh(x),
32 => return tanf(x),
64 => return tan(x),


@ -100,7 +100,7 @@ pub fn truncq(x: f128) callconv(.C) f128 {
}
pub fn truncl(x: c_longdouble) callconv(.C) c_longdouble {
switch (@typeInfo(c_longdouble).Float.bits) {
switch (@typeInfo(c_longdouble).float.bits) {
16 => return __trunch(x),
32 => return truncf(x),
64 => return trunc(x),


@ -1,14 +1,14 @@
const std = @import("std");
pub inline fn truncf(comptime dst_t: type, comptime src_t: type, a: src_t) dst_t {
const src_rep_t = std.meta.Int(.unsigned, @typeInfo(src_t).Float.bits);
const dst_rep_t = std.meta.Int(.unsigned, @typeInfo(dst_t).Float.bits);
const src_rep_t = std.meta.Int(.unsigned, @typeInfo(src_t).float.bits);
const dst_rep_t = std.meta.Int(.unsigned, @typeInfo(dst_t).float.bits);
const srcSigBits = std.math.floatMantissaBits(src_t);
const dstSigBits = std.math.floatMantissaBits(dst_t);
// Various constants whose values follow from the type parameters.
// Any reasonable optimizer will fold and propagate all of these.
const srcBits = @typeInfo(src_t).Float.bits;
const srcBits = @typeInfo(src_t).float.bits;
const srcExpBits = srcBits - srcSigBits - 1;
const srcInfExp = (1 << srcExpBits) - 1;
const srcExpBias = srcInfExp >> 1;
@ -23,7 +23,7 @@ pub inline fn truncf(comptime dst_t: type, comptime src_t: type, a: src_t) dst_t
const srcQNaN = 1 << (srcSigBits - 1);
const srcNaNCode = srcQNaN - 1;
const dstBits = @typeInfo(dst_t).Float.bits;
const dstBits = @typeInfo(dst_t).float.bits;
const dstExpBits = dstBits - dstSigBits - 1;
const dstInfExp = (1 << dstExpBits) - 1;
const dstExpBias = dstInfExp >> 1;
@ -100,7 +100,7 @@ pub inline fn truncf(comptime dst_t: type, comptime src_t: type, a: src_t) dst_t
}
pub inline fn trunc_f80(comptime dst_t: type, a: f80) dst_t {
const dst_rep_t = std.meta.Int(.unsigned, @typeInfo(dst_t).Float.bits);
const dst_rep_t = std.meta.Int(.unsigned, @typeInfo(dst_t).float.bits);
const src_sig_bits = std.math.floatMantissaBits(f80) - 1; // -1 for the integer bit
const dst_sig_bits = std.math.floatMantissaBits(dst_t);
@ -109,7 +109,7 @@ pub inline fn trunc_f80(comptime dst_t: type, a: f80) dst_t {
const round_mask = (1 << (src_sig_bits - dst_sig_bits)) - 1;
const halfway = 1 << (src_sig_bits - dst_sig_bits - 1);
const dst_bits = @typeInfo(dst_t).Float.bits;
const dst_bits = @typeInfo(dst_t).float.bits;
const dst_exp_bits = dst_bits - dst_sig_bits - 1;
const dst_inf_exp = (1 << dst_exp_bits) - 1;
const dst_exp_bias = dst_inf_exp >> 1;


@ -14,7 +14,7 @@ pub fn __trunctfxf2(a: f128) callconv(.C) f80 {
// Various constants whose values follow from the type parameters.
// Any reasonable optimizer will fold and propagate all of these.
const src_bits = @typeInfo(f128).Float.bits;
const src_bits = @typeInfo(f128).float.bits;
const src_exp_bits = src_bits - src_sig_bits - 1;
const src_inf_exp = 0x7FFF;


@ -36,7 +36,7 @@ pub const Category = union(enum(u8)) {
/// A function that returns a type.
type_function: Ast.Node.Index,
pub const Tag = @typeInfo(Category).Union.tag_type.?;
pub const Tag = @typeInfo(Category).@"union".tag_type.?;
};
pub const File = struct {


@ -170,7 +170,7 @@ pub fn ExtraData(comptime T: type) type {
}
pub fn extraData(doc: Document, comptime T: type, index: ExtraIndex) ExtraData(T) {
const fields = @typeInfo(T).Struct.fields;
const fields = @typeInfo(T).@"struct".fields;
var i: usize = @intFromEnum(index);
var result: T = undefined;
inline for (fields) |field| {


@ -1564,7 +1564,7 @@ fn parseInlines(p: *Parser, content: []const u8) !ExtraIndex {
}
pub fn extraData(p: Parser, comptime T: type, index: ExtraIndex) ExtraData(T) {
const fields = @typeInfo(T).Struct.fields;
const fields = @typeInfo(T).@"struct".fields;
var i: usize = @intFromEnum(index);
var result: T = undefined;
inline for (fields) |field| {


@ -13,7 +13,7 @@ var log_file: ?std.fs.File = null;
fn logOverride(
comptime level: std.log.Level,
comptime scope: @TypeOf(.EnumLiteral),
comptime scope: @Type(.enum_literal),
comptime format: []const u8,
args: anytype,
) void {
@ -178,7 +178,7 @@ const Fuzzer = struct {
addr: usize,
flags: packed struct(usize) {
entry: bool,
_: @Type(.{ .Int = .{ .signedness = .unsigned, .bits = @bitSizeOf(usize) - 1 } }),
_: @Type(.{ .int = .{ .signedness = .unsigned, .bits = @bitSizeOf(usize) - 1 } }),
},
};


@ -408,7 +408,7 @@ fn createChildOnly(
fn userInputOptionsFromArgs(allocator: Allocator, args: anytype) UserInputOptionsMap {
var user_input_options = UserInputOptionsMap.init(allocator);
inline for (@typeInfo(@TypeOf(args)).Struct.fields) |field| {
inline for (@typeInfo(@TypeOf(args)).@"struct".fields) |field| {
const v = @field(args, field.name);
const T = @TypeOf(v);
switch (T) {
@ -454,28 +454,28 @@ fn userInputOptionsFromArgs(allocator: Allocator, args: anytype) UserInputOption
}) catch @panic("OOM");
},
else => switch (@typeInfo(T)) {
.Bool => {
.bool => {
user_input_options.put(field.name, .{
.name = field.name,
.value = .{ .scalar = if (v) "true" else "false" },
.used = false,
}) catch @panic("OOM");
},
.Enum, .EnumLiteral => {
.@"enum", .enum_literal => {
user_input_options.put(field.name, .{
.name = field.name,
.value = .{ .scalar = @tagName(v) },
.used = false,
}) catch @panic("OOM");
},
.ComptimeInt, .Int => {
.comptime_int, .int => {
user_input_options.put(field.name, .{
.name = field.name,
.value = .{ .scalar = std.fmt.allocPrint(allocator, "{d}", .{v}) catch @panic("OOM") },
.used = false,
}) catch @panic("OOM");
},
.ComptimeFloat, .Float => {
.comptime_float, .float => {
user_input_options.put(field.name, .{
.name = field.name,
.value = .{ .scalar = std.fmt.allocPrint(allocator, "{e}", .{v}) catch @panic("OOM") },
@ -1111,7 +1111,7 @@ pub fn option(b: *Build, comptime T: type, name_raw: []const u8, description_raw
const description = b.dupe(description_raw);
const type_id = comptime typeToEnum(T);
const enum_options = if (type_id == .@"enum" or type_id == .enum_list) blk: {
const EnumType = if (type_id == .enum_list) @typeInfo(T).Pointer.child else T;
const EnumType = if (type_id == .enum_list) @typeInfo(T).pointer.child else T;
const fields = comptime std.meta.fields(EnumType);
var options = ArrayList([]const u8).initCapacity(b.allocator, fields.len) catch @panic("OOM");
@ -1265,7 +1265,7 @@ pub fn option(b: *Build, comptime T: type, name_raw: []const u8, description_raw
return null;
},
.scalar => |s| {
const Child = @typeInfo(T).Pointer.child;
const Child = @typeInfo(T).pointer.child;
const value = std.meta.stringToEnum(Child, s) orelse {
log.err("Expected -D{s} to be of type {s}.", .{ name, @typeName(Child) });
b.markInvalidUserInput();
@ -1274,7 +1274,7 @@ pub fn option(b: *Build, comptime T: type, name_raw: []const u8, description_raw
return b.allocator.dupe(Child, &[_]Child{value}) catch @panic("OOM");
},
.list => |lst| {
const Child = @typeInfo(T).Pointer.child;
const Child = @typeInfo(T).pointer.child;
var new_list = b.allocator.alloc(Child, lst.items.len) catch @panic("OOM");
for (lst.items, 0..) |str, i| {
const value = std.meta.stringToEnum(Child, str) orelse {
@ -1542,15 +1542,15 @@ fn typeToEnum(comptime T: type) TypeId {
return switch (T) {
std.zig.BuildId => .build_id,
else => return switch (@typeInfo(T)) {
.Int => .int,
.Float => .float,
.Bool => .bool,
.Enum => .@"enum",
.Pointer => |pointer| switch (pointer.child) {
.int => .int,
.float => .float,
.bool => .bool,
.@"enum" => .@"enum",
.pointer => |pointer| switch (pointer.child) {
u8 => .string,
[]const u8 => .list,
else => switch (@typeInfo(pointer.child)) {
.Enum => .enum_list,
.@"enum" => .enum_list,
else => @compileError("Unsupported type: " ++ @typeName(T)),
},
},
@ -1726,7 +1726,7 @@ pub fn fmt(b: *Build, comptime format: []const u8, args: anytype) []u8 {
}
fn supportedWindowsProgramExtension(ext: []const u8) bool {
inline for (@typeInfo(std.process.Child.WindowsExtension).Enum.fields) |field| {
inline for (@typeInfo(std.process.Child.WindowsExtension).@"enum".fields) |field| {
if (std.ascii.eqlIgnoreCase(ext, "." ++ field.name)) return true;
}
return false;
@ -1925,7 +1925,7 @@ inline fn findImportPkgHashOrFatal(b: *Build, comptime asking_build_zig: type, c
const build_runner = @import("root");
const deps = build_runner.dependencies;
const b_pkg_hash, const b_pkg_deps = comptime for (@typeInfo(deps.packages).Struct.decls) |decl| {
const b_pkg_hash, const b_pkg_deps = comptime for (@typeInfo(deps.packages).@"struct".decls) |decl| {
const pkg_hash = decl.name;
const pkg = @field(deps.packages, pkg_hash);
if (@hasDecl(pkg, "build_zig") and pkg.build_zig == asking_build_zig) break .{ pkg_hash, pkg.deps };
@ -1963,7 +1963,7 @@ pub fn lazyDependency(b: *Build, name: []const u8, args: anytype) ?*Dependency {
const deps = build_runner.dependencies;
const pkg_hash = findPkgHashOrFatal(b, name);
inline for (@typeInfo(deps.packages).Struct.decls) |decl| {
inline for (@typeInfo(deps.packages).@"struct".decls) |decl| {
if (mem.eql(u8, decl.name, pkg_hash)) {
const pkg = @field(deps.packages, decl.name);
const available = !@hasDecl(pkg, "available") or pkg.available;
@ -1983,7 +1983,7 @@ pub fn dependency(b: *Build, name: []const u8, args: anytype) *Dependency {
const deps = build_runner.dependencies;
const pkg_hash = findPkgHashOrFatal(b, name);
inline for (@typeInfo(deps.packages).Struct.decls) |decl| {
inline for (@typeInfo(deps.packages).@"struct".decls) |decl| {
if (mem.eql(u8, decl.name, pkg_hash)) {
const pkg = @field(deps.packages, decl.name);
if (@hasDecl(pkg, "available")) {
@ -2013,7 +2013,7 @@ pub inline fn lazyImport(
const deps = build_runner.dependencies;
const pkg_hash = findImportPkgHashOrFatal(b, asking_build_zig, dep_name);
inline for (@typeInfo(deps.packages).Struct.decls) |decl| {
inline for (@typeInfo(deps.packages).@"struct".decls) |decl| {
if (comptime mem.eql(u8, decl.name, pkg_hash)) {
const pkg = @field(deps.packages, decl.name);
const available = !@hasDecl(pkg, "available") or pkg.available;
@ -2042,7 +2042,7 @@ pub fn dependencyFromBuildZig(
const deps = build_runner.dependencies;
find_dep: {
const pkg, const pkg_hash = inline for (@typeInfo(deps.packages).Struct.decls) |decl| {
const pkg, const pkg_hash = inline for (@typeInfo(deps.packages).@"struct".decls) |decl| {
const pkg_hash = decl.name;
const pkg = @field(deps.packages, pkg_hash);
if (@hasDecl(pkg, "build_zig") and pkg.build_zig == build_zig) break .{ pkg, pkg_hash };
@ -2150,9 +2150,9 @@ fn dependencyInner(
}
pub fn runBuild(b: *Build, build_zig: anytype) anyerror!void {
switch (@typeInfo(@typeInfo(@TypeOf(build_zig.build)).Fn.return_type.?)) {
.Void => build_zig.build(b),
.ErrorUnion => try build_zig.build(b),
switch (@typeInfo(@typeInfo(@TypeOf(build_zig.build)).@"fn".return_type.?)) {
.void => build_zig.build(b),
.error_union => try build_zig.build(b),
else => @compileError("expected return type of build to be 'void' or '!void'"),
}
}


@ -222,7 +222,7 @@ pub const HashHelper = struct {
.hexstring => |hex_string| hh.addBytes(hex_string.toSlice()),
},
else => switch (@typeInfo(@TypeOf(x))) {
.Bool, .Int, .Enum, .Array => hh.addBytes(mem.asBytes(&x)),
.bool, .int, .@"enum", .array => hh.addBytes(mem.asBytes(&x)),
else => @compileError("unable to hash type " ++ @typeName(@TypeOf(x))),
},
}
@ -1014,7 +1014,7 @@ pub const Manifest = struct {
}
pub fn populateFileSystemInputs(man: *Manifest, buf: *std.ArrayListUnmanaged(u8)) Allocator.Error!void {
assert(@typeInfo(std.zig.Server.Message.PathPrefix).Enum.fields.len == man.cache.prefixes_len);
assert(@typeInfo(std.zig.Server.Message.PathPrefix).@"enum".fields.len == man.cache.prefixes_len);
buf.clearRetainingCapacity();
const gpa = man.cache.gpa;
const files = man.files.keys();
@ -1032,7 +1032,7 @@ pub const Manifest = struct {
pub fn populateOtherManifest(man: *Manifest, other: *Manifest, prefix_map: [4]u8) Allocator.Error!void {
const gpa = other.cache.gpa;
assert(@typeInfo(std.zig.Server.Message.PathPrefix).Enum.fields.len == man.cache.prefixes_len);
assert(@typeInfo(std.zig.Server.Message.PathPrefix).@"enum".fields.len == man.cache.prefixes_len);
assert(man.cache.prefixes_len == 4);
for (man.files.keys()) |file| {
const prefixed_path: PrefixedPath = .{


@ -189,8 +189,8 @@ pub const TableAdapter = struct {
pub fn hash(self: TableAdapter, a: Cache.Path) u32 {
_ = self;
const seed = switch (@typeInfo(@TypeOf(a.root_dir.handle.fd))) {
.Pointer => @intFromPtr(a.root_dir.handle.fd),
.Int => @as(u32, @bitCast(a.root_dir.handle.fd)),
.pointer => @intFromPtr(a.root_dir.handle.fd),
.int => @as(u32, @bitCast(a.root_dir.handle.fd)),
else => @compileError("unimplemented hash function"),
};
return @truncate(Hash.hash(seed, a.sub_path));


@ -109,47 +109,47 @@ pub fn getOutput(config_header: *ConfigHeader) std.Build.LazyPath {
}
fn addValuesInner(config_header: *ConfigHeader, values: anytype) !void {
inline for (@typeInfo(@TypeOf(values)).Struct.fields) |field| {
inline for (@typeInfo(@TypeOf(values)).@"struct".fields) |field| {
try putValue(config_header, field.name, field.type, @field(values, field.name));
}
}
fn putValue(config_header: *ConfigHeader, field_name: []const u8, comptime T: type, v: T) !void {
switch (@typeInfo(T)) {
.Null => {
.null => {
try config_header.values.put(field_name, .undef);
},
.Void => {
.void => {
try config_header.values.put(field_name, .defined);
},
.Bool => {
.bool => {
try config_header.values.put(field_name, .{ .boolean = v });
},
.Int => {
.int => {
try config_header.values.put(field_name, .{ .int = v });
},
.ComptimeInt => {
.comptime_int => {
try config_header.values.put(field_name, .{ .int = v });
},
.EnumLiteral => {
.enum_literal => {
try config_header.values.put(field_name, .{ .ident = @tagName(v) });
},
.Optional => {
.optional => {
if (v) |x| {
return putValue(config_header, field_name, @TypeOf(x), x);
} else {
try config_header.values.put(field_name, .undef);
}
},
.Pointer => |ptr| {
.pointer => |ptr| {
switch (@typeInfo(ptr.child)) {
.Array => |array| {
.array => |array| {
if (ptr.size == .One and array.child == u8) {
try config_header.values.put(field_name, .{ .string = v });
return;
}
},
.Int => {
.int => {
if (ptr.size == .Slice and ptr.child == u8) {
try config_header.values.put(field_name, .{ .string = v });
return;


@ -151,7 +151,7 @@ fn printType(options: *Options, out: anytype, comptime T: type, value: T, indent
}
switch (@typeInfo(T)) {
.Array => {
.array => {
if (name) |some| {
try out.print("pub const {}: {s} = ", .{ std.zig.fmtId(some), @typeName(T) });
}
@ -171,7 +171,7 @@ fn printType(options: *Options, out: anytype, comptime T: type, value: T, indent
}
return;
},
.Pointer => |p| {
.pointer => |p| {
if (p.size != .Slice) {
@compileError("Non-slice pointers are not yet supported in build options");
}
@ -195,7 +195,7 @@ fn printType(options: *Options, out: anytype, comptime T: type, value: T, indent
}
return;
},
.Optional => {
.optional => {
if (name) |some| {
try out.print("pub const {}: {s} = ", .{ std.zig.fmtId(some), @typeName(T) });
}
@ -216,12 +216,12 @@ fn printType(options: *Options, out: anytype, comptime T: type, value: T, indent
}
return;
},
.Void,
.Bool,
.Int,
.ComptimeInt,
.Float,
.Null,
.void,
.bool,
.int,
.comptime_int,
.float,
.null,
=> {
if (name) |some| {
try out.print("pub const {}: {s} = {any};\n", .{ std.zig.fmtId(some), @typeName(T), value });
@ -230,7 +230,7 @@ fn printType(options: *Options, out: anytype, comptime T: type, value: T, indent
}
return;
},
.Enum => |info| {
.@"enum" => |info| {
try printEnum(options, out, T, info, indent);
if (name) |some| {
@ -242,7 +242,7 @@ fn printType(options: *Options, out: anytype, comptime T: type, value: T, indent
}
return;
},
.Struct => |info| {
.@"struct" => |info| {
try printStruct(options, out, T, info, indent);
if (name) |some| {
@ -260,10 +260,10 @@ fn printType(options: *Options, out: anytype, comptime T: type, value: T, indent
fn printUserDefinedType(options: *Options, out: anytype, comptime T: type, indent: u8) !void {
switch (@typeInfo(T)) {
.Enum => |info| {
.@"enum" => |info| {
return try printEnum(options, out, T, info, indent);
},
.Struct => |info| {
.@"struct" => |info| {
return try printStruct(options, out, T, info, indent);
},
else => {},
@ -323,8 +323,8 @@ fn printStruct(options: *Options, out: anytype, comptime T: type, comptime val:
try out.writeAll(" = ");
switch (@typeInfo(@TypeOf(default_value))) {
.Enum => try out.print(".{s},\n", .{@tagName(default_value)}),
.Struct => |info| {
.@"enum" => try out.print(".{s},\n", .{@tagName(default_value)}),
.@"struct" => |info| {
try printStructValue(options, out, info, default_value, indent + 4);
},
else => try printType(options, out, @TypeOf(default_value), default_value, indent, null),
@ -359,8 +359,8 @@ fn printStructValue(options: *Options, out: anytype, comptime struct_val: std.bu
const field_name = @field(val, field.name);
switch (@typeInfo(@TypeOf(field_name))) {
.Enum => try out.print(".{s},\n", .{@tagName(field_name)}),
.Struct => |struct_info| {
.@"enum" => try out.print(".{s},\n", .{@tagName(field_name)}),
.@"struct" => |struct_info| {
try printStructValue(options, out, struct_info, field_name, indent + 4);
},
else => try printType(options, out, @TypeOf(field_name), field_name, indent, null),


@ -614,7 +614,7 @@ fn checksContainStderr(checks: []const StdIo.Check) bool {
const IndexedOutput = struct {
index: usize,
tag: @typeInfo(Arg).Union.tag_type.?,
tag: @typeInfo(Arg).@"union".tag_type.?,
output: *Output,
};
fn make(step: *Step, options: Step.MakeOptions) !void {


@ -95,8 +95,8 @@ pub const Node = struct {
/// Not thread-safe.
fn getIpcFd(s: Storage) ?posix.fd_t {
return if (s.estimated_total_count == std.math.maxInt(u32)) switch (@typeInfo(posix.fd_t)) {
.Int => @bitCast(s.completed_count),
.Pointer => @ptrFromInt(s.completed_count),
.int => @bitCast(s.completed_count),
.pointer => @ptrFromInt(s.completed_count),
else => @compileError("unsupported fd_t of " ++ @typeName(posix.fd_t)),
} else null;
}
@ -104,8 +104,8 @@ pub const Node = struct {
/// Thread-safe.
fn setIpcFd(s: *Storage, fd: posix.fd_t) void {
const integer: u32 = switch (@typeInfo(posix.fd_t)) {
.Int => @bitCast(fd),
.Pointer => @intFromPtr(fd),
.int => @bitCast(fd),
.pointer => @intFromPtr(fd),
else => @compileError("unsupported fd_t of " ++ @typeName(posix.fd_t)),
};
// `estimated_total_count` max int indicates the special state that
@ -276,8 +276,8 @@ pub const Node = struct {
const storage = storageByIndex(index);
const int = @atomicLoad(u32, &storage.completed_count, .monotonic);
return switch (@typeInfo(posix.fd_t)) {
.Int => @bitCast(int),
.Pointer => @ptrFromInt(int),
.int => @bitCast(int),
.pointer => @ptrFromInt(int),
else => @compileError("unsupported fd_t of " ++ @typeName(posix.fd_t)),
};
}
@ -381,8 +381,8 @@ pub fn start(options: Options) Node {
if (std.process.parseEnvVarInt("ZIG_PROGRESS", u31, 10)) |ipc_fd| {
global_progress.update_thread = std.Thread.spawn(.{}, ipcThreadRun, .{
@as(posix.fd_t, switch (@typeInfo(posix.fd_t)) {
.Int => ipc_fd,
.Pointer => @ptrFromInt(ipc_fd),
.int => ipc_fd,
.pointer => @ptrFromInt(ipc_fd),
else => @compileError("unsupported fd_t of " ++ @typeName(posix.fd_t)),
}),
}) catch |err| {
@ -633,7 +633,7 @@ const TreeSymbol = enum {
fn maxByteLen(symbol: TreeSymbol) usize {
var max: usize = 0;
inline for (@typeInfo(Encoding).Enum.fields) |field| {
inline for (@typeInfo(Encoding).@"enum".fields) |field| {
const len = symbol.bytes(@field(Encoding, field.name)).len;
max = @max(max, len);
}


@ -34,9 +34,9 @@ fillFn: *const fn (ptr: *anyopaque, buf: []u8) void,
pub fn init(pointer: anytype, comptime fillFn: fn (ptr: @TypeOf(pointer), buf: []u8) void) Random {
const Ptr = @TypeOf(pointer);
assert(@typeInfo(Ptr) == .Pointer); // Must be a pointer
assert(@typeInfo(Ptr).Pointer.size == .One); // Must be a single-item pointer
assert(@typeInfo(@typeInfo(Ptr).Pointer.child) == .Struct); // Must point to a struct
assert(@typeInfo(Ptr) == .pointer); // Must be a pointer
assert(@typeInfo(Ptr).pointer.size == .One); // Must be a single-item pointer
assert(@typeInfo(@typeInfo(Ptr).pointer.child) == .@"struct"); // Must point to a struct
const gen = struct {
fn fill(ptr: *anyopaque, buf: []u8) void {
const self: Ptr = @ptrCast(@alignCast(ptr));
@ -79,7 +79,7 @@ pub inline fn enumValue(r: Random, comptime EnumType: type) EnumType {
/// See `uintLessThan`, which this function uses in most cases,
/// for commentary on the runtime of this function.
pub fn enumValueWithIndex(r: Random, comptime EnumType: type, comptime Index: type) EnumType {
comptime assert(@typeInfo(EnumType) == .Enum);
comptime assert(@typeInfo(EnumType) == .@"enum");
// We won't use int -> enum casting because enum elements can have
// arbitrary values. Instead we'll randomly pick one of the type's values.
@ -100,7 +100,7 @@ pub fn enumValueWithIndex(r: Random, comptime EnumType: type, comptime Index: ty
/// Returns a random int `i` such that `minInt(T) <= i <= maxInt(T)`.
/// `i` is evenly distributed.
pub fn int(r: Random, comptime T: type) T {
const bits = @typeInfo(T).Int.bits;
const bits = @typeInfo(T).int.bits;
const UnsignedT = std.meta.Int(.unsigned, bits);
const ceil_bytes = comptime std.math.divCeil(u16, bits, 8) catch unreachable;
const ByteAlignedT = std.meta.Int(.unsigned, ceil_bytes * 8);
@ -119,7 +119,7 @@ pub fn int(r: Random, comptime T: type) T {
/// Constant-time implementation off `uintLessThan`.
/// The results of this function may be biased.
pub fn uintLessThanBiased(r: Random, comptime T: type, less_than: T) T {
comptime assert(@typeInfo(T).Int.signedness == .unsigned);
comptime assert(@typeInfo(T).int.signedness == .unsigned);
assert(0 < less_than);
return limitRangeBiased(T, r.int(T), less_than);
}
@ -133,8 +133,8 @@ pub fn uintLessThanBiased(r: Random, comptime T: type, less_than: T) T {
/// this function is guaranteed to return.
/// If you need deterministic runtime bounds, use `uintLessThanBiased`.
pub fn uintLessThan(r: Random, comptime T: type, less_than: T) T {
comptime assert(@typeInfo(T).Int.signedness == .unsigned);
const bits = @typeInfo(T).Int.bits;
comptime assert(@typeInfo(T).int.signedness == .unsigned);
const bits = @typeInfo(T).int.bits;
assert(0 < less_than);
// adapted from:
@ -164,7 +164,7 @@ pub fn uintLessThan(r: Random, comptime T: type, less_than: T) T {
/// Constant-time implementation off `uintAtMost`.
/// The results of this function may be biased.
pub fn uintAtMostBiased(r: Random, comptime T: type, at_most: T) T {
assert(@typeInfo(T).Int.signedness == .unsigned);
assert(@typeInfo(T).int.signedness == .unsigned);
if (at_most == maxInt(T)) {
// have the full range
return r.int(T);
@ -176,7 +176,7 @@ pub fn uintAtMostBiased(r: Random, comptime T: type, at_most: T) T {
/// See `uintLessThan`, which this function uses in most cases,
/// for commentary on the runtime of this function.
pub fn uintAtMost(r: Random, comptime T: type, at_most: T) T {
assert(@typeInfo(T).Int.signedness == .unsigned);
assert(@typeInfo(T).int.signedness == .unsigned);
if (at_most == maxInt(T)) {
// have the full range
return r.int(T);
@ -188,7 +188,7 @@ pub fn uintAtMost(r: Random, comptime T: type, at_most: T) T {
/// The results of this function may be biased.
pub fn intRangeLessThanBiased(r: Random, comptime T: type, at_least: T, less_than: T) T {
assert(at_least < less_than);
const info = @typeInfo(T).Int;
const info = @typeInfo(T).int;
if (info.signedness == .signed) {
// Two's complement makes this math pretty easy.
const UnsignedT = std.meta.Int(.unsigned, info.bits);
@ -207,7 +207,7 @@ pub fn intRangeLessThanBiased(r: Random, comptime T: type, at_least: T, less_tha
/// for commentary on the runtime of this function.
pub fn intRangeLessThan(r: Random, comptime T: type, at_least: T, less_than: T) T {
assert(at_least < less_than);
const info = @typeInfo(T).Int;
const info = @typeInfo(T).int;
if (info.signedness == .signed) {
// Two's complement makes this math pretty easy.
const UnsignedT = std.meta.Int(.unsigned, info.bits);
@ -225,7 +225,7 @@ pub fn intRangeLessThan(r: Random, comptime T: type, at_least: T, less_than: T)
/// The results of this function may be biased.
pub fn intRangeAtMostBiased(r: Random, comptime T: type, at_least: T, at_most: T) T {
assert(at_least <= at_most);
const info = @typeInfo(T).Int;
const info = @typeInfo(T).int;
if (info.signedness == .signed) {
// Two's complement makes this math pretty easy.
const UnsignedT = std.meta.Int(.unsigned, info.bits);
@ -244,7 +244,7 @@ pub fn intRangeAtMostBiased(r: Random, comptime T: type, at_least: T, at_most: T
/// for commentary on the runtime of this function.
pub fn intRangeAtMost(r: Random, comptime T: type, at_least: T, at_most: T) T {
assert(at_least <= at_most);
const info = @typeInfo(T).Int;
const info = @typeInfo(T).int;
if (info.signedness == .signed) {
// Two's complement makes this math pretty easy.
const UnsignedT = std.meta.Int(.unsigned, info.bits);
@ -392,12 +392,12 @@ pub fn weightedIndex(r: Random, comptime T: type, proportions: []const T) usize
};
const point = switch (@typeInfo(T)) {
.Int => |int_info| switch (int_info.signedness) {
.int => |int_info| switch (int_info.signedness) {
.signed => r.intRangeLessThan(T, 0, sum),
.unsigned => r.uintLessThan(T, sum),
},
// take care that imprecision doesn't lead to a value slightly greater than sum
.Float => @min(r.float(T) * sum, sum - std.math.floatEps(T)),
.float => @min(r.float(T) * sum, sum - std.math.floatEps(T)),
else => @compileError("weightedIndex does not support proportions of type " ++
@typeName(T)),
};
@ -415,8 +415,8 @@ pub fn weightedIndex(r: Random, comptime T: type, proportions: []const T) usize
/// into an integer 0 <= result < less_than.
/// This function introduces a minor bias.
pub fn limitRangeBiased(comptime T: type, random_int: T, less_than: T) T {
comptime assert(@typeInfo(T).Int.signedness == .unsigned);
const bits = @typeInfo(T).Int.bits;
comptime assert(@typeInfo(T).int.signedness == .unsigned);
const bits = @typeInfo(T).int.bits;
// adapted from:
// http://www.pcg-random.org/posts/bounded-rands.html
@ -427,9 +427,9 @@ pub fn limitRangeBiased(comptime T: type, random_int: T, less_than: T) T {
/// Returns the smallest of `Index` and `usize`.
fn MinArrayIndex(comptime Index: type) type {
const index_info = @typeInfo(Index).Int;
const index_info = @typeInfo(Index).int;
assert(index_info.signedness == .unsigned);
return if (index_info.bits >= @typeInfo(usize).Int.bits) usize else Index;
return if (index_info.bits >= @typeInfo(usize).int.bits) usize else Index;
}
test {

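The `@typeInfo(T).int` accesses above all follow the same shape; here is a small self-contained sketch (the helper name is made up, it is not part of std.Random) of the unsigned-integer check used by the bounded-random functions:

const std = @import("std");

/// Illustrative helper only: comptime-reject anything that is not an
/// unsigned integer, using the renamed `.int` field.
fn assertUnsignedInt(comptime T: type) void {
    const info = @typeInfo(T);
    if (info != .int or info.int.signedness != .unsigned)
        @compileError("expected an unsigned integer type, got " ++ @typeName(T));
}

test assertUnsignedInt {
    comptime assertUnsignedInt(u32);
    try std.testing.expectEqual(@as(u16, 64), @typeInfo(u64).int.bits);
}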

@ -154,7 +154,7 @@ pub const Os = struct {
};
}
pub inline fn getVersionRangeTag(tag: Tag) @typeInfo(TaggedVersionRange).Union.tag_type.? {
pub inline fn getVersionRangeTag(tag: Tag) @typeInfo(TaggedVersionRange).@"union".tag_type.? {
return switch (tag) {
.freestanding,
.fuchsia,
@ -1458,7 +1458,7 @@ pub const Cpu = struct {
fn allCpusFromDecls(comptime cpus: type) []const *const Cpu.Model {
@setEvalBranchQuota(2000);
const decls = @typeInfo(cpus).Struct.decls;
const decls = @typeInfo(cpus).@"struct".decls;
var array: [decls.len]*const Cpu.Model = undefined;
for (decls, 0..) |decl, i| {
array[i] = &@field(cpus, decl.name);

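A tiny stand-alone illustration (the `cpus` namespace here is invented) of walking a namespace's public declarations through the renamed `.@"struct"` field, the same pattern `allCpusFromDecls` uses:

const std = @import("std");

const cpus = struct {
    pub const generic = 0;
    pub const baseline = 1;
};

test "iterate public decls through the renamed field" {
    const decls = @typeInfo(cpus).@"struct".decls;
    try std.testing.expectEqual(@as(usize, 2), decls.len);
    try std.testing.expectEqualStrings("generic", decls[0].name);
}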

@ -53,7 +53,7 @@ pub const CpuModel = union(enum) {
explicit: *const Target.Cpu.Model,
pub fn eql(a: CpuModel, b: CpuModel) bool {
const Tag = @typeInfo(CpuModel).Union.tag_type.?;
const Tag = @typeInfo(CpuModel).@"union".tag_type.?;
const a_tag: Tag = a;
const b_tag: Tag = b;
if (a_tag != b_tag) return false;
@ -70,7 +70,7 @@ pub const OsVersion = union(enum) {
windows: Target.Os.WindowsVersion,
pub fn eql(a: OsVersion, b: OsVersion) bool {
const Tag = @typeInfo(OsVersion).Union.tag_type.?;
const Tag = @typeInfo(OsVersion).@"union".tag_type.?;
const a_tag: Tag = a;
const b_tag: Tag = b;
if (a_tag != b_tag) return false;


@ -242,7 +242,7 @@ pub const featureSetHasAll = CpuFeature.FeatureSetFns(Feature).featureSetHasAll;
pub const all_features = blk: {
@setEvalBranchQuota(2000);
const len = @typeInfo(Feature).Enum.fields.len;
const len = @typeInfo(Feature).@"enum".fields.len;
std.debug.assert(len <= CpuFeature.Set.needed_bit_count);
var result: [len]CpuFeature = undefined;
result[@intFromEnum(Feature.a510)] = .{
@ -1660,7 +1660,7 @@ pub const all_features = blk: {
const ti = @typeInfo(Feature);
for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
elem.name = ti.@"enum".fields[i].name;
}
break :blk result;
};

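The per-target feature tables in this diff all repeat the `@typeInfo(Feature).@"enum".fields` access; as a self-contained illustration with a made-up enum rather than a real feature set:

const std = @import("std");

const Feature = enum { neon, sve, sme };

test "enum field introspection after the rename" {
    const fields = @typeInfo(Feature).@"enum".fields;
    try std.testing.expectEqual(@as(usize, 3), fields.len);
    try std.testing.expectEqualStrings("neon", fields[0].name);
}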

@ -183,7 +183,7 @@ pub const featureSetHasAny = CpuFeature.FeatureSetFns(Feature).featureSetHasAny;
pub const featureSetHasAll = CpuFeature.FeatureSetFns(Feature).featureSetHasAll;
pub const all_features = blk: {
const len = @typeInfo(Feature).Enum.fields.len;
const len = @typeInfo(Feature).@"enum".fields.len;
std.debug.assert(len <= CpuFeature.Set.needed_bit_count);
var result: [len]CpuFeature = undefined;
result[@intFromEnum(Feature.@"16_bit_insts")] = .{
@ -1284,7 +1284,7 @@ pub const all_features = blk: {
const ti = @typeInfo(Feature);
for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
elem.name = ti.@"enum".fields[i].name;
}
break :blk result;
};


@ -14,7 +14,7 @@ pub const featureSetHasAny = CpuFeature.FeatureSetFns(Feature).featureSetHasAny;
pub const featureSetHasAll = CpuFeature.FeatureSetFns(Feature).featureSetHasAll;
pub const all_features = blk: {
const len = @typeInfo(Feature).Enum.fields.len;
const len = @typeInfo(Feature).@"enum".fields.len;
std.debug.assert(len <= CpuFeature.Set.needed_bit_count);
var result: [len]CpuFeature = undefined;
result[@intFromEnum(Feature.norm)] = .{
@ -25,7 +25,7 @@ pub const all_features = blk: {
const ti = @typeInfo(Feature);
for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
elem.name = ti.@"enum".fields[i].name;
}
break :blk result;
};


@ -215,7 +215,7 @@ pub const featureSetHasAll = CpuFeature.FeatureSetFns(Feature).featureSetHasAll;
pub const all_features = blk: {
@setEvalBranchQuota(10000);
const len = @typeInfo(Feature).Enum.fields.len;
const len = @typeInfo(Feature).@"enum".fields.len;
std.debug.assert(len <= CpuFeature.Set.needed_bit_count);
var result: [len]CpuFeature = undefined;
result[@intFromEnum(Feature.@"32bit")] = .{
@ -1735,7 +1735,7 @@ pub const all_features = blk: {
const ti = @typeInfo(Feature);
for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
elem.name = ti.@"enum".fields[i].name;
}
break :blk result;
};


@ -49,7 +49,7 @@ pub const featureSetHasAny = CpuFeature.FeatureSetFns(Feature).featureSetHasAny;
pub const featureSetHasAll = CpuFeature.FeatureSetFns(Feature).featureSetHasAll;
pub const all_features = blk: {
const len = @typeInfo(Feature).Enum.fields.len;
const len = @typeInfo(Feature).@"enum".fields.len;
std.debug.assert(len <= CpuFeature.Set.needed_bit_count);
var result: [len]CpuFeature = undefined;
result[@intFromEnum(Feature.addsubiw)] = .{
@ -340,7 +340,7 @@ pub const all_features = blk: {
const ti = @typeInfo(Feature);
for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
elem.name = ti.@"enum".fields[i].name;
}
break :blk result;
};


@ -16,7 +16,7 @@ pub const featureSetHasAny = CpuFeature.FeatureSetFns(Feature).featureSetHasAny;
pub const featureSetHasAll = CpuFeature.FeatureSetFns(Feature).featureSetHasAll;
pub const all_features = blk: {
const len = @typeInfo(Feature).Enum.fields.len;
const len = @typeInfo(Feature).@"enum".fields.len;
std.debug.assert(len <= CpuFeature.Set.needed_bit_count);
var result: [len]CpuFeature = undefined;
result[@intFromEnum(Feature.alu32)] = .{
@ -37,7 +37,7 @@ pub const all_features = blk: {
const ti = @typeInfo(Feature);
for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
elem.name = ti.@"enum".fields[i].name;
}
break :blk result;
};


@ -76,7 +76,7 @@ pub const featureSetHasAny = CpuFeature.FeatureSetFns(Feature).featureSetHasAny;
pub const featureSetHasAll = CpuFeature.FeatureSetFns(Feature).featureSetHasAll;
pub const all_features = blk: {
const len = @typeInfo(Feature).Enum.fields.len;
const len = @typeInfo(Feature).@"enum".fields.len;
std.debug.assert(len <= CpuFeature.Set.needed_bit_count);
var result: [len]CpuFeature = undefined;
result[@intFromEnum(Feature.@"10e60")] = .{
@ -418,7 +418,7 @@ pub const all_features = blk: {
const ti = @typeInfo(Feature);
for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
elem.name = ti.@"enum".fields[i].name;
}
break :blk result;
};


@ -55,7 +55,7 @@ pub const featureSetHasAny = CpuFeature.FeatureSetFns(Feature).featureSetHasAny;
pub const featureSetHasAll = CpuFeature.FeatureSetFns(Feature).featureSetHasAll;
pub const all_features = blk: {
const len = @typeInfo(Feature).Enum.fields.len;
const len = @typeInfo(Feature).@"enum".fields.len;
std.debug.assert(len <= CpuFeature.Set.needed_bit_count);
var result: [len]CpuFeature = undefined;
result[@intFromEnum(Feature.audio)] = .{
@ -298,7 +298,7 @@ pub const all_features = blk: {
const ti = @typeInfo(Feature);
for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
elem.name = ti.@"enum".fields[i].name;
}
break :blk result;
};


@ -28,7 +28,7 @@ pub const featureSetHasAny = CpuFeature.FeatureSetFns(Feature).featureSetHasAny;
pub const featureSetHasAll = CpuFeature.FeatureSetFns(Feature).featureSetHasAll;
pub const all_features = blk: {
const len = @typeInfo(Feature).Enum.fields.len;
const len = @typeInfo(Feature).@"enum".fields.len;
std.debug.assert(len <= CpuFeature.Set.needed_bit_count);
var result: [len]CpuFeature = undefined;
result[@intFromEnum(Feature.@"32bit")] = .{
@ -115,7 +115,7 @@ pub const all_features = blk: {
const ti = @typeInfo(Feature);
for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
elem.name = ti.@"enum".fields[i].name;
}
break :blk result;
};


@ -36,7 +36,7 @@ pub const featureSetHasAny = CpuFeature.FeatureSetFns(Feature).featureSetHasAny;
pub const featureSetHasAll = CpuFeature.FeatureSetFns(Feature).featureSetHasAll;
pub const all_features = blk: {
const len = @typeInfo(Feature).Enum.fields.len;
const len = @typeInfo(Feature).@"enum".fields.len;
std.debug.assert(len <= CpuFeature.Set.needed_bit_count);
var result: [len]CpuFeature = undefined;
result[@intFromEnum(Feature.isa_68000)] = .{
@ -170,7 +170,7 @@ pub const all_features = blk: {
const ti = @typeInfo(Feature);
for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
elem.name = ti.@"enum".fields[i].name;
}
break :blk result;
};


@ -65,7 +65,7 @@ pub const featureSetHasAny = CpuFeature.FeatureSetFns(Feature).featureSetHasAny;
pub const featureSetHasAll = CpuFeature.FeatureSetFns(Feature).featureSetHasAll;
pub const all_features = blk: {
const len = @typeInfo(Feature).Enum.fields.len;
const len = @typeInfo(Feature).@"enum".fields.len;
std.debug.assert(len <= CpuFeature.Set.needed_bit_count);
var result: [len]CpuFeature = undefined;
result[@intFromEnum(Feature.abs2008)] = .{
@ -389,7 +389,7 @@ pub const all_features = blk: {
const ti = @typeInfo(Feature);
for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
elem.name = ti.@"enum".fields[i].name;
}
break :blk result;
};


@ -17,7 +17,7 @@ pub const featureSetHasAny = CpuFeature.FeatureSetFns(Feature).featureSetHasAny;
pub const featureSetHasAll = CpuFeature.FeatureSetFns(Feature).featureSetHasAll;
pub const all_features = blk: {
const len = @typeInfo(Feature).Enum.fields.len;
const len = @typeInfo(Feature).@"enum".fields.len;
std.debug.assert(len <= CpuFeature.Set.needed_bit_count);
var result: [len]CpuFeature = undefined;
result[@intFromEnum(Feature.ext)] = .{
@ -43,7 +43,7 @@ pub const all_features = blk: {
const ti = @typeInfo(Feature);
for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
elem.name = ti.@"enum".fields[i].name;
}
break :blk result;
};


@ -58,7 +58,7 @@ pub const featureSetHasAny = CpuFeature.FeatureSetFns(Feature).featureSetHasAny;
pub const featureSetHasAll = CpuFeature.FeatureSetFns(Feature).featureSetHasAll;
pub const all_features = blk: {
const len = @typeInfo(Feature).Enum.fields.len;
const len = @typeInfo(Feature).@"enum".fields.len;
std.debug.assert(len <= CpuFeature.Set.needed_bit_count);
var result: [len]CpuFeature = undefined;
result[@intFromEnum(Feature.ptx32)] = .{
@ -289,7 +289,7 @@ pub const all_features = blk: {
const ti = @typeInfo(Feature);
for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
elem.name = ti.@"enum".fields[i].name;
}
break :blk result;
};


@ -95,7 +95,7 @@ pub const featureSetHasAny = CpuFeature.FeatureSetFns(Feature).featureSetHasAny;
pub const featureSetHasAll = CpuFeature.FeatureSetFns(Feature).featureSetHasAll;
pub const all_features = blk: {
const len = @typeInfo(Feature).Enum.fields.len;
const len = @typeInfo(Feature).@"enum".fields.len;
std.debug.assert(len <= CpuFeature.Set.needed_bit_count);
var result: [len]CpuFeature = undefined;
result[@intFromEnum(Feature.@"64bit")] = .{
@ -606,7 +606,7 @@ pub const all_features = blk: {
const ti = @typeInfo(Feature);
for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
elem.name = ti.@"enum".fields[i].name;
}
break :blk result;
};


@ -201,7 +201,7 @@ pub const featureSetHasAny = CpuFeature.FeatureSetFns(Feature).featureSetHasAny;
pub const featureSetHasAll = CpuFeature.FeatureSetFns(Feature).featureSetHasAll;
pub const all_features = blk: {
const len = @typeInfo(Feature).Enum.fields.len;
const len = @typeInfo(Feature).@"enum".fields.len;
std.debug.assert(len <= CpuFeature.Set.needed_bit_count);
var result: [len]CpuFeature = undefined;
result[@intFromEnum(Feature.@"32bit")] = .{
@ -1298,7 +1298,7 @@ pub const all_features = blk: {
const ti = @typeInfo(Feature);
for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
elem.name = ti.@"enum".fields[i].name;
}
break :blk result;
};


@ -55,7 +55,7 @@ pub const featureSetHasAny = CpuFeature.FeatureSetFns(Feature).featureSetHasAny;
pub const featureSetHasAll = CpuFeature.FeatureSetFns(Feature).featureSetHasAll;
pub const all_features = blk: {
const len = @typeInfo(Feature).Enum.fields.len;
const len = @typeInfo(Feature).@"enum".fields.len;
std.debug.assert(len <= CpuFeature.Set.needed_bit_count);
var result: [len]CpuFeature = undefined;
result[@intFromEnum(Feature.backchain)] = .{
@ -271,7 +271,7 @@ pub const all_features = blk: {
const ti = @typeInfo(Feature);
for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
elem.name = ti.@"enum".fields[i].name;
}
break :blk result;
};


@ -60,7 +60,7 @@ pub const featureSetHasAny = CpuFeature.FeatureSetFns(Feature).featureSetHasAny;
pub const featureSetHasAll = CpuFeature.FeatureSetFns(Feature).featureSetHasAll;
pub const all_features = blk: {
const len = @typeInfo(Feature).Enum.fields.len;
const len = @typeInfo(Feature).@"enum".fields.len;
std.debug.assert(len <= CpuFeature.Set.needed_bit_count);
var result: [len]CpuFeature = undefined;
result[@intFromEnum(Feature.deprecated_v8)] = .{
@ -303,7 +303,7 @@ pub const all_features = blk: {
const ti = @typeInfo(Feature);
for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
elem.name = ti.@"enum".fields[i].name;
}
break :blk result;
};


@ -301,7 +301,7 @@ pub const featureSetHasAll = CpuFeature.FeatureSetFns(Feature).featureSetHasAll;
pub const all_features = blk: {
@setEvalBranchQuota(2000);
const len = @typeInfo(Feature).Enum.fields.len;
const len = @typeInfo(Feature).@"enum".fields.len;
std.debug.assert(len <= CpuFeature.Set.needed_bit_count);
var result: [len]CpuFeature = undefined;
result[@intFromEnum(Feature.v1_1)] = .{
@ -2077,7 +2077,7 @@ pub const all_features = blk: {
const ti = @typeInfo(Feature);
for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
elem.name = ti.@"enum".fields[i].name;
}
break :blk result;
};


@ -14,7 +14,7 @@ pub const featureSetHasAny = CpuFeature.FeatureSetFns(Feature).featureSetHasAny;
pub const featureSetHasAll = CpuFeature.FeatureSetFns(Feature).featureSetHasAll;
pub const all_features = blk: {
const len = @typeInfo(Feature).Enum.fields.len;
const len = @typeInfo(Feature).@"enum".fields.len;
std.debug.assert(len <= CpuFeature.Set.needed_bit_count);
var result: [len]CpuFeature = undefined;
result[@intFromEnum(Feature.vpu)] = .{
@ -25,7 +25,7 @@ pub const all_features = blk: {
const ti = @typeInfo(Feature);
for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
elem.name = ti.@"enum".fields[i].name;
}
break :blk result;
};


@ -26,7 +26,7 @@ pub const featureSetHasAny = CpuFeature.FeatureSetFns(Feature).featureSetHasAny;
pub const featureSetHasAll = CpuFeature.FeatureSetFns(Feature).featureSetHasAll;
pub const all_features = blk: {
const len = @typeInfo(Feature).Enum.fields.len;
const len = @typeInfo(Feature).@"enum".fields.len;
std.debug.assert(len <= CpuFeature.Set.needed_bit_count);
var result: [len]CpuFeature = undefined;
result[@intFromEnum(Feature.atomics)] = .{
@ -97,7 +97,7 @@ pub const all_features = blk: {
const ti = @typeInfo(Feature);
for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
elem.name = ti.@"enum".fields[i].name;
}
break :blk result;
};


@ -199,7 +199,7 @@ pub const featureSetHasAny = CpuFeature.FeatureSetFns(Feature).featureSetHasAny;
pub const featureSetHasAll = CpuFeature.FeatureSetFns(Feature).featureSetHasAll;
pub const all_features = blk: {
const len = @typeInfo(Feature).Enum.fields.len;
const len = @typeInfo(Feature).@"enum".fields.len;
std.debug.assert(len <= CpuFeature.Set.needed_bit_count);
var result: [len]CpuFeature = undefined;
result[@intFromEnum(Feature.@"16bit_mode")] = .{
@ -1272,7 +1272,7 @@ pub const all_features = blk: {
const ti = @typeInfo(Feature);
for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
elem.name = ti.@"enum".fields[i].name;
}
break :blk result;
};


@ -14,7 +14,7 @@ pub const featureSetHasAny = CpuFeature.FeatureSetFns(Feature).featureSetHasAny;
pub const featureSetHasAll = CpuFeature.FeatureSetFns(Feature).featureSetHasAll;
pub const all_features = blk: {
const len = @typeInfo(Feature).Enum.fields.len;
const len = @typeInfo(Feature).@"enum".fields.len;
std.debug.assert(len <= CpuFeature.Set.needed_bit_count);
var result: [len]CpuFeature = undefined;
result[@intFromEnum(Feature.density)] = .{
@ -25,7 +25,7 @@ pub const all_features = blk: {
const ti = @typeInfo(Feature);
for (&result, 0..) |*elem, i| {
elem.index = i;
elem.name = ti.Enum.fields[i].name;
elem.name = ti.@"enum".fields[i].name;
}
break :blk result;
};


@ -401,15 +401,15 @@ fn callFn(comptime f: anytype, args: anytype) switch (Impl) {
const default_value = if (Impl == PosixThreadImpl) null else 0;
const bad_fn_ret = "expected return type of startFn to be 'u8', 'noreturn', '!noreturn', 'void', or '!void'";
switch (@typeInfo(@typeInfo(@TypeOf(f)).Fn.return_type.?)) {
.NoReturn => {
switch (@typeInfo(@typeInfo(@TypeOf(f)).@"fn".return_type.?)) {
.noreturn => {
@call(.auto, f, args);
},
.Void => {
.void => {
@call(.auto, f, args);
return default_value;
},
.Int => |info| {
.int => |info| {
if (info.bits != 8) {
@compileError(bad_fn_ret);
}
@ -422,7 +422,7 @@ fn callFn(comptime f: anytype, args: anytype) switch (Impl) {
// pthreads don't support exit status, ignore value
return default_value;
},
.ErrorUnion => |info| {
.error_union => |info| {
switch (info.payload) {
void, noreturn => {
@call(.auto, f, args) catch |err| {
@ -850,17 +850,17 @@ const WasiThreadImpl = struct {
fn entry(ptr: usize) void {
const w: *@This() = @ptrFromInt(ptr);
const bad_fn_ret = "expected return type of startFn to be 'u8', 'noreturn', 'void', or '!void'";
switch (@typeInfo(@typeInfo(@TypeOf(f)).Fn.return_type.?)) {
.NoReturn, .Void => {
switch (@typeInfo(@typeInfo(@TypeOf(f)).@"fn".return_type.?)) {
.noreturn, .void => {
@call(.auto, f, w.args);
},
.Int => |info| {
.int => |info| {
if (info.bits != 8) {
@compileError(bad_fn_ret);
}
_ = @call(.auto, f, w.args); // WASI threads don't support exit status, ignore value
},
.ErrorUnion => |info| {
.error_union => |info| {
if (info.payload != void) {
@compileError(bad_fn_ret);
}

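A hedged sketch of the same dispatch outside Thread.zig (the `startFn` below is invented): the return type is pulled out through the renamed `.@"fn"` field and then matched on the lowercase tags:

const std = @import("std");

fn startFn() !void {}

test "classify a start function's return type" {
    const Ret = @typeInfo(@TypeOf(startFn)).@"fn".return_type.?;
    switch (@typeInfo(Ret)) {
        .noreturn, .void => {},
        .int => |info| try std.testing.expect(info.bits == 8),
        .error_union => |info| try std.testing.expect(info.payload == void),
        else => return error.UnsupportedReturnType,
    }
}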

@ -2584,17 +2584,17 @@ pub fn getAutoEqlFn(comptime K: type, comptime Context: type) (fn (Context, K, K
pub fn autoEqlIsCheap(comptime K: type) bool {
return switch (@typeInfo(K)) {
.Bool,
.Int,
.Float,
.Pointer,
.ComptimeFloat,
.ComptimeInt,
.Enum,
.Fn,
.ErrorSet,
.AnyFrame,
.EnumLiteral,
.bool,
.int,
.float,
.pointer,
.comptime_float,
.comptime_int,
.@"enum",
.@"fn",
.error_set,
.@"anyframe",
.enum_literal,
=> true,
else => false,
};


@ -319,10 +319,10 @@ pub fn ArrayBitSet(comptime MaskIntType: type, comptime size: usize) type {
const mask_info: std.builtin.Type = @typeInfo(MaskIntType);
// Make sure the mask int is indeed an int
if (mask_info != .Int) @compileError("ArrayBitSet can only operate on integer masks, but was passed " ++ @typeName(MaskIntType));
if (mask_info != .int) @compileError("ArrayBitSet can only operate on integer masks, but was passed " ++ @typeName(MaskIntType));
// It must also be unsigned.
if (mask_info.Int.signedness != .unsigned) @compileError("ArrayBitSet requires an unsigned integer mask type, but was passed " ++ @typeName(MaskIntType));
if (mask_info.int.signedness != .unsigned) @compileError("ArrayBitSet requires an unsigned integer mask type, but was passed " ++ @typeName(MaskIntType));
// And it must not be empty.
if (MaskIntType == u0)


@ -257,30 +257,30 @@ pub const TypeId = std.meta.Tag(Type);
/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const Type = union(enum) {
Type: void,
Void: void,
Bool: void,
NoReturn: void,
Int: Int,
Float: Float,
Pointer: Pointer,
Array: Array,
Struct: Struct,
ComptimeFloat: void,
ComptimeInt: void,
Undefined: void,
Null: void,
Optional: Optional,
ErrorUnion: ErrorUnion,
ErrorSet: ErrorSet,
Enum: Enum,
Union: Union,
Fn: Fn,
Opaque: Opaque,
Frame: Frame,
AnyFrame: AnyFrame,
Vector: Vector,
EnumLiteral: void,
type: void,
void: void,
bool: void,
noreturn: void,
int: Int,
float: Float,
pointer: Pointer,
array: Array,
@"struct": Struct,
comptime_float: void,
comptime_int: void,
undefined: void,
null: void,
optional: Optional,
error_union: ErrorUnion,
error_set: ErrorSet,
@"enum": Enum,
@"union": Union,
@"fn": Fn,
@"opaque": Opaque,
frame: Frame,
@"anyframe": AnyFrame,
vector: Vector,
enum_literal: void,
/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
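Several of the new field names (`struct`, `enum`, `union`, `fn`, `opaque`, `anyframe`, and so on) are keywords, so use sites quote them with `@"..."`; the rest are plain snake_case. A minimal sketch (the `describe` helper is invented) of what a switch over `@typeInfo` looks like with the renamed fields:

const std = @import("std");

fn describe(comptime T: type) []const u8 {
    return switch (@typeInfo(T)) {
        .int => |info| if (info.signedness == .signed) "signed int" else "unsigned int",
        .@"struct" => "struct",
        .@"enum" => "enum",
        .pointer => "pointer",
        else => "something else",
    };
}

test describe {
    try std.testing.expectEqualStrings("unsigned int", describe(u8));
    try std.testing.expectEqualStrings("pointer", describe(*const u8));
    try std.testing.expectEqualStrings("struct", describe(struct {}));
}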

View File

@ -54,7 +54,7 @@ pub const EXC = enum(exception_type_t) {
/// Abnormal process exited to corpse state
CORPSE_NOTIFY = 13,
pub const TYPES_COUNT = @typeInfo(EXC).Enum.fields.len;
pub const TYPES_COUNT = @typeInfo(EXC).@"enum".fields.len;
pub const SOFT_SIGNAL = 0x10003;
pub const MASK = packed struct(u32) {


@ -455,7 +455,7 @@ test "generate a Huffman code for the 30 possible relative distances (LZ77 dista
// Reverse bit-by-bit a N-bit code.
fn bitReverse(comptime T: type, value: T, n: usize) T {
const r = @bitReverse(value);
return r >> @as(math.Log2Int(T), @intCast(@typeInfo(T).Int.bits - n));
return r >> @as(math.Log2Int(T), @intCast(@typeInfo(T).int.bits - n));
}
test bitReverse {


@ -8,7 +8,7 @@ pub const CmacAes128 = Cmac(crypto.core.aes.Aes128);
/// NIST Special Publication 800-38B - The CMAC Mode for Authentication
/// https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-38b.pdf
pub fn Cmac(comptime BlockCipher: type) type {
const BlockCipherCtx = @typeInfo(@TypeOf(BlockCipher.initEnc)).Fn.return_type.?;
const BlockCipherCtx = @typeInfo(@TypeOf(BlockCipher.initEnc)).@"fn".return_type.?;
const Block = [BlockCipher.block.block_length]u8;
return struct {


@ -848,7 +848,7 @@ const ct_protected = struct {
// Multiplies two limbs and returns the result as a wide limb.
fn mulWide(x: Limb, y: Limb) WideLimb {
const half_bits = @typeInfo(Limb).Int.bits / 2;
const half_bits = @typeInfo(Limb).int.bits / 2;
const Half = meta.Int(.unsigned, half_bits);
const x0 = @as(Half, @truncate(x));
const x1 = @as(Half, @truncate(x >> half_bits));
@ -901,7 +901,7 @@ const ct_unprotected = struct {
fn mulWide(x: Limb, y: Limb) WideLimb {
const wide = math.mulWide(Limb, x, y);
return .{
.hi = @as(Limb, @truncate(wide >> @typeInfo(Limb).Int.bits)),
.hi = @as(Limb, @truncate(wide >> @typeInfo(Limb).int.bits)),
.lo = @as(Limb, @truncate(wide)),
};
}

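A worked stand-in for the half-limb split above, with `u64` playing the role of `Limb` (an assumption for illustration only):

const std = @import("std");

test "split a limb into halves" {
    const Limb = u64;
    const half_bits = @typeInfo(Limb).int.bits / 2;
    const Half = std.meta.Int(.unsigned, half_bits);
    const x: Limb = 0xDEAD_BEEF_0000_1234;
    const lo: Half = @truncate(x);
    const hi: Half = @truncate(x >> half_bits);
    try std.testing.expectEqual(@as(Half, 0x0000_1234), lo);
    try std.testing.expectEqual(@as(Half, 0xDEAD_BEEF), hi);
}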

@ -91,7 +91,7 @@ pub fn deserialize(comptime HashResult: type, str: []const u8) Error!HashResult
if (mem.eql(u8, opt_version.key, version_param_name)) {
if (@hasField(HashResult, "alg_version")) {
const value_type_info = switch (@typeInfo(@TypeOf(out.alg_version))) {
.Optional => |opt| comptime @typeInfo(opt.child),
.optional => |opt| @typeInfo(opt.child),
else => |t| t,
};
out.alg_version = fmt.parseUnsigned(
@ -114,16 +114,16 @@ pub fn deserialize(comptime HashResult: type, str: []const u8) Error!HashResult
inline for (comptime meta.fields(HashResult)) |p| {
if (mem.eql(u8, p.name, param.key)) {
switch (@typeInfo(p.type)) {
.Int => @field(out, p.name) = fmt.parseUnsigned(
.int => @field(out, p.name) = fmt.parseUnsigned(
p.type,
param.value,
10,
) catch return Error.InvalidEncoding,
.Pointer => |ptr| {
.pointer => |ptr| {
if (!ptr.is_const) @compileError("Value slice must be constant");
@field(out, p.name) = param.value;
},
.Struct => try @field(out, p.name).fromB64(param.value),
.@"struct" => try @field(out, p.name).fromB64(param.value),
else => std.debug.panic(
"Value for [{s}] must be an integer, a constant slice or a BinValue",
.{p.name},
@ -164,7 +164,7 @@ pub fn deserialize(comptime HashResult: type, str: []const u8) Error!HashResult
// with default values
var expected_fields: usize = 0;
inline for (comptime meta.fields(HashResult)) |p| {
if (@typeInfo(p.type) != .Optional and p.default_value == null) {
if (@typeInfo(p.type) != .optional and p.default_value == null) {
expected_fields += 1;
}
}
@ -202,7 +202,7 @@ fn serializeTo(params: anytype, out: anytype) !void {
try out.writeAll(params.alg_id);
if (@hasField(HashResult, "alg_version")) {
if (@typeInfo(@TypeOf(params.alg_version)) == .Optional) {
if (@typeInfo(@TypeOf(params.alg_version)) == .optional) {
if (params.alg_version) |alg_version| {
try out.print(
"{s}{s}{s}{}",
@ -226,12 +226,12 @@ fn serializeTo(params: anytype, out: anytype) !void {
{
const value = @field(params, p.name);
try out.writeAll(if (has_params) params_delimiter else fields_delimiter);
if (@typeInfo(p.type) == .Struct) {
if (@typeInfo(p.type) == .@"struct") {
var buf: [@TypeOf(value).max_encoded_length]u8 = undefined;
try out.print("{s}{s}{s}", .{ p.name, kv_delimiter, try value.toB64(&buf) });
} else {
try out.print(
if (@typeInfo(@TypeOf(value)) == .Pointer) "{s}{s}{s}" else "{s}{s}{}",
if (@typeInfo(@TypeOf(value)) == .pointer) "{s}{s}{s}" else "{s}{s}{}",
.{ p.name, kv_delimiter, value },
);
}

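Loosely sketched, with a made-up `Params` struct rather than a real PHC hash result, this is the shape of the per-field dispatch above after the rename (only the matching prong of the comptime-known switch is analyzed, just as in the original code):

const std = @import("std");

const Params = struct { t: u32, m: u32, tag: []const u8 };

fn setField(out: *Params, comptime name: []const u8, value: []const u8) !void {
    const F = @TypeOf(@field(out.*, name));
    switch (@typeInfo(F)) {
        .int => @field(out.*, name) = try std.fmt.parseUnsigned(F, value, 10),
        .pointer => @field(out.*, name) = value,
        else => @compileError("unsupported field type"),
    }
}

test setField {
    var p: Params = .{ .t = 0, .m = 0, .tag = "" };
    try setField(&p, "t", "3");
    try setField(&p, "tag", "argon2id");
    try std.testing.expectEqual(@as(u32, 3), p.t);
    try std.testing.expectEqualStrings("argon2id", p.tag);
}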
Some files were not shown because too many files have changed in this diff.