Mirror of https://github.com/ziglang/zig.git (synced 2024-11-15 00:26:57 +00:00)
Merge pull request #6246 from Vexu/field
Remove deprecated fields on `type`
commit 41bbadbb9a
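The change is mechanical: compile-time queries that used the deprecated fields on types (`bit_count`, `Child`, `is_signed`, `alignment`, `ReturnType`, `Payload`, `ErrorSet`, ...) are rewritten in terms of `@typeInfo` or the `std.meta` helpers built on it. A minimal sketch of the pattern, illustrative only (not part of the diff) and assuming the Zig version current at this commit:

    const std = @import("std");
    const assert = std.debug.assert;

    test "deprecated type fields vs @typeInfo" {
        // old: u32.bit_count, i32.is_signed, (*u32).Child
        assert(@typeInfo(u32).Int.bits == 32);
        assert(@typeInfo(i32).Int.is_signed);
        assert(@typeInfo(*u32).Pointer.child == u32);
        // std.meta.bitCount wraps the @typeInfo lookup where only the width is needed.
        assert(std.meta.bitCount(u32) == 32);
    }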
@@ -2156,7 +2156,7 @@ test "pointer casting" {

test "pointer child type" {
// pointer types have a `child` field which tells you the type they point to.
-assert((*u32).Child == u32);
+assert(@typeInfo(*u32).Pointer.child == u32);
}
{#code_end#}
{#header_open|Alignment#}
@@ -2184,7 +2184,7 @@ test "variable alignment" {
assert(@TypeOf(&x) == *i32);
assert(*i32 == *align(align_of_i32) i32);
if (std.Target.current.cpu.arch == .x86_64) {
-assert((*i32).alignment == 4);
+assert(@typeInfo(*i32).Pointer.alignment == 4);
}
}
{#code_end#}
@@ -2202,7 +2202,7 @@ const assert = @import("std").debug.assert;
var foo: u8 align(4) = 100;

test "global variable alignment" {
-assert(@TypeOf(&foo).alignment == 4);
+assert(@typeInfo(@TypeOf(&foo)).Pointer.alignment == 4);
assert(@TypeOf(&foo) == *align(4) u8);
const as_pointer_to_array: *[1]u8 = &foo;
const as_slice: []u8 = as_pointer_to_array;
@@ -4310,8 +4310,8 @@ test "fn type inference" {
const assert = @import("std").debug.assert;

test "fn reflection" {
-assert(@TypeOf(assert).ReturnType == void);
-assert(@TypeOf(assert).is_var_args == false);
+assert(@typeInfo(@TypeOf(assert)).Fn.return_type.? == void);
+assert(@typeInfo(@TypeOf(assert)).Fn.is_var_args == false);
}
{#code_end#}
{#header_close#}
@@ -4611,10 +4611,10 @@ test "error union" {
foo = error.SomeError;

// Use compile-time reflection to access the payload type of an error union:
-comptime assert(@TypeOf(foo).Payload == i32);
+comptime assert(@typeInfo(@TypeOf(foo)).ErrorUnion.payload == i32);

// Use compile-time reflection to access the error set type of an error union:
-comptime assert(@TypeOf(foo).ErrorSet == anyerror);
+comptime assert(@typeInfo(@TypeOf(foo)).ErrorUnion.error_set == anyerror);
}
{#code_end#}
{#header_open|Merging Error Sets#}
@@ -4991,7 +4991,7 @@ test "optional type" {
foo = 1234;

// Use compile-time reflection to access the child type of the optional:
-comptime assert(@TypeOf(foo).Child == i32);
+comptime assert(@typeInfo(@TypeOf(foo)).Optional.child == i32);
}
{#code_end#}
{#header_close#}
@@ -7211,7 +7211,7 @@ fn cmpxchgStrongButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_v
{#syntax#}T{#endsyntax#} must be a pointer, a {#syntax#}bool{#endsyntax#}, a float,
an integer or an enum.
</p>
-<p>{#syntax#}@TypeOf(ptr).alignment{#endsyntax#} must be {#syntax#}>= @sizeOf(T).{#endsyntax#}</p>
+<p>{#syntax#}@typeInfo(@TypeOf(ptr)).Pointer.alignment{#endsyntax#} must be {#syntax#}>= @sizeOf(T).{#endsyntax#}</p>
{#see_also|Compile Variables|cmpxchgWeak#}
{#header_close#}
{#header_open|@cmpxchgWeak#}
@@ -7240,7 +7240,7 @@ fn cmpxchgWeakButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_val
{#syntax#}T{#endsyntax#} must be a pointer, a {#syntax#}bool{#endsyntax#}, a float,
an integer or an enum.
</p>
-<p>{#syntax#}@TypeOf(ptr).alignment{#endsyntax#} must be {#syntax#}>= @sizeOf(T).{#endsyntax#}</p>
+<p>{#syntax#}@typeInfo(@TypeOf(ptr)).Pointer.alignment{#endsyntax#} must be {#syntax#}>= @sizeOf(T).{#endsyntax#}</p>
{#see_also|Compile Variables|cmpxchgStrong#}
{#header_close#}

@@ -275,9 +275,7 @@ pub const ChildProcess = struct {
}

fn handleWaitResult(self: *ChildProcess, status: u32) void {
-// TODO https://github.com/ziglang/zig/issues/3190
-var term = self.cleanupAfterWait(status);
-self.term = term;
+self.term = self.cleanupAfterWait(status);
}

fn cleanupStreams(self: *ChildProcess) void {
@@ -9,10 +9,10 @@ const testing = std.testing;
/// Read a single unsigned LEB128 value from the given reader as type T,
/// or error.Overflow if the value cannot fit.
pub fn readULEB128(comptime T: type, reader: anytype) !T {
-const U = if (T.bit_count < 8) u8 else T;
+const U = if (@typeInfo(T).Int.bits < 8) u8 else T;
const ShiftT = std.math.Log2Int(U);

-const max_group = (U.bit_count + 6) / 7;
+const max_group = (@typeInfo(U).Int.bits + 6) / 7;

var value = @as(U, 0);
var group = @as(ShiftT, 0);
@@ -40,7 +40,7 @@ pub fn readULEB128(comptime T: type, reader: anytype) !T {
/// Write a single unsigned integer as unsigned LEB128 to the given writer.
pub fn writeULEB128(writer: anytype, uint_value: anytype) !void {
const T = @TypeOf(uint_value);
-const U = if (T.bit_count < 8) u8 else T;
+const U = if (@typeInfo(T).Int.bits < 8) u8 else T;
var value = @intCast(U, uint_value);

while (true) {
@@ -68,7 +68,7 @@ pub fn readULEB128Mem(comptime T: type, ptr: *[]const u8) !T {
/// returning the number of bytes written.
pub fn writeULEB128Mem(ptr: []u8, uint_value: anytype) !usize {
const T = @TypeOf(uint_value);
-const max_group = (T.bit_count + 6) / 7;
+const max_group = (@typeInfo(T).Int.bits + 6) / 7;
var buf = std.io.fixedBufferStream(ptr);
try writeULEB128(buf.writer(), uint_value);
return buf.pos;
@@ -77,11 +77,11 @@ pub fn writeULEB128Mem(ptr: []u8, uint_value: anytype) !usize {
/// Read a single signed LEB128 value from the given reader as type T,
/// or error.Overflow if the value cannot fit.
pub fn readILEB128(comptime T: type, reader: anytype) !T {
-const S = if (T.bit_count < 8) i8 else T;
-const U = std.meta.Int(false, S.bit_count);
+const S = if (@typeInfo(T).Int.bits < 8) i8 else T;
+const U = std.meta.Int(false, @typeInfo(S).Int.bits);
const ShiftU = std.math.Log2Int(U);

-const max_group = (U.bit_count + 6) / 7;
+const max_group = (@typeInfo(U).Int.bits + 6) / 7;

var value = @as(U, 0);
var group = @as(ShiftU, 0);
@@ -97,7 +97,7 @@ pub fn readILEB128(comptime T: type, reader: anytype) !T {
if (@bitCast(S, temp) >= 0) return error.Overflow;

// and all the overflowed bits are 1
-const remaining_shift = @intCast(u3, U.bit_count - @as(u16, shift));
+const remaining_shift = @intCast(u3, @typeInfo(U).Int.bits - @as(u16, shift));
const remaining_bits = @bitCast(i8, byte | 0x80) >> remaining_shift;
if (remaining_bits != -1) return error.Overflow;
}
@@ -127,8 +127,8 @@ pub fn readILEB128(comptime T: type, reader: anytype) !T {
/// Write a single signed integer as signed LEB128 to the given writer.
pub fn writeILEB128(writer: anytype, int_value: anytype) !void {
const T = @TypeOf(int_value);
-const S = if (T.bit_count < 8) i8 else T;
-const U = std.meta.Int(false, S.bit_count);
+const S = if (@typeInfo(T).Int.bits < 8) i8 else T;
+const U = std.meta.Int(false, @typeInfo(S).Int.bits);

var value = @intCast(S, int_value);

@@ -173,7 +173,7 @@ pub fn writeILEB128Mem(ptr: []u8, int_value: anytype) !usize {
/// different value without shifting all the following code.
pub fn writeUnsignedFixed(comptime l: usize, ptr: *[l]u8, int: std.meta.Int(false, l * 7)) void {
const T = @TypeOf(int);
-const U = if (T.bit_count < 8) u8 else T;
+const U = if (@typeInfo(T).Int.bits < 8) u8 else T;
var value = @intCast(U, int);

comptime var i = 0;
@@ -346,28 +346,29 @@ test "deserialize unsigned LEB128" {

fn test_write_leb128(value: anytype) !void {
const T = @TypeOf(value);
+const t_signed = @typeInfo(T).Int.is_signed;

-const writeStream = if (T.is_signed) writeILEB128 else writeULEB128;
-const writeMem = if (T.is_signed) writeILEB128Mem else writeULEB128Mem;
-const readStream = if (T.is_signed) readILEB128 else readULEB128;
-const readMem = if (T.is_signed) readILEB128Mem else readULEB128Mem;
+const writeStream = if (t_signed) writeILEB128 else writeULEB128;
+const writeMem = if (t_signed) writeILEB128Mem else writeULEB128Mem;
+const readStream = if (t_signed) readILEB128 else readULEB128;
+const readMem = if (t_signed) readILEB128Mem else readULEB128Mem;

// decode to a larger bit size too, to ensure sign extension
// is working as expected
-const larger_type_bits = ((T.bit_count + 8) / 8) * 8;
-const B = std.meta.Int(T.is_signed, larger_type_bits);
+const larger_type_bits = ((@typeInfo(T).Int.bits + 8) / 8) * 8;
+const B = std.meta.Int(t_signed, larger_type_bits);

const bytes_needed = bn: {
-const S = std.meta.Int(T.is_signed, @sizeOf(T) * 8);
-if (T.bit_count <= 7) break :bn @as(u16, 1);
+const S = std.meta.Int(t_signed, @sizeOf(T) * 8);
+if (@typeInfo(T).Int.bits <= 7) break :bn @as(u16, 1);

const unused_bits = if (value < 0) @clz(T, ~value) else @clz(T, value);
-const used_bits: u16 = (T.bit_count - unused_bits) + @boolToInt(T.is_signed);
+const used_bits: u16 = (@typeInfo(T).Int.bits - unused_bits) + @boolToInt(t_signed);
if (used_bits <= 7) break :bn @as(u16, 1);
break :bn ((used_bits + 6) / 7);
};

-const max_groups = if (T.bit_count == 0) 1 else (T.bit_count + 6) / 7;
+const max_groups = if (@typeInfo(T).Int.bits == 0) 1 else (@typeInfo(T).Int.bits + 6) / 7;

var buf: [max_groups]u8 = undefined;
var fbs = std.io.fixedBufferStream(&buf);
@@ -414,7 +415,7 @@ test "serialize unsigned LEB128" {
const T = std.meta.Int(false, t);
const min = std.math.minInt(T);
const max = std.math.maxInt(T);
-var i = @as(std.meta.Int(false, T.bit_count + 1), min);
+var i = @as(std.meta.Int(false, @typeInfo(T).Int.bits + 1), min);

while (i <= max) : (i += 1) try test_write_leb128(@intCast(T, i));
}
@@ -432,7 +433,7 @@ test "serialize signed LEB128" {
const T = std.meta.Int(true, t);
const min = std.math.minInt(T);
const max = std.math.maxInt(T);
-var i = @as(std.meta.Int(true, T.bit_count + 1), min);
+var i = @as(std.meta.Int(true, @typeInfo(T).Int.bits + 1), min);

while (i <= max) : (i += 1) try test_write_leb128(@intCast(T, i));
}
@@ -93,7 +93,7 @@ pub fn format(
if (@typeInfo(@TypeOf(args)) != .Struct) {
@compileError("Expected tuple or struct argument, found " ++ @typeName(@TypeOf(args)));
}
-if (args.len > ArgSetType.bit_count) {
+if (args.len > @typeInfo(ArgSetType).Int.bits) {
@compileError("32 arguments max are supported per format call");
}

@@ -327,7 +327,7 @@ pub fn formatType(
max_depth: usize,
) @TypeOf(writer).Error!void {
if (comptime std.mem.eql(u8, fmt, "*")) {
-try writer.writeAll(@typeName(@TypeOf(value).Child));
+try writer.writeAll(@typeName(@typeInfo(@TypeOf(value)).Pointer.child));
try writer.writeAll("@");
try formatInt(@ptrToInt(value), 16, false, FormatOptions{}, writer);
return;
@@ -432,12 +432,12 @@ pub fn formatType(
if (info.child == u8) {
return formatText(value, fmt, options, writer);
}
-return format(writer, "{}@{x}", .{ @typeName(T.Child), @ptrToInt(value) });
+return format(writer, "{}@{x}", .{ @typeName(@typeInfo(T).Pointer.child), @ptrToInt(value) });
},
.Enum, .Union, .Struct => {
return formatType(value.*, fmt, options, writer, max_depth);
},
-else => return format(writer, "{}@{x}", .{ @typeName(T.Child), @ptrToInt(value) }),
+else => return format(writer, "{}@{x}", .{ @typeName(@typeInfo(T).Pointer.child), @ptrToInt(value) }),
},
.Many, .C => {
if (ptr_info.sentinel) |sentinel| {
@@ -448,7 +448,7 @@ pub fn formatType(
return formatText(mem.span(value), fmt, options, writer);
}
}
-return format(writer, "{}@{x}", .{ @typeName(T.Child), @ptrToInt(value) });
+return format(writer, "{}@{x}", .{ @typeName(@typeInfo(T).Pointer.child), @ptrToInt(value) });
},
.Slice => {
if (fmt.len > 0 and ((fmt[0] == 'x') or (fmt[0] == 'X'))) {
@@ -538,7 +538,7 @@ pub fn formatIntValue(
radix = 10;
uppercase = false;
} else if (comptime std.mem.eql(u8, fmt, "c")) {
-if (@TypeOf(int_value).bit_count <= 8) {
+if (@typeInfo(@TypeOf(int_value)).Int.bits <= 8) {
return formatAsciiChar(@as(u8, int_value), options, writer);
} else {
@compileError("Cannot print integer that is larger than 8 bits as a ascii");
@@ -947,7 +947,7 @@ pub fn formatInt(
} else
value;

-if (@TypeOf(int_value).is_signed) {
+if (@typeInfo(@TypeOf(int_value)).Int.is_signed) {
return formatIntSigned(int_value, base, uppercase, options, writer);
} else {
return formatIntUnsigned(int_value, base, uppercase, options, writer);
@@ -989,9 +989,10 @@ fn formatIntUnsigned(
writer: anytype,
) !void {
assert(base >= 2);
-var buf: [math.max(@TypeOf(value).bit_count, 1)]u8 = undefined;
-const min_int_bits = comptime math.max(@TypeOf(value).bit_count, @TypeOf(base).bit_count);
-const MinInt = std.meta.Int(@TypeOf(value).is_signed, min_int_bits);
+const value_info = @typeInfo(@TypeOf(value)).Int;
+var buf: [math.max(value_info.bits, 1)]u8 = undefined;
+const min_int_bits = comptime math.max(value_info.bits, @typeInfo(@TypeOf(base)).Int.bits);
+const MinInt = std.meta.Int(value_info.is_signed, min_int_bits);
var a: MinInt = value;
var index: usize = buf.len;

@@ -374,7 +374,7 @@ test "fmt.parseFloat" {
const epsilon = 1e-7;

inline for ([_]type{ f16, f32, f64, f128 }) |T| {
-const Z = std.meta.Int(false, T.bit_count);
+const Z = std.meta.Int(false, @typeInfo(T).Float.bits);

testing.expectError(error.InvalidCharacter, parseFloat(T, ""));
testing.expectError(error.InvalidCharacter, parseFloat(T, " 1"));
@@ -113,7 +113,7 @@ pub fn hash(hasher: anytype, key: anytype, comptime strat: HashStrategy) void {
.Array => hashArray(hasher, key, strat),

.Vector => |info| {
-if (info.child.bit_count % 8 == 0) {
+if (std.meta.bitCount(info.child) % 8 == 0) {
// If there's no unused bits in the child type, we can just hash
// this as an array of bytes.
hasher.update(mem.asBytes(&key));
@@ -952,7 +952,7 @@ pub fn testAllocatorLargeAlignment(base_allocator: *mem.Allocator) mem.Allocator
// very near usize?
if (mem.page_size << 2 > maxInt(usize)) return;

-const USizeShift = std.meta.Int(false, std.math.log2(usize.bit_count));
+const USizeShift = std.meta.Int(false, std.math.log2(std.meta.bitCount(usize)));
const large_align = @as(u29, mem.page_size << 2);

var align_mask: usize = undefined;
@@ -198,28 +198,28 @@ pub fn Reader(

/// Reads a native-endian integer
pub fn readIntNative(self: Self, comptime T: type) !T {
-const bytes = try self.readBytesNoEof((T.bit_count + 7) / 8);
+const bytes = try self.readBytesNoEof((@typeInfo(T).Int.bits + 7) / 8);
return mem.readIntNative(T, &bytes);
}

/// Reads a foreign-endian integer
pub fn readIntForeign(self: Self, comptime T: type) !T {
-const bytes = try self.readBytesNoEof((T.bit_count + 7) / 8);
+const bytes = try self.readBytesNoEof((@typeInfo(T).Int.bits + 7) / 8);
return mem.readIntForeign(T, &bytes);
}

pub fn readIntLittle(self: Self, comptime T: type) !T {
-const bytes = try self.readBytesNoEof((T.bit_count + 7) / 8);
+const bytes = try self.readBytesNoEof((@typeInfo(T).Int.bits + 7) / 8);
return mem.readIntLittle(T, &bytes);
}

pub fn readIntBig(self: Self, comptime T: type) !T {
-const bytes = try self.readBytesNoEof((T.bit_count + 7) / 8);
+const bytes = try self.readBytesNoEof((@typeInfo(T).Int.bits + 7) / 8);
return mem.readIntBig(T, &bytes);
}

pub fn readInt(self: Self, comptime T: type, endian: builtin.Endian) !T {
-const bytes = try self.readBytesNoEof((T.bit_count + 7) / 8);
+const bytes = try self.readBytesNoEof((@typeInfo(T).Int.bits + 7) / 8);
return mem.readInt(T, &bytes, endian);
}

@@ -60,7 +60,7 @@ pub fn Deserializer(comptime endian: builtin.Endian, comptime packing: Packing,

const U = std.meta.Int(false, t_bit_count);
const Log2U = math.Log2Int(U);
-const int_size = (U.bit_count + 7) / 8;
+const int_size = (t_bit_count + 7) / 8;

if (packing == .Bit) {
const result = try self.in_stream.readBitsNoEof(U, t_bit_count);
@@ -73,7 +73,7 @@ pub fn Deserializer(comptime endian: builtin.Endian, comptime packing: Packing,

if (int_size == 1) {
if (t_bit_count == 8) return @bitCast(T, buffer[0]);
-const PossiblySignedByte = std.meta.Int(T.is_signed, 8);
+const PossiblySignedByte = std.meta.Int(@typeInfo(T).Int.is_signed, 8);
return @truncate(T, @bitCast(PossiblySignedByte, buffer[0]));
}

@@ -247,7 +247,7 @@ pub fn Serializer(comptime endian: builtin.Endian, comptime packing: Packing, co

const U = std.meta.Int(false, t_bit_count);
const Log2U = math.Log2Int(U);
-const int_size = (U.bit_count + 7) / 8;
+const int_size = (t_bit_count + 7) / 8;

const u_value = @bitCast(U, value);

@@ -53,7 +53,7 @@ pub fn Writer(
/// Write a native-endian integer.
/// TODO audit non-power-of-two int sizes
pub fn writeIntNative(self: Self, comptime T: type, value: T) Error!void {
-var bytes: [(T.bit_count + 7) / 8]u8 = undefined;
+var bytes: [(@typeInfo(T).Int.bits + 7) / 8]u8 = undefined;
mem.writeIntNative(T, &bytes, value);
return self.writeAll(&bytes);
}
@@ -61,28 +61,28 @@ pub fn Writer(
/// Write a foreign-endian integer.
/// TODO audit non-power-of-two int sizes
pub fn writeIntForeign(self: Self, comptime T: type, value: T) Error!void {
-var bytes: [(T.bit_count + 7) / 8]u8 = undefined;
+var bytes: [(@typeInfo(T).Int.bits + 7) / 8]u8 = undefined;
mem.writeIntForeign(T, &bytes, value);
return self.writeAll(&bytes);
}

/// TODO audit non-power-of-two int sizes
pub fn writeIntLittle(self: Self, comptime T: type, value: T) Error!void {
-var bytes: [(T.bit_count + 7) / 8]u8 = undefined;
+var bytes: [(@typeInfo(T).Int.bits + 7) / 8]u8 = undefined;
mem.writeIntLittle(T, &bytes, value);
return self.writeAll(&bytes);
}

/// TODO audit non-power-of-two int sizes
pub fn writeIntBig(self: Self, comptime T: type, value: T) Error!void {
-var bytes: [(T.bit_count + 7) / 8]u8 = undefined;
+var bytes: [(@typeInfo(T).Int.bits + 7) / 8]u8 = undefined;
mem.writeIntBig(T, &bytes, value);
return self.writeAll(&bytes);
}

/// TODO audit non-power-of-two int sizes
pub fn writeInt(self: Self, comptime T: type, value: T, endian: builtin.Endian) Error!void {
-var bytes: [(T.bit_count + 7) / 8]u8 = undefined;
+var bytes: [(@typeInfo(T).Int.bits + 7) / 8]u8 = undefined;
mem.writeInt(T, &bytes, value, endian);
return self.writeAll(&bytes);
}
@@ -195,7 +195,7 @@ test "" {
pub fn floatMantissaBits(comptime T: type) comptime_int {
assert(@typeInfo(T) == .Float);

-return switch (T.bit_count) {
+return switch (@typeInfo(T).Float.bits) {
16 => 10,
32 => 23,
64 => 52,
@@ -208,7 +208,7 @@ pub fn floatMantissaBits(comptime T: type) comptime_int {
pub fn floatExponentBits(comptime T: type) comptime_int {
assert(@typeInfo(T) == .Float);

-return switch (T.bit_count) {
+return switch (@typeInfo(T).Float.bits) {
16 => 5,
32 => 8,
64 => 11,
@@ -347,9 +347,9 @@ pub fn shlExact(comptime T: type, a: T, shift_amt: Log2Int(T)) !T {
/// A negative shift amount results in a right shift.
pub fn shl(comptime T: type, a: T, shift_amt: anytype) T {
const abs_shift_amt = absCast(shift_amt);
-const casted_shift_amt = if (abs_shift_amt >= T.bit_count) return 0 else @intCast(Log2Int(T), abs_shift_amt);
+const casted_shift_amt = if (abs_shift_amt >= @typeInfo(T).Int.bits) return 0 else @intCast(Log2Int(T), abs_shift_amt);

-if (@TypeOf(shift_amt) == comptime_int or @TypeOf(shift_amt).is_signed) {
+if (@TypeOf(shift_amt) == comptime_int or @typeInfo(@TypeOf(shift_amt)).Int.is_signed) {
if (shift_amt < 0) {
return a >> casted_shift_amt;
}
@@ -373,9 +373,9 @@ test "math.shl" {
/// A negative shift amount results in a left shift.
pub fn shr(comptime T: type, a: T, shift_amt: anytype) T {
const abs_shift_amt = absCast(shift_amt);
-const casted_shift_amt = if (abs_shift_amt >= T.bit_count) return 0 else @intCast(Log2Int(T), abs_shift_amt);
+const casted_shift_amt = if (abs_shift_amt >= @typeInfo(T).Int.bits) return 0 else @intCast(Log2Int(T), abs_shift_amt);

-if (@TypeOf(shift_amt) == comptime_int or @TypeOf(shift_amt).is_signed) {
+if (@TypeOf(shift_amt) == comptime_int or @typeInfo(@TypeOf(shift_amt)).Int.is_signed) {
if (shift_amt >= 0) {
return a >> casted_shift_amt;
} else {
@@ -400,11 +400,11 @@ test "math.shr" {
/// Rotates right. Only unsigned values can be rotated.
/// Negative shift values results in shift modulo the bit count.
pub fn rotr(comptime T: type, x: T, r: anytype) T {
-if (T.is_signed) {
+if (@typeInfo(T).Int.is_signed) {
@compileError("cannot rotate signed integer");
} else {
-const ar = @mod(r, T.bit_count);
-return shr(T, x, ar) | shl(T, x, T.bit_count - ar);
+const ar = @mod(r, @typeInfo(T).Int.bits);
+return shr(T, x, ar) | shl(T, x, @typeInfo(T).Int.bits - ar);
}
}

@@ -419,11 +419,11 @@ test "math.rotr" {
/// Rotates left. Only unsigned values can be rotated.
/// Negative shift values results in shift modulo the bit count.
pub fn rotl(comptime T: type, x: T, r: anytype) T {
-if (T.is_signed) {
+if (@typeInfo(T).Int.is_signed) {
@compileError("cannot rotate signed integer");
} else {
-const ar = @mod(r, T.bit_count);
-return shl(T, x, ar) | shr(T, x, T.bit_count - ar);
+const ar = @mod(r, @typeInfo(T).Int.bits);
+return shl(T, x, ar) | shr(T, x, @typeInfo(T).Int.bits - ar);
}
}

@@ -438,7 +438,7 @@ test "math.rotl" {
pub fn Log2Int(comptime T: type) type {
// comptime ceil log2
comptime var count = 0;
-comptime var s = T.bit_count - 1;
+comptime var s = @typeInfo(T).Int.bits - 1;
inline while (s != 0) : (s >>= 1) {
count += 1;
}
@@ -524,7 +524,7 @@ fn testOverflow() void {
pub fn absInt(x: anytype) !@TypeOf(x) {
const T = @TypeOf(x);
comptime assert(@typeInfo(T) == .Int); // must pass an integer to absInt
-comptime assert(T.is_signed); // must pass a signed integer to absInt
+comptime assert(@typeInfo(T).Int.is_signed); // must pass a signed integer to absInt

if (x == minInt(@TypeOf(x))) {
return error.Overflow;
@@ -557,7 +557,7 @@ fn testAbsFloat() void {
pub fn divTrunc(comptime T: type, numerator: T, denominator: T) !T {
@setRuntimeSafety(false);
if (denominator == 0) return error.DivisionByZero;
-if (@typeInfo(T) == .Int and T.is_signed and numerator == minInt(T) and denominator == -1) return error.Overflow;
+if (@typeInfo(T) == .Int and @typeInfo(T).Int.is_signed and numerator == minInt(T) and denominator == -1) return error.Overflow;
return @divTrunc(numerator, denominator);
}

@@ -578,7 +578,7 @@ fn testDivTrunc() void {
pub fn divFloor(comptime T: type, numerator: T, denominator: T) !T {
@setRuntimeSafety(false);
if (denominator == 0) return error.DivisionByZero;
-if (@typeInfo(T) == .Int and T.is_signed and numerator == minInt(T) and denominator == -1) return error.Overflow;
+if (@typeInfo(T) == .Int and @typeInfo(T).Int.is_signed and numerator == minInt(T) and denominator == -1) return error.Overflow;
return @divFloor(numerator, denominator);
}

@@ -652,7 +652,7 @@ fn testDivCeil() void {
pub fn divExact(comptime T: type, numerator: T, denominator: T) !T {
@setRuntimeSafety(false);
if (denominator == 0) return error.DivisionByZero;
-if (@typeInfo(T) == .Int and T.is_signed and numerator == minInt(T) and denominator == -1) return error.Overflow;
+if (@typeInfo(T) == .Int and @typeInfo(T).Int.is_signed and numerator == minInt(T) and denominator == -1) return error.Overflow;
const result = @divTrunc(numerator, denominator);
if (result * denominator != numerator) return error.UnexpectedRemainder;
return result;
@@ -757,10 +757,10 @@ test "math.absCast" {

/// Returns the negation of the integer parameter.
/// Result is a signed integer.
-pub fn negateCast(x: anytype) !std.meta.Int(true, @TypeOf(x).bit_count) {
-if (@TypeOf(x).is_signed) return negate(x);
+pub fn negateCast(x: anytype) !std.meta.Int(true, std.meta.bitCount(@TypeOf(x))) {
+if (@typeInfo(@TypeOf(x)).Int.is_signed) return negate(x);

-const int = std.meta.Int(true, @TypeOf(x).bit_count);
+const int = std.meta.Int(true, std.meta.bitCount(@TypeOf(x)));
if (x > -minInt(int)) return error.Overflow;

if (x == -minInt(int)) return minInt(int);
@@ -823,7 +823,7 @@ pub fn floorPowerOfTwo(comptime T: type, value: T) T {
var x = value;

comptime var i = 1;
-inline while (T.bit_count > i) : (i *= 2) {
+inline while (@typeInfo(T).Int.bits > i) : (i *= 2) {
x |= (x >> i);
}

@@ -847,13 +847,13 @@ fn testFloorPowerOfTwo() void {
/// Returns the next power of two (if the value is not already a power of two).
/// Only unsigned integers can be used. Zero is not an allowed input.
/// Result is a type with 1 more bit than the input type.
-pub fn ceilPowerOfTwoPromote(comptime T: type, value: T) std.meta.Int(T.is_signed, T.bit_count + 1) {
+pub fn ceilPowerOfTwoPromote(comptime T: type, value: T) std.meta.Int(@typeInfo(T).Int.is_signed, @typeInfo(T).Int.bits + 1) {
comptime assert(@typeInfo(T) == .Int);
-comptime assert(!T.is_signed);
+comptime assert(!@typeInfo(T).Int.is_signed);
assert(value != 0);
-comptime const PromotedType = std.meta.Int(T.is_signed, T.bit_count + 1);
+comptime const PromotedType = std.meta.Int(@typeInfo(T).Int.is_signed, @typeInfo(T).Int.bits + 1);
comptime const shiftType = std.math.Log2Int(PromotedType);
-return @as(PromotedType, 1) << @intCast(shiftType, T.bit_count - @clz(T, value - 1));
+return @as(PromotedType, 1) << @intCast(shiftType, @typeInfo(T).Int.bits - @clz(T, value - 1));
}

/// Returns the next power of two (if the value is not already a power of two).
@@ -861,9 +861,10 @@ pub fn ceilPowerOfTwoPromote(comptime T: type, value: T) std.meta.Int(T.is_signe
/// If the value doesn't fit, returns an error.
pub fn ceilPowerOfTwo(comptime T: type, value: T) (error{Overflow}!T) {
comptime assert(@typeInfo(T) == .Int);
-comptime assert(!T.is_signed);
-comptime const PromotedType = std.meta.Int(T.is_signed, T.bit_count + 1);
-comptime const overflowBit = @as(PromotedType, 1) << T.bit_count;
+const info = @typeInfo(T).Int;
+comptime assert(!info.is_signed);
+comptime const PromotedType = std.meta.Int(info.is_signed, info.bits + 1);
+comptime const overflowBit = @as(PromotedType, 1) << info.bits;
var x = ceilPowerOfTwoPromote(T, value);
if (overflowBit & x != 0) {
return error.Overflow;
@@ -911,7 +912,7 @@ fn testCeilPowerOfTwo() !void {

pub fn log2_int(comptime T: type, x: T) Log2Int(T) {
assert(x != 0);
-return @intCast(Log2Int(T), T.bit_count - 1 - @clz(T, x));
+return @intCast(Log2Int(T), @typeInfo(T).Int.bits - 1 - @clz(T, x));
}

pub fn log2_int_ceil(comptime T: type, x: T) Log2Int(T) {
@@ -1008,8 +1009,8 @@ test "max value type" {
testing.expect(x == 2147483647);
}

-pub fn mulWide(comptime T: type, a: T, b: T) std.meta.Int(T.is_signed, T.bit_count * 2) {
-const ResultInt = std.meta.Int(T.is_signed, T.bit_count * 2);
+pub fn mulWide(comptime T: type, a: T, b: T) std.meta.Int(@typeInfo(T).Int.is_signed, @typeInfo(T).Int.bits * 2) {
+const ResultInt = std.meta.Int(@typeInfo(T).Int.is_signed, @typeInfo(T).Int.bits * 2);
return @as(ResultInt, a) * @as(ResultInt, b);
}

@@ -9,14 +9,15 @@ const assert = std.debug.assert;
pub const Rational = @import("big/rational.zig").Rational;
pub const int = @import("big/int.zig");
pub const Limb = usize;
-pub const DoubleLimb = std.meta.IntType(false, 2 * Limb.bit_count);
-pub const SignedDoubleLimb = std.meta.IntType(true, DoubleLimb.bit_count);
+const limb_info = @typeInfo(Limb).Int;
+pub const DoubleLimb = std.meta.IntType(false, 2 * limb_info.bits);
+pub const SignedDoubleLimb = std.meta.IntType(true, 2 * limb_info.bits);
pub const Log2Limb = std.math.Log2Int(Limb);

comptime {
-assert(std.math.floorPowerOfTwo(usize, Limb.bit_count) == Limb.bit_count);
-assert(Limb.bit_count <= 64); // u128 set is unsupported
-assert(Limb.is_signed == false);
+assert(std.math.floorPowerOfTwo(usize, limb_info.bits) == limb_info.bits);
+assert(limb_info.bits <= 64); // u128 set is unsupported
+assert(limb_info.is_signed == false);
}

test "" {
@@ -6,6 +6,7 @@
const std = @import("../../std.zig");
const math = std.math;
const Limb = std.math.big.Limb;
+const limb_bits = @typeInfo(Limb).Int.bits;
const DoubleLimb = std.math.big.DoubleLimb;
const SignedDoubleLimb = std.math.big.SignedDoubleLimb;
const Log2Limb = std.math.big.Log2Limb;
@@ -28,7 +29,7 @@ pub fn calcLimbLen(scalar: anytype) usize {
},
.ComptimeInt => {
const w_value = if (scalar < 0) -scalar else scalar;
-return @divFloor(math.log2(w_value), Limb.bit_count) + 1;
+return @divFloor(math.log2(w_value), limb_bits) + 1;
},
else => @compileError("parameter must be a primitive integer type"),
}
@@ -54,7 +55,7 @@ pub fn calcSetStringLimbsBufferLen(base: u8, string_len: usize) usize {
}

pub fn calcSetStringLimbCount(base: u8, string_len: usize) usize {
-return (string_len + (Limb.bit_count / base - 1)) / (Limb.bit_count / base);
+return (string_len + (limb_bits / base - 1)) / (limb_bits / base);
}

/// a + b * c + *carry, sets carry to the overflow bits
@@ -68,7 +69,7 @@ pub fn addMulLimbWithCarry(a: Limb, b: Limb, c: Limb, carry: *Limb) Limb {
// r2 = b * c
const bc = @as(DoubleLimb, math.mulWide(Limb, b, c));
const r2 = @truncate(Limb, bc);
-const c2 = @truncate(Limb, bc >> Limb.bit_count);
+const c2 = @truncate(Limb, bc >> limb_bits);

// r1 = r1 + r2
const c3: Limb = @boolToInt(@addWithOverflow(Limb, r1, r2, &r1));
@@ -181,7 +182,7 @@ pub const Mutable = struct {

switch (@typeInfo(T)) {
.Int => |info| {
-const UT = if (T.is_signed) std.meta.Int(false, T.bit_count - 1) else T;
+const UT = if (info.is_signed) std.meta.Int(false, info.bits - 1) else T;

const needed_limbs = @sizeOf(UT) / @sizeOf(Limb);
assert(needed_limbs <= self.limbs.len); // value too big
@@ -190,7 +191,7 @@ pub const Mutable = struct {

var w_value: UT = if (value < 0) @intCast(UT, -value) else @intCast(UT, value);

-if (info.bits <= Limb.bit_count) {
+if (info.bits <= limb_bits) {
self.limbs[0] = @as(Limb, w_value);
self.len += 1;
} else {
@@ -200,15 +201,15 @@ pub const Mutable = struct {
self.len += 1;

// TODO: shift == 64 at compile-time fails. Fails on u128 limbs.
-w_value >>= Limb.bit_count / 2;
-w_value >>= Limb.bit_count / 2;
+w_value >>= limb_bits / 2;
+w_value >>= limb_bits / 2;
}
}
},
.ComptimeInt => {
comptime var w_value = if (value < 0) -value else value;

-const req_limbs = @divFloor(math.log2(w_value), Limb.bit_count) + 1;
+const req_limbs = @divFloor(math.log2(w_value), limb_bits) + 1;
assert(req_limbs <= self.limbs.len); // value too big

self.len = req_limbs;
@@ -217,14 +218,14 @@ pub const Mutable = struct {
if (w_value <= maxInt(Limb)) {
self.limbs[0] = w_value;
} else {
-const mask = (1 << Limb.bit_count) - 1;
+const mask = (1 << limb_bits) - 1;

comptime var i = 0;
inline while (w_value != 0) : (i += 1) {
self.limbs[i] = w_value & mask;

-w_value >>= Limb.bit_count / 2;
-w_value >>= Limb.bit_count / 2;
+w_value >>= limb_bits / 2;
+w_value >>= limb_bits / 2;
}
}
},
@@ -506,7 +507,7 @@ pub const Mutable = struct {
/// `a.limbs.len + (shift / (@sizeOf(Limb) * 8))`.
pub fn shiftLeft(r: *Mutable, a: Const, shift: usize) void {
llshl(r.limbs[0..], a.limbs[0..a.limbs.len], shift);
-r.normalize(a.limbs.len + (shift / Limb.bit_count) + 1);
+r.normalize(a.limbs.len + (shift / limb_bits) + 1);
r.positive = a.positive;
}

@@ -516,7 +517,7 @@ pub const Mutable = struct {
/// Asserts there is enough memory to fit the result. The upper bound Limb count is
/// `a.limbs.len - (shift / (@sizeOf(Limb) * 8))`.
pub fn shiftRight(r: *Mutable, a: Const, shift: usize) void {
-if (a.limbs.len <= shift / Limb.bit_count) {
+if (a.limbs.len <= shift / limb_bits) {
r.len = 1;
r.positive = true;
r.limbs[0] = 0;
@@ -524,7 +525,7 @@ pub const Mutable = struct {
}

const r_len = llshr(r.limbs[0..], a.limbs[0..a.limbs.len], shift);
-r.len = a.limbs.len - (shift / Limb.bit_count);
+r.len = a.limbs.len - (shift / limb_bits);
r.positive = a.positive;
}

@@ -772,7 +773,7 @@ pub const Mutable = struct {
}

if (ab_zero_limb_count != 0) {
-rem.shiftLeft(rem.toConst(), ab_zero_limb_count * Limb.bit_count);
+rem.shiftLeft(rem.toConst(), ab_zero_limb_count * limb_bits);
}
}

@@ -803,10 +804,10 @@ pub const Mutable = struct {
};
tmp.limbs[0] = 0;

-// Normalize so y > Limb.bit_count / 2 (i.e. leading bit is set) and even
+// Normalize so y > limb_bits / 2 (i.e. leading bit is set) and even
var norm_shift = @clz(Limb, y.limbs[y.len - 1]);
if (norm_shift == 0 and y.toConst().isOdd()) {
-norm_shift = Limb.bit_count;
+norm_shift = limb_bits;
}
x.shiftLeft(x.toConst(), norm_shift);
y.shiftLeft(y.toConst(), norm_shift);
@@ -820,7 +821,7 @@ pub const Mutable = struct {
mem.set(Limb, q.limbs[0..q.len], 0);

// 2.
-tmp.shiftLeft(y.toConst(), Limb.bit_count * (n - t));
+tmp.shiftLeft(y.toConst(), limb_bits * (n - t));
while (x.toConst().order(tmp.toConst()) != .lt) {
q.limbs[n - t] += 1;
x.sub(x.toConst(), tmp.toConst());
@@ -833,7 +834,7 @@ pub const Mutable = struct {
if (x.limbs[i] == y.limbs[t]) {
q.limbs[i - t - 1] = maxInt(Limb);
} else {
-const num = (@as(DoubleLimb, x.limbs[i]) << Limb.bit_count) | @as(DoubleLimb, x.limbs[i - 1]);
+const num = (@as(DoubleLimb, x.limbs[i]) << limb_bits) | @as(DoubleLimb, x.limbs[i - 1]);
const z = @intCast(Limb, num / @as(DoubleLimb, y.limbs[t]));
q.limbs[i - t - 1] = if (z > maxInt(Limb)) maxInt(Limb) else @as(Limb, z);
}
@@ -862,11 +863,11 @@ pub const Mutable = struct {
// 3.3
tmp.set(q.limbs[i - t - 1]);
tmp.mul(tmp.toConst(), y.toConst(), mul_limb_buf, allocator);
-tmp.shiftLeft(tmp.toConst(), Limb.bit_count * (i - t - 1));
+tmp.shiftLeft(tmp.toConst(), limb_bits * (i - t - 1));
x.sub(x.toConst(), tmp.toConst());

if (!x.positive) {
-tmp.shiftLeft(y.toConst(), Limb.bit_count * (i - t - 1));
+tmp.shiftLeft(y.toConst(), limb_bits * (i - t - 1));
x.add(x.toConst(), tmp.toConst());
q.limbs[i - t - 1] -= 1;
}
@@ -949,7 +950,7 @@ pub const Const = struct {

/// Returns the number of bits required to represent the absolute value of an integer.
pub fn bitCountAbs(self: Const) usize {
-return (self.limbs.len - 1) * Limb.bit_count + (Limb.bit_count - @clz(Limb, self.limbs[self.limbs.len - 1]));
+return (self.limbs.len - 1) * limb_bits + (limb_bits - @clz(Limb, self.limbs[self.limbs.len - 1]));
}

/// Returns the number of bits required to represent the integer in twos-complement form.
@@ -1019,10 +1020,10 @@ pub const Const = struct {
/// Returns an error if self cannot be narrowed into the requested type without truncation.
pub fn to(self: Const, comptime T: type) ConvertError!T {
switch (@typeInfo(T)) {
-.Int => {
-const UT = std.meta.Int(false, T.bit_count);
+.Int => |info| {
+const UT = std.meta.Int(false, info.bits);

-if (self.bitCountTwosComp() > T.bit_count) {
+if (self.bitCountTwosComp() > info.bits) {
return error.TargetTooSmall;
}

@@ -1033,12 +1034,12 @@ pub const Const = struct {
} else {
for (self.limbs[0..self.limbs.len]) |_, ri| {
const limb = self.limbs[self.limbs.len - ri - 1];
-r <<= Limb.bit_count;
+r <<= limb_bits;
r |= limb;
}
}

-if (!T.is_signed) {
+if (!info.is_signed) {
return if (self.positive) @intCast(T, r) else error.NegativeIntoUnsigned;
} else {
if (self.positive) {
@@ -1149,7 +1150,7 @@ pub const Const = struct {

outer: for (self.limbs[0..self.limbs.len]) |limb| {
var shift: usize = 0;
-while (shift < Limb.bit_count) : (shift += base_shift) {
+while (shift < limb_bits) : (shift += base_shift) {
const r = @intCast(u8, (limb >> @intCast(Log2Limb, shift)) & @as(Limb, base - 1));
const ch = std.fmt.digitToChar(r, uppercase);
string[digits_len] = ch;
@@ -1295,7 +1296,7 @@ pub const Const = struct {
/// Memory is allocated as needed to ensure operations never overflow. The range
/// is bounded only by available memory.
pub const Managed = struct {
-pub const sign_bit: usize = 1 << (usize.bit_count - 1);
+pub const sign_bit: usize = 1 << (@typeInfo(usize).Int.bits - 1);

/// Default number of limbs to allocate on creation of a `Managed`.
pub const default_capacity = 4;
@@ -1716,7 +1717,7 @@ pub const Managed = struct {

/// r = a << shift, in other words, r = a * 2^shift
pub fn shiftLeft(r: *Managed, a: Managed, shift: usize) !void {
-try r.ensureCapacity(a.len() + (shift / Limb.bit_count) + 1);
+try r.ensureCapacity(a.len() + (shift / limb_bits) + 1);
var m = r.toMutable();
m.shiftLeft(a.toConst(), shift);
r.setMetadata(m.positive, m.len);
@@ -1724,13 +1725,13 @@ pub const Managed = struct {

/// r = a >> shift
pub fn shiftRight(r: *Managed, a: Managed, shift: usize) !void {
-if (a.len() <= shift / Limb.bit_count) {
+if (a.len() <= shift / limb_bits) {
r.metadata = 1;
r.limbs[0] = 0;
return;
}

-try r.ensureCapacity(a.len() - (shift / Limb.bit_count));
+try r.ensureCapacity(a.len() - (shift / limb_bits));
var m = r.toMutable();
m.shiftRight(a.toConst(), shift);
r.setMetadata(m.positive, m.len);
@@ -2021,7 +2022,7 @@ fn lldiv1(quo: []Limb, rem: *Limb, a: []const Limb, b: Limb) void {
rem.* = 0;
for (a) |_, ri| {
const i = a.len - ri - 1;
-const pdiv = ((@as(DoubleLimb, rem.*) << Limb.bit_count) | a[i]);
+const pdiv = ((@as(DoubleLimb, rem.*) << limb_bits) | a[i]);

if (pdiv == 0) {
quo[i] = 0;
@@ -2042,10 +2043,10 @@ fn lldiv1(quo: []Limb, rem: *Limb, a: []const Limb, b: Limb) void {
fn llshl(r: []Limb, a: []const Limb, shift: usize) void {
@setRuntimeSafety(debug_safety);
assert(a.len >= 1);
-assert(r.len >= a.len + (shift / Limb.bit_count) + 1);
+assert(r.len >= a.len + (shift / limb_bits) + 1);

-const limb_shift = shift / Limb.bit_count + 1;
-const interior_limb_shift = @intCast(Log2Limb, shift % Limb.bit_count);
+const limb_shift = shift / limb_bits + 1;
+const interior_limb_shift = @intCast(Log2Limb, shift % limb_bits);

var carry: Limb = 0;
var i: usize = 0;
@@ -2057,7 +2058,7 @@ fn llshl(r: []Limb, a: []const Limb, shift: usize) void {
r[dst_i] = carry | @call(.{ .modifier = .always_inline }, math.shr, .{
Limb,
src_digit,
-Limb.bit_count - @intCast(Limb, interior_limb_shift),
+limb_bits - @intCast(Limb, interior_limb_shift),
});
carry = (src_digit << interior_limb_shift);
}
@@ -2069,10 +2070,10 @@ fn llshl(r: []Limb, a: []const Limb, shift: usize) void {
fn llshr(r: []Limb, a: []const Limb, shift: usize) void {
@setRuntimeSafety(debug_safety);
assert(a.len >= 1);
-assert(r.len >= a.len - (shift / Limb.bit_count));
+assert(r.len >= a.len - (shift / limb_bits));

-const limb_shift = shift / Limb.bit_count;
-const interior_limb_shift = @intCast(Log2Limb, shift % Limb.bit_count);
+const limb_shift = shift / limb_bits;
+const interior_limb_shift = @intCast(Log2Limb, shift % limb_bits);

var carry: Limb = 0;
var i: usize = 0;
@@ -2085,7 +2086,7 @@ fn llshr(r: []Limb, a: []const Limb, shift: usize) void {
carry = @call(.{ .modifier = .always_inline }, math.shl, .{
Limb,
src_digit,
-Limb.bit_count - @intCast(Limb, interior_limb_shift),
+limb_bits - @intCast(Limb, interior_limb_shift),
});
}
}
@@ -2135,7 +2136,7 @@ fn fixedIntFromSignedDoubleLimb(A: SignedDoubleLimb, storage: []Limb) Mutable {
const A_is_positive = A >= 0;
const Au = @intCast(DoubleLimb, if (A < 0) -A else A);
storage[0] = @truncate(Limb, Au);
-storage[1] = @truncate(Limb, Au >> Limb.bit_count);
+storage[1] = @truncate(Limb, Au >> limb_bits);
return .{
.limbs = storage[0..2],
.positive = A_is_positive,
@@ -23,13 +23,13 @@ test "big.int comptime_int set" {
var a = try Managed.initSet(testing.allocator, s);
defer a.deinit();

-const s_limb_count = 128 / Limb.bit_count;
+const s_limb_count = 128 / @typeInfo(Limb).Int.bits;

comptime var i: usize = 0;
inline while (i < s_limb_count) : (i += 1) {
const result = @as(Limb, s & maxInt(Limb));
-s >>= Limb.bit_count / 2;
-s >>= Limb.bit_count / 2;
+s >>= @typeInfo(Limb).Int.bits / 2;
+s >>= @typeInfo(Limb).Int.bits / 2;
testing.expect(a.limbs[i] == result);
}
}
@@ -136,7 +136,7 @@ pub const Rational = struct {
// Translated from golang.go/src/math/big/rat.go.
debug.assert(@typeInfo(T) == .Float);

-const UnsignedInt = std.meta.Int(false, T.bit_count);
+const UnsignedInt = std.meta.Int(false, @typeInfo(T).Float.bits);
const f_bits = @bitCast(UnsignedInt, f);

const exponent_bits = math.floatExponentBits(T);
@@ -194,8 +194,8 @@ pub const Rational = struct {
// TODO: Indicate whether the result is not exact.
debug.assert(@typeInfo(T) == .Float);

-const fsize = T.bit_count;
-const BitReprType = std.meta.Int(false, T.bit_count);
+const fsize = @typeInfo(T).Float.bits;
+const BitReprType = std.meta.Int(false, fsize);

const msize = math.floatMantissaBits(T);
const msize1 = msize + 1;
@@ -475,16 +475,18 @@ pub const Rational = struct {
fn extractLowBits(a: Int, comptime T: type) T {
testing.expect(@typeInfo(T) == .Int);

-if (T.bit_count <= Limb.bit_count) {
+const t_bits = @typeInfo(T).Int.bits;
+const limb_bits = @typeInfo(Limb).Int.bits;
+if (t_bits <= limb_bits) {
return @truncate(T, a.limbs[0]);
} else {
var r: T = 0;
comptime var i: usize = 0;

-// Remainder is always 0 since if T.bit_count >= Limb.bit_count -> Limb | T and both
+// Remainder is always 0 since if t_bits >= limb_bits -> Limb | T and both
// are powers of two.
-inline while (i < T.bit_count / Limb.bit_count) : (i += 1) {
-r |= math.shl(T, a.limbs[i], i * Limb.bit_count);
+inline while (i < t_bits / limb_bits) : (i += 1) {
+r |= math.shl(T, a.limbs[i], i * limb_bits);
}

return r;
@@ -49,7 +49,7 @@ const pi4c = 2.69515142907905952645E-15;
const m4pi = 1.273239544735162542821171882678754627704620361328125;

fn cos_(comptime T: type, x_: T) T {
-const I = std.meta.Int(true, T.bit_count);
+const I = std.meta.Int(true, @typeInfo(T).Float.bits);

var x = x_;
if (math.isNan(x) or math.isInf(x)) {
@@ -128,7 +128,7 @@ pub fn pow(comptime T: type, x: T, y: T) T {
if (yf != 0 and x < 0) {
return math.nan(T);
}
-if (yi >= 1 << (T.bit_count - 1)) {
+if (yi >= 1 << (@typeInfo(T).Float.bits - 1)) {
return math.exp(y * math.ln(x));
}

@@ -150,7 +150,7 @@ pub fn pow(comptime T: type, x: T, y: T) T {
var xe = r2.exponent;
var x1 = r2.significand;

-var i = @floatToInt(std.meta.Int(true, T.bit_count), yi);
+var i = @floatToInt(std.meta.Int(true, @typeInfo(T).Float.bits), yi);
while (i != 0) : (i >>= 1) {
const overflow_shift = math.floatExponentBits(T) + 1;
if (xe < -(1 << overflow_shift) or (1 << overflow_shift) < xe) {
@@ -50,7 +50,7 @@ const pi4c = 2.69515142907905952645E-15;
const m4pi = 1.273239544735162542821171882678754627704620361328125;

fn sin_(comptime T: type, x_: T) T {
-const I = std.meta.Int(true, T.bit_count);
+const I = std.meta.Int(true, @typeInfo(T).Float.bits);

var x = x_;
if (x == 0 or math.isNan(x)) {
@@ -36,10 +36,10 @@ pub fn sqrt(x: anytype) Sqrt(@TypeOf(x)) {
}
}

-fn sqrt_int(comptime T: type, value: T) std.meta.Int(false, T.bit_count / 2) {
+fn sqrt_int(comptime T: type, value: T) std.meta.Int(false, @typeInfo(T).Int.bits / 2) {
var op = value;
var res: T = 0;
-var one: T = 1 << (T.bit_count - 2);
+var one: T = 1 << (@typeInfo(T).Int.bits - 2);

// "one" starts at the highest power of four <= than the argument.
while (one > op) {
@@ -55,7 +55,7 @@ fn sqrt_int(comptime T: type, value: T) std.meta.Int(false, T.bit_count / 2) {
one >>= 2;
}

-const ResultType = std.meta.Int(false, T.bit_count / 2);
+const ResultType = std.meta.Int(false, @typeInfo(T).Int.bits / 2);
return @intCast(ResultType, res);
}

@@ -43,7 +43,7 @@ const pi4c = 2.69515142907905952645E-15;
const m4pi = 1.273239544735162542821171882678754627704620361328125;

fn tan_(comptime T: type, x_: T) T {
-const I = std.meta.Int(true, T.bit_count);
+const I = std.meta.Int(true, @typeInfo(T).Float.bits);

var x = x_;
if (x == 0 or math.isNan(x)) {
@ -949,7 +949,7 @@ pub fn readVarInt(comptime ReturnType: type, bytes: []const u8, endian: builtin.
|
||||
/// This function cannot fail and cannot cause undefined behavior.
|
||||
/// Assumes the endianness of memory is native. This means the function can
|
||||
/// simply pointer cast memory.
|
||||
pub fn readIntNative(comptime T: type, bytes: *const [@divExact(T.bit_count, 8)]u8) T {
|
||||
pub fn readIntNative(comptime T: type, bytes: *const [@divExact(@typeInfo(T).Int.bits, 8)]u8) T {
|
||||
return @ptrCast(*align(1) const T, bytes).*;
|
||||
}
|
||||
|
||||
@ -957,7 +957,7 @@ pub fn readIntNative(comptime T: type, bytes: *const [@divExact(T.bit_count, 8)]
|
||||
/// The bit count of T must be evenly divisible by 8.
|
||||
/// This function cannot fail and cannot cause undefined behavior.
|
||||
/// Assumes the endianness of memory is foreign, so it must byte-swap.
|
||||
pub fn readIntForeign(comptime T: type, bytes: *const [@divExact(T.bit_count, 8)]u8) T {
|
||||
pub fn readIntForeign(comptime T: type, bytes: *const [@divExact(@typeInfo(T).Int.bits, 8)]u8) T {
|
||||
return @byteSwap(T, readIntNative(T, bytes));
|
||||
}
|
||||
|
||||
@ -971,18 +971,18 @@ pub const readIntBig = switch (builtin.endian) {
|
||||
.Big => readIntNative,
|
||||
};
|
||||
|
||||
/// Asserts that bytes.len >= T.bit_count / 8. Reads the integer starting from index 0
|
||||
/// Asserts that bytes.len >= @typeInfo(T).Int.bits / 8. Reads the integer starting from index 0
|
||||
/// and ignores extra bytes.
|
||||
/// The bit count of T must be evenly divisible by 8.
|
||||
/// Assumes the endianness of memory is native. This means the function can
|
||||
/// simply pointer cast memory.
|
||||
pub fn readIntSliceNative(comptime T: type, bytes: []const u8) T {
|
||||
const n = @divExact(T.bit_count, 8);
|
||||
const n = @divExact(@typeInfo(T).Int.bits, 8);
|
||||
assert(bytes.len >= n);
|
||||
return readIntNative(T, bytes[0..n]);
|
||||
}
|
||||
|
||||
/// Asserts that bytes.len >= T.bit_count / 8. Reads the integer starting from index 0
|
||||
/// Asserts that bytes.len >= @typeInfo(T).Int.bits / 8. Reads the integer starting from index 0
|
||||
/// and ignores extra bytes.
|
||||
/// The bit count of T must be evenly divisible by 8.
|
||||
/// Assumes the endianness of memory is foreign, so it must byte-swap.
|
||||
@ -1003,7 +1003,7 @@ pub const readIntSliceBig = switch (builtin.endian) {
|
||||
/// Reads an integer from memory with bit count specified by T.
|
||||
/// The bit count of T must be evenly divisible by 8.
|
||||
/// This function cannot fail and cannot cause undefined behavior.
|
||||
pub fn readInt(comptime T: type, bytes: *const [@divExact(T.bit_count, 8)]u8, endian: builtin.Endian) T {
|
||||
pub fn readInt(comptime T: type, bytes: *const [@divExact(@typeInfo(T).Int.bits, 8)]u8, endian: builtin.Endian) T {
|
||||
if (endian == builtin.endian) {
|
||||
return readIntNative(T, bytes);
|
||||
} else {
|
||||
@ -1011,11 +1011,11 @@ pub fn readInt(comptime T: type, bytes: *const [@divExact(T.bit_count, 8)]u8, en
|
||||
}
|
||||
}
|
||||
|
||||
/// Asserts that bytes.len >= T.bit_count / 8. Reads the integer starting from index 0
|
||||
/// Asserts that bytes.len >= @typeInfo(T).Int.bits / 8. Reads the integer starting from index 0
|
||||
/// and ignores extra bytes.
|
||||
/// The bit count of T must be evenly divisible by 8.
|
||||
pub fn readIntSlice(comptime T: type, bytes: []const u8, endian: builtin.Endian) T {
|
||||
const n = @divExact(T.bit_count, 8);
|
||||
const n = @divExact(@typeInfo(T).Int.bits, 8);
|
||||
assert(bytes.len >= n);
|
||||
return readInt(T, bytes[0..n], endian);
|
||||
}
|
||||
@ -1060,7 +1060,7 @@ test "readIntBig and readIntLittle" {
|
||||
/// accepts any integer bit width.
|
||||
/// This function stores in native endian, which means it is implemented as a simple
/// memory store.
pub fn writeIntNative(comptime T: type, buf: *[(T.bit_count + 7) / 8]u8, value: T) void {
pub fn writeIntNative(comptime T: type, buf: *[(@typeInfo(T).Int.bits + 7) / 8]u8, value: T) void {
@ptrCast(*align(1) T, buf).* = value;
}

@ -1068,7 +1068,7 @@ pub fn writeIntNative(comptime T: type, buf: *[(T.bit_count + 7) / 8]u8, value:
/// This function always succeeds, has defined behavior for all inputs, but
/// the integer bit width must be divisible by 8.
/// This function stores in foreign endian, which means it does a @byteSwap first.
pub fn writeIntForeign(comptime T: type, buf: *[@divExact(T.bit_count, 8)]u8, value: T) void {
pub fn writeIntForeign(comptime T: type, buf: *[@divExact(@typeInfo(T).Int.bits, 8)]u8, value: T) void {
writeIntNative(T, buf, @byteSwap(T, value));
}

@ -1085,7 +1085,7 @@ pub const writeIntBig = switch (builtin.endian) {
/// Writes an integer to memory, storing it in twos-complement.
/// This function always succeeds, has defined behavior for all inputs, but
/// the integer bit width must be divisible by 8.
pub fn writeInt(comptime T: type, buffer: *[@divExact(T.bit_count, 8)]u8, value: T, endian: builtin.Endian) void {
pub fn writeInt(comptime T: type, buffer: *[@divExact(@typeInfo(T).Int.bits, 8)]u8, value: T, endian: builtin.Endian) void {
if (endian == builtin.endian) {
return writeIntNative(T, buffer, value);
} else {

@ -1094,19 +1094,19 @@ pub fn writeInt(comptime T: type, buffer: *[@divExact(T.bit_count, 8)]u8, value:
}

/// Writes a twos-complement little-endian integer to memory.
/// Asserts that buf.len >= T.bit_count / 8.
/// Asserts that buf.len >= @typeInfo(T).Int.bits / 8.
/// The bit count of T must be divisible by 8.
/// Any extra bytes in buffer after writing the integer are set to zero. To
/// avoid the branch to check for extra buffer bytes, use writeIntLittle
/// instead.
pub fn writeIntSliceLittle(comptime T: type, buffer: []u8, value: T) void {
assert(buffer.len >= @divExact(T.bit_count, 8));
assert(buffer.len >= @divExact(@typeInfo(T).Int.bits, 8));

if (T.bit_count == 0)
if (@typeInfo(T).Int.bits == 0)
return set(u8, buffer, 0);

// TODO I want to call writeIntLittle here but comptime eval facilities aren't good enough
const uint = std.meta.Int(false, T.bit_count);
const uint = std.meta.Int(false, @typeInfo(T).Int.bits);
var bits = @truncate(uint, value);
for (buffer) |*b| {
b.* = @truncate(u8, bits);

@ -1115,18 +1115,18 @@ pub fn writeIntSliceLittle(comptime T: type, buffer: []u8, value: T) void {
}

/// Writes a twos-complement big-endian integer to memory.
/// Asserts that buffer.len >= T.bit_count / 8.
/// Asserts that buffer.len >= @typeInfo(T).Int.bits / 8.
/// The bit count of T must be divisible by 8.
/// Any extra bytes in buffer before writing the integer are set to zero. To
/// avoid the branch to check for extra buffer bytes, use writeIntBig instead.
pub fn writeIntSliceBig(comptime T: type, buffer: []u8, value: T) void {
assert(buffer.len >= @divExact(T.bit_count, 8));
assert(buffer.len >= @divExact(@typeInfo(T).Int.bits, 8));

if (T.bit_count == 0)
if (@typeInfo(T).Int.bits == 0)
return set(u8, buffer, 0);

// TODO I want to call writeIntBig here but comptime eval facilities aren't good enough
const uint = std.meta.Int(false, T.bit_count);
const uint = std.meta.Int(false, @typeInfo(T).Int.bits);
var bits = @truncate(uint, value);
var index: usize = buffer.len;
while (index != 0) {

@ -1147,13 +1147,13 @@ pub const writeIntSliceForeign = switch (builtin.endian) {
};

/// Writes a twos-complement integer to memory, with the specified endianness.
/// Asserts that buf.len >= T.bit_count / 8.
/// Asserts that buf.len >= @typeInfo(T).Int.bits / 8.
/// The bit count of T must be evenly divisible by 8.
/// Any extra bytes in buffer not part of the integer are set to zero, with
/// respect to endianness. To avoid the branch to check for extra buffer bytes,
/// use writeInt instead.
pub fn writeIntSlice(comptime T: type, buffer: []u8, value: T, endian: builtin.Endian) void {
comptime assert(T.bit_count % 8 == 0);
comptime assert(@typeInfo(T).Int.bits % 8 == 0);
return switch (endian) {
.Little => writeIntSliceLittle(T, buffer, value),
.Big => writeIntSliceBig(T, buffer, value),
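
The mem.zig hunks above all follow one pattern: the removed `T.bit_count` field becomes `@typeInfo(T).Int.bits`. A rough, self-contained sketch of the new spelling on a compiler from this era (example value invented for illustration):

const std = @import("std");

test "writeInt derives the buffer size from @typeInfo" {
    // 32-bit integer -> 4-byte buffer, computed the same way the new signature does.
    var buf: [@divExact(@typeInfo(u32).Int.bits, 8)]u8 = undefined;
    std.mem.writeInt(u32, &buf, 0xDEADBEEF, .Little);
    std.debug.assert(buf[0] == 0xEF);
}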

@ -167,11 +167,11 @@ pub fn create(self: *Allocator, comptime T: type) Error!*T {
/// `ptr` should be the return value of `create`, or otherwise
/// have the same address and alignment property.
pub fn destroy(self: *Allocator, ptr: anytype) void {
const T = @TypeOf(ptr).Child;
const info = @typeInfo(@TypeOf(ptr)).Pointer;
const T = info.child;
if (@sizeOf(T) == 0) return;
const non_const_ptr = @intToPtr([*]u8, @ptrToInt(ptr));
const ptr_align = @typeInfo(@TypeOf(ptr)).Pointer.alignment;
_ = self.shrinkBytes(non_const_ptr[0..@sizeOf(T)], ptr_align, 0, 0, @returnAddress());
_ = self.shrinkBytes(non_const_ptr[0..@sizeOf(T)], info.alignment, 0, 0, @returnAddress());
}

/// Allocates an array of `n` items of type `T` and sets all the
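
The Allocator hunk shows the pointer half of the change: `destroy` now reads the element type and the alignment from a single `@typeInfo(...).Pointer` value instead of the removed `Child` and `alignment` fields. Roughly (helper name invented for the sketch):

const std = @import("std");

fn ElemType(comptime P: type) type {
    // one @typeInfo call yields both pieces the old fields used to provide
    const info = @typeInfo(P).Pointer;
    return info.child;
}

comptime {
    std.debug.assert(ElemType(*u8) == u8);
    std.debug.assert(@typeInfo(*u8).Pointer.alignment == @alignOf(u8));
}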

@ -4526,7 +4526,7 @@ pub fn res_mkquery(
// Make a reasonably unpredictable id
var ts: timespec = undefined;
clock_gettime(CLOCK_REALTIME, &ts) catch {};
const UInt = std.meta.Int(false, @TypeOf(ts.tv_nsec).bit_count);
const UInt = std.meta.Int(false, std.meta.bitCount(@TypeOf(ts.tv_nsec)));
const unsec = @bitCast(UInt, ts.tv_nsec);
const id = @truncate(u32, unsec + unsec / 65536);
q[0] = @truncate(u8, id / 256);
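
Where the width of a platform-dependent type is wanted, as with `tv_nsec` above, the patch reaches for `std.meta.bitCount`, which wraps the same `@typeInfo` lookup. A minimal sketch, assuming this tree's `std.meta`:

const std = @import("std");

comptime {
    std.debug.assert(std.meta.bitCount(u64) == @typeInfo(u64).Int.bits);
    std.debug.assert(std.meta.Int(false, std.meta.bitCount(i64)) == u64);
}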

@ -846,7 +846,7 @@ pub const SIG_ERR = @intToPtr(?Sigaction.sigaction_fn, maxInt(usize));
pub const SIG_DFL = @intToPtr(?Sigaction.sigaction_fn, 0);
pub const SIG_IGN = @intToPtr(?Sigaction.sigaction_fn, 1);

pub const empty_sigset = [_]u32{0} ** sigset_t.len;
pub const empty_sigset = [_]u32{0} ** @typeInfo(sigset_t).Array.len;

pub const signalfd_siginfo = extern struct {
signo: u32,

@ -829,17 +829,19 @@ pub fn sigaction(sig: u6, noalias act: *const Sigaction, noalias oact: ?*Sigacti
return 0;
}

const usize_bits = @typeInfo(usize).Int.bits;

pub fn sigaddset(set: *sigset_t, sig: u6) void {
const s = sig - 1;
// shift in musl: s&8*sizeof *set->__bits-1
const shift = @intCast(u5, s & (usize.bit_count - 1));
const shift = @intCast(u5, s & (usize_bits - 1));
const val = @intCast(u32, 1) << shift;
(set.*)[@intCast(usize, s) / usize.bit_count] |= val;
(set.*)[@intCast(usize, s) / usize_bits] |= val;
}

pub fn sigismember(set: *const sigset_t, sig: u6) bool {
const s = sig - 1;
return ((set.*)[@intCast(usize, s) / usize.bit_count] & (@intCast(usize, 1) << (s & (usize.bit_count - 1)))) != 0;
return ((set.*)[@intCast(usize, s) / usize_bits] & (@intCast(usize, 1) << (s & (usize_bits - 1)))) != 0;
}

pub fn getsockname(fd: i32, noalias addr: *sockaddr, noalias len: *socklen_t) usize {

@ -12,7 +12,7 @@ pub const SOCKET_ERROR = -1;
pub const WSADESCRIPTION_LEN = 256;
pub const WSASYS_STATUS_LEN = 128;

pub const WSADATA = if (usize.bit_count == u64.bit_count)
pub const WSADATA = if (@sizeOf(usize) == @sizeOf(u64))
extern struct {
wVersion: WORD,
wHighVersion: WORD,

@ -636,7 +636,7 @@ const MsfStream = struct {
blocks: []u32 = undefined,
block_size: u32 = undefined,

pub const Error = @TypeOf(read).ReturnType.ErrorSet;
pub const Error = @typeInfo(@typeInfo(@TypeOf(read)).Fn.return_type.?).ErrorUnion.error_set;

fn init(block_size: u32, file: File, blocks: []u32) MsfStream {
const stream = MsfStream{
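
The `Error` alias in the MsfStream hunk is the most involved rewrite in the patch: the removed `ReturnType` and `ErrorSet` shorthands become a chain of `@typeInfo` lookups. Spelled out on a stand-in function (names invented for the sketch):

const std = @import("std");

const ReadError = error{EndOfStream};

fn read() ReadError!usize {
    return 0;
}

// Equivalent of the old `@TypeOf(read).ReturnType.ErrorSet`:
const ReturnType = @typeInfo(@TypeOf(read)).Fn.return_type.?; // optional: generic fns have no resolved return type
const ErrorSet = @typeInfo(ReturnType).ErrorUnion.error_set;

comptime {
    std.debug.assert(ReturnType == ReadError!usize);
    std.debug.assert(ErrorSet == ReadError);
}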

@ -51,8 +51,9 @@ pub const Random = struct {
/// Returns a random int `i` such that `0 <= i <= maxInt(T)`.
/// `i` is evenly distributed.
pub fn int(r: *Random, comptime T: type) T {
const UnsignedT = std.meta.Int(false, T.bit_count);
const ByteAlignedT = std.meta.Int(false, @divTrunc(T.bit_count + 7, 8) * 8);
const bits = @typeInfo(T).Int.bits;
const UnsignedT = std.meta.Int(false, bits);
const ByteAlignedT = std.meta.Int(false, @divTrunc(bits + 7, 8) * 8);

var rand_bytes: [@sizeOf(ByteAlignedT)]u8 = undefined;
r.bytes(rand_bytes[0..]);

@ -68,10 +69,11 @@ pub const Random = struct {
/// Constant-time implementation off `uintLessThan`.
/// The results of this function may be biased.
pub fn uintLessThanBiased(r: *Random, comptime T: type, less_than: T) T {
comptime assert(T.is_signed == false);
comptime assert(T.bit_count <= 64); // TODO: workaround: LLVM ERROR: Unsupported library call operation!
comptime assert(@typeInfo(T).Int.is_signed == false);
const bits = @typeInfo(T).Int.bits;
comptime assert(bits <= 64); // TODO: workaround: LLVM ERROR: Unsupported library call operation!
assert(0 < less_than);
if (T.bit_count <= 32) {
if (bits <= 32) {
return @intCast(T, limitRangeBiased(u32, r.int(u32), less_than));
} else {
return @intCast(T, limitRangeBiased(u64, r.int(u64), less_than));

@ -87,13 +89,15 @@ pub const Random = struct {
/// this function is guaranteed to return.
/// If you need deterministic runtime bounds, use `uintLessThanBiased`.
pub fn uintLessThan(r: *Random, comptime T: type, less_than: T) T {
comptime assert(T.is_signed == false);
comptime assert(T.bit_count <= 64); // TODO: workaround: LLVM ERROR: Unsupported library call operation!
comptime assert(@typeInfo(T).Int.is_signed == false);
const bits = @typeInfo(T).Int.bits;
comptime assert(bits <= 64); // TODO: workaround: LLVM ERROR: Unsupported library call operation!
assert(0 < less_than);
// Small is typically u32
const Small = std.meta.Int(false, @divTrunc(T.bit_count + 31, 32) * 32);
const small_bits = @divTrunc(bits + 31, 32) * 32;
const Small = std.meta.Int(false, small_bits);
// Large is typically u64
const Large = std.meta.Int(false, Small.bit_count * 2);
const Large = std.meta.Int(false, small_bits * 2);

// adapted from:
// http://www.pcg-random.org/posts/bounded-rands.html

@ -105,7 +109,7 @@ pub const Random = struct {
// TODO: workaround for https://github.com/ziglang/zig/issues/1770
// should be:
// var t: Small = -%less_than;
var t: Small = @bitCast(Small, -%@bitCast(std.meta.Int(true, Small.bit_count), @as(Small, less_than)));
var t: Small = @bitCast(Small, -%@bitCast(std.meta.Int(true, small_bits), @as(Small, less_than)));

if (t >= less_than) {
t -= less_than;

@ -119,13 +123,13 @@ pub const Random = struct {
l = @truncate(Small, m);
}
}
return @intCast(T, m >> Small.bit_count);
return @intCast(T, m >> small_bits);
}

/// Constant-time implementation off `uintAtMost`.
/// The results of this function may be biased.
pub fn uintAtMostBiased(r: *Random, comptime T: type, at_most: T) T {
assert(T.is_signed == false);
assert(@typeInfo(T).Int.is_signed == false);
if (at_most == maxInt(T)) {
// have the full range
return r.int(T);

@ -137,7 +141,7 @@ pub const Random = struct {
/// See `uintLessThan`, which this function uses in most cases,
/// for commentary on the runtime of this function.
pub fn uintAtMost(r: *Random, comptime T: type, at_most: T) T {
assert(T.is_signed == false);
assert(@typeInfo(T).Int.is_signed == false);
if (at_most == maxInt(T)) {
// have the full range
return r.int(T);

@ -149,9 +153,10 @@ pub const Random = struct {
/// The results of this function may be biased.
pub fn intRangeLessThanBiased(r: *Random, comptime T: type, at_least: T, less_than: T) T {
assert(at_least < less_than);
if (T.is_signed) {
const info = @typeInfo(T).Int;
if (info.is_signed) {
// Two's complement makes this math pretty easy.
const UnsignedT = std.meta.Int(false, T.bit_count);
const UnsignedT = std.meta.Int(false, info.bits);
const lo = @bitCast(UnsignedT, at_least);
const hi = @bitCast(UnsignedT, less_than);
const result = lo +% r.uintLessThanBiased(UnsignedT, hi -% lo);

@ -167,9 +172,10 @@ pub const Random = struct {
/// for commentary on the runtime of this function.
pub fn intRangeLessThan(r: *Random, comptime T: type, at_least: T, less_than: T) T {
assert(at_least < less_than);
if (T.is_signed) {
const info = @typeInfo(T).Int;
if (info.is_signed) {
// Two's complement makes this math pretty easy.
const UnsignedT = std.meta.Int(false, T.bit_count);
const UnsignedT = std.meta.Int(false, info.bits);
const lo = @bitCast(UnsignedT, at_least);
const hi = @bitCast(UnsignedT, less_than);
const result = lo +% r.uintLessThan(UnsignedT, hi -% lo);

@ -184,9 +190,10 @@ pub const Random = struct {
/// The results of this function may be biased.
pub fn intRangeAtMostBiased(r: *Random, comptime T: type, at_least: T, at_most: T) T {
assert(at_least <= at_most);
if (T.is_signed) {
const info = @typeInfo(T).Int;
if (info.is_signed) {
// Two's complement makes this math pretty easy.
const UnsignedT = std.meta.Int(false, T.bit_count);
const UnsignedT = std.meta.Int(false, info.bits);
const lo = @bitCast(UnsignedT, at_least);
const hi = @bitCast(UnsignedT, at_most);
const result = lo +% r.uintAtMostBiased(UnsignedT, hi -% lo);

@ -202,9 +209,10 @@ pub const Random = struct {
/// for commentary on the runtime of this function.
pub fn intRangeAtMost(r: *Random, comptime T: type, at_least: T, at_most: T) T {
assert(at_least <= at_most);
if (T.is_signed) {
const info = @typeInfo(T).Int;
if (info.is_signed) {
// Two's complement makes this math pretty easy.
const UnsignedT = std.meta.Int(false, T.bit_count);
const UnsignedT = std.meta.Int(false, info.bits);
const lo = @bitCast(UnsignedT, at_least);
const hi = @bitCast(UnsignedT, at_most);
const result = lo +% r.uintAtMost(UnsignedT, hi -% lo);

@ -280,14 +288,15 @@ pub const Random = struct {
/// into an integer 0 <= result < less_than.
/// This function introduces a minor bias.
pub fn limitRangeBiased(comptime T: type, random_int: T, less_than: T) T {
comptime assert(T.is_signed == false);
const T2 = std.meta.Int(false, T.bit_count * 2);
comptime assert(@typeInfo(T).Int.is_signed == false);
const bits = @typeInfo(T).Int.bits;
const T2 = std.meta.Int(false, bits * 2);

// adapted from:
// http://www.pcg-random.org/posts/bounded-rands.html
// "Integer Multiplication (Biased)"
var m: T2 = @as(T2, random_int) * @as(T2, less_than);
return @intCast(T, m >> T.bit_count);
return @intCast(T, m >> bits);
}

const SequentialPrng = struct {
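
Throughout `std.rand` the rewrite is mechanical: fetch `@typeInfo(T).Int` once and use its `bits` and `is_signed` fields where `T.bit_count` and `T.is_signed` used to appear. Condensed into one helper (name invented for the sketch):

const std = @import("std");

/// Unsigned type with the same width as a signed `T`, the shape that
/// `std.meta.Int(false, T.bit_count)` used to spell.
fn UnsignedOf(comptime T: type) type {
    const info = @typeInfo(T).Int;
    std.debug.assert(info.is_signed);
    return std.meta.Int(false, info.bits);
}

comptime {
    std.debug.assert(UnsignedOf(i32) == u32);
    std.debug.assert(UnsignedOf(i64) == u64);
}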

@ -133,7 +133,7 @@ pub fn main() !void {
}

fn runBuild(builder: *Builder) anyerror!void {
switch (@typeInfo(@TypeOf(root.build).ReturnType)) {
switch (@typeInfo(@typeInfo(@TypeOf(root.build)).Fn.return_type.?)) {
.Void => root.build(builder),
.ErrorUnion => try root.build(builder),
else => @compileError("expected return type of build to be 'void' or '!void'"),

@ -516,11 +516,12 @@ export fn roundf(a: f32) f32 {
fn generic_fmod(comptime T: type, x: T, y: T) T {
@setRuntimeSafety(false);

const uint = std.meta.Int(false, T.bit_count);
const bits = @typeInfo(T).Float.bits;
const uint = std.meta.Int(false, bits);
const log2uint = math.Log2Int(uint);
const digits = if (T == f32) 23 else 52;
const exp_bits = if (T == f32) 9 else 12;
const bits_minus_1 = T.bit_count - 1;
const bits_minus_1 = bits - 1;
const mask = if (T == f32) 0xff else 0x7ff;
var ux = @bitCast(uint, x);
var uy = @bitCast(uint, y);

@ -59,23 +59,25 @@ pub fn __aeabi_dsub(a: f64, b: f64) callconv(.AAPCS) f64 {
}

// TODO: restore inline keyword, see: https://github.com/ziglang/zig/issues/2154
fn normalize(comptime T: type, significand: *std.meta.Int(false, T.bit_count)) i32 {
const Z = std.meta.Int(false, T.bit_count);
const S = std.meta.Int(false, T.bit_count - @clz(Z, @as(Z, T.bit_count) - 1));
fn normalize(comptime T: type, significand: *std.meta.Int(false, @typeInfo(T).Float.bits)) i32 {
const bits = @typeInfo(T).Float.bits;
const Z = std.meta.Int(false, bits);
const S = std.meta.Int(false, bits - @clz(Z, @as(Z, bits) - 1));
const significandBits = std.math.floatMantissaBits(T);
const implicitBit = @as(Z, 1) << significandBits;

const shift = @clz(std.meta.Int(false, T.bit_count), significand.*) - @clz(Z, implicitBit);
const shift = @clz(std.meta.Int(false, bits), significand.*) - @clz(Z, implicitBit);
significand.* <<= @intCast(S, shift);
return 1 - shift;
}

// TODO: restore inline keyword, see: https://github.com/ziglang/zig/issues/2154
fn addXf3(comptime T: type, a: T, b: T) T {
const Z = std.meta.Int(false, T.bit_count);
const S = std.meta.Int(false, T.bit_count - @clz(Z, @as(Z, T.bit_count) - 1));
const bits = @typeInfo(T).Float.bits;
const Z = std.meta.Int(false, bits);
const S = std.meta.Int(false, bits - @clz(Z, @as(Z, bits) - 1));

const typeWidth = T.bit_count;
const typeWidth = bits;
const significandBits = std.math.floatMantissaBits(T);
const exponentBits = std.math.floatExponentBits(T);

@ -187,7 +189,7 @@ fn addXf3(comptime T: type, a: T, b: T) T {
// If partial cancellation occured, we need to left-shift the result
// and adjust the exponent:
if (aSignificand < implicitBit << 3) {
const shift = @intCast(i32, @clz(Z, aSignificand)) - @intCast(i32, @clz(std.meta.Int(false, T.bit_count), implicitBit << 3));
const shift = @intCast(i32, @clz(Z, aSignificand)) - @intCast(i32, @clz(std.meta.Int(false, bits), implicitBit << 3));
aSignificand <<= @intCast(S, shift);
aExponent -= shift;
}
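
The float routines in compiler_rt follow the same shape as the integer ones, except the width now comes from `@typeInfo(T).Float.bits`. The representation type these files keep rebuilding looks roughly like this (helper name invented for the sketch):

const std = @import("std");

fn ReprInt(comptime T: type) type {
    // unsigned integer with the same bit width as the float type T
    return std.meta.Int(false, @typeInfo(T).Float.bits);
}

comptime {
    std.debug.assert(ReprInt(f32) == u32);
    std.debug.assert(ReprInt(f64) == u64);
    std.debug.assert(@typeInfo(f128).Float.bits == 128);
}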

@ -7,8 +7,8 @@ const builtin = @import("builtin");

pub fn _alldiv(a: i64, b: i64) callconv(.Stdcall) i64 {
@setRuntimeSafety(builtin.is_test);
const s_a = a >> (i64.bit_count - 1);
const s_b = b >> (i64.bit_count - 1);
const s_a = a >> (64 - 1);
const s_b = b >> (64 - 1);

const an = (a ^ s_a) -% s_a;
const bn = (b ^ s_b) -% s_b;

@ -7,8 +7,8 @@ const builtin = @import("builtin");

pub fn _allrem(a: i64, b: i64) callconv(.Stdcall) i64 {
@setRuntimeSafety(builtin.is_test);
const s_a = a >> (i64.bit_count - 1);
const s_b = b >> (i64.bit_count - 1);
const s_a = a >> (64 - 1);
const s_b = b >> (64 - 1);

const an = (a ^ s_a) -% s_a;
const bn = (b ^ s_b) -% s_b;

@ -27,8 +27,9 @@ const GE = extern enum(i32) {
pub fn cmp(comptime T: type, comptime RT: type, a: T, b: T) RT {
@setRuntimeSafety(builtin.is_test);

const srep_t = std.meta.Int(true, T.bit_count);
const rep_t = std.meta.Int(false, T.bit_count);
const bits = @typeInfo(T).Float.bits;
const srep_t = std.meta.Int(true, bits);
const rep_t = std.meta.Int(false, bits);

const significandBits = std.math.floatMantissaBits(T);
const exponentBits = std.math.floatExponentBits(T);

@ -73,7 +74,7 @@ pub fn cmp(comptime T: type, comptime RT: type, a: T, b: T) RT {
pub fn unordcmp(comptime T: type, a: T, b: T) i32 {
@setRuntimeSafety(builtin.is_test);

const rep_t = std.meta.Int(false, T.bit_count);
const rep_t = std.meta.Int(false, @typeInfo(T).Float.bits);

const significandBits = std.math.floatMantissaBits(T);
const exponentBits = std.math.floatExponentBits(T);

@ -12,10 +12,9 @@ const builtin = @import("builtin");

pub fn __divdf3(a: f64, b: f64) callconv(.C) f64 {
@setRuntimeSafety(builtin.is_test);
const Z = std.meta.Int(false, f64.bit_count);
const SignedZ = std.meta.Int(true, f64.bit_count);
const Z = std.meta.Int(false, 64);
const SignedZ = std.meta.Int(true, 64);

const typeWidth = f64.bit_count;
const significandBits = std.math.floatMantissaBits(f64);
const exponentBits = std.math.floatExponentBits(f64);

@ -317,9 +316,9 @@ pub fn wideMultiply(comptime Z: type, a: Z, b: Z, hi: *Z, lo: *Z) void {
}
}

pub fn normalize(comptime T: type, significand: *std.meta.Int(false, T.bit_count)) i32 {
pub fn normalize(comptime T: type, significand: *std.meta.Int(false, @typeInfo(T).Float.bits)) i32 {
@setRuntimeSafety(builtin.is_test);
const Z = std.meta.Int(false, T.bit_count);
const Z = std.meta.Int(false, @typeInfo(T).Float.bits);
const significandBits = std.math.floatMantissaBits(T);
const implicitBit = @as(Z, 1) << significandBits;

@ -12,9 +12,8 @@ const builtin = @import("builtin");

pub fn __divsf3(a: f32, b: f32) callconv(.C) f32 {
@setRuntimeSafety(builtin.is_test);
const Z = std.meta.Int(false, f32.bit_count);
const Z = std.meta.Int(false, 32);

const typeWidth = f32.bit_count;
const significandBits = std.math.floatMantissaBits(f32);
const exponentBits = std.math.floatExponentBits(f32);

@ -190,9 +189,9 @@ pub fn __divsf3(a: f32, b: f32) callconv(.C) f32 {
}
}

fn normalize(comptime T: type, significand: *std.meta.Int(false, T.bit_count)) i32 {
fn normalize(comptime T: type, significand: *std.meta.Int(false, @typeInfo(T).Float.bits)) i32 {
@setRuntimeSafety(builtin.is_test);
const Z = std.meta.Int(false, T.bit_count);
const Z = std.meta.Int(false, @typeInfo(T).Float.bits);
const significandBits = std.math.floatMantissaBits(T);
const implicitBit = @as(Z, 1) << significandBits;

@ -11,10 +11,9 @@ const wideMultiply = @import("divdf3.zig").wideMultiply;

pub fn __divtf3(a: f128, b: f128) callconv(.C) f128 {
@setRuntimeSafety(builtin.is_test);
const Z = std.meta.Int(false, f128.bit_count);
const SignedZ = std.meta.Int(true, f128.bit_count);
const Z = std.meta.Int(false, 128);
const SignedZ = std.meta.Int(true, 128);

const typeWidth = f128.bit_count;
const significandBits = std.math.floatMantissaBits(f128);
const exponentBits = std.math.floatExponentBits(f128);

@ -9,8 +9,8 @@ const builtin = @import("builtin");
pub fn __divti3(a: i128, b: i128) callconv(.C) i128 {
@setRuntimeSafety(builtin.is_test);

const s_a = a >> (i128.bit_count - 1);
const s_b = b >> (i128.bit_count - 1);
const s_a = a >> (128 - 1);
const s_b = b >> (128 - 1);

const an = (a ^ s_a) -% s_a;
const bn = (b ^ s_b) -% s_b;

@ -28,7 +28,7 @@ pub fn fixint(comptime fp_t: type, comptime fixint_t: type, a: fp_t) fixint_t {
else => unreachable,
};

const typeWidth = rep_t.bit_count;
const typeWidth = @typeInfo(rep_t).Int.bits;
const exponentBits = (typeWidth - significandBits - 1);
const signBit = (@as(rep_t, 1) << (significandBits + exponentBits));
const maxExponent = ((1 << exponentBits) - 1);

@ -50,12 +50,13 @@ pub fn fixint(comptime fp_t: type, comptime fixint_t: type, a: fp_t) fixint_t {
if (exponent < 0) return 0;

// The unsigned result needs to be large enough to handle an fixint_t or rep_t
const fixuint_t = std.meta.Int(false, fixint_t.bit_count);
const UintResultType = if (fixint_t.bit_count > rep_t.bit_count) fixuint_t else rep_t;
const fixint_bits = @typeInfo(fixint_t).Int.bits;
const fixuint_t = std.meta.Int(false, fixint_bits);
const UintResultType = if (fixint_bits > typeWidth) fixuint_t else rep_t;
var uint_result: UintResultType = undefined;

// If the value is too large for the integer type, saturate.
if (@intCast(usize, exponent) >= fixint_t.bit_count) {
if (@intCast(usize, exponent) >= fixint_bits) {
return if (negative) @as(fixint_t, minInt(fixint_t)) else @as(fixint_t, maxInt(fixint_t));
}

@ -15,14 +15,14 @@ pub fn fixuint(comptime fp_t: type, comptime fixuint_t: type, a: fp_t) fixuint_t
f128 => u128,
else => unreachable,
};
const srep_t = @import("std").meta.Int(true, rep_t.bit_count);
const typeWidth = @typeInfo(rep_t).Int.bits;
const srep_t = @import("std").meta.Int(true, typeWidth);
const significandBits = switch (fp_t) {
f32 => 23,
f64 => 52,
f128 => 112,
else => unreachable,
};
const typeWidth = rep_t.bit_count;
const exponentBits = (typeWidth - significandBits - 1);
const signBit = (@as(rep_t, 1) << (significandBits + exponentBits));
const maxExponent = ((1 << exponentBits) - 1);

@ -44,7 +44,7 @@ pub fn fixuint(comptime fp_t: type, comptime fixuint_t: type, a: fp_t) fixuint_t
if (sign == -1 or exponent < 0) return 0;

// If the value is too large for the integer type, saturate.
if (@intCast(c_uint, exponent) >= fixuint_t.bit_count) return ~@as(fixuint_t, 0);
if (@intCast(c_uint, exponent) >= @typeInfo(fixuint_t).Int.bits) return ~@as(fixuint_t, 0);

// If 0 <= exponent < significandBits, right shift to get the result.
// Otherwise, shift left.

@ -12,15 +12,16 @@ const FLT_MANT_DIG = 24;
fn __floatXisf(comptime T: type, arg: T) f32 {
@setRuntimeSafety(builtin.is_test);

const Z = std.meta.Int(false, T.bit_count);
const S = std.meta.Int(false, T.bit_count - @clz(Z, @as(Z, T.bit_count) - 1));
const bits = @typeInfo(T).Int.bits;
const Z = std.meta.Int(false, bits);
const S = std.meta.Int(false, bits - @clz(Z, @as(Z, bits) - 1));

if (arg == 0) {
return @as(f32, 0.0);
}

var ai = arg;
const N: u32 = T.bit_count;
const N: u32 = bits;
const si = ai >> @intCast(S, (N - 1));
ai = ((ai ^ si) -% si);
var a = @bitCast(Z, ai);

@ -66,7 +67,7 @@ fn __floatXisf(comptime T: type, arg: T) f32 {
// a is now rounded to FLT_MANT_DIG bits
}

const s = @bitCast(Z, arg) >> (T.bit_count - 32);
const s = @bitCast(Z, arg) >> (@typeInfo(T).Int.bits - 32);
const r = (@intCast(u32, s) & 0x80000000) | // sign
(@intCast(u32, (e + 127)) << 23) | // exponent
(@truncate(u32, a) & 0x007fffff); // mantissa-high

@ -10,8 +10,9 @@ const maxInt = std.math.maxInt;
fn floatsiXf(comptime T: type, a: i32) T {
@setRuntimeSafety(builtin.is_test);

const Z = std.meta.Int(false, T.bit_count);
const S = std.meta.Int(false, T.bit_count - @clz(Z, @as(Z, T.bit_count) - 1));
const bits = @typeInfo(T).Float.bits;
const Z = std.meta.Int(false, bits);
const S = std.meta.Int(false, bits - @clz(Z, @as(Z, bits) - 1));

if (a == 0) {
return @as(T, 0.0);

@ -22,7 +23,7 @@ fn floatsiXf(comptime T: type, a: i32) T {
const exponentBias = ((1 << exponentBits - 1) - 1);

const implicitBit = @as(Z, 1) << significandBits;
const signBit = @as(Z, 1 << Z.bit_count - 1);
const signBit = @as(Z, 1 << bits - 1);

const sign = a >> 31;
// Take absolute value of a via abs(x) = (x^(x >> 31)) - (x >> 31).
@ -15,7 +15,7 @@ pub fn __floatundisf(arg: u64) callconv(.C) f32 {
if (arg == 0) return 0;

var a = arg;
const N: usize = @TypeOf(a).bit_count;
const N: usize = @typeInfo(@TypeOf(a)).Int.bits;
// Number of significant digits
const sd = N - @clz(u64, a);
// 8 exponent

@ -19,7 +19,7 @@ pub fn __floatunditf(a: u64) callconv(.C) f128 {
const exponent_bias = (1 << (exponent_bits - 1)) - 1;
const implicit_bit = 1 << mantissa_bits;

const exp: u128 = (u64.bit_count - 1) - @clz(u64, a);
const exp: u128 = (64 - 1) - @clz(u64, a);
const shift: u7 = mantissa_bits - @intCast(u7, exp);

var result: u128 = (@intCast(u128, a) << shift) ^ implicit_bit;

@ -19,7 +19,7 @@ pub fn __floatunsitf(a: u64) callconv(.C) f128 {
const exponent_bias = (1 << (exponent_bits - 1)) - 1;
const implicit_bit = 1 << mantissa_bits;

const exp = (u64.bit_count - 1) - @clz(u64, a);
const exp = (64 - 1) - @clz(u64, a);
const shift = mantissa_bits - @intCast(u7, exp);

// TODO(#1148): @bitCast alignment error

@ -219,7 +219,7 @@ fn test_one_divsi3(a: i32, b: i32, expected_q: i32) void {
pub fn __udivsi3(n: u32, d: u32) callconv(.C) u32 {
@setRuntimeSafety(builtin.is_test);

const n_uword_bits: c_uint = u32.bit_count;
const n_uword_bits: c_uint = 32;
// special cases
if (d == 0) return 0; // ?!
if (n == 0) return 0;

@ -14,8 +14,8 @@ const compiler_rt = @import("../compiler_rt.zig");
pub fn __modti3(a: i128, b: i128) callconv(.C) i128 {
@setRuntimeSafety(builtin.is_test);

const s_a = a >> (i128.bit_count - 1); // s = a < 0 ? -1 : 0
const s_b = b >> (i128.bit_count - 1); // s = b < 0 ? -1 : 0
const s_a = a >> (128 - 1); // s = a < 0 ? -1 : 0
const s_b = b >> (128 - 1); // s = b < 0 ? -1 : 0

const an = (a ^ s_a) -% s_a; // negate if s == -1
const bn = (b ^ s_b) -% s_b; // negate if s == -1

@ -33,9 +33,9 @@ pub fn __aeabi_dmul(a: f64, b: f64) callconv(.C) f64 {

fn mulXf3(comptime T: type, a: T, b: T) T {
@setRuntimeSafety(builtin.is_test);
const Z = std.meta.Int(false, T.bit_count);
const typeWidth = @typeInfo(T).Float.bits;
const Z = std.meta.Int(false, typeWidth);

const typeWidth = T.bit_count;
const significandBits = std.math.floatMantissaBits(T);
const exponentBits = std.math.floatExponentBits(T);

@ -269,9 +269,9 @@ fn wideMultiply(comptime Z: type, a: Z, b: Z, hi: *Z, lo: *Z) void {
}
}

fn normalize(comptime T: type, significand: *std.meta.Int(false, T.bit_count)) i32 {
fn normalize(comptime T: type, significand: *std.meta.Int(false, @typeInfo(T).Float.bits)) i32 {
@setRuntimeSafety(builtin.is_test);
const Z = std.meta.Int(false, T.bit_count);
const Z = std.meta.Int(false, @typeInfo(T).Float.bits);
const significandBits = std.math.floatMantissaBits(T);
const implicitBit = @as(Z, 1) << significandBits;

@ -282,7 +282,7 @@ fn normalize(comptime T: type, significand: *std.meta.Int(false, T.bit_count)) i

fn wideRightShiftWithSticky(comptime Z: type, hi: *Z, lo: *Z, count: u32) void {
@setRuntimeSafety(builtin.is_test);
const typeWidth = Z.bit_count;
const typeWidth = @typeInfo(Z).Int.bits;
const S = std.math.Log2Int(Z);
if (count < typeWidth) {
const sticky = @truncate(u8, lo.* << @intCast(S, typeWidth -% count));

@ -11,7 +11,7 @@ const minInt = std.math.minInt;
pub fn __mulodi4(a: i64, b: i64, overflow: *c_int) callconv(.C) i64 {
@setRuntimeSafety(builtin.is_test);

const min = @bitCast(i64, @as(u64, 1 << (i64.bit_count - 1)));
const min = @bitCast(i64, @as(u64, 1 << (64 - 1)));
const max = ~min;

overflow.* = 0;

@ -9,7 +9,7 @@ const compiler_rt = @import("../compiler_rt.zig");
pub fn __muloti4(a: i128, b: i128, overflow: *c_int) callconv(.C) i128 {
@setRuntimeSafety(builtin.is_test);

const min = @bitCast(i128, @as(u128, 1 << (i128.bit_count - 1)));
const min = @bitCast(i128, @as(u128, 1 << (128 - 1)));
const max = ~min;
overflow.* = 0;

@ -27,9 +27,9 @@ pub fn __muloti4(a: i128, b: i128, overflow: *c_int) callconv(.C) i128 {
return r;
}

const sa = a >> (i128.bit_count - 1);
const sa = a >> (128 - 1);
const abs_a = (a ^ sa) -% sa;
const sb = b >> (i128.bit_count - 1);
const sb = b >> (128 - 1);
const abs_b = (b ^ sb) -% sb;

if (abs_a < 2 or abs_b < 2) {

@ -24,9 +24,8 @@ pub fn __aeabi_dneg(arg: f64) callconv(.AAPCS) f64 {
}

fn negXf2(comptime T: type, a: T) T {
const Z = std.meta.Int(false, T.bit_count);
const Z = std.meta.Int(false, @typeInfo(T).Float.bits);

const typeWidth = T.bit_count;
const significandBits = std.math.floatMantissaBits(T);
const exponentBits = std.math.floatExponentBits(T);

@ -9,8 +9,9 @@ const Log2Int = std.math.Log2Int;

fn Dwords(comptime T: type, comptime signed_half: bool) type {
return extern union {
pub const HalfTU = std.meta.Int(false, @divExact(T.bit_count, 2));
pub const HalfTS = std.meta.Int(true, @divExact(T.bit_count, 2));
pub const bits = @divExact(@typeInfo(T).Int.bits, 2);
pub const HalfTU = std.meta.Int(false, bits);
pub const HalfTS = std.meta.Int(true, bits);
pub const HalfT = if (signed_half) HalfTS else HalfTU;

all: T,

@ -30,15 +31,15 @@ pub fn ashlXi3(comptime T: type, a: T, b: i32) T {
const input = dwords{ .all = a };
var output: dwords = undefined;

if (b >= dwords.HalfT.bit_count) {
if (b >= dwords.bits) {
output.s.low = 0;
output.s.high = input.s.low << @intCast(S, b - dwords.HalfT.bit_count);
output.s.high = input.s.low << @intCast(S, b - dwords.bits);
} else if (b == 0) {
return a;
} else {
output.s.low = input.s.low << @intCast(S, b);
output.s.high = input.s.high << @intCast(S, b);
output.s.high |= input.s.low >> @intCast(S, dwords.HalfT.bit_count - b);
output.s.high |= input.s.low >> @intCast(S, dwords.bits - b);
}

return output.all;

@ -53,14 +54,14 @@ pub fn ashrXi3(comptime T: type, a: T, b: i32) T {
const input = dwords{ .all = a };
var output: dwords = undefined;

if (b >= dwords.HalfT.bit_count) {
output.s.high = input.s.high >> (dwords.HalfT.bit_count - 1);
output.s.low = input.s.high >> @intCast(S, b - dwords.HalfT.bit_count);
if (b >= dwords.bits) {
output.s.high = input.s.high >> (dwords.bits - 1);
output.s.low = input.s.high >> @intCast(S, b - dwords.bits);
} else if (b == 0) {
return a;
} else {
output.s.high = input.s.high >> @intCast(S, b);
output.s.low = input.s.high << @intCast(S, dwords.HalfT.bit_count - b);
output.s.low = input.s.high << @intCast(S, dwords.bits - b);
// Avoid sign-extension here
output.s.low |= @bitCast(
dwords.HalfT,

@ -80,14 +81,14 @@ pub fn lshrXi3(comptime T: type, a: T, b: i32) T {
const input = dwords{ .all = a };
var output: dwords = undefined;

if (b >= dwords.HalfT.bit_count) {
if (b >= dwords.bits) {
output.s.high = 0;
output.s.low = input.s.high >> @intCast(S, b - dwords.HalfT.bit_count);
output.s.low = input.s.high >> @intCast(S, b - dwords.bits);
} else if (b == 0) {
return a;
} else {
output.s.high = input.s.high >> @intCast(S, b);
output.s.low = input.s.high << @intCast(S, dwords.HalfT.bit_count - b);
output.s.low = input.s.high << @intCast(S, dwords.bits - b);
output.s.low |= input.s.low >> @intCast(S, b);
}

@ -50,7 +50,7 @@ fn truncXfYf2(comptime dst_t: type, comptime src_t: type, a: src_t) dst_t {

// Various constants whose values follow from the type parameters.
// Any reasonable optimizer will fold and propagate all of these.
const srcBits = src_t.bit_count;
const srcBits = @typeInfo(src_t).Float.bits;
const srcExpBits = srcBits - srcSigBits - 1;
const srcInfExp = (1 << srcExpBits) - 1;
const srcExpBias = srcInfExp >> 1;

@ -65,7 +65,7 @@ fn truncXfYf2(comptime dst_t: type, comptime src_t: type, a: src_t) dst_t {
const srcQNaN = 1 << (srcSigBits - 1);
const srcNaNCode = srcQNaN - 1;

const dstBits = dst_t.bit_count;
const dstBits = @typeInfo(dst_t).Float.bits;
const dstExpBits = dstBits - dstSigBits - 1;
const dstInfExp = (1 << dstExpBits) - 1;
const dstExpBias = dstInfExp >> 1;

@ -15,8 +15,10 @@ const high = 1 - low;
pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem: ?*DoubleInt) DoubleInt {
@setRuntimeSafety(is_test);

const SingleInt = @import("std").meta.Int(false, @divExact(DoubleInt.bit_count, 2));
const SignedDoubleInt = @import("std").meta.Int(true, DoubleInt.bit_count);
const double_int_bits = @typeInfo(DoubleInt).Int.bits;
const single_int_bits = @divExact(double_int_bits, 2);
const SingleInt = @import("std").meta.Int(false, single_int_bits);
const SignedDoubleInt = @import("std").meta.Int(true, double_int_bits);
const Log2SingleInt = @import("std").math.Log2Int(SingleInt);

const n = @ptrCast(*const [2]SingleInt, &a).*; // TODO issue #421

@ -82,21 +84,21 @@ pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem:
// ---
// K 0
sr = @bitCast(c_uint, @as(c_int, @clz(SingleInt, d[high])) - @as(c_int, @clz(SingleInt, n[high])));
// 0 <= sr <= SingleInt.bit_count - 2 or sr large
if (sr > SingleInt.bit_count - 2) {
// 0 <= sr <= single_int_bits - 2 or sr large
if (sr > single_int_bits - 2) {
if (maybe_rem) |rem| {
rem.* = a;
}
return 0;
}
sr += 1;
// 1 <= sr <= SingleInt.bit_count - 1
// q.all = a << (DoubleInt.bit_count - sr);
// 1 <= sr <= single_int_bits - 1
// q.all = a << (double_int_bits - sr);
q[low] = 0;
q[high] = n[low] << @intCast(Log2SingleInt, SingleInt.bit_count - sr);
q[high] = n[low] << @intCast(Log2SingleInt, single_int_bits - sr);
// r.all = a >> sr;
r[high] = n[high] >> @intCast(Log2SingleInt, sr);
r[low] = (n[high] << @intCast(Log2SingleInt, SingleInt.bit_count - sr)) | (n[low] >> @intCast(Log2SingleInt, sr));
r[low] = (n[high] << @intCast(Log2SingleInt, single_int_bits - sr)) | (n[low] >> @intCast(Log2SingleInt, sr));
} else {
// d[low] != 0
if (d[high] == 0) {

@ -113,74 +115,74 @@ pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem:
}
sr = @ctz(SingleInt, d[low]);
q[high] = n[high] >> @intCast(Log2SingleInt, sr);
q[low] = (n[high] << @intCast(Log2SingleInt, SingleInt.bit_count - sr)) | (n[low] >> @intCast(Log2SingleInt, sr));
q[low] = (n[high] << @intCast(Log2SingleInt, single_int_bits - sr)) | (n[low] >> @intCast(Log2SingleInt, sr));
return @ptrCast(*align(@alignOf(SingleInt)) DoubleInt, &q[0]).*; // TODO issue #421
}
// K X
// ---
// 0 K
sr = 1 + SingleInt.bit_count + @as(c_uint, @clz(SingleInt, d[low])) - @as(c_uint, @clz(SingleInt, n[high]));
// 2 <= sr <= DoubleInt.bit_count - 1
// q.all = a << (DoubleInt.bit_count - sr);
sr = 1 + single_int_bits + @as(c_uint, @clz(SingleInt, d[low])) - @as(c_uint, @clz(SingleInt, n[high]));
// 2 <= sr <= double_int_bits - 1
// q.all = a << (double_int_bits - sr);
// r.all = a >> sr;
if (sr == SingleInt.bit_count) {
if (sr == single_int_bits) {
q[low] = 0;
q[high] = n[low];
r[high] = 0;
r[low] = n[high];
} else if (sr < SingleInt.bit_count) {
// 2 <= sr <= SingleInt.bit_count - 1
} else if (sr < single_int_bits) {
// 2 <= sr <= single_int_bits - 1
q[low] = 0;
q[high] = n[low] << @intCast(Log2SingleInt, SingleInt.bit_count - sr);
q[high] = n[low] << @intCast(Log2SingleInt, single_int_bits - sr);
r[high] = n[high] >> @intCast(Log2SingleInt, sr);
r[low] = (n[high] << @intCast(Log2SingleInt, SingleInt.bit_count - sr)) | (n[low] >> @intCast(Log2SingleInt, sr));
r[low] = (n[high] << @intCast(Log2SingleInt, single_int_bits - sr)) | (n[low] >> @intCast(Log2SingleInt, sr));
} else {
// SingleInt.bit_count + 1 <= sr <= DoubleInt.bit_count - 1
q[low] = n[low] << @intCast(Log2SingleInt, DoubleInt.bit_count - sr);
q[high] = (n[high] << @intCast(Log2SingleInt, DoubleInt.bit_count - sr)) | (n[low] >> @intCast(Log2SingleInt, sr - SingleInt.bit_count));
// single_int_bits + 1 <= sr <= double_int_bits - 1
q[low] = n[low] << @intCast(Log2SingleInt, double_int_bits - sr);
q[high] = (n[high] << @intCast(Log2SingleInt, double_int_bits - sr)) | (n[low] >> @intCast(Log2SingleInt, sr - single_int_bits));
r[high] = 0;
r[low] = n[high] >> @intCast(Log2SingleInt, sr - SingleInt.bit_count);
r[low] = n[high] >> @intCast(Log2SingleInt, sr - single_int_bits);
}
} else {
// K X
// ---
// K K
sr = @bitCast(c_uint, @as(c_int, @clz(SingleInt, d[high])) - @as(c_int, @clz(SingleInt, n[high])));
// 0 <= sr <= SingleInt.bit_count - 1 or sr large
if (sr > SingleInt.bit_count - 1) {
// 0 <= sr <= single_int_bits - 1 or sr large
if (sr > single_int_bits - 1) {
if (maybe_rem) |rem| {
rem.* = a;
}
return 0;
}
sr += 1;
// 1 <= sr <= SingleInt.bit_count
// q.all = a << (DoubleInt.bit_count - sr);
// 1 <= sr <= single_int_bits
// q.all = a << (double_int_bits - sr);
// r.all = a >> sr;
q[low] = 0;
if (sr == SingleInt.bit_count) {
if (sr == single_int_bits) {
q[high] = n[low];
r[high] = 0;
r[low] = n[high];
} else {
r[high] = n[high] >> @intCast(Log2SingleInt, sr);
r[low] = (n[high] << @intCast(Log2SingleInt, SingleInt.bit_count - sr)) | (n[low] >> @intCast(Log2SingleInt, sr));
q[high] = n[low] << @intCast(Log2SingleInt, SingleInt.bit_count - sr);
r[low] = (n[high] << @intCast(Log2SingleInt, single_int_bits - sr)) | (n[low] >> @intCast(Log2SingleInt, sr));
q[high] = n[low] << @intCast(Log2SingleInt, single_int_bits - sr);
}
}
}
// Not a special case
// q and r are initialized with:
// q.all = a << (DoubleInt.bit_count - sr);
// q.all = a << (double_int_bits - sr);
// r.all = a >> sr;
// 1 <= sr <= DoubleInt.bit_count - 1
// 1 <= sr <= double_int_bits - 1
var carry: u32 = 0;
var r_all: DoubleInt = undefined;
while (sr > 0) : (sr -= 1) {
// r:q = ((r:q) << 1) | carry
r[high] = (r[high] << 1) | (r[low] >> (SingleInt.bit_count - 1));
r[low] = (r[low] << 1) | (q[high] >> (SingleInt.bit_count - 1));
q[high] = (q[high] << 1) | (q[low] >> (SingleInt.bit_count - 1));
r[high] = (r[high] << 1) | (r[low] >> (single_int_bits - 1));
r[low] = (r[low] << 1) | (q[high] >> (single_int_bits - 1));
q[high] = (q[high] << 1) | (q[low] >> (single_int_bits - 1));
q[low] = (q[low] << 1) | carry;
// carry = 0;
// if (r.all >= b)

@ -189,7 +191,7 @@ pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem:
// carry = 1;
// }
r_all = @ptrCast(*align(@alignOf(SingleInt)) DoubleInt, &r[0]).*; // TODO issue #421
const s: SignedDoubleInt = @bitCast(SignedDoubleInt, b -% r_all -% 1) >> (DoubleInt.bit_count - 1);
const s: SignedDoubleInt = @bitCast(SignedDoubleInt, b -% r_all -% 1) >> (double_int_bits - 1);
carry = @intCast(u32, s & 1);
r_all -= b & @bitCast(DoubleInt, s);
r = @ptrCast(*[2]SingleInt, &r_all).*; // TODO issue #421
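
`udivmod` above applies the idea twice: the double-wide width is read once from `@typeInfo`, and the half-wide helper type is derived from it rather than from a `bit_count` field on the type. In isolation, the derivation is just this (helper name invented for the sketch):

const std = @import("std");

fn HalfInt(comptime DoubleInt: type) type {
    const double_int_bits = @typeInfo(DoubleInt).Int.bits;
    return std.meta.Int(false, @divExact(double_int_bits, 2));
}

comptime {
    std.debug.assert(HalfInt(u128) == u64);
    std.debug.assert(HalfInt(u64) == u32);
}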

@ -67,7 +67,7 @@ fn EfiMain(handle: uefi.Handle, system_table: *uefi.tables.SystemTable) callconv
uefi.handle = handle;
uefi.system_table = system_table;

switch (@TypeOf(root.main).ReturnType) {
switch (@typeInfo(@TypeOf(root.main)).Fn.return_type.?) {
noreturn => {
root.main();
},

@ -239,7 +239,7 @@ fn callMainAsync(loop: *std.event.Loop) callconv(.Async) u8 {
// This is not marked inline because it is called with @asyncCall when
// there is an event loop.
pub fn callMain() u8 {
switch (@typeInfo(@TypeOf(root.main).ReturnType)) {
switch (@typeInfo(@typeInfo(@TypeOf(root.main)).Fn.return_type.?)) {
.NoReturn => {
root.main();
},

@ -166,7 +166,7 @@ pub const Thread = struct {
fn threadMain(raw_arg: windows.LPVOID) callconv(.C) windows.DWORD {
const arg = if (@sizeOf(Context) == 0) {} else @ptrCast(*Context, @alignCast(@alignOf(Context), raw_arg)).*;

switch (@typeInfo(@TypeOf(startFn).ReturnType)) {
switch (@typeInfo(@typeInfo(@TypeOf(startFn)).Fn.return_type.?)) {
.NoReturn => {
startFn(arg);
},

@ -227,7 +227,7 @@ pub const Thread = struct {
fn linuxThreadMain(ctx_addr: usize) callconv(.C) u8 {
const arg = if (@sizeOf(Context) == 0) {} else @intToPtr(*const Context, ctx_addr).*;

switch (@typeInfo(@TypeOf(startFn).ReturnType)) {
switch (@typeInfo(@typeInfo(@TypeOf(startFn)).Fn.return_type.?)) {
.NoReturn => {
startFn(arg);
},

@ -259,7 +259,7 @@ pub const Thread = struct {
fn posixThreadMain(ctx: ?*c_void) callconv(.C) ?*c_void {
const arg = if (@sizeOf(Context) == 0) {} else @ptrCast(*Context, @alignCast(@alignOf(Context), ctx)).*;

switch (@typeInfo(@TypeOf(startFn).ReturnType)) {
switch (@typeInfo(@typeInfo(@TypeOf(startFn)).Fn.return_type.?)) {
.NoReturn => {
startFn(arg);
},

@ -22,7 +22,7 @@ pub const SrcHash = [16]u8;
/// If it is long, blake3 hash is computed.
pub fn hashSrc(src: []const u8) SrcHash {
var out: SrcHash = undefined;
if (src.len <= SrcHash.len) {
if (src.len <= @typeInfo(SrcHash).Array.len) {
std.mem.copy(u8, &out, src);
std.mem.set(u8, out[src.len..], 0);
} else {

@ -1810,7 +1810,7 @@ Error type_allowed_in_extern(CodeGen *g, ZigType *type_entry, bool *result) {
ZigType *get_auto_err_set_type(CodeGen *g, ZigFn *fn_entry) {
ZigType *err_set_type = new_type_table_entry(ZigTypeIdErrorSet);
buf_resize(&err_set_type->name, 0);
buf_appendf(&err_set_type->name, "@TypeOf(%s).ReturnType.ErrorSet", buf_ptr(&fn_entry->symbol_name));
buf_appendf(&err_set_type->name, "@typeInfo(@typeInfo(@TypeOf(%s)).Fn.return_type.?).ErrorUnion.error_set", buf_ptr(&fn_entry->symbol_name));
err_set_type->data.error_set.err_count = 0;
err_set_type->data.error_set.errors = nullptr;
err_set_type->data.error_set.infer_fn = fn_entry;

src/ir.cpp
@ -22835,167 +22835,9 @@ static IrInstGen *ir_analyze_instruction_field_ptr(IrAnalyze *ira, IrInstSrcFiel
bool ptr_is_volatile = false;
return ir_get_const_ptr(ira, &field_ptr_instruction->base.base, const_val,
err_set_type, ConstPtrMutComptimeConst, ptr_is_const, ptr_is_volatile, 0);
} else if (child_type->id == ZigTypeIdInt) {
if (buf_eql_str(field_name, "bit_count")) {
bool ptr_is_const = true;
bool ptr_is_volatile = false;
return ir_get_const_ptr(ira, &field_ptr_instruction->base.base,
create_const_unsigned_negative(ira->codegen, ira->codegen->builtin_types.entry_num_lit_int,
child_type->data.integral.bit_count, false),
ira->codegen->builtin_types.entry_num_lit_int,
ConstPtrMutComptimeConst, ptr_is_const, ptr_is_volatile, 0);
} else if (buf_eql_str(field_name, "is_signed")) {
bool ptr_is_const = true;
bool ptr_is_volatile = false;
return ir_get_const_ptr(ira, &field_ptr_instruction->base.base,
create_const_bool(ira->codegen, child_type->data.integral.is_signed),
ira->codegen->builtin_types.entry_bool,
ConstPtrMutComptimeConst, ptr_is_const, ptr_is_volatile, 0);
} else {
ir_add_error(ira, &field_ptr_instruction->base.base,
buf_sprintf("type '%s' has no member called '%s'",
buf_ptr(&child_type->name), buf_ptr(field_name)));
return ira->codegen->invalid_inst_gen;
}
} else if (child_type->id == ZigTypeIdFloat) {
if (buf_eql_str(field_name, "bit_count")) {
bool ptr_is_const = true;
bool ptr_is_volatile = false;
return ir_get_const_ptr(ira, &field_ptr_instruction->base.base,
create_const_unsigned_negative(ira->codegen, ira->codegen->builtin_types.entry_num_lit_int,
child_type->data.floating.bit_count, false),
ira->codegen->builtin_types.entry_num_lit_int,
ConstPtrMutComptimeConst, ptr_is_const, ptr_is_volatile, 0);
} else {
ir_add_error(ira, &field_ptr_instruction->base.base,
buf_sprintf("type '%s' has no member called '%s'",
buf_ptr(&child_type->name), buf_ptr(field_name)));
return ira->codegen->invalid_inst_gen;
}
} else if (child_type->id == ZigTypeIdPointer) {
if (buf_eql_str(field_name, "Child")) {
bool ptr_is_const = true;
bool ptr_is_volatile = false;
return ir_get_const_ptr(ira, &field_ptr_instruction->base.base,
create_const_type(ira->codegen, child_type->data.pointer.child_type),
ira->codegen->builtin_types.entry_type,
ConstPtrMutComptimeConst, ptr_is_const, ptr_is_volatile, 0);
} else if (buf_eql_str(field_name, "alignment")) {
bool ptr_is_const = true;
bool ptr_is_volatile = false;
if ((err = type_resolve(ira->codegen, child_type->data.pointer.child_type,
ResolveStatusAlignmentKnown)))
{
return ira->codegen->invalid_inst_gen;
}
return ir_get_const_ptr(ira, &field_ptr_instruction->base.base,
create_const_unsigned_negative(ira->codegen, ira->codegen->builtin_types.entry_num_lit_int,
get_ptr_align(ira->codegen, child_type), false),
ira->codegen->builtin_types.entry_num_lit_int,
ConstPtrMutComptimeConst, ptr_is_const, ptr_is_volatile, 0);
} else {
ir_add_error(ira, &field_ptr_instruction->base.base,
buf_sprintf("type '%s' has no member called '%s'",
buf_ptr(&child_type->name), buf_ptr(field_name)));
return ira->codegen->invalid_inst_gen;
}
} else if (child_type->id == ZigTypeIdArray) {
if (buf_eql_str(field_name, "Child")) {
bool ptr_is_const = true;
bool ptr_is_volatile = false;
return ir_get_const_ptr(ira, &field_ptr_instruction->base.base,
create_const_type(ira->codegen, child_type->data.array.child_type),
ira->codegen->builtin_types.entry_type,
ConstPtrMutComptimeConst, ptr_is_const, ptr_is_volatile, 0);
} else if (buf_eql_str(field_name, "len")) {
bool ptr_is_const = true;
bool ptr_is_volatile = false;
return ir_get_const_ptr(ira, &field_ptr_instruction->base.base,
create_const_unsigned_negative(ira->codegen, ira->codegen->builtin_types.entry_num_lit_int,
child_type->data.array.len, false),
ira->codegen->builtin_types.entry_num_lit_int,
ConstPtrMutComptimeConst, ptr_is_const, ptr_is_volatile, 0);
} else {
ir_add_error(ira, &field_ptr_instruction->base.base,
buf_sprintf("type '%s' has no member called '%s'",
buf_ptr(&child_type->name), buf_ptr(field_name)));
return ira->codegen->invalid_inst_gen;
}
} else if (child_type->id == ZigTypeIdErrorUnion) {
if (buf_eql_str(field_name, "Payload")) {
bool ptr_is_const = true;
bool ptr_is_volatile = false;
return ir_get_const_ptr(ira, &field_ptr_instruction->base.base,
create_const_type(ira->codegen, child_type->data.error_union.payload_type),
ira->codegen->builtin_types.entry_type,
ConstPtrMutComptimeConst, ptr_is_const, ptr_is_volatile, 0);
} else if (buf_eql_str(field_name, "ErrorSet")) {
bool ptr_is_const = true;
bool ptr_is_volatile = false;
return ir_get_const_ptr(ira, &field_ptr_instruction->base.base,
create_const_type(ira->codegen, child_type->data.error_union.err_set_type),
ira->codegen->builtin_types.entry_type,
ConstPtrMutComptimeConst, ptr_is_const, ptr_is_volatile, 0);
} else {
ir_add_error(ira, &field_ptr_instruction->base.base,
buf_sprintf("type '%s' has no member called '%s'",
buf_ptr(&child_type->name), buf_ptr(field_name)));
return ira->codegen->invalid_inst_gen;
}
} else if (child_type->id == ZigTypeIdOptional) {
if (buf_eql_str(field_name, "Child")) {
bool ptr_is_const = true;
bool ptr_is_volatile = false;
return ir_get_const_ptr(ira, &field_ptr_instruction->base.base,
create_const_type(ira->codegen, child_type->data.maybe.child_type),
ira->codegen->builtin_types.entry_type,
ConstPtrMutComptimeConst, ptr_is_const, ptr_is_volatile, 0);
} else {
ir_add_error(ira, &field_ptr_instruction->base.base,
buf_sprintf("type '%s' has no member called '%s'",
buf_ptr(&child_type->name), buf_ptr(field_name)));
return ira->codegen->invalid_inst_gen;
}
} else if (child_type->id == ZigTypeIdFn) {
if (buf_eql_str(field_name, "ReturnType")) {
if (child_type->data.fn.fn_type_id.return_type == nullptr) {
// Return type can only ever be null, if the function is generic
assert(child_type->data.fn.is_generic);

ir_add_error(ira, &field_ptr_instruction->base.base,
buf_sprintf("ReturnType has not been resolved because '%s' is generic", buf_ptr(&child_type->name)));
return ira->codegen->invalid_inst_gen;
}

bool ptr_is_const = true;
bool ptr_is_volatile = false;
return ir_get_const_ptr(ira, &field_ptr_instruction->base.base,
create_const_type(ira->codegen, child_type->data.fn.fn_type_id.return_type),
ira->codegen->builtin_types.entry_type,
ConstPtrMutComptimeConst, ptr_is_const, ptr_is_volatile, 0);
} else if (buf_eql_str(field_name, "is_var_args")) {
bool ptr_is_const = true;
bool ptr_is_volatile = false;
return ir_get_const_ptr(ira, &field_ptr_instruction->base.base,
create_const_bool(ira->codegen, child_type->data.fn.fn_type_id.is_var_args),
ira->codegen->builtin_types.entry_bool,
ConstPtrMutComptimeConst, ptr_is_const, ptr_is_volatile, 0);
} else if (buf_eql_str(field_name, "arg_count")) {
bool ptr_is_const = true;
bool ptr_is_volatile = false;
return ir_get_const_ptr(ira, &field_ptr_instruction->base.base,
create_const_usize(ira->codegen, child_type->data.fn.fn_type_id.param_count),
ira->codegen->builtin_types.entry_usize,
ConstPtrMutComptimeConst, ptr_is_const, ptr_is_volatile, 0);
} else {
ir_add_error(ira, &field_ptr_instruction->base.base,
buf_sprintf("type '%s' has no member called '%s'",
buf_ptr(&child_type->name), buf_ptr(field_name)));
return ira->codegen->invalid_inst_gen;
}
} else {
ir_add_error(ira, &field_ptr_instruction->base.base,
buf_sprintf("type '%s' does not support field access", buf_ptr(&child_type->name)));
buf_sprintf("type '%s' does not support field access", buf_ptr(&container_type->name)));
return ira->codegen->invalid_inst_gen;
}
} else if (field_ptr_instruction->initializing) {

@ -176,11 +176,11 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
|
||||
, &[_][]const u8{
|
||||
"tmp.zig:2:17: error: expected type 'u32', found 'error{Ohno}'",
|
||||
"tmp.zig:1:17: note: function cannot return an error",
|
||||
"tmp.zig:8:5: error: expected type 'void', found '@TypeOf(bar).ReturnType.ErrorSet'",
|
||||
"tmp.zig:8:5: error: expected type 'void', found '@typeInfo(@typeInfo(@TypeOf(bar)).Fn.return_type.?).ErrorUnion.error_set'",
|
||||
"tmp.zig:7:17: note: function cannot return an error",
|
||||
"tmp.zig:11:15: error: expected type 'u32', found '@TypeOf(bar).ReturnType.ErrorSet!u32'",
|
||||
"tmp.zig:11:15: error: expected type 'u32', found '@typeInfo(@typeInfo(@TypeOf(bar)).Fn.return_type.?).ErrorUnion.error_set!u32'",
|
||||
"tmp.zig:10:17: note: function cannot return an error",
|
||||
"tmp.zig:15:14: error: expected type 'u32', found '@TypeOf(bar).ReturnType.ErrorSet!u32'",
|
||||
"tmp.zig:15:14: error: expected type 'u32', found '@typeInfo(@typeInfo(@TypeOf(bar)).Fn.return_type.?).ErrorUnion.error_set!u32'",
|
||||
"tmp.zig:14:5: note: cannot store an error in type 'u32'",
|
||||
});
|
||||
|
||||
@ -1224,7 +1224,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
|
||||
\\ };
|
||||
\\}
|
||||
, &[_][]const u8{
|
||||
"tmp.zig:11:25: error: expected type 'u32', found '@TypeOf(get_uval).ReturnType.ErrorSet!u32'",
|
||||
"tmp.zig:11:25: error: expected type 'u32', found '@typeInfo(@typeInfo(@TypeOf(get_uval)).Fn.return_type.?).ErrorUnion.error_set!u32'",
|
||||
});
|
||||
|
||||
cases.add("assigning to struct or union fields that are not optionals with a function that returns an optional",
|
||||
@ -1929,7 +1929,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
|
||||
\\ const info = @TypeOf(slice).unknown;
|
||||
\\}
|
||||
, &[_][]const u8{
|
||||
"tmp.zig:3:32: error: type '[]i32' does not support field access",
|
||||
"tmp.zig:3:32: error: type 'type' does not support field access",
|
||||
});
|
||||
|
||||
cases.add("peer cast then implicit cast const pointer to mutable C pointer",
|
||||
@ -3542,7 +3542,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
|
||||
\\ }
|
||||
\\}
|
||||
, &[_][]const u8{
|
||||
"tmp.zig:5:14: error: duplicate switch value: '@TypeOf(foo).ReturnType.ErrorSet.Foo'",
|
||||
"tmp.zig:5:14: error: duplicate switch value: '@typeInfo(@typeInfo(@TypeOf(foo)).Fn.return_type.?).ErrorUnion.error_set.Foo'",
|
||||
"tmp.zig:3:14: note: other value is here",
|
||||
});
|
||||
|
||||
@ -3674,7 +3674,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\ try foo();
\\}
, &[_][]const u8{
"tmp.zig:5:5: error: cannot resolve inferred error set '@TypeOf(foo).ReturnType.ErrorSet': function 'foo' not fully analyzed yet",
"tmp.zig:5:5: error: cannot resolve inferred error set '@typeInfo(@typeInfo(@TypeOf(foo)).Fn.return_type.?).ErrorUnion.error_set': function 'foo' not fully analyzed yet",
});

cases.add("implicit cast of error set not a subset",
@ -7206,15 +7206,6 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
"tmp.zig:7:24: error: accessing union field 'Bar' while field 'Baz' is set",
});

cases.add("getting return type of generic function",
\\fn generic(a: anytype) void {}
\\comptime {
\\ _ = @TypeOf(generic).ReturnType;
\\}
, &[_][]const u8{
"tmp.zig:3:25: error: ReturnType has not been resolved because 'fn(anytype) anytype' is generic",
});

cases.add("unsupported modifier at start of asm output constraint",
\\export fn foo() void {
\\ var bar: u32 = 3;

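The longer type names in these expected errors come from the new way of naming an inferred error set: instead of the removed `@TypeOf(f).ReturnType.ErrorSet` fields, the error set is reached through `@typeInfo`. A minimal sketch of the pattern, using a hypothetical `bar` with an explicit error set so the comparison is unambiguous:

const std = @import("std");
const expect = std.testing.expect;

const BarErrors = error{Ohno};

fn bar() BarErrors!u32 {
    return error.Ohno;
}

test "naming a function's error set via @typeInfo" {
    // Take the function type's return type, then the error union's error set.
    const Return = @typeInfo(@TypeOf(bar)).Fn.return_type.?;
    const Errors = @typeInfo(Return).ErrorUnion.error_set;
    expect(Errors == BarErrors);
    expect(@typeInfo(Errors) == .ErrorSet);
}
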
@ -5,7 +5,7 @@ const builtin = @import("builtin");
var foo: u8 align(4) = 100;

test "global variable alignment" {
comptime expect(@TypeOf(&foo).alignment == 4);
comptime expect(@typeInfo(@TypeOf(&foo)).Pointer.alignment == 4);
comptime expect(@TypeOf(&foo) == *align(4) u8);
{
const slice = @as(*[1]u8, &foo)[0..];

@ -136,16 +136,6 @@ test "array literal with specified size" {
expect(array[1] == 2);
}

test "array child property" {
var x: [5]i32 = undefined;
expect(@TypeOf(x).Child == i32);
}

test "array len property" {
var x: [5]i32 = undefined;
expect(@TypeOf(x).len == 5);
}

test "array len field" {
var arr = [4]u8{ 0, 0, 0, 0 };
var ptr = &arr;

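The deleted array tests exercised the removed `Child` and `len` type fields; the same checks can still be written against `@typeInfo` (or the array value's own `len`). A minimal sketch of the replacement, assuming the `std.builtin.TypeInfo.Array` field names of this release:

const expect = @import("std").testing.expect;

test "array reflection via @typeInfo" {
    var x: [5]i32 = undefined;
    const info = @typeInfo(@TypeOf(x)).Array;
    expect(info.child == i32);
    expect(info.len == 5);
    // The value-level len is unaffected by the removal.
    expect(x.len == 5);
}
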
@ -331,7 +331,7 @@ test "async fn with inferred error set" {
fn doTheTest() void {
var frame: [1]@Frame(middle) = undefined;
var fn_ptr = middle;
var result: @TypeOf(fn_ptr).ReturnType.ErrorSet!void = undefined;
var result: @typeInfo(@typeInfo(@TypeOf(fn_ptr)).Fn.return_type.?).ErrorUnion.error_set!void = undefined;
_ = @asyncCall(std.mem.sliceAsBytes(frame[0..]), &result, fn_ptr, .{});
resume global_frame;
std.testing.expectError(error.Fail, result);
@ -950,7 +950,7 @@ test "@asyncCall with comptime-known function, but not awaited directly" {

fn doTheTest() void {
var frame: [1]@Frame(middle) = undefined;
var result: @TypeOf(middle).ReturnType.ErrorSet!void = undefined;
var result: @typeInfo(@typeInfo(@TypeOf(middle)).Fn.return_type.?).ErrorUnion.error_set!void = undefined;
_ = @asyncCall(std.mem.sliceAsBytes(frame[0..]), &result, middle, .{});
resume global_frame;
std.testing.expectError(error.Fail, result);
@ -1018,7 +1018,7 @@ test "@TypeOf an async function call of generic fn with error union type" {
const S = struct {
fn func(comptime x: anytype) anyerror!i32 {
const T = @TypeOf(async func(x));
comptime expect(T == @TypeOf(@frame()).Child);
comptime expect(T == @typeInfo(@TypeOf(@frame())).Pointer.child);
return undefined;
}
};

@ -2,16 +2,18 @@ const std = @import("std");
const expect = std.testing.expect;

fn ShardedTable(comptime Key: type, comptime mask_bit_count: comptime_int, comptime V: type) type {
expect(Key == std.meta.Int(false, Key.bit_count));
expect(Key.bit_count >= mask_bit_count);
const key_bits = @typeInfo(Key).Int.bits;
expect(Key == std.meta.Int(false, key_bits));
expect(key_bits >= mask_bit_count);
const shard_key_bits = mask_bit_count;
const ShardKey = std.meta.Int(false, mask_bit_count);
const shift_amount = Key.bit_count - ShardKey.bit_count;
const shift_amount = key_bits - shard_key_bits;
return struct {
const Self = @This();
shards: [1 << ShardKey.bit_count]?*Node,
shards: [1 << shard_key_bits]?*Node,

pub fn create() Self {
return Self{ .shards = [_]?*Node{null} ** (1 << ShardKey.bit_count) };
return Self{ .shards = [_]?*Node{null} ** (1 << shard_key_bits) };
}

fn getShardKey(key: Key) ShardKey {

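The ShardedTable fix shows the general replacement for the removed `bit_count` field on integer types: the width now comes from `@typeInfo(T).Int.bits`. A small standalone sketch of the same pattern:

const std = @import("std");
const expect = std.testing.expect;

test "integer bit width via @typeInfo" {
    const Key = u32;
    const key_bits = @typeInfo(Key).Int.bits;
    expect(key_bits == 32);
    // std.meta.Int(is_signed, bits) rebuilds the matching integer type.
    expect(Key == std.meta.Int(false, key_bits));
}
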
@ -3,8 +3,8 @@ const io = @import("std").io;
pub fn write(_: void, bytes: []const u8) !usize {
return 0;
}
pub fn outStream() io.OutStream(void, @TypeOf(write).ReturnType.ErrorSet, write) {
return io.OutStream(void, @TypeOf(write).ReturnType.ErrorSet, write){ .context = {} };
pub fn outStream() io.OutStream(void, @typeInfo(@typeInfo(@TypeOf(write)).Fn.return_type.?).ErrorUnion.error_set, write) {
return io.OutStream(void, @typeInfo(@typeInfo(@TypeOf(write)).Fn.return_type.?).ErrorUnion.error_set, write){ .context = {} };
}

test "crash" {

@ -84,8 +84,8 @@ fn testErrorUnionType() void {
const x: anyerror!i32 = 1234;
if (x) |value| expect(value == 1234) else |_| unreachable;
expect(@typeInfo(@TypeOf(x)) == .ErrorUnion);
expect(@typeInfo(@TypeOf(x).ErrorSet) == .ErrorSet);
expect(@TypeOf(x).ErrorSet == anyerror);
expect(@typeInfo(@typeInfo(@TypeOf(x)).ErrorUnion.error_set) == .ErrorSet);
expect(@typeInfo(@TypeOf(x)).ErrorUnion.error_set == anyerror);
}

test "error set type" {

@ -24,12 +24,6 @@ test "call disabled extern fn" {
disabledExternFn();
}

test "floating point primitive bit counts" {
expect(f16.bit_count == 16);
expect(f32.bit_count == 32);
expect(f64.bit_count == 64);
}

test "short circuit" {
testShortCircuit(false, true);
comptime testShortCircuit(false, true);
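The removed floating-point test has a direct `@typeInfo` counterpart if the checks are still wanted; the widths are read from the `Float` variant. A minimal sketch, assuming the `std.builtin.TypeInfo.Float.bits` field name of this release:

const expect = @import("std").testing.expect;

test "floating point bit widths via @typeInfo" {
    expect(@typeInfo(f16).Float.bits == 16);
    expect(@typeInfo(f32).Float.bits == 32);
    expect(@typeInfo(f64).Float.bits == 64);
}
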
@ -577,10 +571,6 @@ test "slice string literal has correct type" {
comptime expect(@TypeOf(array[runtime_zero..]) == []const i32);
}

test "pointer child field" {
expect((*u32).Child == u32);
}

test "struct inside function" {
testStructInFn();
comptime testStructInFn();

@ -2,23 +2,15 @@ const expect = @import("std").testing.expect;
const mem = @import("std").mem;
const reflection = @This();

test "reflection: array, pointer, optional, error union type child" {
comptime {
expect(([10]u8).Child == u8);
expect((*u8).Child == u8);
expect((anyerror!u8).Payload == u8);
expect((?u8).Child == u8);
}
}

test "reflection: function return type, var args, and param types" {
comptime {
expect(@TypeOf(dummy).ReturnType == i32);
expect(!@TypeOf(dummy).is_var_args);
expect(@TypeOf(dummy).arg_count == 3);
expect(@typeInfo(@TypeOf(dummy)).Fn.args[0].arg_type.? == bool);
expect(@typeInfo(@TypeOf(dummy)).Fn.args[1].arg_type.? == i32);
expect(@typeInfo(@TypeOf(dummy)).Fn.args[2].arg_type.? == f32);
const info = @typeInfo(@TypeOf(dummy)).Fn;
expect(info.return_type.? == i32);
expect(!info.is_var_args);
expect(info.args.len == 3);
expect(info.args[0].arg_type.? == bool);
expect(info.args[1].arg_type.? == i32);
expect(info.args[2].arg_type.? == f32);
}
}

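For reference, the child/payload checks dropped from the first reflection test all have `@typeInfo` equivalents, mirroring the `@typeInfo` spellings used elsewhere in this commit. A minimal sketch:

const expect = @import("std").testing.expect;

test "reflection: child and payload types via @typeInfo" {
    comptime {
        expect(@typeInfo([10]u8).Array.child == u8);
        expect(@typeInfo(*u8).Pointer.child == u8);
        expect(@typeInfo(anyerror!u8).ErrorUnion.payload == u8);
        expect(@typeInfo(?u8).Optional.child == u8);
    }
}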