ran zig fmt on stdlib

emekoi 2019-05-19 15:26:43 -05:00 committed by Andrew Kelley
parent 6672ee9eb3
commit 1c73c08298
8 changed files with 69 additions and 147 deletions
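The diff repeats two patterns: struct initializers whose field value spans multiple lines (usually a switch expression) are broken onto one line per field with trailing commas, and manually wrapped argument lists are collapsed onto a single line. A condensed before/after of the initializer case, distilled from the first hunk below:

    // before
    return FormValue{ .Ref = switch (size) {
        1 => try in_stream.readIntLittle(u8),
        else => unreachable,
    } };

    // after zig fmt
    return FormValue{
        .Ref = switch (size) {
            1 => try in_stream.readIntLittle(u8),
            else => unreachable,
        },
    };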

View File

@@ -1490,14 +1490,16 @@ fn parseFormValueTargetAddrSize(in_stream: var) !u64 {
}
fn parseFormValueRef(allocator: *mem.Allocator, in_stream: var, size: i32) !FormValue {
return FormValue{ .Ref = switch (size) {
1 => try in_stream.readIntLittle(u8),
2 => try in_stream.readIntLittle(u16),
4 => try in_stream.readIntLittle(u32),
8 => try in_stream.readIntLittle(u64),
-1 => try leb.readULEB128(u64, in_stream),
else => unreachable,
} };
return FormValue{
.Ref = switch (size) {
1 => try in_stream.readIntLittle(u8),
2 => try in_stream.readIntLittle(u16),
4 => try in_stream.readIntLittle(u32),
8 => try in_stream.readIntLittle(u64),
-1 => try leb.readULEB128(u64, in_stream),
else => unreachable,
},
};
}
fn parseFormValue(allocator: *mem.Allocator, in_stream: var, form_id: u64, is_64: bool) anyerror!FormValue {

View File

@@ -1116,10 +1116,12 @@ pub fn Deserializer(comptime endian: builtin.Endian, comptime packing: Packing,
pub const Stream = InStream(Error);
pub fn init(in_stream: *Stream) Self {
return Self{ .in_stream = switch (packing) {
.Bit => BitInStream(endian, Stream.Error).init(in_stream),
.Byte => in_stream,
} };
return Self{
.in_stream = switch (packing) {
.Bit => BitInStream(endian, Stream.Error).init(in_stream),
.Byte => in_stream,
},
};
}
pub fn alignToByte(self: *Self) void {
@@ -1325,10 +1327,12 @@ pub fn Serializer(comptime endian: builtin.Endian, comptime packing: Packing, co
pub const Stream = OutStream(Error);
pub fn init(out_stream: *Stream) Self {
return Self{ .out_stream = switch (packing) {
.Bit => BitOutStream(endian, Stream.Error).init(out_stream),
.Byte => out_stream,
} };
return Self{
.out_stream = switch (packing) {
.Bit => BitOutStream(endian, Stream.Error).init(out_stream),
.Byte => out_stream,
},
};
}
/// Flushes any unwritten bits to the stream

View File

@@ -114,7 +114,7 @@ pub fn setThreadPointer(addr: usize) void {
.aarch64 => {
asm volatile (
\\ msr tpidr_el0, %[addr]
:
:
: [addr] "r" (addr)
);
},

View File

@@ -8,7 +8,10 @@ const assert = std.debug.assert;
var argc_ptr: [*]usize = undefined;
const is_wasm = switch (builtin.arch) { .wasm32, .wasm64 => true, else => false};
const is_wasm = switch (builtin.arch) {
.wasm32, .wasm64 => true,
else => false,
};
comptime {
if (builtin.link_libc) {

View File

@@ -8,8 +8,14 @@ const std = @import("std");
const builtin = @import("builtin");
const maxInt = std.math.maxInt;
const is_wasm = switch (builtin.arch) { .wasm32, .wasm64 => true, else => false};
const is_freestanding = switch (builtin.os) { .freestanding => true, else => false };
const is_wasm = switch (builtin.arch) {
.wasm32, .wasm64 => true,
else => false,
};
const is_freestanding = switch (builtin.os) {
.freestanding => true,
else => false,
};
comptime {
if (is_freestanding and is_wasm and builtin.link_libc) {
@export("_start", wasm_start, .Strong);

View File

@@ -76,7 +76,7 @@ fn extendXfYf2(comptime dst_t: type, comptime src_t: type, a: @IntType(false, @t
// renormalize the significand and clear the leading bit, then insert
// the correct adjusted exponent in the destination type.
const scale: u32 = @clz(src_rep_t, aAbs) -
@clz(src_rep_t, src_rep_t(srcMinNormal));
@clz(src_rep_t, src_rep_t(srcMinNormal));
absResult = dst_rep_t(aAbs) << @intCast(DstShift, dstSigBits - srcSigBits + scale);
absResult ^= dstMinNormal;
const resultExponent: u32 = dstExpBias - srcExpBias - scale + 1;

View File

@@ -10,70 +10,48 @@ pub const CallgrindClientRequest = extern enum {
StopInstrumentation,
};
fn doCallgrindClientRequestExpr(default: usize, request: CallgrindClientRequest,
a1: usize, a2: usize, a3: usize, a4: usize, a5: usize
) usize
{
return valgrind.doClientRequest(
default,
@intCast(usize, @enumToInt(request)),
a1, a2, a3, a4, a5);
fn doCallgrindClientRequestExpr(default: usize, request: CallgrindClientRequest, a1: usize, a2: usize, a3: usize, a4: usize, a5: usize) usize {
return valgrind.doClientRequest(default, @intCast(usize, @enumToInt(request)), a1, a2, a3, a4, a5);
}
fn doCallgrindClientRequestStmt(request: CallgrindClientRequest,
a1: usize, a2: usize, a3: usize, a4: usize, a5: usize
) void
{
fn doCallgrindClientRequestStmt(request: CallgrindClientRequest, a1: usize, a2: usize, a3: usize, a4: usize, a5: usize) void {
_ = doCallgrindClientRequestExpr(0, request, a1, a2, a3, a4, a5);
}
/// Dump current state of cost centers, and zero them afterwards
pub fn dumpStats() void {
doCallgrindClientRequestStmt(CallgrindClientRequest.DumpStats,
0, 0, 0, 0, 0);
doCallgrindClientRequestStmt(CallgrindClientRequest.DumpStats, 0, 0, 0, 0, 0);
}
/// Dump current state of cost centers, and zero them afterwards.
/// The argument is appended to a string stating the reason which triggered
/// the dump. This string is written as a description field into the
/// profile data dump.
pub fn dumpStatsAt(pos_str: [*]u8) void {
doCallgrindClientRequestStmt(CallgrindClientRequest.DumpStatsAt,
@ptrToInt(pos_str),
0, 0, 0, 0);
doCallgrindClientRequestStmt(CallgrindClientRequest.DumpStatsAt, @ptrToInt(pos_str), 0, 0, 0, 0);
}
/// Zero cost centers
pub fn zeroStats() void {
doCallgrindClientRequestStmt(CallgrindClientRequest.ZeroStats,
0, 0, 0, 0, 0);
doCallgrindClientRequestStmt(CallgrindClientRequest.ZeroStats, 0, 0, 0, 0, 0);
}
/// Toggles collection state.
/// The collection state specifies whether the happening of events
/// should be noted or if they are to be ignored. Events are noted
/// by increment of counters in a cost center
pub fn toggleCollect() void {
doCallgrindClientRequestStmt(CallgrindClientRequest.ToggleCollect,
0, 0, 0, 0, 0);
doCallgrindClientRequestStmt(CallgrindClientRequest.ToggleCollect, 0, 0, 0, 0, 0);
}
/// Start full callgrind instrumentation if not already switched on.
/// When cache simulation is done, it will flush the simulated cache;
/// this will lead to an artificial cache warmup phase afterwards with
/// cache misses which would not have happened in reality.
pub fn startInstrumentation() void {
doCallgrindClientRequestStmt(CallgrindClientRequest.StartInstrumentation,
0, 0, 0, 0, 0);
doCallgrindClientRequestStmt(CallgrindClientRequest.StartInstrumentation, 0, 0, 0, 0, 0);
}
/// Stop full callgrind instrumentation if not already switched off.
/// This flushes Valgrinds translation cache, and does no additional
/// instrumentation afterwards, which effectivly will run at the same
@@ -82,6 +60,5 @@ pub fn startInstrumentation() void {
/// To start Callgrind in this mode to ignore the setup phase, use
/// the option "--instr-atstart=no".
pub fn stopInstrumentation() void {
doCallgrindClientRequestStmt(CallgrindClientRequest.StopInstrumentation,
0, 0, 0, 0, 0);
doCallgrindClientRequestStmt(CallgrindClientRequest.StopInstrumentation, 0, 0, 0, 0, 0);
}
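The callgrind wrappers above map one-to-one onto Valgrind client requests, so typical use is to bracket the region of interest and then dump the counters. A hypothetical usage sketch, assuming this file is reachable as std.valgrind.callgrind and the binary is run under `valgrind --tool=callgrind --instr-atstart=no` (helper names below are made up for illustration):

    const std = @import("std");
    const callgrind = std.valgrind.callgrind;

    pub fn main() void {
        expensiveSetup(); // --instr-atstart=no keeps setup out of the profile

        callgrind.startInstrumentation(); // begin counting events
        hotLoop();
        callgrind.stopInstrumentation();

        callgrind.dumpStats(); // write cost centers to the profile and zero them
    }

    fn expensiveSetup() void {}

    fn hotLoop() void {
        var i: usize = 0;
        while (i < 1000) : (i += 1) {}
    }

When the program is not running under Valgrind, the client-request instruction sequence is defined to be a no-op, so the calls are safe to leave in.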

View File

@@ -19,149 +19,102 @@ pub const MemCheckClientRequest = extern enum {
DisableAddrErrorReportingInRange,
};
fn doMemCheckClientRequestExpr(default: usize, request: MemCheckClientRequest,
a1: usize, a2: usize, a3: usize, a4: usize, a5: usize
) usize
{
return valgrind.doClientRequest(
default,
@intCast(usize, @enumToInt(request)),
a1, a2, a3, a4, a5);
fn doMemCheckClientRequestExpr(default: usize, request: MemCheckClientRequest, a1: usize, a2: usize, a3: usize, a4: usize, a5: usize) usize {
return valgrind.doClientRequest(default, @intCast(usize, @enumToInt(request)), a1, a2, a3, a4, a5);
}
fn doMemCheckClientRequestStmt(request: MemCheckClientRequest,
a1: usize, a2: usize, a3: usize, a4: usize, a5: usize
) void
{
fn doMemCheckClientRequestStmt(request: MemCheckClientRequest, a1: usize, a2: usize, a3: usize, a4: usize, a5: usize) void {
_ = doMemCheckClientRequestExpr(0, request, a1, a2, a3, a4, a5);
}
/// Mark memory at qzz.ptr as unaddressable for qzz.len bytes.
/// This returns -1 when run on Valgrind and 0 otherwise.
pub fn makeMemNoAccess(qzz: []u8) i1 {
return @intCast(i1, doMemCheckClientRequestExpr(0, // default return
MemCheckClientRequest.MakeMemNoAccess,
@ptrToInt(qzz.ptr), qzz.len,
0, 0, 0));
MemCheckClientRequest.MakeMemNoAccess, @ptrToInt(qzz.ptr), qzz.len, 0, 0, 0));
}
/// Similarly, mark memory at qzz.ptr as addressable but undefined
/// for qzz.len bytes.
/// This returns -1 when run on Valgrind and 0 otherwise.
pub fn makeMemUndefined(qzz: []u8) i1 {
return @intCast(i1, doMemCheckClientRequestExpr(0, // default return
MemCheckClientRequest.MakeMemUndefined,
@ptrToInt(qzz.ptr), qzz.len,
0, 0, 0));
MemCheckClientRequest.MakeMemUndefined, @ptrToInt(qzz.ptr), qzz.len, 0, 0, 0));
}
/// Similarly, mark memory at qzz.ptr as addressable and defined
/// for qzz.len bytes.
pub fn makeMemDefined(qzz: []u8) i1 {
// This returns -1 when run on Valgrind and 0 otherwise.
// This returns -1 when run on Valgrind and 0 otherwise.
return @intCast(i1, doMemCheckClientRequestExpr(0, // default return
MemCheckClientRequest.MakeMemDefined,
@ptrToInt(qzz.ptr), qzz.len,
0, 0, 0));
MemCheckClientRequest.MakeMemDefined, @ptrToInt(qzz.ptr), qzz.len, 0, 0, 0));
}
/// Similar to makeMemDefined except that addressability is
/// not altered: bytes which are addressable are marked as defined,
/// but those which are not addressable are left unchanged.
/// This returns -1 when run on Valgrind and 0 otherwise.
pub fn makeMemDefinedIfAddressable(qzz: []u8) i1 {
return @intCast(i1, doMemCheckClientRequestExpr(0, // default return
MemCheckClientRequest.MakeMemDefinedIfAddressable,
@ptrToInt(qzz.ptr), qzz.len,
0, 0, 0));
MemCheckClientRequest.MakeMemDefinedIfAddressable, @ptrToInt(qzz.ptr), qzz.len, 0, 0, 0));
}
/// Create a block-description handle. The description is an ascii
/// string which is included in any messages pertaining to addresses
/// within the specified memory range. Has no other effect on the
/// properties of the memory range.
pub fn createBlock(qzz: []u8, desc: [*]u8) usize {
return doMemCheckClientRequestExpr(0, // default return
MemCheckClientRequest.CreateBlock,
@ptrToInt(qzz.ptr), qzz.len, @ptrToInt(desc),
0, 0);
MemCheckClientRequest.CreateBlock, @ptrToInt(qzz.ptr), qzz.len, @ptrToInt(desc), 0, 0);
}
/// Discard a block-description-handle. Returns 1 for an
/// invalid handle, 0 for a valid handle.
pub fn discard(blkindex) bool {
return doMemCheckClientRequestExpr(0, // default return
MemCheckClientRequest.Discard,
0, blkindex,
0, 0, 0) != 0;
MemCheckClientRequest.Discard, 0, blkindex, 0, 0, 0) != 0;
}
/// Check that memory at qzz.ptr is addressable for qzz.len bytes.
/// If suitable addressibility is not established, Valgrind prints an
/// error message and returns the address of the first offending byte.
/// Otherwise it returns zero.
pub fn checkMemIsAddressable(qzz: []u8) usize {
return doMemCheckClientRequestExpr(0,
MemCheckClientRequest.CheckMemIsAddressable,
@ptrToInt(qzz.ptr), qzz.len,
0, 0, 0);
return doMemCheckClientRequestExpr(0, MemCheckClientRequest.CheckMemIsAddressable, @ptrToInt(qzz.ptr), qzz.len, 0, 0, 0);
}
/// Check that memory at qzz.ptr is addressable and defined for
/// qzz.len bytes. If suitable addressibility and definedness are not
/// established, Valgrind prints an error message and returns the
/// address of the first offending byte. Otherwise it returns zero.
pub fn checkMemIsDefined(qzz: []u8) usize {
return doMemCheckClientRequestExpr(0,
MemCheckClientRequest.CheckMemIsDefined,
@ptrToInt(qzz.ptr), qzz.len,
0, 0, 0);
return doMemCheckClientRequestExpr(0, MemCheckClientRequest.CheckMemIsDefined, @ptrToInt(qzz.ptr), qzz.len, 0, 0, 0);
}
/// Do a full memory leak check (like --leak-check=full) mid-execution.
pub fn doLeakCheck() void {
doMemCheckClientRequestStmt(MemCheckClientRequest.DO_LEAK_CHECK,
0, 0,
0, 0, 0);
doMemCheckClientRequestStmt(MemCheckClientRequest.DO_LEAK_CHECK, 0, 0, 0, 0, 0);
}
/// Same as doLeakCheck() but only showing the entries for
/// which there was an increase in leaked bytes or leaked nr of blocks
/// since the previous leak search.
pub fn doAddedLeakCheck() void {
doMemCheckClientRequestStmt(MemCheckClientRequest.DO_LEAK_CHECK,
0, 1,
0, 0, 0);
doMemCheckClientRequestStmt(MemCheckClientRequest.DO_LEAK_CHECK, 0, 1, 0, 0, 0);
}
/// Same as doAddedLeakCheck() but showing entries with
/// increased or decreased leaked bytes/blocks since previous leak
/// search.
pub fn doChangedLeakCheck() void {
doMemCheckClientRequestStmt(MemCheckClientRequest.DO_LEAK_CHECK,
0, 2,
0, 0, 0);
doMemCheckClientRequestStmt(MemCheckClientRequest.DO_LEAK_CHECK, 0, 2, 0, 0, 0);
}
/// Do a summary memory leak check (like --leak-check=summary) mid-execution.
pub fn doQuickLeakCheck() void {
doMemCheckClientRequestStmt(MemCheckClientRequest.DO_LEAK_CHECK,
1, 0,
0, 0, 0);
doMemCheckClientRequestStmt(MemCheckClientRequest.DO_LEAK_CHECK, 1, 0, 0, 0, 0);
}
/// Return number of leaked, dubious, reachable and suppressed bytes found by
/// all previous leak checks.
const CountResult = struct {
@@ -172,34 +125,27 @@ const CountResult = struct {
};
pub fn countLeaks() CountResult {
var res = CountResult {
var res = CountResult{
.leaked = 0,
.dubious = 0,
.reachable = 0,
.suppressed = 0,
};
doMemCheckClientRequestStmt(MemCheckClientRequest.CountLeaks,
&res.leaked, &res.dubious,
&res.reachable, &res.suppressed,
0);
doMemCheckClientRequestStmt(MemCheckClientRequest.CountLeaks, &res.leaked, &res.dubious, &res.reachable, &res.suppressed, 0);
return res;
}
pub fn countLeakBlocks() CountResult {
var res = CountResult {
var res = CountResult{
.leaked = 0,
.dubious = 0,
.reachable = 0,
.suppressed = 0,
};
doMemCheckClientRequestStmt(MemCheckClientRequest.CountLeakBlocks,
&res.leaked, &res.dubious,
&res.reachable, &res.suppressed,
0);
doMemCheckClientRequestStmt(MemCheckClientRequest.CountLeakBlocks, &res.leaked, &res.dubious, &res.reachable, &res.suppressed, 0);
return res;
}
/// Get the validity data for addresses zza and copy it
/// into the provided zzvbits array. Return values:
/// 0 if not running on valgrind
@@ -210,15 +156,9 @@ pub fn countLeakBlocks() CountResult {
/// impossible to segfault your system by using this call.
pub fn getVbits(zza: []u8, zzvbits: []u8) u2 {
std.debug.assert(zzvbits.len >= zza.len / 8);
return @intCast(u2, doMemCheckClientRequestExpr(0,
MemCheckClientRequest.GetVbits,
@ptrToInt(zza.ptr),
@ptrToInt(zzvbits),
zza.len,
0, 0));
return @intCast(u2, doMemCheckClientRequestExpr(0, MemCheckClientRequest.GetVbits, @ptrToInt(zza.ptr), @ptrToInt(zzvbits), zza.len, 0, 0));
}
/// Set the validity data for addresses zza, copying it
/// from the provided zzvbits array. Return values:
/// 0 if not running on valgrind
@@ -229,27 +169,17 @@ pub fn getVbits(zza: []u8, zzvbits: []u8) u2 {
/// impossible to segfault your system by using this call.
pub fn setVbits(zzvbits: []u8, zza: []u8) u2 {
std.debug.assert(zzvbits.len >= zza.len / 8);
return @intCast(u2, doMemCheckClientRequestExpr(0,
MemCheckClientRequest.SetVbits,
@ptrToInt(zza.ptr),
@ptrToInt(zzvbits),
zza.len,
0, 0));
return @intCast(u2, doMemCheckClientRequestExpr(0, MemCheckClientRequest.SetVbits, @ptrToInt(zza.ptr), @ptrToInt(zzvbits), zza.len, 0, 0));
}
/// Disable and re-enable reporting of addressing errors in the
/// specified address range.
pub fn disableAddrErrorReportingInRange(qzz: []u8) usize {
return doMemCheckClientRequestExpr(0, // default return
MemCheckClientRequest.DisableAddrErrorReportingInRange,
@ptrToInt(qzz.ptr), qzz.len,
0, 0, 0);
MemCheckClientRequest.DisableAddrErrorReportingInRange, @ptrToInt(qzz.ptr), qzz.len, 0, 0, 0);
}
pub fn enableAddrErrorReportingInRange(qzz: []u8) usize {
return doMemCheckClientRequestExpr(0, // default return
MemCheckClientRequest.EnableAddrErrorReportingInRange,
@ptrToInt(qzz.ptr), qzz.len,
0, 0, 0);
MemCheckClientRequest.EnableAddrErrorReportingInRange, @ptrToInt(qzz.ptr), qzz.len, 0, 0, 0);
}
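The memcheck wrappers follow the same shape: a slice goes in as a pointer/length pair and the return value is Valgrind's status code. A hypothetical sketch of marking a recycled buffer as undefined and re-checking it before reuse, assuming this file is reachable as std.valgrind.memcheck (recycleBuffer and fill are illustrative names, not part of the stdlib):

    const std = @import("std");
    const memcheck = std.valgrind.memcheck;

    pub fn recycleBuffer(buf: []u8) void {
        // Reads of buf now count as use of undefined data until it is rewritten.
        _ = memcheck.makeMemUndefined(buf);

        fill(buf);

        // 0 means every byte is addressable and defined; otherwise Valgrind
        // reports an error and returns the address of the first offending byte.
        std.debug.assert(memcheck.checkMemIsDefined(buf) == 0);
    }

    fn fill(buf: []u8) void {
        for (buf) |*b| b.* = 0xAA;
    }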