Mirror of https://github.com/ziglang/zig.git (synced 2024-11-14 16:13:24 +00:00)

std.builtin: make atomic order fields lowercase

Commit: 6067d39522
Parent: c260b4c753
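The diff below mechanically renames every `std.builtin.AtomicOrder` field from CamelCase to lowercase (`.SeqCst` becomes `.seq_cst`, `.AcqRel` becomes `.acq_rel`, and so on) at each call site; `AtomicRmwOp` fields such as `.Add` and `.Xchg` are untouched here. As a minimal sketch of what the rename means for downstream code (the `counter` global and the two helper functions are hypothetical, not part of the commit):

const std = @import("std");

var counter: usize = 0; // hypothetical shared counter used only for illustration

fn increment() void {
    // Before this commit the order argument was written `.SeqCst`;
    // after it, the same AtomicOrder field is spelled `.seq_cst`.
    // The AtomicRmwOp field (`.Add`) keeps its old spelling.
    _ = @atomicRmw(usize, &counter, .Add, 1, .seq_cst);
}

fn read() usize {
    return @atomicLoad(usize, &counter, .seq_cst);
}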
@@ -892,10 +892,10 @@ fn workerMakeOneStep(
 // then we return without doing the step, relying on another worker to
 // queue this step up again when dependencies are met.
 for (s.dependencies.items) |dep| {
-switch (@atomicLoad(Step.State, &dep.state, .SeqCst)) {
+switch (@atomicLoad(Step.State, &dep.state, .seq_cst)) {
 .success, .skipped => continue,
 .failure, .dependency_failure, .skipped_oom => {
-@atomicStore(Step.State, &s.state, .dependency_failure, .SeqCst);
+@atomicStore(Step.State, &s.state, .dependency_failure, .seq_cst);
 return;
 },
 .precheck_done, .running => {
@@ -929,7 +929,7 @@ fn workerMakeOneStep(
 s.state = .running;
 } else {
 // Avoid running steps twice.
-if (@cmpxchgStrong(Step.State, &s.state, .precheck_done, .running, .SeqCst, .SeqCst) != null) {
+if (@cmpxchgStrong(Step.State, &s.state, .precheck_done, .running, .seq_cst, .seq_cst) != null) {
 // Another worker got the job.
 return;
 }
@@ -956,13 +956,13 @@ fn workerMakeOneStep(

 handle_result: {
 if (make_result) |_| {
-@atomicStore(Step.State, &s.state, .success, .SeqCst);
+@atomicStore(Step.State, &s.state, .success, .seq_cst);
 } else |err| switch (err) {
 error.MakeFailed => {
-@atomicStore(Step.State, &s.state, .failure, .SeqCst);
+@atomicStore(Step.State, &s.state, .failure, .seq_cst);
 break :handle_result;
 },
-error.MakeSkipped => @atomicStore(Step.State, &s.state, .skipped, .SeqCst),
+error.MakeSkipped => @atomicStore(Step.State, &s.state, .skipped, .seq_cst),
 }

 // Successful completion of a step, so we queue up its dependants as well.
@@ -74,7 +74,7 @@ const SpinlockTable = struct {
 : "memory"
 );
 } else flag: {
-break :flag @atomicRmw(@TypeOf(self.v), &self.v, .Xchg, .Locked, .Acquire);
+break :flag @atomicRmw(@TypeOf(self.v), &self.v, .Xchg, .Locked, .acquire);
 };

 switch (flag) {
@@ -91,7 +91,7 @@ const SpinlockTable = struct {
 : "memory"
 );
 } else {
-@atomicStore(@TypeOf(self.v), &self.v, .Unlocked, .Release);
+@atomicStore(@TypeOf(self.v), &self.v, .Unlocked, .release);
 }
 }
 };
@@ -172,7 +172,7 @@ inline fn atomic_load_N(comptime T: type, src: *T, model: i32) T {
 defer sl.release();
 return src.*;
 } else {
-return @atomicLoad(T, src, .SeqCst);
+return @atomicLoad(T, src, .seq_cst);
 }
 }

@@ -203,7 +203,7 @@ inline fn atomic_store_N(comptime T: type, dst: *T, value: T, model: i32) void {
 defer sl.release();
 dst.* = value;
 } else {
-@atomicStore(T, dst, value, .SeqCst);
+@atomicStore(T, dst, value, .seq_cst);
 }
 }

@@ -239,12 +239,12 @@ fn wideUpdate(comptime T: type, ptr: *T, val: T, update: anytype) T {

 const mask = @as(WideAtomic, std.math.maxInt(T)) << inner_shift;

-var wide_old = @atomicLoad(WideAtomic, wide_ptr, .SeqCst);
+var wide_old = @atomicLoad(WideAtomic, wide_ptr, .seq_cst);
 while (true) {
 const old = @as(T, @truncate((wide_old & mask) >> inner_shift));
 const new = update(val, old);
 const wide_new = wide_old & ~mask | (@as(WideAtomic, new) << inner_shift);
-if (@cmpxchgWeak(WideAtomic, wide_ptr, wide_old, wide_new, .SeqCst, .SeqCst)) |new_wide_old| {
+if (@cmpxchgWeak(WideAtomic, wide_ptr, wide_old, wide_new, .seq_cst, .seq_cst)) |new_wide_old| {
 wide_old = new_wide_old;
 } else {
 return old;
@@ -270,7 +270,7 @@ inline fn atomic_exchange_N(comptime T: type, ptr: *T, val: T, model: i32) T {
 };
 return wideUpdate(T, ptr, val, Updater.update);
 } else {
-return @atomicRmw(T, ptr, .Xchg, val, .SeqCst);
+return @atomicRmw(T, ptr, .Xchg, val, .seq_cst);
 }
 }

@@ -315,7 +315,7 @@ inline fn atomic_compare_exchange_N(
 expected.* = value;
 return 0;
 } else {
-if (@cmpxchgStrong(T, ptr, expected.*, desired, .SeqCst, .SeqCst)) |old_value| {
+if (@cmpxchgStrong(T, ptr, expected.*, desired, .seq_cst, .seq_cst)) |old_value| {
 expected.* = old_value;
 return 0;
 }
@@ -373,7 +373,7 @@ inline fn fetch_op_N(comptime T: type, comptime op: std.builtin.AtomicRmwOp, ptr
 return wideUpdate(T, ptr, val, Updater.update);
 }

-return @atomicRmw(T, ptr, op, val, .SeqCst);
+return @atomicRmw(T, ptr, op, val, .seq_cst);
 }

 fn __atomic_fetch_add_1(ptr: *u8, val: u8, model: i32) callconv(.C) u8 {
@@ -95,9 +95,9 @@ pub const Node = struct {
 /// This is the same as calling `start` and then `end` on the returned `Node`. Thread-safe.
 pub fn completeOne(self: *Node) void {
 if (self.parent) |parent| {
-@atomicStore(?*Node, &parent.recently_updated_child, self, .Release);
+@atomicStore(?*Node, &parent.recently_updated_child, self, .release);
 }
-_ = @atomicRmw(usize, &self.unprotected_completed_items, .Add, 1, .Monotonic);
+_ = @atomicRmw(usize, &self.unprotected_completed_items, .Add, 1, .monotonic);
 self.context.maybeRefresh();
 }

@@ -108,7 +108,7 @@ pub const Node = struct {
 {
 self.context.update_mutex.lock();
 defer self.context.update_mutex.unlock();
-_ = @cmpxchgStrong(?*Node, &parent.recently_updated_child, self, null, .Monotonic, .Monotonic);
+_ = @cmpxchgStrong(?*Node, &parent.recently_updated_child, self, null, .monotonic, .monotonic);
 }
 parent.completeOne();
 } else {
@@ -122,7 +122,7 @@ pub const Node = struct {
 /// Tell the parent node that this node is actively being worked on. Thread-safe.
 pub fn activate(self: *Node) void {
 if (self.parent) |parent| {
-@atomicStore(?*Node, &parent.recently_updated_child, self, .Release);
+@atomicStore(?*Node, &parent.recently_updated_child, self, .release);
 self.context.maybeRefresh();
 }
 }
@@ -134,9 +134,9 @@ pub const Node = struct {
 defer progress.update_mutex.unlock();
 self.name = name;
 if (self.parent) |parent| {
-@atomicStore(?*Node, &parent.recently_updated_child, self, .Release);
+@atomicStore(?*Node, &parent.recently_updated_child, self, .release);
 if (parent.parent) |grand_parent| {
-@atomicStore(?*Node, &grand_parent.recently_updated_child, parent, .Release);
+@atomicStore(?*Node, &grand_parent.recently_updated_child, parent, .release);
 }
 if (progress.timer) |*timer| progress.maybeRefreshWithHeldLock(timer);
 }
@@ -149,9 +149,9 @@ pub const Node = struct {
 defer progress.update_mutex.unlock();
 self.unit = unit;
 if (self.parent) |parent| {
-@atomicStore(?*Node, &parent.recently_updated_child, self, .Release);
+@atomicStore(?*Node, &parent.recently_updated_child, self, .release);
 if (parent.parent) |grand_parent| {
-@atomicStore(?*Node, &grand_parent.recently_updated_child, parent, .Release);
+@atomicStore(?*Node, &grand_parent.recently_updated_child, parent, .release);
 }
 if (progress.timer) |*timer| progress.maybeRefreshWithHeldLock(timer);
 }
@@ -159,12 +159,12 @@ pub const Node = struct {

 /// Thread-safe. 0 means unknown.
 pub fn setEstimatedTotalItems(self: *Node, count: usize) void {
-@atomicStore(usize, &self.unprotected_estimated_total_items, count, .Monotonic);
+@atomicStore(usize, &self.unprotected_estimated_total_items, count, .monotonic);
 }

 /// Thread-safe.
 pub fn setCompletedItems(self: *Node, completed_items: usize) void {
-@atomicStore(usize, &self.unprotected_completed_items, completed_items, .Monotonic);
+@atomicStore(usize, &self.unprotected_completed_items, completed_items, .monotonic);
 }
 };

@@ -313,8 +313,8 @@ fn refreshWithHeldLock(self: *Progress) void {
 self.bufWrite(&end, "... ", .{});
 }
 need_ellipse = false;
-const eti = @atomicLoad(usize, &node.unprotected_estimated_total_items, .Monotonic);
-const completed_items = @atomicLoad(usize, &node.unprotected_completed_items, .Monotonic);
+const eti = @atomicLoad(usize, &node.unprotected_estimated_total_items, .monotonic);
+const completed_items = @atomicLoad(usize, &node.unprotected_completed_items, .monotonic);
 const current_item = completed_items + 1;
 if (node.name.len != 0 or eti > 0) {
 if (node.name.len != 0) {
@@ -331,7 +331,7 @@ fn refreshWithHeldLock(self: *Progress) void {
 need_ellipse = false;
 }
 }
-maybe_node = @atomicLoad(?*Node, &node.recently_updated_child, .Acquire);
+maybe_node = @atomicLoad(?*Node, &node.recently_updated_child, .acquire);
 }
 if (need_ellipse) {
 self.bufWrite(&end, "... ", .{});
@@ -510,7 +510,7 @@ const WindowsThreadImpl = struct {

 fn entryFn(raw_ptr: windows.PVOID) callconv(.C) windows.DWORD {
 const self: *@This() = @ptrCast(@alignCast(raw_ptr));
-defer switch (self.thread.completion.swap(.completed, .SeqCst)) {
+defer switch (self.thread.completion.swap(.completed, .seq_cst)) {
 .running => {},
 .completed => unreachable,
 .detached => self.thread.free(),
@@ -563,7 +563,7 @@ const WindowsThreadImpl = struct {

 fn detach(self: Impl) void {
 windows.CloseHandle(self.thread.thread_handle);
-switch (self.thread.completion.swap(.detached, .SeqCst)) {
+switch (self.thread.completion.swap(.detached, .seq_cst)) {
 .running => {},
 .completed => self.thread.free(),
 .detached => unreachable,
@@ -573,7 +573,7 @@ const WindowsThreadImpl = struct {
 fn join(self: Impl) void {
 windows.WaitForSingleObjectEx(self.thread.thread_handle, windows.INFINITE, false) catch unreachable;
 windows.CloseHandle(self.thread.thread_handle);
-assert(self.thread.completion.load(.SeqCst) == .completed);
+assert(self.thread.completion.load(.seq_cst) == .completed);
 self.thread.free();
 }
 };
@@ -780,11 +780,11 @@ const WasiThreadImpl = struct {
 }

 fn getHandle(self: Impl) ThreadHandle {
-return self.thread.tid.load(.SeqCst);
+return self.thread.tid.load(.seq_cst);
 }

 fn detach(self: Impl) void {
-switch (self.thread.state.swap(.detached, .SeqCst)) {
+switch (self.thread.state.swap(.detached, .seq_cst)) {
 .running => {},
 .completed => self.join(),
 .detached => unreachable,
@@ -801,7 +801,7 @@ const WasiThreadImpl = struct {

 var spin: u8 = 10;
 while (true) {
-const tid = self.thread.tid.load(.SeqCst);
+const tid = self.thread.tid.load(.seq_cst);
 if (tid == 0) {
 break;
 }
@@ -901,7 +901,7 @@ const WasiThreadImpl = struct {
 if (tid < 0) {
 return error.SystemResources;
 }
-instance.thread.tid.store(tid, .SeqCst);
+instance.thread.tid.store(tid, .seq_cst);

 return .{ .thread = &instance.thread };
 }
@@ -914,12 +914,12 @@ const WasiThreadImpl = struct {
 }
 __set_stack_pointer(arg.thread.memory.ptr + arg.stack_offset);
 __wasm_init_tls(arg.thread.memory.ptr + arg.tls_offset);
-@atomicStore(u32, &WasiThreadImpl.tls_thread_id, @intCast(tid), .SeqCst);
+@atomicStore(u32, &WasiThreadImpl.tls_thread_id, @intCast(tid), .seq_cst);

 // Finished bootstrapping, call user's procedure.
 arg.call_back(arg.raw_ptr);

-switch (arg.thread.state.swap(.completed, .SeqCst)) {
+switch (arg.thread.state.swap(.completed, .seq_cst)) {
 .running => {
 // reset the Thread ID
 asm volatile (
@@ -1191,7 +1191,7 @@ const LinuxThreadImpl = struct {

 fn entryFn(raw_arg: usize) callconv(.C) u8 {
 const self = @as(*@This(), @ptrFromInt(raw_arg));
-defer switch (self.thread.completion.swap(.completed, .SeqCst)) {
+defer switch (self.thread.completion.swap(.completed, .seq_cst)) {
 .running => {},
 .completed => unreachable,
 .detached => self.thread.freeAndExit(),
@@ -1311,7 +1311,7 @@ const LinuxThreadImpl = struct {
 }

 fn detach(self: Impl) void {
-switch (self.thread.completion.swap(.detached, .SeqCst)) {
+switch (self.thread.completion.swap(.detached, .seq_cst)) {
 .running => {},
 .completed => self.join(),
 .detached => unreachable,
@@ -1323,7 +1323,7 @@ const LinuxThreadImpl = struct {

 var spin: u8 = 10;
 while (true) {
-const tid = self.thread.child_tid.load(.SeqCst);
+const tid = self.thread.child_tid.load(.seq_cst);
 if (tid == 0) {
 break;
 }
@@ -163,7 +163,7 @@ const WindowsImpl = struct {

 if (comptime builtin.mode == .Debug) {
 // The internal state of the DebugMutex needs to be handled here as well.
-mutex.impl.locking_thread.store(0, .Unordered);
+mutex.impl.locking_thread.store(0, .unordered);
 }
 const rc = os.windows.kernel32.SleepConditionVariableSRW(
 &self.condition,
@@ -173,7 +173,7 @@ const WindowsImpl = struct {
 );
 if (comptime builtin.mode == .Debug) {
 // The internal state of the DebugMutex needs to be handled here as well.
-mutex.impl.locking_thread.store(std.Thread.getCurrentId(), .Unordered);
+mutex.impl.locking_thread.store(std.Thread.getCurrentId(), .unordered);
 }

 // Return error.Timeout if we know the timeout elapsed correctly.
@@ -212,8 +212,8 @@ const FutexImpl = struct {
 // - T1: s & signals == 0 -> FUTEX_WAIT(&epoch, e) (missed the state update + the epoch change)
 //
 // Acquire barrier to ensure the epoch load happens before the state load.
-var epoch = self.epoch.load(.Acquire);
-var state = self.state.fetchAdd(one_waiter, .Monotonic);
+var epoch = self.epoch.load(.acquire);
+var state = self.state.fetchAdd(one_waiter, .monotonic);
 assert(state & waiter_mask != waiter_mask);
 state += one_waiter;

@@ -231,30 +231,30 @@ const FutexImpl = struct {
 // Acquire barrier ensures code before the wake() which added the signal happens before we decrement it and return.
 while (state & signal_mask != 0) {
 const new_state = state - one_waiter - one_signal;
-state = self.state.cmpxchgWeak(state, new_state, .Acquire, .Monotonic) orelse return;
+state = self.state.cmpxchgWeak(state, new_state, .acquire, .monotonic) orelse return;
 }

 // Remove the waiter we added and officially return timed out.
 const new_state = state - one_waiter;
-state = self.state.cmpxchgWeak(state, new_state, .Monotonic, .Monotonic) orelse return err;
+state = self.state.cmpxchgWeak(state, new_state, .monotonic, .monotonic) orelse return err;
 }
 },
 };

-epoch = self.epoch.load(.Acquire);
-state = self.state.load(.Monotonic);
+epoch = self.epoch.load(.acquire);
+state = self.state.load(.monotonic);

 // Try to wake up by consuming a signal and decremented the waiter we added previously.
 // Acquire barrier ensures code before the wake() which added the signal happens before we decrement it and return.
 while (state & signal_mask != 0) {
 const new_state = state - one_waiter - one_signal;
-state = self.state.cmpxchgWeak(state, new_state, .Acquire, .Monotonic) orelse return;
+state = self.state.cmpxchgWeak(state, new_state, .acquire, .monotonic) orelse return;
 }
 }
 }

 fn wake(self: *Impl, comptime notify: Notify) void {
-var state = self.state.load(.Monotonic);
+var state = self.state.load(.monotonic);
 while (true) {
 const waiters = (state & waiter_mask) / one_waiter;
 const signals = (state & signal_mask) / one_signal;
@@ -275,7 +275,7 @@ const FutexImpl = struct {
 // Reserve the amount of waiters to wake by incrementing the signals count.
 // Release barrier ensures code before the wake() happens before the signal it posted and consumed by the wait() threads.
 const new_state = state + (one_signal * to_wake);
-state = self.state.cmpxchgWeak(state, new_state, .Release, .Monotonic) orelse {
+state = self.state.cmpxchgWeak(state, new_state, .release, .monotonic) orelse {
 // Wake up the waiting threads we reserved above by changing the epoch value.
 // NOTE: a waiting thread could miss a wake up if *exactly* ((1<<32)-1) wake()s happen between it observing the epoch and sleeping on it.
 // This is very unlikely due to how many precise amount of Futex.wake() calls that would be between the waiting thread's potential preemption.
@@ -288,7 +288,7 @@ const FutexImpl = struct {
 // - T1: s = LOAD(&state)
 // - T2: UPDATE(&state, signal) + FUTEX_WAKE(&epoch)
 // - T1: s & signals == 0 -> FUTEX_WAIT(&epoch, e) (missed both epoch change and state change)
-_ = self.epoch.fetchAdd(1, .Release);
+_ = self.epoch.fetchAdd(1, .release);
 Futex.wake(&self.epoch, to_wake);
 return;
 };
@@ -40,7 +40,7 @@ pub fn timedWait(ptr: *const atomic.Value(u32), expect: u32, timeout_ns: u64) er

 // Avoid calling into the OS for no-op timeouts.
 if (timeout_ns == 0) {
-if (ptr.load(.SeqCst) != expect) return;
+if (ptr.load(.seq_cst) != expect) return;
 return error.Timeout;
 }

@@ -783,16 +783,16 @@ const PosixImpl = struct {
 // - T1: bumps pending waiters (was reordered after the ptr == expect check)
 // - T1: goes to sleep and misses both the ptr change and T2's wake up
 //
-// SeqCst as Acquire barrier to ensure the announcement happens before the ptr check below.
-// SeqCst as shared modification order to form a happens-before edge with the fence(.SeqCst)+load() in wake().
-var pending = bucket.pending.fetchAdd(1, .SeqCst);
+// seq_cst as Acquire barrier to ensure the announcement happens before the ptr check below.
+// seq_cst as shared modification order to form a happens-before edge with the fence(.seq_cst)+load() in wake().
+var pending = bucket.pending.fetchAdd(1, .seq_cst);
 assert(pending < std.math.maxInt(usize));

 // If the wait gets cancelled, remove the pending count we previously added.
 // This is done outside the mutex lock to keep the critical section short in case of contention.
 var cancelled = false;
 defer if (cancelled) {
-pending = bucket.pending.fetchSub(1, .Monotonic);
+pending = bucket.pending.fetchSub(1, .monotonic);
 assert(pending > 0);
 };

@@ -850,11 +850,11 @@ const PosixImpl = struct {
 // but the RMW operation unconditionally marks the cache-line as modified for others causing unnecessary fetching/contention.
 //
 // Instead we opt to do a full-fence + load instead which avoids taking ownership of the cache-line.
-// fence(SeqCst) effectively converts the ptr update to SeqCst and the pending load to SeqCst: creating a Store-Load barrier.
+// fence(seq_cst) effectively converts the ptr update to seq_cst and the pending load to seq_cst: creating a Store-Load barrier.
 //
-// The pending count increment in wait() must also now use SeqCst for the update + this pending load
-// to be in the same modification order as our load isn't using Release/Acquire to guarantee it.
-bucket.pending.fence(.SeqCst);
+// The pending count increment in wait() must also now use seq_cst for the update + this pending load
+// to be in the same modification order as our load isn't using release/acquire to guarantee it.
+bucket.pending.fence(.seq_cst);
 if (bucket.pending.load(.Monotonic) == 0) {
 return;
 }
@@ -912,7 +912,7 @@ test "signaling" {
 current: u32 = 0,

 fn hit(self: *@This()) void {
-_ = self.value.fetchAdd(1, .Release);
+_ = self.value.fetchAdd(1, .release);
 Futex.wake(&self.value, 1);
 }

@@ -921,7 +921,7 @@ test "signaling" {
 // Wait for the value to change from hit()
 var new_value: u32 = undefined;
 while (true) {
-new_value = self.value.load(.Acquire);
+new_value = self.value.load(.acquire);
 if (new_value != self.current) break;
 Futex.wait(&self.value, self.current);
 }
@@ -968,7 +968,7 @@ test "broadcasting" {
 fn wait(self: *@This()) !void {
 // Decrement the counter.
 // Release ensures stuff before this barrier.wait() happens before the last one.
-const count = self.count.fetchSub(1, .Release);
+const count = self.count.fetchSub(1, .release);
 try testing.expect(count <= num_threads);
 try testing.expect(count > 0);

@@ -976,15 +976,15 @@ test "broadcasting" {
 // Acquire for the last counter ensures stuff before previous barrier.wait()s happened before it.
 // Release on futex update ensures stuff before all barrier.wait()'s happens before they all return.
 if (count - 1 == 0) {
-_ = self.count.load(.Acquire); // TODO: could be fence(Acquire) if not for TSAN
-self.futex.store(1, .Release);
+_ = self.count.load(.acquire); // TODO: could be fence(acquire) if not for TSAN
+self.futex.store(1, .release);
 Futex.wake(&self.futex, num_threads - 1);
 return;
 }

 // Other threads wait until last counter wakes them up.
 // Acquire on futex synchronizes with last barrier count to ensure stuff before all barrier.wait()'s happen before us.
-while (self.futex.load(.Acquire) == 0) {
+while (self.futex.load(.acquire) == 0) {
 Futex.wait(&self.futex, 0);
 }
 }
@@ -72,23 +72,23 @@ const DebugImpl = struct {
 inline fn tryLock(self: *@This()) bool {
 const locking = self.impl.tryLock();
 if (locking) {
-self.locking_thread.store(Thread.getCurrentId(), .Unordered);
+self.locking_thread.store(Thread.getCurrentId(), .unordered);
 }
 return locking;
 }

 inline fn lock(self: *@This()) void {
 const current_id = Thread.getCurrentId();
-if (self.locking_thread.load(.Unordered) == current_id and current_id != 0) {
+if (self.locking_thread.load(.unordered) == current_id and current_id != 0) {
 @panic("Deadlock detected");
 }
 self.impl.lock();
-self.locking_thread.store(current_id, .Unordered);
+self.locking_thread.store(current_id, .unordered);
 }

 inline fn unlock(self: *@This()) void {
-assert(self.locking_thread.load(.Unordered) == Thread.getCurrentId());
-self.locking_thread.store(0, .Unordered);
+assert(self.locking_thread.load(.unordered) == Thread.getCurrentId());
+self.locking_thread.store(0, .unordered);
 self.impl.unlock();
 }
 };
@@ -167,12 +167,12 @@ const FutexImpl = struct {
 // - `lock bts` is smaller instruction-wise which makes it better for inlining
 if (comptime builtin.target.cpu.arch.isX86()) {
 const locked_bit = @ctz(locked);
-return self.state.bitSet(locked_bit, .Acquire) == 0;
+return self.state.bitSet(locked_bit, .acquire) == 0;
 }

 // Acquire barrier ensures grabbing the lock happens before the critical section
 // and that the previous lock holder's critical section happens before we grab the lock.
-return self.state.cmpxchgWeak(unlocked, locked, .Acquire, .Monotonic) == null;
+return self.state.cmpxchgWeak(unlocked, locked, .acquire, .monotonic) == null;
 }

 fn lockSlow(self: *@This()) void {
@@ -180,7 +180,7 @@ const FutexImpl = struct {

 // Avoid doing an atomic swap below if we already know the state is contended.
 // An atomic swap unconditionally stores which marks the cache-line as modified unnecessarily.
-if (self.state.load(.Monotonic) == contended) {
+if (self.state.load(.monotonic) == contended) {
 Futex.wait(&self.state, contended);
 }

@@ -193,7 +193,7 @@ const FutexImpl = struct {
 //
 // Acquire barrier ensures grabbing the lock happens before the critical section
 // and that the previous lock holder's critical section happens before we grab the lock.
-while (self.state.swap(contended, .Acquire) != unlocked) {
+while (self.state.swap(contended, .acquire) != unlocked) {
 Futex.wait(&self.state, contended);
 }
 }
@@ -206,7 +206,7 @@ const FutexImpl = struct {
 //
 // Release barrier ensures the critical section happens before we let go of the lock
 // and that our critical section happens before the next lock holder grabs the lock.
-const state = self.state.swap(unlocked, .Release);
+const state = self.state.swap(unlocked, .release);
 assert(state != unlocked);

 if (state == contended) {
@@ -96,7 +96,7 @@ const FutexImpl = struct {

 fn isSet(self: *const Impl) bool {
 // Acquire barrier ensures memory accesses before set() happen before we return true.
-return self.state.load(.Acquire) == is_set;
+return self.state.load(.acquire) == is_set;
 }

 fn wait(self: *Impl, timeout: ?u64) error{Timeout}!void {
@@ -112,9 +112,9 @@ const FutexImpl = struct {
 // Try to set the state from `unset` to `waiting` to indicate
 // to the set() thread that others are blocked on the ResetEvent.
 // We avoid using any strict barriers until the end when we know the ResetEvent is set.
-var state = self.state.load(.Monotonic);
+var state = self.state.load(.monotonic);
 if (state == unset) {
-state = self.state.cmpxchgStrong(state, waiting, .Monotonic, .Monotonic) orelse waiting;
+state = self.state.cmpxchgStrong(state, waiting, .monotonic, .monotonic) orelse waiting;
 }

 // Wait until the ResetEvent is set since the state is waiting.
@@ -124,7 +124,7 @@ const FutexImpl = struct {
 const wait_result = futex_deadline.wait(&self.state, waiting);

 // Check if the ResetEvent was set before possibly reporting error.Timeout below.
-state = self.state.load(.Monotonic);
+state = self.state.load(.monotonic);
 if (state != waiting) {
 break;
 }
@@ -135,25 +135,25 @@ const FutexImpl = struct {

 // Acquire barrier ensures memory accesses before set() happen before we return.
 assert(state == is_set);
-self.state.fence(.Acquire);
+self.state.fence(.acquire);
 }

 fn set(self: *Impl) void {
 // Quick check if the ResetEvent is already set before doing the atomic swap below.
 // set() could be getting called quite often and multiple threads calling swap() increases contention unnecessarily.
-if (self.state.load(.Monotonic) == is_set) {
+if (self.state.load(.monotonic) == is_set) {
 return;
 }

 // Mark the ResetEvent as set and unblock all waiters waiting on it if any.
 // Release barrier ensures memory accesses before set() happen before the ResetEvent is observed to be "set".
-if (self.state.swap(is_set, .Release) == waiting) {
+if (self.state.swap(is_set, .release) == waiting) {
 Futex.wake(&self.state, std.math.maxInt(u32));
 }
 }

 fn reset(self: *Impl) void {
-self.state.store(unset, .Monotonic);
+self.state.store(unset, .monotonic);
 }
 };

@@ -254,7 +254,7 @@ test "broadcast" {
 counter: std.atomic.Value(usize) = std.atomic.Value(usize).init(num_threads),

 fn wait(self: *@This()) void {
-if (self.counter.fetchSub(1, .AcqRel) == 1) {
+if (self.counter.fetchSub(1, .acq_rel) == 1) {
 self.event.set();
 }
 }
@@ -179,9 +179,9 @@ pub const DefaultRwLock = struct {

 pub fn tryLock(rwl: *DefaultRwLock) bool {
 if (rwl.mutex.tryLock()) {
-const state = @atomicLoad(usize, &rwl.state, .SeqCst);
+const state = @atomicLoad(usize, &rwl.state, .seq_cst);
 if (state & READER_MASK == 0) {
-_ = @atomicRmw(usize, &rwl.state, .Or, IS_WRITING, .SeqCst);
+_ = @atomicRmw(usize, &rwl.state, .Or, IS_WRITING, .seq_cst);
 return true;
 }

@@ -192,34 +192,34 @@ pub const DefaultRwLock = struct {
 }

 pub fn lock(rwl: *DefaultRwLock) void {
-_ = @atomicRmw(usize, &rwl.state, .Add, WRITER, .SeqCst);
+_ = @atomicRmw(usize, &rwl.state, .Add, WRITER, .seq_cst);
 rwl.mutex.lock();

-const state = @atomicRmw(usize, &rwl.state, .Add, IS_WRITING -% WRITER, .SeqCst);
+const state = @atomicRmw(usize, &rwl.state, .Add, IS_WRITING -% WRITER, .seq_cst);
 if (state & READER_MASK != 0)
 rwl.semaphore.wait();
 }

 pub fn unlock(rwl: *DefaultRwLock) void {
-_ = @atomicRmw(usize, &rwl.state, .And, ~IS_WRITING, .SeqCst);
+_ = @atomicRmw(usize, &rwl.state, .And, ~IS_WRITING, .seq_cst);
 rwl.mutex.unlock();
 }

 pub fn tryLockShared(rwl: *DefaultRwLock) bool {
-const state = @atomicLoad(usize, &rwl.state, .SeqCst);
+const state = @atomicLoad(usize, &rwl.state, .seq_cst);
 if (state & (IS_WRITING | WRITER_MASK) == 0) {
 _ = @cmpxchgStrong(
 usize,
 &rwl.state,
 state,
 state + READER,
-.SeqCst,
-.SeqCst,
+.seq_cst,
+.seq_cst,
 ) orelse return true;
 }

 if (rwl.mutex.tryLock()) {
-_ = @atomicRmw(usize, &rwl.state, .Add, READER, .SeqCst);
+_ = @atomicRmw(usize, &rwl.state, .Add, READER, .seq_cst);
 rwl.mutex.unlock();
 return true;
 }
@@ -228,25 +228,25 @@ pub const DefaultRwLock = struct {
 }

 pub fn lockShared(rwl: *DefaultRwLock) void {
-var state = @atomicLoad(usize, &rwl.state, .SeqCst);
+var state = @atomicLoad(usize, &rwl.state, .seq_cst);
 while (state & (IS_WRITING | WRITER_MASK) == 0) {
 state = @cmpxchgWeak(
 usize,
 &rwl.state,
 state,
 state + READER,
-.SeqCst,
-.SeqCst,
+.seq_cst,
+.seq_cst,
 ) orelse return;
 }

 rwl.mutex.lock();
-_ = @atomicRmw(usize, &rwl.state, .Add, READER, .SeqCst);
+_ = @atomicRmw(usize, &rwl.state, .Add, READER, .seq_cst);
 rwl.mutex.unlock();
 }

 pub fn unlockShared(rwl: *DefaultRwLock) void {
-const state = @atomicRmw(usize, &rwl.state, .Sub, READER, .SeqCst);
+const state = @atomicRmw(usize, &rwl.state, .Sub, READER, .seq_cst);

 if ((state & READER_MASK == READER) and (state & IS_WRITING != 0))
 rwl.semaphore.post();
@@ -318,12 +318,12 @@ test "concurrent access" {
 self.rwl.lockShared();
 defer self.rwl.unlockShared();

-if (self.writes >= num_writes or self.reads.load(.Unordered) >= num_reads)
+if (self.writes >= num_writes or self.reads.load(.unordered) >= num_reads)
 break;

 try self.check();

-_ = self.reads.fetchAdd(1, .Monotonic);
+_ = self.reads.fetchAdd(1, .monotonic);
 }
 }
@@ -10,22 +10,22 @@ state: std.atomic.Value(usize) = std.atomic.Value(usize).init(0),
 event: std.Thread.ResetEvent = .{},

 pub fn start(self: *WaitGroup) void {
-const state = self.state.fetchAdd(one_pending, .Monotonic);
+const state = self.state.fetchAdd(one_pending, .monotonic);
 assert((state / one_pending) < (std.math.maxInt(usize) / one_pending));
 }

 pub fn finish(self: *WaitGroup) void {
-const state = self.state.fetchSub(one_pending, .Release);
+const state = self.state.fetchSub(one_pending, .release);
 assert((state / one_pending) > 0);

 if (state == (one_pending | is_waiting)) {
-self.state.fence(.Acquire);
+self.state.fence(.acquire);
 self.event.set();
 }
 }

 pub fn wait(self: *WaitGroup) void {
-const state = self.state.fetchAdd(is_waiting, .Acquire);
+const state = self.state.fetchAdd(is_waiting, .acquire);
 assert(state & is_waiting == 0);

 if ((state / one_pending) > 0) {
@@ -34,12 +34,12 @@ pub fn wait(self: *WaitGroup) void {
 }

 pub fn reset(self: *WaitGroup) void {
-self.state.store(0, .Monotonic);
+self.state.store(0, .monotonic);
 self.event.reset();
 }

 pub fn isDone(wg: *WaitGroup) bool {
-const state = wg.state.load(.Acquire);
+const state = wg.state.load(.acquire);
 assert(state & is_waiting == 0);

 return (state / one_pending) == 0;
@@ -23,10 +23,10 @@ pub fn Value(comptime T: type) type {

 const addr: *anyopaque = self;
 return switch (order) {
-.Unordered, .Monotonic => @compileError(@tagName(order) ++ " only applies to atomic loads and stores"),
-.Acquire => tsan.__tsan_acquire(addr),
-.Release => tsan.__tsan_release(addr),
-.AcqRel, .SeqCst => {
+.unordered, .monotonic => @compileError(@tagName(order) ++ " only applies to atomic loads and stores"),
+.acquire => tsan.__tsan_acquire(addr),
+.release => tsan.__tsan_release(addr),
+.acq_rel, .seq_cst => {
 tsan.__tsan_acquire(addr);
 tsan.__tsan_release(addr);
 },
@@ -149,20 +149,20 @@ test Value {

 fn ref(rc: *RefCount) void {
 // No ordering necessary; just updating a counter.
-_ = rc.count.fetchAdd(1, .Monotonic);
+_ = rc.count.fetchAdd(1, .monotonic);
 }

 fn unref(rc: *RefCount) void {
 // Release ensures code before unref() happens-before the
 // count is decremented as dropFn could be called by then.
-if (rc.count.fetchSub(1, .Release) == 1) {
-// Acquire ensures count decrement and code before
+if (rc.count.fetchSub(1, .release) == 1) {
+// acquire ensures count decrement and code before
 // previous unrefs()s happens-before we call dropFn
 // below.
 // Another alternative is to use .AcqRel on the
 // fetchSub count decrement but it's extra barrier in
 // possibly hot path.
-rc.count.fence(.Acquire);
+rc.count.fence(.acquire);
 (rc.dropFn)(rc);
 }
 }
@@ -182,118 +182,118 @@ test Value {

 test "Value.swap" {
 var x = Value(usize).init(5);
-try testing.expectEqual(@as(usize, 5), x.swap(10, .SeqCst));
-try testing.expectEqual(@as(usize, 10), x.load(.SeqCst));
+try testing.expectEqual(@as(usize, 5), x.swap(10, .seq_cst));
+try testing.expectEqual(@as(usize, 10), x.load(.seq_cst));

 const E = enum(usize) { a, b, c };
 var y = Value(E).init(.c);
-try testing.expectEqual(E.c, y.swap(.a, .SeqCst));
-try testing.expectEqual(E.a, y.load(.SeqCst));
+try testing.expectEqual(E.c, y.swap(.a, .seq_cst));
+try testing.expectEqual(E.a, y.load(.seq_cst));

 var z = Value(f32).init(5.0);
-try testing.expectEqual(@as(f32, 5.0), z.swap(10.0, .SeqCst));
-try testing.expectEqual(@as(f32, 10.0), z.load(.SeqCst));
+try testing.expectEqual(@as(f32, 5.0), z.swap(10.0, .seq_cst));
+try testing.expectEqual(@as(f32, 10.0), z.load(.seq_cst));

 var a = Value(bool).init(false);
-try testing.expectEqual(false, a.swap(true, .SeqCst));
-try testing.expectEqual(true, a.load(.SeqCst));
+try testing.expectEqual(false, a.swap(true, .seq_cst));
+try testing.expectEqual(true, a.load(.seq_cst));

 var b = Value(?*u8).init(null);
-try testing.expectEqual(@as(?*u8, null), b.swap(@as(?*u8, @ptrFromInt(@alignOf(u8))), .SeqCst));
-try testing.expectEqual(@as(?*u8, @ptrFromInt(@alignOf(u8))), b.load(.SeqCst));
+try testing.expectEqual(@as(?*u8, null), b.swap(@as(?*u8, @ptrFromInt(@alignOf(u8))), .seq_cst));
+try testing.expectEqual(@as(?*u8, @ptrFromInt(@alignOf(u8))), b.load(.seq_cst));
 }

 test "Value.store" {
 var x = Value(usize).init(5);
-x.store(10, .SeqCst);
-try testing.expectEqual(@as(usize, 10), x.load(.SeqCst));
+x.store(10, .seq_cst);
+try testing.expectEqual(@as(usize, 10), x.load(.seq_cst));
 }

 test "Value.cmpxchgWeak" {
 var x = Value(usize).init(0);

-try testing.expectEqual(@as(?usize, 0), x.cmpxchgWeak(1, 0, .SeqCst, .SeqCst));
-try testing.expectEqual(@as(usize, 0), x.load(.SeqCst));
+try testing.expectEqual(@as(?usize, 0), x.cmpxchgWeak(1, 0, .seq_cst, .seq_cst));
+try testing.expectEqual(@as(usize, 0), x.load(.seq_cst));

-while (x.cmpxchgWeak(0, 1, .SeqCst, .SeqCst)) |_| {}
-try testing.expectEqual(@as(usize, 1), x.load(.SeqCst));
+while (x.cmpxchgWeak(0, 1, .seq_cst, .seq_cst)) |_| {}
+try testing.expectEqual(@as(usize, 1), x.load(.seq_cst));

-while (x.cmpxchgWeak(1, 0, .SeqCst, .SeqCst)) |_| {}
-try testing.expectEqual(@as(usize, 0), x.load(.SeqCst));
+while (x.cmpxchgWeak(1, 0, .seq_cst, .seq_cst)) |_| {}
+try testing.expectEqual(@as(usize, 0), x.load(.seq_cst));
 }

 test "Value.cmpxchgStrong" {
 var x = Value(usize).init(0);
-try testing.expectEqual(@as(?usize, 0), x.cmpxchgStrong(1, 0, .SeqCst, .SeqCst));
-try testing.expectEqual(@as(usize, 0), x.load(.SeqCst));
-try testing.expectEqual(@as(?usize, null), x.cmpxchgStrong(0, 1, .SeqCst, .SeqCst));
-try testing.expectEqual(@as(usize, 1), x.load(.SeqCst));
-try testing.expectEqual(@as(?usize, null), x.cmpxchgStrong(1, 0, .SeqCst, .SeqCst));
-try testing.expectEqual(@as(usize, 0), x.load(.SeqCst));
+try testing.expectEqual(@as(?usize, 0), x.cmpxchgStrong(1, 0, .seq_cst, .seq_cst));
+try testing.expectEqual(@as(usize, 0), x.load(.seq_cst));
+try testing.expectEqual(@as(?usize, null), x.cmpxchgStrong(0, 1, .seq_cst, .seq_cst));
+try testing.expectEqual(@as(usize, 1), x.load(.seq_cst));
+try testing.expectEqual(@as(?usize, null), x.cmpxchgStrong(1, 0, .seq_cst, .seq_cst));
+try testing.expectEqual(@as(usize, 0), x.load(.seq_cst));
 }

 test "Value.fetchAdd" {
 var x = Value(usize).init(5);
-try testing.expectEqual(@as(usize, 5), x.fetchAdd(5, .SeqCst));
-try testing.expectEqual(@as(usize, 10), x.load(.SeqCst));
-try testing.expectEqual(@as(usize, 10), x.fetchAdd(std.math.maxInt(usize), .SeqCst));
-try testing.expectEqual(@as(usize, 9), x.load(.SeqCst));
+try testing.expectEqual(@as(usize, 5), x.fetchAdd(5, .seq_cst));
+try testing.expectEqual(@as(usize, 10), x.load(.seq_cst));
+try testing.expectEqual(@as(usize, 10), x.fetchAdd(std.math.maxInt(usize), .seq_cst));
+try testing.expectEqual(@as(usize, 9), x.load(.seq_cst));
 }

 test "Value.fetchSub" {
 var x = Value(usize).init(5);
-try testing.expectEqual(@as(usize, 5), x.fetchSub(5, .SeqCst));
-try testing.expectEqual(@as(usize, 0), x.load(.SeqCst));
-try testing.expectEqual(@as(usize, 0), x.fetchSub(1, .SeqCst));
-try testing.expectEqual(@as(usize, std.math.maxInt(usize)), x.load(.SeqCst));
+try testing.expectEqual(@as(usize, 5), x.fetchSub(5, .seq_cst));
+try testing.expectEqual(@as(usize, 0), x.load(.seq_cst));
+try testing.expectEqual(@as(usize, 0), x.fetchSub(1, .seq_cst));
+try testing.expectEqual(@as(usize, std.math.maxInt(usize)), x.load(.seq_cst));
 }

 test "Value.fetchMin" {
 var x = Value(usize).init(5);
-try testing.expectEqual(@as(usize, 5), x.fetchMin(0, .SeqCst));
-try testing.expectEqual(@as(usize, 0), x.load(.SeqCst));
-try testing.expectEqual(@as(usize, 0), x.fetchMin(10, .SeqCst));
-try testing.expectEqual(@as(usize, 0), x.load(.SeqCst));
+try testing.expectEqual(@as(usize, 5), x.fetchMin(0, .seq_cst));
+try testing.expectEqual(@as(usize, 0), x.load(.seq_cst));
+try testing.expectEqual(@as(usize, 0), x.fetchMin(10, .seq_cst));
+try testing.expectEqual(@as(usize, 0), x.load(.seq_cst));
 }

 test "Value.fetchMax" {
 var x = Value(usize).init(5);
-try testing.expectEqual(@as(usize, 5), x.fetchMax(10, .SeqCst));
-try testing.expectEqual(@as(usize, 10), x.load(.SeqCst));
-try testing.expectEqual(@as(usize, 10), x.fetchMax(5, .SeqCst));
-try testing.expectEqual(@as(usize, 10), x.load(.SeqCst));
+try testing.expectEqual(@as(usize, 5), x.fetchMax(10, .seq_cst));
+try testing.expectEqual(@as(usize, 10), x.load(.seq_cst));
+try testing.expectEqual(@as(usize, 10), x.fetchMax(5, .seq_cst));
+try testing.expectEqual(@as(usize, 10), x.load(.seq_cst));
 }

 test "Value.fetchAnd" {
 var x = Value(usize).init(0b11);
-try testing.expectEqual(@as(usize, 0b11), x.fetchAnd(0b10, .SeqCst));
-try testing.expectEqual(@as(usize, 0b10), x.load(.SeqCst));
-try testing.expectEqual(@as(usize, 0b10), x.fetchAnd(0b00, .SeqCst));
-try testing.expectEqual(@as(usize, 0b00), x.load(.SeqCst));
+try testing.expectEqual(@as(usize, 0b11), x.fetchAnd(0b10, .seq_cst));
+try testing.expectEqual(@as(usize, 0b10), x.load(.seq_cst));
+try testing.expectEqual(@as(usize, 0b10), x.fetchAnd(0b00, .seq_cst));
+try testing.expectEqual(@as(usize, 0b00), x.load(.seq_cst));
 }

 test "Value.fetchNand" {
 var x = Value(usize).init(0b11);
-try testing.expectEqual(@as(usize, 0b11), x.fetchNand(0b10, .SeqCst));
-try testing.expectEqual(~@as(usize, 0b10), x.load(.SeqCst));
-try testing.expectEqual(~@as(usize, 0b10), x.fetchNand(0b00, .SeqCst));
-try testing.expectEqual(~@as(usize, 0b00), x.load(.SeqCst));
+try testing.expectEqual(@as(usize, 0b11), x.fetchNand(0b10, .seq_cst));
+try testing.expectEqual(~@as(usize, 0b10), x.load(.seq_cst));
+try testing.expectEqual(~@as(usize, 0b10), x.fetchNand(0b00, .seq_cst));
+try testing.expectEqual(~@as(usize, 0b00), x.load(.seq_cst));
 }

 test "Value.fetchOr" {
 var x = Value(usize).init(0b11);
-try testing.expectEqual(@as(usize, 0b11), x.fetchOr(0b100, .SeqCst));
-try testing.expectEqual(@as(usize, 0b111), x.load(.SeqCst));
-try testing.expectEqual(@as(usize, 0b111), x.fetchOr(0b010, .SeqCst));
-try testing.expectEqual(@as(usize, 0b111), x.load(.SeqCst));
+try testing.expectEqual(@as(usize, 0b11), x.fetchOr(0b100, .seq_cst));
+try testing.expectEqual(@as(usize, 0b111), x.load(.seq_cst));
+try testing.expectEqual(@as(usize, 0b111), x.fetchOr(0b010, .seq_cst));
+try testing.expectEqual(@as(usize, 0b111), x.load(.seq_cst));
 }

 test "Value.fetchXor" {
 var x = Value(usize).init(0b11);
-try testing.expectEqual(@as(usize, 0b11), x.fetchXor(0b10, .SeqCst));
-try testing.expectEqual(@as(usize, 0b01), x.load(.SeqCst));
-try testing.expectEqual(@as(usize, 0b01), x.fetchXor(0b01, .SeqCst));
-try testing.expectEqual(@as(usize, 0b00), x.load(.SeqCst));
+try testing.expectEqual(@as(usize, 0b11), x.fetchXor(0b10, .seq_cst));
+try testing.expectEqual(@as(usize, 0b01), x.load(.seq_cst));
+try testing.expectEqual(@as(usize, 0b01), x.fetchXor(0b01, .seq_cst));
+try testing.expectEqual(@as(usize, 0b00), x.load(.seq_cst));
 }

 test "Value.bitSet" {
@@ -304,19 +304,19 @@ test "Value.bitSet" {
 const mask = @as(usize, 1) << bit;

 // setting the bit should change the bit
-try testing.expect(x.load(.SeqCst) & mask == 0);
-try testing.expectEqual(@as(u1, 0), x.bitSet(bit, .SeqCst));
-try testing.expect(x.load(.SeqCst) & mask != 0);
+try testing.expect(x.load(.seq_cst) & mask == 0);
+try testing.expectEqual(@as(u1, 0), x.bitSet(bit, .seq_cst));
+try testing.expect(x.load(.seq_cst) & mask != 0);

 // setting it again shouldn't change the bit
-try testing.expectEqual(@as(u1, 1), x.bitSet(bit, .SeqCst));
-try testing.expect(x.load(.SeqCst) & mask != 0);
+try testing.expectEqual(@as(u1, 1), x.bitSet(bit, .seq_cst));
+try testing.expect(x.load(.seq_cst) & mask != 0);

 // all the previous bits should have not changed (still be set)
 for (0..bit_index) |prev_bit_index| {
 const prev_bit = @as(std.math.Log2Int(usize), @intCast(prev_bit_index));
 const prev_mask = @as(usize, 1) << prev_bit;
-try testing.expect(x.load(.SeqCst) & prev_mask != 0);
+try testing.expect(x.load(.seq_cst) & prev_mask != 0);
 }
 }
 }
@@ -330,19 +330,19 @@ test "Value.bitReset" {
 x.raw |= mask;

 // unsetting the bit should change the bit
-try testing.expect(x.load(.SeqCst) & mask != 0);
-try testing.expectEqual(@as(u1, 1), x.bitReset(bit, .SeqCst));
-try testing.expect(x.load(.SeqCst) & mask == 0);
+try testing.expect(x.load(.seq_cst) & mask != 0);
+try testing.expectEqual(@as(u1, 1), x.bitReset(bit, .seq_cst));
+try testing.expect(x.load(.seq_cst) & mask == 0);

 // unsetting it again shouldn't change the bit
-try testing.expectEqual(@as(u1, 0), x.bitReset(bit, .SeqCst));
-try testing.expect(x.load(.SeqCst) & mask == 0);
+try testing.expectEqual(@as(u1, 0), x.bitReset(bit, .seq_cst));
+try testing.expect(x.load(.seq_cst) & mask == 0);

 // all the previous bits should have not changed (still be reset)
 for (0..bit_index) |prev_bit_index| {
 const prev_bit = @as(std.math.Log2Int(usize), @intCast(prev_bit_index));
 const prev_mask = @as(usize, 1) << prev_bit;
-try testing.expect(x.load(.SeqCst) & prev_mask == 0);
+try testing.expect(x.load(.seq_cst) & prev_mask == 0);
 }
 }
 }
@@ -355,19 +355,19 @@ test "Value.bitToggle" {
 const mask = @as(usize, 1) << bit;

 // toggling the bit should change the bit
-try testing.expect(x.load(.SeqCst) & mask == 0);
-try testing.expectEqual(@as(u1, 0), x.bitToggle(bit, .SeqCst));
-try testing.expect(x.load(.SeqCst) & mask != 0);
+try testing.expect(x.load(.seq_cst) & mask == 0);
+try testing.expectEqual(@as(u1, 0), x.bitToggle(bit, .seq_cst));
+try testing.expect(x.load(.seq_cst) & mask != 0);

 // toggling it again *should* change the bit
-try testing.expectEqual(@as(u1, 1), x.bitToggle(bit, .SeqCst));
-try testing.expect(x.load(.SeqCst) & mask == 0);
+try testing.expectEqual(@as(u1, 1), x.bitToggle(bit, .seq_cst));
+try testing.expect(x.load(.seq_cst) & mask == 0);

 // all the previous bits should have not changed (still be toggled back)
 for (0..bit_index) |prev_bit_index| {
 const prev_bit = @as(std.math.Log2Int(usize), @intCast(prev_bit_index));
 const prev_mask = @as(usize, 1) << prev_bit;
-try testing.expect(x.load(.SeqCst) & prev_mask == 0);
+try testing.expect(x.load(.seq_cst) & prev_mask == 0);
 }
 }
 }
@@ -81,12 +81,12 @@ pub const SymbolVisibility = enum {
 /// This data structure is used by the Zig language code generation and
 /// therefore must be kept in sync with the compiler implementation.
 pub const AtomicOrder = enum {
-Unordered,
-Monotonic,
-Acquire,
-Release,
-AcqRel,
-SeqCst,
+unordered,
+monotonic,
+acquire,
+release,
+acq_rel,
+seq_cst,
 };

 /// This data structure is used by the Zig language code generation and
@@ -1420,7 +1420,7 @@ fn windowsMakeAsyncPipe(rd: *?windows.HANDLE, wr: *?windows.HANDLE, sattr: *cons
 const pipe_path = std.fmt.bufPrintZ(
 &tmp_buf,
 "\\\\.\\pipe\\zig-childprocess-{d}-{d}",
-.{ windows.kernel32.GetCurrentProcessId(), pipe_name_counter.fetchAdd(1, .Monotonic) },
+.{ windows.kernel32.GetCurrentProcessId(), pipe_name_counter.fetchAdd(1, .monotonic) },
 ) catch unreachable;
 const len = std.unicode.wtf8ToWtf16Le(&tmp_bufw, pipe_path) catch unreachable;
 tmp_bufw[len] = 0;
@@ -461,7 +461,7 @@ pub fn panicImpl(trace: ?*const std.builtin.StackTrace, first_trace_addr: ?usize
 0 => {
 panic_stage = 1;

-_ = panicking.fetchAdd(1, .SeqCst);
+_ = panicking.fetchAdd(1, .seq_cst);

 // Make sure to release the mutex when done
 {
@@ -503,7 +503,7 @@ pub fn panicImpl(trace: ?*const std.builtin.StackTrace, first_trace_addr: ?usize

 /// Must be called only after adding 1 to `panicking`. There are three callsites.
 fn waitForOtherThreadToFinishPanicking() void {
-if (panicking.fetchSub(1, .SeqCst) != 1) {
+if (panicking.fetchSub(1, .seq_cst) != 1) {
 // Another thread is panicking, wait for the last one to finish
 // and call abort()
 if (builtin.single_threaded) unreachable;
@@ -2587,7 +2587,7 @@ fn handleSegfaultPosix(sig: i32, info: *const os.siginfo_t, ctx_ptr: ?*const any
 nosuspend switch (panic_stage) {
 0 => {
 panic_stage = 1;
-_ = panicking.fetchAdd(1, .SeqCst);
+_ = panicking.fetchAdd(1, .seq_cst);

 {
 panic_mutex.lock();
@@ -2663,7 +2663,7 @@ fn handleSegfaultWindowsExtra(
 nosuspend switch (panic_stage) {
 0 => {
 panic_stage = 1;
-_ = panicking.fetchAdd(1, .SeqCst);
+_ = panicking.fetchAdd(1, .seq_cst);

 {
 panic_mutex.lock();
@@ -303,11 +303,11 @@ pub const HeapAllocator = switch (builtin.os.tag) {

 const ptr_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align));
 const amt = n + ptr_align - 1 + @sizeOf(usize);
-const optional_heap_handle = @atomicLoad(?HeapHandle, &self.heap_handle, .SeqCst);
+const optional_heap_handle = @atomicLoad(?HeapHandle, &self.heap_handle, .seq_cst);
 const heap_handle = optional_heap_handle orelse blk: {
 const options = if (builtin.single_threaded) os.windows.HEAP_NO_SERIALIZE else 0;
 const hh = os.windows.kernel32.HeapCreate(options, amt, 0) orelse return null;
-const other_hh = @cmpxchgStrong(?HeapHandle, &self.heap_handle, null, hh, .SeqCst, .SeqCst) orelse break :blk hh;
+const other_hh = @cmpxchgStrong(?HeapHandle, &self.heap_handle, null, hh, .seq_cst, .seq_cst) orelse break :blk hh;
 os.windows.HeapDestroy(hh);
 break :blk other_hh.?; // can't be null because of the cmpxchg
 };
@@ -482,13 +482,13 @@ pub const FixedBufferAllocator = struct {
 const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx));
 _ = ra;
 const ptr_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align));
-var end_index = @atomicLoad(usize, &self.end_index, .SeqCst);
+var end_index = @atomicLoad(usize, &self.end_index, .seq_cst);
 while (true) {
 const adjust_off = mem.alignPointerOffset(self.buffer.ptr + end_index, ptr_align) orelse return null;
 const adjusted_index = end_index + adjust_off;
 const new_end_index = adjusted_index + n;
 if (new_end_index > self.buffer.len) return null;
-end_index = @cmpxchgWeak(usize, &self.end_index, end_index, new_end_index, .SeqCst, .SeqCst) orelse
+end_index = @cmpxchgWeak(usize, &self.end_index, end_index, new_end_index, .seq_cst, .seq_cst) orelse
 return self.buffer[adjusted_index..new_end_index].ptr;
 }
 }
@@ -30,7 +30,7 @@ fn alloc(_: *anyopaque, n: usize, log2_align: u8, ra: usize) ?[*]u8 {
 return @ptrCast(addr);
 }

-const hint = @atomicLoad(@TypeOf(std.heap.next_mmap_addr_hint), &std.heap.next_mmap_addr_hint, .Unordered);
+const hint = @atomicLoad(@TypeOf(std.heap.next_mmap_addr_hint), &std.heap.next_mmap_addr_hint, .unordered);
 const slice = os.mmap(
 hint,
 aligned_len,
@@ -41,7 +41,7 @@ fn alloc(_: *anyopaque, n: usize, log2_align: u8, ra: usize) ?[*]u8 {
 ) catch return null;
 assert(mem.isAligned(@intFromPtr(slice.ptr), mem.page_size));
 const new_hint: [*]align(mem.page_size) u8 = @alignCast(slice.ptr + aligned_len);
-_ = @cmpxchgStrong(@TypeOf(std.heap.next_mmap_addr_hint), &std.heap.next_mmap_addr_hint, hint, new_hint, .Monotonic, .Monotonic);
+_ = @cmpxchgStrong(@TypeOf(std.heap.next_mmap_addr_hint), &std.heap.next_mmap_addr_hint, hint, new_hint, .monotonic, .monotonic);
 return slice.ptr;
 }
@@ -1642,7 +1642,7 @@ pub fn open(

 const host = uri.host orelse return error.UriMissingHost;

-if (protocol == .tls and @atomicLoad(bool, &client.next_https_rescan_certs, .Acquire)) {
+if (protocol == .tls and @atomicLoad(bool, &client.next_https_rescan_certs, .acquire)) {
 if (disable_tls) unreachable;

 client.ca_bundle_mutex.lock();
@@ -1650,7 +1650,7 @@ pub fn open(

 if (client.next_https_rescan_certs) {
 client.ca_bundle.rescan(client.allocator) catch return error.CertificateBundleLoadFailure;
-@atomicStore(bool, &client.next_https_rescan_certs, false, .Release);
+@atomicStore(bool, &client.next_https_rescan_certs, false, .release);
 }
 }
@@ -17,7 +17,7 @@ pub fn Once(comptime f: fn () void) type {
 /// first time.
 /// The invocations are thread-safe.
 pub fn call(self: *@This()) void {
-if (@atomicLoad(bool, &self.done, .Acquire))
+if (@atomicLoad(bool, &self.done, .acquire))
 return;

 return self.callSlow();
@@ -32,7 +32,7 @@ pub fn Once(comptime f: fn () void) type {
 // The first thread to acquire the mutex gets to run the initializer
 if (!self.done) {
 f();
-@atomicStore(bool, &self.done, true, .Release);
+@atomicStore(bool, &self.done, true, .release);
 }
 }
 };
@ -436,7 +436,7 @@ fn fchmodat1(dirfd: fd_t, path: []const u8, mode: mode_t, flags: u32) FChmodAtEr
|
||||
fn fchmodat2(dirfd: fd_t, path: []const u8, mode: mode_t, flags: u32) FChmodAtError!void {
|
||||
const path_c = try toPosixPath(path);
|
||||
const use_fchmodat2 = (builtin.os.isAtLeast(.linux, .{ .major = 6, .minor = 6, .patch = 0 }) orelse false) and
|
||||
has_fchmodat2_syscall.load(.Monotonic);
|
||||
has_fchmodat2_syscall.load(.monotonic);
|
||||
while (use_fchmodat2) {
|
||||
// Later on this should be changed to `system.fchmodat2`
|
||||
// when the musl/glibc add a wrapper.
|
||||
@ -458,7 +458,7 @@ fn fchmodat2(dirfd: fd_t, path: []const u8, mode: mode_t, flags: u32) FChmodAtEr
|
||||
.ROFS => return error.ReadOnlyFileSystem,
|
||||
|
||||
.NOSYS => { // Use fallback.
|
||||
has_fchmodat2_syscall.store(false, .Monotonic);
|
||||
has_fchmodat2_syscall.store(false, .monotonic);
|
||||
break;
|
||||
},
|
||||
else => |err| return unexpectedErrno(err),
|
||||
@ -729,7 +729,7 @@ pub fn abort() noreturn {
|
||||
const global = struct {
|
||||
var abort_entered: bool = false;
|
||||
};
|
||||
while (@cmpxchgWeak(bool, &global.abort_entered, false, true, .SeqCst, .SeqCst)) |_| {}
|
||||
while (@cmpxchgWeak(bool, &global.abort_entered, false, true, .seq_cst, .seq_cst)) |_| {}
|
||||
}
|
||||
|
||||
// Install default handler so that the tkill below will terminate.
|
||||
@ -6809,7 +6809,7 @@ pub fn copy_file_range(fd_in: fd_t, off_in: u64, fd_out: fd_t, off_out: u64, len
|
||||
if ((comptime builtin.os.isAtLeast(.freebsd, .{ .major = 13, .minor = 0, .patch = 0 }) orelse false) or
|
||||
((comptime builtin.os.isAtLeast(.linux, .{ .major = 4, .minor = 5, .patch = 0 }) orelse false and
|
||||
std.c.versionCheck(.{ .major = 2, .minor = 27, .patch = 0 })) and
|
||||
has_copy_file_range_syscall.load(.Monotonic)))
|
||||
has_copy_file_range_syscall.load(.monotonic)))
|
||||
{
|
||||
var off_in_copy: i64 = @bitCast(off_in);
|
||||
var off_out_copy: i64 = @bitCast(off_out);
|
||||
@ -6844,7 +6844,7 @@ pub fn copy_file_range(fd_in: fd_t, off_in: u64, fd_out: fd_t, off_out: u64, len
|
||||
.TXTBSY => return error.SwapFile,
|
||||
.XDEV => break, // support for cross-filesystem copy added in Linux 5.3, use fallback
|
||||
.NOSYS => { // syscall added in Linux 4.5, use fallback
|
||||
has_copy_file_range_syscall.store(false, .Monotonic);
|
||||
has_copy_file_range_syscall.store(false, .monotonic);
|
||||
break;
|
||||
},
|
||||
else => |err| return unexpectedErrno(err),
|
||||
|
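
The `has_fchmodat2_syscall` and `has_copy_file_range_syscall` flags above follow the same idiom: a `std.atomic.Value(bool)` read and written with `.monotonic`, since only the flag itself is shared. A hypothetical sketch of that idiom (all names invented):

const std = @import("std");

// Start optimistic and permanently downgrade with a relaxed store once the
// kernel reports ENOSYS.
var has_new_syscall = std.atomic.Value(bool).init(true);

const Path = enum { fast, fallback };

fn chooseSyscall(kernel_has_it: bool) Path {
    if (has_new_syscall.load(.monotonic)) {
        if (kernel_has_it) return .fast;
        // ENOSYS: remember the result so we skip the attempt next time.
        has_new_syscall.store(false, .monotonic);
    }
    return .fallback;
}

test "remembers ENOSYS" {
    try std.testing.expectEqual(Path.fallback, chooseSyscall(false));
    // Once the flag is cleared, the fallback is used without retrying.
    try std.testing.expectEqual(Path.fallback, chooseSyscall(true));
}
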
@ -1334,7 +1334,7 @@ const vdso_clock_gettime_ty = *align(1) const fn (i32, *timespec) callconv(.C) u
|
||||
|
||||
pub fn clock_gettime(clk_id: i32, tp: *timespec) usize {
|
||||
if (@hasDecl(VDSO, "CGT_SYM")) {
|
||||
const ptr = @atomicLoad(?*const anyopaque, &vdso_clock_gettime, .Unordered);
|
||||
const ptr = @atomicLoad(?*const anyopaque, &vdso_clock_gettime, .unordered);
|
||||
if (ptr) |fn_ptr| {
|
||||
const f = @as(vdso_clock_gettime_ty, @ptrCast(fn_ptr));
|
||||
const rc = f(clk_id, tp);
|
||||
@ -1351,7 +1351,7 @@ fn init_vdso_clock_gettime(clk: i32, ts: *timespec) callconv(.C) usize {
|
||||
const ptr = @as(?*const anyopaque, @ptrFromInt(vdso.lookup(VDSO.CGT_VER, VDSO.CGT_SYM)));
|
||||
// Note that we may not have a VDSO at all, update the stub address anyway
|
||||
// so that clock_gettime will fall back on the good old (and slow) syscall
|
||||
@atomicStore(?*const anyopaque, &vdso_clock_gettime, ptr, .Monotonic);
|
||||
@atomicStore(?*const anyopaque, &vdso_clock_gettime, ptr, .monotonic);
|
||||
// Call into the VDSO if available
|
||||
if (ptr) |fn_ptr| {
|
||||
const f = @as(vdso_clock_gettime_ty, @ptrCast(fn_ptr));
|
||||
|
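
The vDSO hunks above cache a resolved function pointer: a `.monotonic` publish on first use and an `.unordered` read afterwards. A loose sketch of the same idea, with invented names and a stand-in resolver:

const std = @import("std");

const FnTy = *align(1) const fn () u32;

var cached: ?*const anyopaque = null;

fn resolveImpl() u32 {
    return 42;
}

fn call() u32 {
    if (@atomicLoad(?*const anyopaque, &cached, .unordered)) |p| {
        const f: FnTy = @ptrCast(p);
        return f();
    }
    // First call: "resolve" the implementation and cache it for later callers.
    const resolved: *const anyopaque = @ptrCast(&resolveImpl);
    @atomicStore(?*const anyopaque, &cached, resolved, .monotonic);
    return resolveImpl();
}

test "caches the resolved function pointer" {
    try std.testing.expectEqual(@as(u32, 42), call());
    try std.testing.expect(cached != null);
    try std.testing.expectEqual(@as(u32, 42), call());
}
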
@ -133,7 +133,7 @@ pub fn deinit(self: *IoUring) void {
|
||||
/// alternative. In Zig, we have first-class error handling... so let's use it.
|
||||
/// Matches the implementation of io_uring_get_sqe() in liburing.
|
||||
pub fn get_sqe(self: *IoUring) !*linux.io_uring_sqe {
|
||||
const head = @atomicLoad(u32, self.sq.head, .Acquire);
|
||||
const head = @atomicLoad(u32, self.sq.head, .acquire);
|
||||
// Remember that these head and tail offsets wrap around every four billion operations.
|
||||
// We must therefore use wrapping addition and subtraction to avoid a runtime crash.
|
||||
const next = self.sq.sqe_tail +% 1;
|
||||
@ -222,7 +222,7 @@ pub fn flush_sq(self: *IoUring) u32 {
|
||||
self.sq.sqe_head +%= 1;
|
||||
}
|
||||
// Ensure that the kernel can actually see the SQE updates when it sees the tail update.
|
||||
@atomicStore(u32, self.sq.tail, tail, .Release);
|
||||
@atomicStore(u32, self.sq.tail, tail, .release);
|
||||
}
|
||||
return self.sq_ready();
|
||||
}
|
||||
@ -234,7 +234,7 @@ pub fn flush_sq(self: *IoUring) u32 {
|
||||
pub fn sq_ring_needs_enter(self: *IoUring, flags: *u32) bool {
|
||||
assert(flags.* == 0);
|
||||
if ((self.flags & linux.IORING_SETUP_SQPOLL) == 0) return true;
|
||||
if ((@atomicLoad(u32, self.sq.flags, .Unordered) & linux.IORING_SQ_NEED_WAKEUP) != 0) {
|
||||
if ((@atomicLoad(u32, self.sq.flags, .unordered) & linux.IORING_SQ_NEED_WAKEUP) != 0) {
|
||||
flags.* |= linux.IORING_ENTER_SQ_WAKEUP;
|
||||
return true;
|
||||
}
|
||||
@ -248,14 +248,14 @@ pub fn sq_ring_needs_enter(self: *IoUring, flags: *u32) bool {
|
||||
pub fn sq_ready(self: *IoUring) u32 {
|
||||
// Always use the shared ring state (i.e. head and not sqe_head) to avoid going out of sync,
|
||||
// see https://github.com/axboe/liburing/issues/92.
|
||||
return self.sq.sqe_tail -% @atomicLoad(u32, self.sq.head, .Acquire);
|
||||
return self.sq.sqe_tail -% @atomicLoad(u32, self.sq.head, .acquire);
|
||||
}
|
||||
|
||||
/// Returns the number of CQEs in the completion queue, i.e. its length.
|
||||
/// These are CQEs that the application is yet to consume.
|
||||
/// Matches the implementation of io_uring_cq_ready in liburing.
|
||||
pub fn cq_ready(self: *IoUring) u32 {
|
||||
return @atomicLoad(u32, self.cq.tail, .Acquire) -% self.cq.head.*;
|
||||
return @atomicLoad(u32, self.cq.tail, .acquire) -% self.cq.head.*;
|
||||
}
|
||||
|
||||
/// Copies as many CQEs as are ready, and that can fit into the destination `cqes` slice.
|
||||
@ -313,7 +313,7 @@ pub fn copy_cqe(ring: *IoUring) !linux.io_uring_cqe {
|
||||
|
||||
/// Matches the implementation of cq_ring_needs_flush() in liburing.
|
||||
pub fn cq_ring_needs_flush(self: *IoUring) bool {
|
||||
return (@atomicLoad(u32, self.sq.flags, .Unordered) & linux.IORING_SQ_CQ_OVERFLOW) != 0;
|
||||
return (@atomicLoad(u32, self.sq.flags, .unordered) & linux.IORING_SQ_CQ_OVERFLOW) != 0;
|
||||
}
|
||||
|
||||
/// For advanced use cases only that implement custom completion queue methods.
|
||||
@ -331,7 +331,7 @@ pub fn cqe_seen(self: *IoUring, cqe: *linux.io_uring_cqe) void {
|
||||
pub fn cq_advance(self: *IoUring, count: u32) void {
|
||||
if (count > 0) {
|
||||
// Ensure the kernel only sees the new head value after the CQEs have been read.
|
||||
@atomicStore(u32, self.cq.head, self.cq.head.* +% count, .Release);
|
||||
@atomicStore(u32, self.cq.head, self.cq.head.* +% count, .release);
|
||||
}
|
||||
}
|
||||
|
||||
|
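
The io_uring hunks above are head/tail handshakes: the side that produced data publishes its index with `.release`, and the other side observes it with `.acquire`. A toy single-producer/single-consumer sketch of that handshake (not the io_uring API; all names invented):

const std = @import("std");

const Ring = struct {
    buf: [4]u32 = undefined,
    head: u32 = 0, // advanced by the consumer
    tail: u32 = 0, // advanced by the producer

    fn push(self: *Ring, value: u32) bool {
        const head = @atomicLoad(u32, &self.head, .acquire);
        const tail = self.tail;
        if (tail -% head == self.buf.len) return false; // full
        self.buf[tail % self.buf.len] = value;
        // Publish the slot only after it has been written.
        @atomicStore(u32, &self.tail, tail +% 1, .release);
        return true;
    }

    fn pop(self: *Ring) ?u32 {
        const tail = @atomicLoad(u32, &self.tail, .acquire);
        const head = self.head;
        if (tail == head) return null; // empty
        const value = self.buf[head % self.buf.len];
        // Release the slot back to the producer.
        @atomicStore(u32, &self.head, head +% 1, .release);
        return value;
    }
};

test "push then pop" {
    var ring = Ring{};
    try std.testing.expect(ring.push(7));
    try std.testing.expectEqual(@as(?u32, 7), ring.pop());
    try std.testing.expectEqual(@as(?u32, null), ring.pop());
}
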
@ -425,7 +425,7 @@ fn start1() u8 {
|
||||
}
|
||||
|
||||
fn start2(ctx: *i32) u8 {
|
||||
_ = @atomicRmw(i32, ctx, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
|
||||
_ = @atomicRmw(i32, ctx, AtomicRmwOp.Add, 1, AtomicOrder.seq_cst);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
36
src/Sema.zig

@ -6450,8 +6450,8 @@ fn zirFence(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) Co
|
||||
.needed_comptime_reason = "atomic order of @fence must be comptime-known",
|
||||
});
|
||||
|
||||
if (@intFromEnum(order) < @intFromEnum(std.builtin.AtomicOrder.Acquire)) {
|
||||
return sema.fail(block, order_src, "atomic ordering must be Acquire or stricter", .{});
|
||||
if (@intFromEnum(order) < @intFromEnum(std.builtin.AtomicOrder.acquire)) {
|
||||
return sema.fail(block, order_src, "atomic ordering must be acquire or stricter", .{});
|
||||
}
|
||||
|
||||
_ = try block.addInst(.{
|
||||
@ -23894,17 +23894,17 @@ fn zirCmpxchg(
|
||||
.needed_comptime_reason = "atomic order of cmpxchg failure must be comptime-known",
|
||||
});
|
||||
|
||||
if (@intFromEnum(success_order) < @intFromEnum(std.builtin.AtomicOrder.Monotonic)) {
|
||||
return sema.fail(block, success_order_src, "success atomic ordering must be Monotonic or stricter", .{});
|
||||
if (@intFromEnum(success_order) < @intFromEnum(std.builtin.AtomicOrder.monotonic)) {
|
||||
return sema.fail(block, success_order_src, "success atomic ordering must be monotonic or stricter", .{});
|
||||
}
|
||||
if (@intFromEnum(failure_order) < @intFromEnum(std.builtin.AtomicOrder.Monotonic)) {
|
||||
return sema.fail(block, failure_order_src, "failure atomic ordering must be Monotonic or stricter", .{});
|
||||
if (@intFromEnum(failure_order) < @intFromEnum(std.builtin.AtomicOrder.monotonic)) {
|
||||
return sema.fail(block, failure_order_src, "failure atomic ordering must be monotonic or stricter", .{});
|
||||
}
|
||||
if (@intFromEnum(failure_order) > @intFromEnum(success_order)) {
|
||||
return sema.fail(block, failure_order_src, "failure atomic ordering must be no stricter than success", .{});
|
||||
}
|
||||
if (failure_order == .Release or failure_order == .AcqRel) {
|
||||
return sema.fail(block, failure_order_src, "failure atomic ordering must not be Release or AcqRel", .{});
|
||||
if (failure_order == .release or failure_order == .acq_rel) {
|
||||
return sema.fail(block, failure_order_src, "failure atomic ordering must not be release or acq_rel", .{});
|
||||
}
|
||||
|
||||
const result_ty = try mod.optionalType(elem_ty.toIntern());
|
||||
@ -24346,11 +24346,11 @@ fn zirAtomicLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
|
||||
});
|
||||
|
||||
switch (order) {
|
||||
.Release, .AcqRel => {
|
||||
.release, .acq_rel => {
|
||||
return sema.fail(
|
||||
block,
|
||||
order_src,
|
||||
"@atomicLoad atomic ordering must not be Release or AcqRel",
|
||||
"@atomicLoad atomic ordering must not be release or acq_rel",
|
||||
.{},
|
||||
);
|
||||
},
|
||||
@ -24412,8 +24412,8 @@ fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
|
||||
.needed_comptime_reason = "atomic order of @atomicRmW must be comptime-known",
|
||||
});
|
||||
|
||||
if (order == .Unordered) {
|
||||
return sema.fail(block, order_src, "@atomicRmw atomic ordering must not be Unordered", .{});
|
||||
if (order == .unordered) {
|
||||
return sema.fail(block, order_src, "@atomicRmw atomic ordering must not be unordered", .{});
|
||||
}
|
||||
|
||||
// special case zero bit types
|
||||
@ -24482,18 +24482,18 @@ fn zirAtomicStore(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
|
||||
});
|
||||
|
||||
const air_tag: Air.Inst.Tag = switch (order) {
|
||||
.Acquire, .AcqRel => {
|
||||
.acquire, .acq_rel => {
|
||||
return sema.fail(
|
||||
block,
|
||||
order_src,
|
||||
"@atomicStore atomic ordering must not be Acquire or AcqRel",
|
||||
"@atomicStore atomic ordering must not be acquire or acq_rel",
|
||||
.{},
|
||||
);
|
||||
},
|
||||
.Unordered => .atomic_store_unordered,
|
||||
.Monotonic => .atomic_store_monotonic,
|
||||
.Release => .atomic_store_release,
|
||||
.SeqCst => .atomic_store_seq_cst,
|
||||
.unordered => .atomic_store_unordered,
|
||||
.monotonic => .atomic_store_monotonic,
|
||||
.release => .atomic_store_release,
|
||||
.seq_cst => .atomic_store_seq_cst,
|
||||
};
|
||||
|
||||
return sema.storePtr2(block, src, ptr, ptr_src, operand, operand_src, air_tag);
|
||||
|
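
The Sema checks above encode which orderings each builtin accepts. A small test sketch (not from the repository) spelling those rules out with the lowercase names:

const std = @import("std");

test "orderings accepted by @atomicStore and @atomicLoad" {
    var x: u32 = 0;

    @atomicStore(u32, &x, 1, .unordered);
    @atomicStore(u32, &x, 2, .monotonic);
    @atomicStore(u32, &x, 3, .release);
    @atomicStore(u32, &x, 4, .seq_cst);
    // @atomicStore(u32, &x, 5, .acquire); // error: must not be acquire or acq_rel

    _ = @atomicLoad(u32, &x, .unordered);
    _ = @atomicLoad(u32, &x, .monotonic);
    _ = @atomicLoad(u32, &x, .acquire);
    _ = @atomicLoad(u32, &x, .seq_cst);
    // _ = @atomicLoad(u32, &x, .release); // error: must not be release or acq_rel

    // cmpxchg: both orderings must be at least .monotonic, and the failure
    // ordering may not be .release/.acq_rel or stricter than the success one.
    _ = @cmpxchgStrong(u32, &x, 4, 5, .acq_rel, .acquire);
    try std.testing.expectEqual(@as(u32, 5), x);
}
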
@ -815,10 +815,10 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
|
||||
.call_never_tail => try self.airCall(inst, .never_tail),
|
||||
.call_never_inline => try self.airCall(inst, .never_inline),
|
||||
|
||||
.atomic_store_unordered => try self.airAtomicStore(inst, .Unordered),
|
||||
.atomic_store_monotonic => try self.airAtomicStore(inst, .Monotonic),
|
||||
.atomic_store_release => try self.airAtomicStore(inst, .Release),
|
||||
.atomic_store_seq_cst => try self.airAtomicStore(inst, .SeqCst),
|
||||
.atomic_store_unordered => try self.airAtomicStore(inst, .unordered),
|
||||
.atomic_store_monotonic => try self.airAtomicStore(inst, .monotonic),
|
||||
.atomic_store_release => try self.airAtomicStore(inst, .release),
|
||||
.atomic_store_seq_cst => try self.airAtomicStore(inst, .seq_cst),
|
||||
|
||||
.struct_field_ptr_index_0 => try self.airStructFieldPtrIndex(inst, 0),
|
||||
.struct_field_ptr_index_1 => try self.airStructFieldPtrIndex(inst, 1),
|
||||
|
@ -801,10 +801,10 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
|
||||
.call_never_tail => try self.airCall(inst, .never_tail),
|
||||
.call_never_inline => try self.airCall(inst, .never_inline),
|
||||
|
||||
.atomic_store_unordered => try self.airAtomicStore(inst, .Unordered),
|
||||
.atomic_store_monotonic => try self.airAtomicStore(inst, .Monotonic),
|
||||
.atomic_store_release => try self.airAtomicStore(inst, .Release),
|
||||
.atomic_store_seq_cst => try self.airAtomicStore(inst, .SeqCst),
|
||||
.atomic_store_unordered => try self.airAtomicStore(inst, .unordered),
|
||||
.atomic_store_monotonic => try self.airAtomicStore(inst, .monotonic),
|
||||
.atomic_store_release => try self.airAtomicStore(inst, .release),
|
||||
.atomic_store_seq_cst => try self.airAtomicStore(inst, .seq_cst),
|
||||
|
||||
.struct_field_ptr_index_0 => try self.airStructFieldPtrIndex(inst, 0),
|
||||
.struct_field_ptr_index_1 => try self.airStructFieldPtrIndex(inst, 1),
|
||||
|
@ -634,10 +634,10 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
|
||||
.call_never_tail => try self.airCall(inst, .never_tail),
|
||||
.call_never_inline => try self.airCall(inst, .never_inline),
|
||||
|
||||
.atomic_store_unordered => try self.airAtomicStore(inst, .Unordered),
|
||||
.atomic_store_monotonic => try self.airAtomicStore(inst, .Monotonic),
|
||||
.atomic_store_release => try self.airAtomicStore(inst, .Release),
|
||||
.atomic_store_seq_cst => try self.airAtomicStore(inst, .SeqCst),
|
||||
.atomic_store_unordered => try self.airAtomicStore(inst, .unordered),
|
||||
.atomic_store_monotonic => try self.airAtomicStore(inst, .monotonic),
|
||||
.atomic_store_release => try self.airAtomicStore(inst, .release),
|
||||
.atomic_store_seq_cst => try self.airAtomicStore(inst, .seq_cst),
|
||||
|
||||
.struct_field_ptr_index_0 => try self.airStructFieldPtrIndex(inst, 0),
|
||||
.struct_field_ptr_index_1 => try self.airStructFieldPtrIndex(inst, 1),
|
||||
|
@ -2111,10 +2111,10 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
|
||||
.call_never_tail => try self.airCall(inst, .never_tail),
|
||||
.call_never_inline => try self.airCall(inst, .never_inline),
|
||||
|
||||
.atomic_store_unordered => try self.airAtomicStore(inst, .Unordered),
|
||||
.atomic_store_monotonic => try self.airAtomicStore(inst, .Monotonic),
|
||||
.atomic_store_release => try self.airAtomicStore(inst, .Release),
|
||||
.atomic_store_seq_cst => try self.airAtomicStore(inst, .SeqCst),
|
||||
.atomic_store_unordered => try self.airAtomicStore(inst, .unordered),
|
||||
.atomic_store_monotonic => try self.airAtomicStore(inst, .monotonic),
|
||||
.atomic_store_release => try self.airAtomicStore(inst, .release),
|
||||
.atomic_store_seq_cst => try self.airAtomicStore(inst, .seq_cst),
|
||||
|
||||
.struct_field_ptr_index_0 => try self.airStructFieldPtrIndex(inst, 0),
|
||||
.struct_field_ptr_index_1 => try self.airStructFieldPtrIndex(inst, 1),
|
||||
@ -11977,9 +11977,9 @@ fn airFrameAddress(self: *Self, inst: Air.Inst.Index) !void {
|
||||
fn airFence(self: *Self, inst: Air.Inst.Index) !void {
|
||||
const order = self.air.instructions.items(.data)[@intFromEnum(inst)].fence;
|
||||
switch (order) {
|
||||
.Unordered, .Monotonic => unreachable,
|
||||
.Acquire, .Release, .AcqRel => {},
|
||||
.SeqCst => try self.asmOpOnly(.{ ._, .mfence }),
|
||||
.unordered, .monotonic => unreachable,
|
||||
.acquire, .release, .acq_rel => {},
|
||||
.seq_cst => try self.asmOpOnly(.{ ._, .mfence }),
|
||||
}
|
||||
self.finishAirBookkeeping();
|
||||
}
|
||||
@ -15747,9 +15747,9 @@ fn atomicOp(
|
||||
.Xor => .xor,
|
||||
else => unreachable,
|
||||
} else switch (order) {
|
||||
.Unordered, .Monotonic, .Release, .AcqRel => .mov,
|
||||
.Acquire => unreachable,
|
||||
.SeqCst => .xchg,
|
||||
.unordered, .monotonic, .release, .acq_rel => .mov,
|
||||
.acquire => unreachable,
|
||||
.seq_cst => .xchg,
|
||||
};
|
||||
|
||||
const dst_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
|
||||
|
@ -3278,10 +3278,10 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail,
|
||||
|
||||
.int_from_ptr => try airIntFromPtr(f, inst),
|
||||
|
||||
.atomic_store_unordered => try airAtomicStore(f, inst, toMemoryOrder(.Unordered)),
|
||||
.atomic_store_monotonic => try airAtomicStore(f, inst, toMemoryOrder(.Monotonic)),
|
||||
.atomic_store_release => try airAtomicStore(f, inst, toMemoryOrder(.Release)),
|
||||
.atomic_store_seq_cst => try airAtomicStore(f, inst, toMemoryOrder(.SeqCst)),
|
||||
.atomic_store_unordered => try airAtomicStore(f, inst, toMemoryOrder(.unordered)),
|
||||
.atomic_store_monotonic => try airAtomicStore(f, inst, toMemoryOrder(.monotonic)),
|
||||
.atomic_store_release => try airAtomicStore(f, inst, toMemoryOrder(.release)),
|
||||
.atomic_store_seq_cst => try airAtomicStore(f, inst, toMemoryOrder(.seq_cst)),
|
||||
|
||||
.struct_field_ptr_index_0 => try airStructFieldPtrIndex(f, inst, 0),
|
||||
.struct_field_ptr_index_1 => try airStructFieldPtrIndex(f, inst, 1),
|
||||
@ -7482,11 +7482,11 @@ fn airCVaCopy(f: *Function, inst: Air.Inst.Index) !CValue {
|
||||
fn toMemoryOrder(order: std.builtin.AtomicOrder) [:0]const u8 {
|
||||
return switch (order) {
|
||||
// Note: unordered is actually even less atomic than relaxed
|
||||
.Unordered, .Monotonic => "zig_memory_order_relaxed",
|
||||
.Acquire => "zig_memory_order_acquire",
|
||||
.Release => "zig_memory_order_release",
|
||||
.AcqRel => "zig_memory_order_acq_rel",
|
||||
.SeqCst => "zig_memory_order_seq_cst",
|
||||
.unordered, .monotonic => "zig_memory_order_relaxed",
|
||||
.acquire => "zig_memory_order_acquire",
|
||||
.release => "zig_memory_order_release",
|
||||
.acq_rel => "zig_memory_order_acq_rel",
|
||||
.seq_cst => "zig_memory_order_seq_cst",
|
||||
};
|
||||
}
|
||||
|
||||
|
@ -1278,7 +1278,7 @@ pub const Object = struct {
|
||||
|
||||
const reloc_mode: llvm.RelocMode = if (pic)
|
||||
.PIC
|
||||
else if (self.module.comp.config.link_mode == .Dynamic)
|
||||
else if (self.module.comp.config.link_mode == .dynamic)
|
||||
llvm.RelocMode.DynamicNoPIC
|
||||
else
|
||||
.Static;
|
||||
@ -10801,12 +10801,12 @@ pub const FuncGen = struct {
|
||||
|
||||
fn toLlvmAtomicOrdering(atomic_order: std.builtin.AtomicOrder) Builder.AtomicOrdering {
|
||||
return switch (atomic_order) {
|
||||
.Unordered => .unordered,
|
||||
.Monotonic => .monotonic,
|
||||
.Acquire => .acquire,
|
||||
.Release => .release,
|
||||
.AcqRel => .acq_rel,
|
||||
.SeqCst => .seq_cst,
|
||||
.unordered => .unordered,
|
||||
.monotonic => .monotonic,
|
||||
.acquire => .acquire,
|
||||
.release => .release,
|
||||
.acq_rel => .acq_rel,
|
||||
.seq_cst => .seq_cst,
|
||||
};
|
||||
}
|
||||
|
||||
|
@ -8398,7 +8398,7 @@ pub const Metadata = enum(u32) {
|
||||
fmt_str = fmt_str ++ ")\n";
|
||||
|
||||
var fmt_args: @Type(.{ .Struct = .{
|
||||
.layout = .Auto,
|
||||
.layout = .auto,
|
||||
.fields = &fields,
|
||||
.decls = &.{},
|
||||
.is_tuple = false,
|
||||
|
@ -415,8 +415,8 @@ fn BufType(comptime T: type, comptime min_len: usize) type {
|
||||
.Enum => |info| info.tag_type,
|
||||
.Bool => u1,
|
||||
.Struct => |info| switch (info.layout) {
|
||||
.Auto, .Extern => @compileError("Unsupported type: " ++ @typeName(T)),
|
||||
.Packed => std.meta.Int(.unsigned, @bitSizeOf(T)),
|
||||
.auto, .@"extern" => @compileError("Unsupported type: " ++ @typeName(T)),
|
||||
.@"packed" => std.meta.Int(.unsigned, @bitSizeOf(T)),
|
||||
},
|
||||
else => @compileError("Unsupported type: " ++ @typeName(T)),
|
||||
})));
|
||||
|
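
The `BufType` hunk above also reflects the lowercase rename of `std.builtin.Type.ContainerLayout`; since `extern` and `packed` are keywords, the new tags are written `.@"extern"` and `.@"packed"`. A hypothetical helper in the same style (`backingInt` and `Flags` are invented):

const std = @import("std");

fn backingInt(comptime T: type) type {
    return switch (@typeInfo(T)) {
        .Struct => |info| switch (info.layout) {
            .auto, .@"extern" => @compileError("only packed structs have a backing integer"),
            .@"packed" => std.meta.Int(.unsigned, @bitSizeOf(T)),
        },
        else => @compileError("expected a struct type"),
    };
}

test "backing integer of a packed struct" {
    const Flags = packed struct { a: bool, b: bool, rest: u6 };
    try std.testing.expectEqual(u8, backingInt(Flags));
}
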
@ -376,7 +376,7 @@ const PanicSwitch = struct {
|
||||
};
|
||||
state.* = new_state;
|
||||
|
||||
_ = panicking.fetchAdd(1, .SeqCst);
|
||||
_ = panicking.fetchAdd(1, .seq_cst);
|
||||
|
||||
state.recover_stage = .release_ref_count;
|
||||
|
||||
@ -458,7 +458,7 @@ const PanicSwitch = struct {
|
||||
noinline fn releaseRefCount(state: *volatile PanicState) noreturn {
|
||||
state.recover_stage = .abort;
|
||||
|
||||
if (panicking.fetchSub(1, .SeqCst) != 1) {
|
||||
if (panicking.fetchSub(1, .seq_cst) != 1) {
|
||||
// Another thread is panicking, wait for the last one to finish
|
||||
// and call abort()
|
||||
|
||||
|
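
`panicking` above is a `std.atomic.Value` used as a reference count: `fetchAdd` on entry, and the thread whose `fetchSub` returns 1 knows it was the last one out. A minimal sketch of that pattern with invented names:

const std = @import("std");

var active = std.atomic.Value(u8).init(0);

fn enter() void {
    _ = active.fetchAdd(1, .seq_cst);
}

/// Returns true when the caller was the last active participant.
fn leave() bool {
    return active.fetchSub(1, .seq_cst) == 1;
}

test "last one out" {
    enter();
    enter();
    try std.testing.expect(!leave());
    try std.testing.expect(leave());
}
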
@ -4145,8 +4145,8 @@ fn progressThread(progress: *std.Progress, server: *const Server, reset: *std.Th
|
||||
buf.appendSlice("... ") catch {};
|
||||
}
|
||||
need_ellipse = false;
|
||||
const eti = @atomicLoad(usize, &node.unprotected_estimated_total_items, .Monotonic);
|
||||
const completed_items = @atomicLoad(usize, &node.unprotected_completed_items, .Monotonic);
|
||||
const eti = @atomicLoad(usize, &node.unprotected_estimated_total_items, .monotonic);
|
||||
const completed_items = @atomicLoad(usize, &node.unprotected_completed_items, .monotonic);
|
||||
const current_item = completed_items + 1;
|
||||
if (node.name.len != 0 or eti > 0) {
|
||||
if (node.name.len != 0) {
|
||||
@ -4163,7 +4163,7 @@ fn progressThread(progress: *std.Progress, server: *const Server, reset: *std.Th
|
||||
need_ellipse = false;
|
||||
}
|
||||
}
|
||||
maybe_node = @atomicLoad(?*std.Progress.Node, &node.recently_updated_child, .Acquire);
|
||||
maybe_node = @atomicLoad(?*std.Progress.Node, &node.recently_updated_child, .acquire);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -303,10 +303,10 @@ const Writer = struct {
|
||||
.fence => try w.writeFence(s, inst),
|
||||
.atomic_load => try w.writeAtomicLoad(s, inst),
|
||||
.prefetch => try w.writePrefetch(s, inst),
|
||||
.atomic_store_unordered => try w.writeAtomicStore(s, inst, .Unordered),
|
||||
.atomic_store_monotonic => try w.writeAtomicStore(s, inst, .Monotonic),
|
||||
.atomic_store_release => try w.writeAtomicStore(s, inst, .Release),
|
||||
.atomic_store_seq_cst => try w.writeAtomicStore(s, inst, .SeqCst),
|
||||
.atomic_store_unordered => try w.writeAtomicStore(s, inst, .unordered),
|
||||
.atomic_store_monotonic => try w.writeAtomicStore(s, inst, .monotonic),
|
||||
.atomic_store_release => try w.writeAtomicStore(s, inst, .release),
|
||||
.atomic_store_seq_cst => try w.writeAtomicStore(s, inst, .seq_cst),
|
||||
.atomic_rmw => try w.writeAtomicRmw(s, inst),
|
||||
.field_parent_ptr => try w.writeFieldParentPtr(s, inst),
|
||||
.wasm_memory_size => try w.writeWasmMemorySize(s, inst),
|
||||
|
@ -22,18 +22,18 @@ test "cmpxchg" {
|
||||
|
||||
fn testCmpxchg() !void {
|
||||
var x: i32 = 1234;
|
||||
if (@cmpxchgWeak(i32, &x, 99, 5678, .SeqCst, .SeqCst)) |x1| {
|
||||
if (@cmpxchgWeak(i32, &x, 99, 5678, .seq_cst, .seq_cst)) |x1| {
|
||||
try expect(x1 == 1234);
|
||||
} else {
|
||||
@panic("cmpxchg should have failed");
|
||||
}
|
||||
|
||||
while (@cmpxchgWeak(i32, &x, 1234, 5678, .SeqCst, .SeqCst)) |x1| {
|
||||
while (@cmpxchgWeak(i32, &x, 1234, 5678, .seq_cst, .seq_cst)) |x1| {
|
||||
try expect(x1 == 1234);
|
||||
}
|
||||
try expect(x == 5678);
|
||||
|
||||
try expect(@cmpxchgStrong(i32, &x, 5678, 42, .SeqCst, .SeqCst) == null);
|
||||
try expect(@cmpxchgStrong(i32, &x, 5678, 42, .seq_cst, .seq_cst) == null);
|
||||
try expect(x == 42);
|
||||
}
|
||||
|
||||
@ -43,7 +43,7 @@ test "fence" {
|
||||
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
|
||||
|
||||
var x: i32 = 1234;
|
||||
@fence(.SeqCst);
|
||||
@fence(.seq_cst);
|
||||
x = 5678;
|
||||
}
|
||||
|
||||
@ -60,18 +60,18 @@ test "atomicrmw and atomicload" {
|
||||
}
|
||||
|
||||
fn testAtomicRmw(ptr: *u8) !void {
|
||||
const prev_value = @atomicRmw(u8, ptr, .Xchg, 42, .SeqCst);
|
||||
const prev_value = @atomicRmw(u8, ptr, .Xchg, 42, .seq_cst);
|
||||
try expect(prev_value == 200);
|
||||
comptime {
|
||||
var x: i32 = 1234;
|
||||
const y: i32 = 12345;
|
||||
try expect(@atomicLoad(i32, &x, .SeqCst) == 1234);
|
||||
try expect(@atomicLoad(i32, &y, .SeqCst) == 12345);
|
||||
try expect(@atomicLoad(i32, &x, .seq_cst) == 1234);
|
||||
try expect(@atomicLoad(i32, &y, .seq_cst) == 12345);
|
||||
}
|
||||
}
|
||||
|
||||
fn testAtomicLoad(ptr: *u8) !void {
|
||||
const x = @atomicLoad(u8, ptr, .SeqCst);
|
||||
const x = @atomicLoad(u8, ptr, .seq_cst);
|
||||
try expect(x == 42);
|
||||
}
|
||||
|
||||
@ -85,18 +85,18 @@ test "cmpxchg with ptr" {
|
||||
var data2: i32 = 5678;
|
||||
var data3: i32 = 9101;
|
||||
var x: *i32 = &data1;
|
||||
if (@cmpxchgWeak(*i32, &x, &data2, &data3, .SeqCst, .SeqCst)) |x1| {
|
||||
if (@cmpxchgWeak(*i32, &x, &data2, &data3, .seq_cst, .seq_cst)) |x1| {
|
||||
try expect(x1 == &data1);
|
||||
} else {
|
||||
@panic("cmpxchg should have failed");
|
||||
}
|
||||
|
||||
while (@cmpxchgWeak(*i32, &x, &data1, &data3, .SeqCst, .SeqCst)) |x1| {
|
||||
while (@cmpxchgWeak(*i32, &x, &data1, &data3, .seq_cst, .seq_cst)) |x1| {
|
||||
try expect(x1 == &data1);
|
||||
}
|
||||
try expect(x == &data3);
|
||||
|
||||
try expect(@cmpxchgStrong(*i32, &x, &data3, &data2, .SeqCst, .SeqCst) == null);
|
||||
try expect(@cmpxchgStrong(*i32, &x, &data3, &data2, .seq_cst, .seq_cst) == null);
|
||||
try expect(x == &data2);
|
||||
}
|
||||
|
||||
@ -108,7 +108,7 @@ test "cmpxchg with ignored result" {
|
||||
|
||||
var x: i32 = 1234;
|
||||
|
||||
_ = @cmpxchgStrong(i32, &x, 1234, 5678, .Monotonic, .Monotonic);
|
||||
_ = @cmpxchgStrong(i32, &x, 1234, 5678, .monotonic, .monotonic);
|
||||
|
||||
try expect(5678 == x);
|
||||
}
|
||||
@ -127,18 +127,18 @@ test "128-bit cmpxchg" {
|
||||
|
||||
fn test_u128_cmpxchg() !void {
|
||||
var x: u128 align(16) = 1234;
|
||||
if (@cmpxchgWeak(u128, &x, 99, 5678, .SeqCst, .SeqCst)) |x1| {
|
||||
if (@cmpxchgWeak(u128, &x, 99, 5678, .seq_cst, .seq_cst)) |x1| {
|
||||
try expect(x1 == 1234);
|
||||
} else {
|
||||
@panic("cmpxchg should have failed");
|
||||
}
|
||||
|
||||
while (@cmpxchgWeak(u128, &x, 1234, 5678, .SeqCst, .SeqCst)) |x1| {
|
||||
while (@cmpxchgWeak(u128, &x, 1234, 5678, .seq_cst, .seq_cst)) |x1| {
|
||||
try expect(x1 == 1234);
|
||||
}
|
||||
try expect(x == 5678);
|
||||
|
||||
try expect(@cmpxchgStrong(u128, &x, 5678, 42, .SeqCst, .SeqCst) == null);
|
||||
try expect(@cmpxchgStrong(u128, &x, 5678, 42, .seq_cst, .seq_cst) == null);
|
||||
try expect(x == 42);
|
||||
}
|
||||
|
||||
@ -155,7 +155,7 @@ test "cmpxchg on a global variable" {
|
||||
return error.SkipZigTest;
|
||||
}
|
||||
|
||||
_ = @cmpxchgWeak(u32, &a_global_variable, 1234, 42, .Acquire, .Monotonic);
|
||||
_ = @cmpxchgWeak(u32, &a_global_variable, 1234, 42, .acquire, .monotonic);
|
||||
try expect(a_global_variable == 42);
|
||||
}
|
||||
|
||||
@ -168,12 +168,12 @@ test "atomic load and rmw with enum" {
|
||||
const Value = enum(u8) { a, b, c };
|
||||
var x = Value.a;
|
||||
|
||||
try expect(@atomicLoad(Value, &x, .SeqCst) != .b);
|
||||
try expect(@atomicLoad(Value, &x, .seq_cst) != .b);
|
||||
|
||||
_ = @atomicRmw(Value, &x, .Xchg, .c, .SeqCst);
|
||||
try expect(@atomicLoad(Value, &x, .SeqCst) == .c);
|
||||
try expect(@atomicLoad(Value, &x, .SeqCst) != .a);
|
||||
try expect(@atomicLoad(Value, &x, .SeqCst) != .b);
|
||||
_ = @atomicRmw(Value, &x, .Xchg, .c, .seq_cst);
|
||||
try expect(@atomicLoad(Value, &x, .seq_cst) == .c);
|
||||
try expect(@atomicLoad(Value, &x, .seq_cst) != .a);
|
||||
try expect(@atomicLoad(Value, &x, .seq_cst) != .b);
|
||||
}
|
||||
|
||||
test "atomic store" {
|
||||
@ -183,10 +183,10 @@ test "atomic store" {
|
||||
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
|
||||
|
||||
var x: u32 = 0;
|
||||
@atomicStore(u32, &x, 1, .SeqCst);
|
||||
try expect(@atomicLoad(u32, &x, .SeqCst) == 1);
|
||||
@atomicStore(u32, &x, 12345678, .SeqCst);
|
||||
try expect(@atomicLoad(u32, &x, .SeqCst) == 12345678);
|
||||
@atomicStore(u32, &x, 1, .seq_cst);
|
||||
try expect(@atomicLoad(u32, &x, .seq_cst) == 1);
|
||||
@atomicStore(u32, &x, 12345678, .seq_cst);
|
||||
try expect(@atomicLoad(u32, &x, .seq_cst) == 12345678);
|
||||
}
|
||||
|
||||
test "atomic store comptime" {
|
||||
@ -201,10 +201,10 @@ test "atomic store comptime" {
|
||||
|
||||
fn testAtomicStore() !void {
|
||||
var x: u32 = 0;
|
||||
@atomicStore(u32, &x, 1, .SeqCst);
|
||||
try expect(@atomicLoad(u32, &x, .SeqCst) == 1);
|
||||
@atomicStore(u32, &x, 12345678, .SeqCst);
|
||||
try expect(@atomicLoad(u32, &x, .SeqCst) == 12345678);
|
||||
@atomicStore(u32, &x, 1, .seq_cst);
|
||||
try expect(@atomicLoad(u32, &x, .seq_cst) == 1);
|
||||
@atomicStore(u32, &x, 12345678, .seq_cst);
|
||||
try expect(@atomicLoad(u32, &x, .seq_cst) == 12345678);
|
||||
}
|
||||
|
||||
test "atomicrmw with floats" {
|
||||
@ -224,15 +224,15 @@ test "atomicrmw with floats" {
|
||||
fn testAtomicRmwFloat() !void {
|
||||
var x: f32 = 0;
|
||||
try expect(x == 0);
|
||||
_ = @atomicRmw(f32, &x, .Xchg, 1, .SeqCst);
|
||||
_ = @atomicRmw(f32, &x, .Xchg, 1, .seq_cst);
|
||||
try expect(x == 1);
|
||||
_ = @atomicRmw(f32, &x, .Add, 5, .SeqCst);
|
||||
_ = @atomicRmw(f32, &x, .Add, 5, .seq_cst);
|
||||
try expect(x == 6);
|
||||
_ = @atomicRmw(f32, &x, .Sub, 2, .SeqCst);
|
||||
_ = @atomicRmw(f32, &x, .Sub, 2, .seq_cst);
|
||||
try expect(x == 4);
|
||||
_ = @atomicRmw(f32, &x, .Max, 13, .SeqCst);
|
||||
_ = @atomicRmw(f32, &x, .Max, 13, .seq_cst);
|
||||
try expect(x == 13);
|
||||
_ = @atomicRmw(f32, &x, .Min, 42, .SeqCst);
|
||||
_ = @atomicRmw(f32, &x, .Min, 42, .seq_cst);
|
||||
try expect(x == 13);
|
||||
}
|
||||
|
||||
@ -266,46 +266,46 @@ fn testAtomicRmwInt(comptime signedness: std.builtin.Signedness, comptime N: usi
|
||||
const int = std.meta.Int(signedness, N);
|
||||
|
||||
var x: int = 1;
|
||||
var res = @atomicRmw(int, &x, .Xchg, 3, .SeqCst);
|
||||
var res = @atomicRmw(int, &x, .Xchg, 3, .seq_cst);
|
||||
try expect(x == 3 and res == 1);
|
||||
|
||||
res = @atomicRmw(int, &x, .Add, 3, .SeqCst);
|
||||
res = @atomicRmw(int, &x, .Add, 3, .seq_cst);
|
||||
var y: int = 3;
|
||||
try expect(res == y);
|
||||
y = y + 3;
|
||||
try expect(x == y);
|
||||
|
||||
res = @atomicRmw(int, &x, .Sub, 1, .SeqCst);
|
||||
res = @atomicRmw(int, &x, .Sub, 1, .seq_cst);
|
||||
try expect(res == y);
|
||||
y = y - 1;
|
||||
try expect(x == y);
|
||||
|
||||
res = @atomicRmw(int, &x, .And, 4, .SeqCst);
|
||||
res = @atomicRmw(int, &x, .And, 4, .seq_cst);
|
||||
try expect(res == y);
|
||||
y = y & 4;
|
||||
try expect(x == y);
|
||||
|
||||
res = @atomicRmw(int, &x, .Nand, 4, .SeqCst);
|
||||
res = @atomicRmw(int, &x, .Nand, 4, .seq_cst);
|
||||
try expect(res == y);
|
||||
y = ~(y & 4);
|
||||
try expect(x == y);
|
||||
|
||||
res = @atomicRmw(int, &x, .Or, 6, .SeqCst);
|
||||
res = @atomicRmw(int, &x, .Or, 6, .seq_cst);
|
||||
try expect(res == y);
|
||||
y = y | 6;
|
||||
try expect(x == y);
|
||||
|
||||
res = @atomicRmw(int, &x, .Xor, 2, .SeqCst);
|
||||
res = @atomicRmw(int, &x, .Xor, 2, .seq_cst);
|
||||
try expect(res == y);
|
||||
y = y ^ 2;
|
||||
try expect(x == y);
|
||||
|
||||
res = @atomicRmw(int, &x, .Max, 1, .SeqCst);
|
||||
res = @atomicRmw(int, &x, .Max, 1, .seq_cst);
|
||||
try expect(res == y);
|
||||
y = @max(y, 1);
|
||||
try expect(x == y);
|
||||
|
||||
res = @atomicRmw(int, &x, .Min, 1, .SeqCst);
|
||||
res = @atomicRmw(int, &x, .Min, 1, .seq_cst);
|
||||
try expect(res == y);
|
||||
y = @min(y, 1);
|
||||
try expect(x == y);
|
||||
@ -333,53 +333,53 @@ fn testAtomicRmwInt128(comptime signedness: std.builtin.Signedness) !void {
|
||||
const replacement: int = 0x00000000_00000005_00000000_00000003;
|
||||
|
||||
var x: int align(16) = initial;
|
||||
var res = @atomicRmw(int, &x, .Xchg, replacement, .SeqCst);
|
||||
var res = @atomicRmw(int, &x, .Xchg, replacement, .seq_cst);
|
||||
try expect(x == replacement and res == initial);
|
||||
|
||||
var operator: int = 0x00000001_00000000_20000000_00000000;
|
||||
res = @atomicRmw(int, &x, .Add, operator, .SeqCst);
|
||||
res = @atomicRmw(int, &x, .Add, operator, .seq_cst);
|
||||
var y: int = replacement;
|
||||
try expect(res == y);
|
||||
y = y + operator;
|
||||
try expect(x == y);
|
||||
|
||||
operator = 0x00000000_10000000_00000000_20000000;
|
||||
res = @atomicRmw(int, &x, .Sub, operator, .SeqCst);
|
||||
res = @atomicRmw(int, &x, .Sub, operator, .seq_cst);
|
||||
try expect(res == y);
|
||||
y = y - operator;
|
||||
try expect(x == y);
|
||||
|
||||
operator = 0x12345678_87654321_12345678_87654321;
|
||||
res = @atomicRmw(int, &x, .And, operator, .SeqCst);
|
||||
res = @atomicRmw(int, &x, .And, operator, .seq_cst);
|
||||
try expect(res == y);
|
||||
y = y & operator;
|
||||
try expect(x == y);
|
||||
|
||||
operator = 0x00000000_10000000_00000000_20000000;
|
||||
res = @atomicRmw(int, &x, .Nand, operator, .SeqCst);
|
||||
res = @atomicRmw(int, &x, .Nand, operator, .seq_cst);
|
||||
try expect(res == y);
|
||||
y = ~(y & operator);
|
||||
try expect(x == y);
|
||||
|
||||
operator = 0x12340000_56780000_67890000_98760000;
|
||||
res = @atomicRmw(int, &x, .Or, operator, .SeqCst);
|
||||
res = @atomicRmw(int, &x, .Or, operator, .seq_cst);
|
||||
try expect(res == y);
|
||||
y = y | operator;
|
||||
try expect(x == y);
|
||||
|
||||
operator = 0x0a0b0c0d_0e0f0102_03040506_0708090a;
|
||||
res = @atomicRmw(int, &x, .Xor, operator, .SeqCst);
|
||||
res = @atomicRmw(int, &x, .Xor, operator, .seq_cst);
|
||||
try expect(res == y);
|
||||
y = y ^ operator;
|
||||
try expect(x == y);
|
||||
|
||||
operator = 0x00000000_10000000_00000000_20000000;
|
||||
res = @atomicRmw(int, &x, .Max, operator, .SeqCst);
|
||||
res = @atomicRmw(int, &x, .Max, operator, .seq_cst);
|
||||
try expect(res == y);
|
||||
y = @max(y, operator);
|
||||
try expect(x == y);
|
||||
|
||||
res = @atomicRmw(int, &x, .Min, operator, .SeqCst);
|
||||
res = @atomicRmw(int, &x, .Min, operator, .seq_cst);
|
||||
try expect(res == y);
|
||||
y = @min(y, operator);
|
||||
try expect(x == y);
|
||||
@ -405,13 +405,13 @@ test "atomics with different types" {
|
||||
|
||||
fn testAtomicsWithType(comptime T: type, a: T, b: T) !void {
|
||||
var x: T = b;
|
||||
@atomicStore(T, &x, a, .SeqCst);
|
||||
@atomicStore(T, &x, a, .seq_cst);
|
||||
try expect(x == a);
|
||||
try expect(@atomicLoad(T, &x, .SeqCst) == a);
|
||||
try expect(@atomicRmw(T, &x, .Xchg, b, .SeqCst) == a);
|
||||
try expect(@cmpxchgStrong(T, &x, b, a, .SeqCst, .SeqCst) == null);
|
||||
try expect(@atomicLoad(T, &x, .seq_cst) == a);
|
||||
try expect(@atomicRmw(T, &x, .Xchg, b, .seq_cst) == a);
|
||||
try expect(@cmpxchgStrong(T, &x, b, a, .seq_cst, .seq_cst) == null);
|
||||
if (@sizeOf(T) != 0)
|
||||
try expect(@cmpxchgStrong(T, &x, b, a, .SeqCst, .SeqCst).? == a);
|
||||
try expect(@cmpxchgStrong(T, &x, b, a, .seq_cst, .seq_cst).? == a);
|
||||
}
|
||||
|
||||
test "return @atomicStore, using it as a void value" {
|
||||
@ -425,12 +425,12 @@ test "return @atomicStore, using it as a void value" {
|
||||
value: usize,
|
||||
|
||||
pub fn store(self: *A, value: usize) void {
|
||||
return @atomicStore(usize, &self.value, value, .Unordered);
|
||||
return @atomicStore(usize, &self.value, value, .unordered);
|
||||
}
|
||||
|
||||
pub fn store2(self: *A, value: usize) void {
|
||||
return switch (value) {
|
||||
else => @atomicStore(usize, &self.value, value, .Unordered),
|
||||
else => @atomicStore(usize, &self.value, value, .unordered),
|
||||
};
|
||||
}
|
||||
};
|
||||
|
@ -14,10 +14,10 @@ test {
|
||||
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
|
||||
|
||||
var val: u8 = undefined;
|
||||
try testing.expectEqual({}, @atomicStore(u8, &val, 0, .Unordered));
|
||||
try testing.expectEqual({}, @atomicStore(u8, &val, 0, .unordered));
|
||||
try testing.expectEqual(void, @TypeOf(@breakpoint()));
|
||||
try testing.expectEqual({}, @export(x, .{ .name = "x" }));
|
||||
try testing.expectEqual({}, @fence(.Acquire));
|
||||
try testing.expectEqual({}, @fence(.acquire));
|
||||
try testing.expectEqual({}, @memcpy(@as([*]u8, @ptrFromInt(1))[0..0], @as([*]u8, @ptrFromInt(1))[0..0]));
|
||||
try testing.expectEqual({}, @memset(@as([*]u8, @ptrFromInt(1))[0..0], undefined));
|
||||
try testing.expectEqual(noreturn, @TypeOf(if (true) @panic("") else {}));
|
||||
|
@ -1,10 +1,10 @@
|
||||
export fn entry() void {
|
||||
var x: u32 = 0;
|
||||
@atomicStore(u32, &x, 1, .Acquire);
|
||||
@atomicStore(u32, &x, 1, .acquire);
|
||||
}
|
||||
|
||||
// error
|
||||
// backend=stage2
|
||||
// target=native
|
||||
//
|
||||
// :3:31: error: @atomicStore atomic ordering must not be Acquire or AcqRel
|
||||
// :3:31: error: @atomicStore atomic ordering must not be acquire or acq_rel
|
||||
|
@ -1,7 +1,7 @@
|
||||
const AtomicOrder = @import("std").builtin.AtomicOrder;
|
||||
export fn f() void {
|
||||
var x: i32 = 1234;
|
||||
while (!@cmpxchgWeak(i32, &x, 1234, 5678, AtomicOrder.Monotonic, AtomicOrder.SeqCst)) {}
|
||||
while (!@cmpxchgWeak(i32, &x, 1234, 5678, AtomicOrder.monotonic, AtomicOrder.seq_cst)) {}
|
||||
}
|
||||
|
||||
// error
|
||||
|
@ -1,11 +1,11 @@
|
||||
const AtomicOrder = @import("std").builtin.AtomicOrder;
|
||||
export fn f() void {
|
||||
var x: i32 = 1234;
|
||||
while (!@cmpxchgWeak(i32, &x, 1234, 5678, AtomicOrder.Unordered, AtomicOrder.Unordered)) {}
|
||||
while (!@cmpxchgWeak(i32, &x, 1234, 5678, AtomicOrder.unordered, AtomicOrder.unordered)) {}
|
||||
}
|
||||
|
||||
// error
|
||||
// backend=stage2
|
||||
// target=native
|
||||
//
|
||||
// :4:58: error: success atomic ordering must be Monotonic or stricter
|
||||
// :4:58: error: success atomic ordering must be monotonic or stricter
|
||||
|
@ -1,9 +1,9 @@
|
||||
export fn entry() void {
|
||||
@fence(.Monotonic);
|
||||
@fence(.monotonic);
|
||||
}
|
||||
|
||||
// error
|
||||
// backend=stage2
|
||||
// target=native
|
||||
//
|
||||
// :2:13: error: atomic ordering must be Acquire or stricter
|
||||
// :2:13: error: atomic ordering must be acquire or stricter
|
||||
|
@ -1,6 +1,6 @@
|
||||
export fn entry() void {
|
||||
var x = false;
|
||||
_ = @atomicRmw(bool, &x, .Add, true, .SeqCst);
|
||||
_ = @atomicRmw(bool, &x, .Add, true, .seq_cst);
|
||||
}
|
||||
|
||||
// error
|
||||
|
@ -6,7 +6,7 @@ export fn entry() void {
|
||||
d,
|
||||
};
|
||||
var x: E = .a;
|
||||
_ = @atomicRmw(E, &x, .Add, .b, .SeqCst);
|
||||
_ = @atomicRmw(E, &x, .Add, .b, .seq_cst);
|
||||
}
|
||||
|
||||
// error
|
||||
|
@ -1,6 +1,6 @@
|
||||
export fn entry() void {
|
||||
var x: f32 = 0;
|
||||
_ = @atomicRmw(f32, &x, .And, 2, .SeqCst);
|
||||
_ = @atomicRmw(f32, &x, .And, 2, .seq_cst);
|
||||
}
|
||||
|
||||
// error
|
||||
|
@ -1,6 +1,6 @@
|
||||
export fn entry() void {
|
||||
var x: f32 = 0;
|
||||
_ = @cmpxchgWeak(f32, &x, 1, 2, .SeqCst, .SeqCst);
|
||||
_ = @cmpxchgWeak(f32, &x, 1, 2, .seq_cst, .seq_cst);
|
||||
}
|
||||
|
||||
// error
|
||||
|
@ -1,7 +1,7 @@
|
||||
const AtomicOrder = @import("std").builtin.AtomicOrder;
|
||||
export fn entry() bool {
|
||||
var x: i32 align(1) = 1234;
|
||||
while (!@cmpxchgWeak(i32, &x, 1234, 5678, AtomicOrder.SeqCst, AtomicOrder.SeqCst)) {}
|
||||
while (!@cmpxchgWeak(i32, &x, 1234, 5678, AtomicOrder.seq_cst, AtomicOrder.seq_cst)) {}
|
||||
return x == 5678;
|
||||
}
|
||||
|
||||
|