std.Thread.Mutex: change API to lock() and unlock()

This is a breaking change. Before, usage looked like this:

```zig
const held = mutex.acquire();
defer held.release();
```

Now it looks like this:

```zig
mutex.lock();
defer mutex.unlock();
```
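
The non-blocking variant changes the same way: `tryAcquire()`, which returned an optional `Held`, becomes `tryLock()`, which returns a `bool`. Usage follows the updated `Mutex` doc comment further down in this diff:

```zig
if (mutex.tryLock()) {
    defer mutex.unlock();
    // ... critical section
} else {
    // ... lock not acquired
}
```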

The `Held` type was an idea to make mutexes slightly safer by making it
more difficult to forget to release an acquired lock. However, it
ultimately caused more problems than it solved: whenever a data
structure needed to store a held mutex, it had to carry the `Held`
handle around as well. Simplify everything by reducing the API
down to the primitives: lock() and unlock().
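
As an illustration (a hypothetical sketch, not code from this commit), the pattern that motivated the change is now straightforward to express: a type can lock in one function and unlock in another without storing any handle in between.

```zig
const std = @import("std");

/// Hypothetical type guarding a value across separate function calls.
/// Under the old API, this struct would also have needed a `Held` field.
const Guarded = struct {
    mutex: std.Thread.Mutex = .{},
    value: u32 = 0,

    /// Locks in one function...
    pub fn beginUpdate(g: *Guarded) *u32 {
        g.mutex.lock();
        return &g.value;
    }

    /// ...and unlocks in another; no `Held` handle is carried between the calls.
    pub fn endUpdate(g: *Guarded) void {
        g.mutex.unlock();
    }
};
```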

Closes #8051
Closes #8246
Closes #10105
Andrew Kelley committed 2021-11-09 18:27:12 -07:00
parent 65e518e8e8
commit 008b0ec5e5
18 changed files with 141 additions and 180 deletions


```diff
@@ -56,7 +56,7 @@ done: bool = true,
 /// Protects the `refresh` function, as well as `node.recently_updated_child`.
 /// Without this, callsites would call `Node.end` and then free `Node` memory
 /// while it was still being accessed by the `refresh` function.
-update_lock: std.Thread.Mutex = .{},
+update_mutex: std.Thread.Mutex = .{},
 /// Keeps track of how many columns in the terminal have been output, so that
 /// we can move the cursor back later.
@@ -103,14 +103,14 @@ pub const Node = struct {
         self.context.maybeRefresh();
         if (self.parent) |parent| {
             {
-                const held = self.context.update_lock.acquire();
-                defer held.release();
+                self.context.update_mutex.lock();
+                defer self.context.update_mutex.unlock();
                 _ = @cmpxchgStrong(?*Node, &parent.recently_updated_child, self, null, .Monotonic, .Monotonic);
             }
             parent.completeOne();
         } else {
-            const held = self.context.update_lock.acquire();
-            defer held.release();
+            self.context.update_mutex.lock();
+            defer self.context.update_mutex.unlock();
             self.context.done = true;
             self.context.refreshWithHeldLock();
         }
@@ -170,8 +170,8 @@ pub fn start(self: *Progress, name: []const u8, estimated_total_items: usize) !*
 pub fn maybeRefresh(self: *Progress) void {
     const now = self.timer.read();
     if (now < self.initial_delay_ns) return;
-    const held = self.update_lock.tryAcquire() orelse return;
-    defer held.release();
+    if (!self.update_mutex.tryLock()) return;
+    defer self.update_mutex.unlock();
     // TODO I have observed this to happen sometimes. I think we need to follow Rust's
     // lead and guarantee monotonically increasing times in the std lib itself.
     if (now < self.prev_refresh_timestamp) return;
@@ -181,8 +181,8 @@ pub fn maybeRefresh(self: *Progress) void {
 /// Updates the terminal and resets `self.next_refresh_timestamp`. Thread-safe.
 pub fn refresh(self: *Progress) void {
-    const held = self.update_lock.tryAcquire() orelse return;
-    defer held.release();
+    if (!self.update_mutex.tryLock()) return;
+    defer self.update_mutex.unlock();
     return self.refreshWithHeldLock();
 }
```


```diff
@@ -145,8 +145,8 @@ pub const AtomicCondition = struct {
         var waiter = QueueList.Node{ .data = .{} };
         {
-            const held = cond.queue_mutex.acquire();
-            defer held.release();
+            cond.queue_mutex.lock();
+            defer cond.queue_mutex.unlock();
             cond.queue_list.prepend(&waiter);
             @atomicStore(bool, &cond.pending, true, .SeqCst);
@@ -162,8 +162,8 @@ pub const AtomicCondition = struct {
             return;
         const maybe_waiter = blk: {
-            const held = cond.queue_mutex.acquire();
-            defer held.release();
+            cond.queue_mutex.lock();
+            defer cond.queue_mutex.unlock();
             const maybe_waiter = cond.queue_list.popFirst();
             @atomicStore(bool, &cond.pending, cond.queue_list.first != null, .SeqCst);
@@ -181,8 +181,8 @@ pub const AtomicCondition = struct {
         @atomicStore(bool, &cond.pending, false, .SeqCst);
         var waiters = blk: {
-            const held = cond.queue_mutex.acquire();
-            defer held.release();
+            cond.queue_mutex.lock();
+            defer cond.queue_mutex.unlock();
             const waiters = cond.queue_list;
             cond.queue_list = .{};
```


```diff
@@ -8,13 +8,13 @@
 //! Example usage:
 //! var m = Mutex{};
 //!
-//! const lock = m.acquire();
-//! defer lock.release();
+//! m.lock();
+//! defer m.unlock();
 //! ... critical code
 //!
 //! Non-blocking:
-//! if (m.tryAcquire) |lock| {
-//!     defer lock.release();
+//! if (m.tryLock()) {
+//!     defer m.unlock();
 //!     // ... critical section
 //! } else {
 //!     // ... lock not acquired
@@ -32,30 +32,22 @@ const linux = os.linux;
 const testing = std.testing;
 const StaticResetEvent = std.thread.StaticResetEvent;
-/// Try to acquire the mutex without blocking. Returns `null` if the mutex is
-/// unavailable. Otherwise returns `Held`. Call `release` on `Held`, or use
-/// releaseDirect().
-pub fn tryAcquire(m: *Mutex) ?Held {
-    return m.impl.tryAcquire();
+/// Try to acquire the mutex without blocking. Returns `false` if the mutex is
+/// unavailable. Otherwise returns `true`. Call `unlock` on the mutex to release.
+pub fn tryLock(m: *Mutex) bool {
+    return m.impl.tryLock();
 }
 /// Acquire the mutex. Deadlocks if the mutex is already
 /// held by the calling thread.
-pub fn acquire(m: *Mutex) Held {
-    return m.impl.acquire();
+pub fn lock(m: *Mutex) void {
+    m.impl.lock();
 }
-/// Release the mutex. Prefer Held.release() if available.
-pub fn releaseDirect(m: *Mutex) void {
-    return m.impl.releaseDirect();
+pub fn unlock(m: *Mutex) void {
+    m.impl.unlock();
 }
-/// A held mutex handle. Call release to allow other threads to
-/// take the mutex. Do not call release() more than once.
-/// For more complex scenarios, this handle can be discarded
-/// and Mutex.releaseDirect can be called instead.
-pub const Held = Impl.Held;
 const Impl = if (builtin.single_threaded)
     Dummy
 else if (builtin.os.tag == .windows)
@@ -65,32 +57,6 @@ else if (std.Thread.use_pthreads)
 else
     AtomicMutex;
-fn HeldInterface(comptime MutexType: type) type {
-    return struct {
-        const Mixin = @This();
-        pub const Held = struct {
-            mutex: *MutexType,
-            pub fn release(held: Mixin.Held) void {
-                held.mutex.releaseDirect();
-            }
-        };
-        pub fn tryAcquire(m: *MutexType) ?Mixin.Held {
-            if (m.tryAcquireDirect()) {
-                return Mixin.Held{ .mutex = m };
-            } else {
-                return null;
-            }
-        }
-        pub fn acquire(m: *MutexType) Mixin.Held {
-            m.acquireDirect();
-            return Mixin.Held{ .mutex = m };
-        }
-    };
-}
 pub const AtomicMutex = struct {
     state: State = .unlocked,
@@ -100,9 +66,7 @@ pub const AtomicMutex = struct {
         waiting,
     };
-    pub usingnamespace HeldInterface(@This());
-    fn tryAcquireDirect(m: *AtomicMutex) bool {
+    pub fn tryLock(m: *AtomicMutex) bool {
         return @cmpxchgStrong(
             State,
             &m.state,
@@ -113,14 +77,14 @@
         ) == null;
     }
-    fn acquireDirect(m: *AtomicMutex) void {
+    pub fn lock(m: *AtomicMutex) void {
         switch (@atomicRmw(State, &m.state, .Xchg, .locked, .Acquire)) {
             .unlocked => {},
             else => |s| m.lockSlow(s),
         }
     }
-    fn releaseDirect(m: *AtomicMutex) void {
+    pub fn unlock(m: *AtomicMutex) void {
         switch (@atomicRmw(State, &m.state, .Xchg, .unlocked, .Release)) {
             .unlocked => unreachable,
             .locked => {},
@@ -202,18 +166,16 @@ pub const AtomicMutex = struct {
 pub const PthreadMutex = struct {
     pthread_mutex: std.c.pthread_mutex_t = .{},
-    pub usingnamespace HeldInterface(@This());
-    /// Try to acquire the mutex without blocking. Returns true if
-    /// the mutex is unavailable. Otherwise returns false. Call
-    /// release when done.
-    fn tryAcquireDirect(m: *PthreadMutex) bool {
+    pub fn tryLock(m: *PthreadMutex) bool {
         return std.c.pthread_mutex_trylock(&m.pthread_mutex) == .SUCCESS;
     }
     /// Acquire the mutex. Will deadlock if the mutex is already
     /// held by the calling thread.
-    fn acquireDirect(m: *PthreadMutex) void {
+    pub fn lock(m: *PthreadMutex) void {
         switch (std.c.pthread_mutex_lock(&m.pthread_mutex)) {
             .SUCCESS => {},
             .INVAL => unreachable,
@@ -225,7 +187,7 @@ pub const PthreadMutex = struct {
         }
     }
-    fn releaseDirect(m: *PthreadMutex) void {
+    pub fn unlock(m: *PthreadMutex) void {
         switch (std.c.pthread_mutex_unlock(&m.pthread_mutex)) {
             .SUCCESS => return,
             .INVAL => unreachable,
@@ -239,51 +201,47 @@ pub const PthreadMutex = struct {
 /// This has the same semantics as `Mutex`, however it does not actually do any
 /// synchronization. Operations are safety-checked no-ops.
 pub const Dummy = struct {
-    lock: @TypeOf(lock_init) = lock_init,
-    pub usingnamespace HeldInterface(@This());
+    locked: @TypeOf(lock_init) = lock_init,
     const lock_init = if (std.debug.runtime_safety) false else {};
     /// Try to acquire the mutex without blocking. Returns false if
     /// the mutex is unavailable. Otherwise returns true.
-    fn tryAcquireDirect(m: *Dummy) bool {
+    pub fn tryLock(m: *Dummy) bool {
         if (std.debug.runtime_safety) {
-            if (m.lock) return false;
-            m.lock = true;
+            if (m.locked) return false;
+            m.locked = true;
         }
         return true;
     }
     /// Acquire the mutex. Will deadlock if the mutex is already
     /// held by the calling thread.
-    fn acquireDirect(m: *Dummy) void {
-        if (!m.tryAcquireDirect()) {
+    pub fn lock(m: *Dummy) void {
+        if (!m.tryLock()) {
             @panic("deadlock detected");
         }
     }
-    fn releaseDirect(m: *Dummy) void {
+    pub fn unlock(m: *Dummy) void {
         if (std.debug.runtime_safety) {
-            m.lock = false;
+            m.locked = false;
         }
     }
 };
-const WindowsMutex = struct {
+pub const WindowsMutex = struct {
     srwlock: windows.SRWLOCK = windows.SRWLOCK_INIT,
-    pub usingnamespace HeldInterface(@This());
-    fn tryAcquireDirect(m: *WindowsMutex) bool {
+    pub fn tryLock(m: *WindowsMutex) bool {
         return windows.kernel32.TryAcquireSRWLockExclusive(&m.srwlock) != windows.FALSE;
     }
-    fn acquireDirect(m: *WindowsMutex) void {
+    pub fn lock(m: *WindowsMutex) void {
         windows.kernel32.AcquireSRWLockExclusive(&m.srwlock);
     }
-    fn releaseDirect(m: *WindowsMutex) void {
+    pub fn unlock(m: *WindowsMutex) void {
         windows.kernel32.ReleaseSRWLockExclusive(&m.srwlock);
     }
 };
@@ -322,8 +280,8 @@ test "basic usage" {
 fn worker(ctx: *TestContext) void {
     var i: usize = 0;
     while (i != TestContext.incr_count) : (i += 1) {
-        const held = ctx.mutex.acquire();
-        defer held.release();
+        ctx.mutex.lock();
+        defer ctx.mutex.unlock();
         ctx.data += 1;
     }
```


```diff
@@ -13,8 +13,8 @@ const Mutex = std.Thread.Mutex;
 const Condition = std.Thread.Condition;
 pub fn wait(sem: *Semaphore) void {
-    const held = sem.mutex.acquire();
-    defer held.release();
+    sem.mutex.lock();
+    defer sem.mutex.unlock();
     while (sem.permits == 0)
         sem.cond.wait(&sem.mutex);
@@ -25,8 +25,8 @@ pub fn wait(sem: *Semaphore) void {
 }
 pub fn post(sem: *Semaphore) void {
-    const held = sem.mutex.acquire();
-    defer held.release();
+    sem.mutex.lock();
+    defer sem.mutex.unlock();
     sem.permits += 1;
     sem.cond.signal();
```


```diff
@@ -31,8 +31,8 @@ pub fn Queue(comptime T: type) type {
         pub fn put(self: *Self, node: *Node) void {
             node.next = null;
-            const held = self.mutex.acquire();
-            defer held.release();
+            self.mutex.lock();
+            defer self.mutex.unlock();
             node.prev = self.tail;
             self.tail = node;
@@ -48,8 +48,8 @@ pub fn Queue(comptime T: type) type {
         /// It is safe to `get()` a node from the queue while another thread tries
         /// to `remove()` the same node at the same time.
         pub fn get(self: *Self) ?*Node {
-            const held = self.mutex.acquire();
-            defer held.release();
+            self.mutex.lock();
+            defer self.mutex.unlock();
             const head = self.head orelse return null;
             self.head = head.next;
@@ -67,8 +67,8 @@ pub fn Queue(comptime T: type) type {
         pub fn unget(self: *Self, node: *Node) void {
             node.prev = null;
-            const held = self.mutex.acquire();
-            defer held.release();
+            self.mutex.lock();
+            defer self.mutex.unlock();
             const opt_head = self.head;
             self.head = node;
@@ -84,8 +84,8 @@ pub fn Queue(comptime T: type) type {
         /// It is safe to `remove()` a node from the queue while another thread tries
         /// to `get()` the same node at the same time.
         pub fn remove(self: *Self, node: *Node) bool {
-            const held = self.mutex.acquire();
-            defer held.release();
+            self.mutex.lock();
+            defer self.mutex.unlock();
             if (node.prev == null and node.next == null and self.head != node) {
                 return false;
@@ -110,8 +110,8 @@ pub fn Queue(comptime T: type) type {
         /// Note that in a multi-consumer environment a return value of `false`
         /// does not mean that `get` will yield a non-`null` value!
         pub fn isEmpty(self: *Self) bool {
-            const held = self.mutex.acquire();
-            defer held.release();
+            self.mutex.lock();
+            defer self.mutex.unlock();
             return self.head == null;
         }
@@ -144,8 +144,8 @@ pub fn Queue(comptime T: type) type {
                 }
             }
         };
-        const held = self.mutex.acquire();
-        defer held.release();
+        self.mutex.lock();
+        defer self.mutex.unlock();
         try stream.print("head: ", .{});
         try S.dumpRecursive(stream, self.head, 0, 4);
```


```diff
@@ -62,8 +62,8 @@ pub const warn = print;
 /// Print to stderr, unbuffered, and silently returning on failure. Intended
 /// for use in "printf debugging." Use `std.log` functions for proper logging.
 pub fn print(comptime fmt: []const u8, args: anytype) void {
-    const held = stderr_mutex.acquire();
-    defer held.release();
+    stderr_mutex.lock();
+    defer stderr_mutex.unlock();
     const stderr = io.getStdErr().writer();
     nosuspend stderr.print(fmt, args) catch return;
 }
@@ -286,8 +286,8 @@ pub fn panicImpl(trace: ?*const std.builtin.StackTrace, first_trace_addr: ?usize
     // Make sure to release the mutex when done
     {
-        const held = panic_mutex.acquire();
-        defer held.release();
+        panic_mutex.lock();
+        defer panic_mutex.unlock();
         const stderr = io.getStdErr().writer();
         if (builtin.single_threaded) {
```


```diff
@@ -32,7 +32,7 @@ pub const Lock = struct {
     }
     pub fn acquire(self: *Lock) Held {
-        const held = self.mutex.acquire();
+        self.mutex.lock();
         // self.head transitions from multiple stages depending on the value:
         // UNLOCKED -> LOCKED:
@@ -44,7 +44,7 @@ pub const Lock = struct {
         if (self.head == UNLOCKED) {
             self.head = LOCKED;
-            held.release();
+            self.mutex.unlock();
             return Held{ .lock = self };
         }
@@ -71,7 +71,7 @@ pub const Lock = struct {
                 .next = undefined,
                 .data = @frame(),
             };
-            held.release();
+            self.mutex.unlock();
         }
         return Held{ .lock = self };
@@ -82,8 +82,8 @@ pub const Lock = struct {
     pub fn release(self: Held) void {
         const waiter = blk: {
-            const held = self.lock.mutex.acquire();
-            defer held.release();
+            self.lock.mutex.lock();
+            defer self.lock.mutex.unlock();
             // self.head goes through the reverse transition from acquire():
             // <head ptr> -> <new head ptr>:
```


```diff
@@ -925,8 +925,8 @@ pub const Loop = struct {
     }
     fn peekExpiringEntry(self: *Waiters) ?*Entry {
-        const held = self.entries.mutex.acquire();
-        defer held.release();
+        self.entries.mutex.lock();
+        defer self.entries.mutex.unlock();
         // starting from the head
         var head = self.entries.head orelse return null;
```


```diff
@@ -615,8 +615,8 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
         ) Error!usize {
             const self = @fieldParentPtr(Self, "allocator", allocator);
-            const held = self.mutex.acquire();
-            defer held.release();
+            self.mutex.lock();
+            defer self.mutex.unlock();
             assert(old_mem.len != 0);
@@ -758,8 +758,8 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
         fn alloc(allocator: *Allocator, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8 {
             const self = @fieldParentPtr(Self, "allocator", allocator);
-            const held = self.mutex.acquire();
-            defer held.release();
+            self.mutex.lock();
+            defer self.mutex.unlock();
             if (!self.isAllocationAllowed(len)) {
                 return error.OutOfMemory;
```


```diff
@@ -1319,8 +1319,8 @@ pub const Value = union(enum) {
     }
     pub fn dump(self: Value) void {
-        var held = std.debug.getStderrMutex().acquire();
-        defer held.release();
+        std.debug.getStderrMutex().lock();
+        defer std.debug.getStderrMutex().unlock();
         const stderr = std.io.getStdErr().writer();
         std.json.stringify(self, std.json.StringifyOptions{ .whitespace = null }, stderr) catch return;
```


```diff
@@ -41,8 +41,8 @@
 //!     const prefix = "[" ++ level.asText() ++ "] " ++ scope_prefix;
 //!
 //!     // Print the message to stderr, silently ignoring any errors
-//!     const held = std.debug.getStderrMutex().acquire();
-//!     defer held.release();
+//!     std.debug.getStderrMutex().lock();
+//!     defer std.debug.getStderrMutex().unlock();
 //!     const stderr = std.io.getStdErr().writer();
 //!     nosuspend stderr.print(prefix ++ format ++ "\n", args) catch return;
 //! }
@@ -165,8 +165,8 @@ pub fn defaultLog(
     const level_txt = comptime message_level.asText();
     const prefix2 = if (scope == .default) ": " else "(" ++ @tagName(scope) ++ "): ";
     const stderr = std.io.getStdErr().writer();
-    const held = std.debug.getStderrMutex().acquire();
-    defer held.release();
+    std.debug.getStderrMutex().lock();
+    defer std.debug.getStderrMutex().unlock();
     nosuspend stderr.print(level_txt ++ prefix2 ++ format ++ "\n", args) catch return;
 }
```


```diff
@@ -26,8 +26,8 @@ pub fn Once(comptime f: fn () void) type {
         fn callSlow(self: *@This()) void {
             @setCold(true);
-            const T = self.mutex.acquire();
-            defer T.release();
+            self.mutex.lock();
+            defer self.mutex.unlock();
             // The first thread to acquire the mutex gets to run the initializer
             if (!self.done) {
```


```diff
@@ -1324,8 +1324,8 @@ pub fn WSASocketW(
     if (!first) return error.Unexpected;
     first = false;
-    var held = wsa_startup_mutex.acquire();
-    defer held.release();
+    wsa_startup_mutex.lock();
+    defer wsa_startup_mutex.unlock();
     // Here we could use a flag to prevent multiple threads to prevent
     // multiple calls to WSAStartup, but it doesn't matter. We're globally
```


```diff
@@ -339,8 +339,8 @@ pub const AllErrors = struct {
     },
     pub fn renderToStdErr(msg: Message, ttyconf: std.debug.TTY.Config) void {
-        const held = std.debug.getStderrMutex().acquire();
-        defer held.release();
+        std.debug.getStderrMutex().lock();
+        defer std.debug.getStderrMutex().unlock();
         const stderr = std.io.getStdErr();
         return msg.renderToStdErrInner(ttyconf, stderr, "error:", .Red, 0) catch return;
     }
@@ -2691,8 +2691,8 @@ fn workerAstGenFile(
         const import_path = file.zir.nullTerminatedString(item.data.name);
         const import_result = blk: {
-            const lock = comp.mutex.acquire();
-            defer lock.release();
+            comp.mutex.lock();
+            defer comp.mutex.unlock();
             break :blk mod.importFile(file, import_path) catch continue;
         };
@@ -2933,8 +2933,8 @@ fn reportRetryableCObjectError(
         .column = 0,
     };
     {
-        const lock = comp.mutex.acquire();
-        defer lock.release();
+        comp.mutex.lock();
+        defer comp.mutex.unlock();
         try comp.failed_c_objects.putNoClobber(comp.gpa, c_object, c_obj_err_msg);
     }
 }
@@ -2981,8 +2981,8 @@ fn reportRetryableAstGenError(
     errdefer err_msg.destroy(gpa);
     {
-        const lock = comp.mutex.acquire();
-        defer lock.release();
+        comp.mutex.lock();
+        defer comp.mutex.unlock();
         try mod.failed_files.putNoClobber(gpa, file, err_msg);
     }
 }
@@ -3011,8 +3011,8 @@ fn reportRetryableEmbedFileError(
     errdefer err_msg.destroy(gpa);
     {
-        const lock = comp.mutex.acquire();
-        defer lock.release();
+        comp.mutex.lock();
+        defer comp.mutex.unlock();
         try mod.failed_embed_files.putNoClobber(gpa, embed_file, err_msg);
     }
 }
@@ -3031,8 +3031,8 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: *std.P
     if (c_object.clearStatus(comp.gpa)) {
         // There was previous failure.
-        const lock = comp.mutex.acquire();
-        defer lock.release();
+        comp.mutex.lock();
+        defer comp.mutex.unlock();
         // If the failure was OOM, there will not be an entry here, so we do
         // not assert discard.
         _ = comp.failed_c_objects.swapRemove(c_object);
@@ -3576,8 +3576,8 @@ fn failCObjWithOwnedErrorMsg(
 ) SemaError {
     @setCold(true);
     {
-        const lock = comp.mutex.acquire();
-        defer lock.release();
+        comp.mutex.lock();
+        defer comp.mutex.unlock();
         {
             errdefer err_msg.destroy(comp.gpa);
             try comp.failed_c_objects.ensureUnusedCapacity(comp.gpa, 1);
```


```diff
@@ -2629,8 +2629,8 @@ pub fn astGenFile(mod: *Module, file: *File) !void {
         // TODO don't report compile errors until Sema @importFile
         if (file.zir.hasCompileErrors()) {
             {
-                const lock = comp.mutex.acquire();
-                defer lock.release();
+                comp.mutex.lock();
+                defer comp.mutex.unlock();
                 try mod.failed_files.putNoClobber(gpa, file, null);
             }
             file.status = .astgen_failure;
@@ -2742,8 +2742,8 @@ pub fn astGenFile(mod: *Module, file: *File) !void {
     }
     {
-        const lock = comp.mutex.acquire();
-        defer lock.release();
+        comp.mutex.lock();
+        defer comp.mutex.unlock();
         try mod.failed_files.putNoClobber(gpa, file, err_msg);
     }
     file.status = .parse_failure;
@@ -2817,8 +2817,8 @@ pub fn astGenFile(mod: *Module, file: *File) !void {
     if (file.zir.hasCompileErrors()) {
         {
-            const lock = comp.mutex.acquire();
-            defer lock.release();
+            comp.mutex.lock();
+            defer comp.mutex.unlock();
             try mod.failed_files.putNoClobber(gpa, file, null);
         }
         file.status = .astgen_failure;
@@ -3701,8 +3701,8 @@ pub fn detectEmbedFileUpdate(mod: *Module, embed_file: *EmbedFile) !void {
     embed_file.stat_mtime = stat.mtime;
     embed_file.stat_inode = stat.inode;
-    const lock = mod.comp.mutex.acquire();
-    defer lock.release();
+    mod.comp.mutex.lock();
+    defer mod.comp.mutex.unlock();
     try mod.comp.work_queue.writeItem(.{ .update_embed_file = embed_file });
 }
@@ -4459,8 +4459,8 @@ fn lockAndClearFileCompileError(mod: *Module, file: *File) void {
     switch (file.status) {
         .success_zir, .retryable_failure => {},
         .never_loaded, .parse_failure, .astgen_failure => {
-            const lock = mod.comp.mutex.acquire();
-            defer lock.release();
+            mod.comp.mutex.lock();
+            defer mod.comp.mutex.unlock();
             if (mod.failed_files.fetchSwapRemove(file)) |kv| {
                 if (kv.value) |msg| msg.destroy(mod.gpa); // Delete previous error message.
             }
```


```diff
@@ -7,7 +7,7 @@ const std = @import("std");
 const builtin = @import("builtin");
 const ThreadPool = @This();
-lock: std.Thread.Mutex = .{},
+mutex: std.Thread.Mutex = .{},
 is_running: bool = true,
 allocator: *std.mem.Allocator,
 workers: []Worker,
@@ -28,26 +28,28 @@ const Worker = struct {
     idle_node: IdleQueue.Node,
     fn run(worker: *Worker) void {
-        while (true) {
-            const held = worker.pool.lock.acquire();
-            if (worker.pool.run_queue.popFirst()) |run_node| {
-                held.release();
+        const pool = worker.pool;
+        while (true) {
+            pool.mutex.lock();
+            if (pool.run_queue.popFirst()) |run_node| {
+                pool.mutex.unlock();
                 (run_node.data.runFn)(&run_node.data);
                 continue;
             }
-            if (worker.pool.is_running) {
+            if (pool.is_running) {
                 worker.idle_node.data.reset();
-                worker.pool.idle_queue.prepend(&worker.idle_node);
-                held.release();
+                pool.idle_queue.prepend(&worker.idle_node);
+                pool.mutex.unlock();
                 worker.idle_node.data.wait();
                 continue;
             }
-            held.release();
+            pool.mutex.unlock();
             return;
         }
     }
@@ -88,8 +90,8 @@ fn destroyWorkers(self: *ThreadPool, spawned: usize) void {
 pub fn deinit(self: *ThreadPool) void {
     {
-        const held = self.lock.acquire();
-        defer held.release();
+        self.mutex.lock();
+        defer self.mutex.unlock();
         self.is_running = false;
         while (self.idle_queue.popFirst()) |idle_node|
@@ -117,14 +119,15 @@ pub fn spawn(self: *ThreadPool, comptime func: anytype, args: anytype) !void {
             const closure = @fieldParentPtr(@This(), "run_node", run_node);
             @call(.{}, func, closure.arguments);
-            const held = closure.pool.lock.acquire();
-            defer held.release();
+            const mutex = &closure.pool.mutex;
+            mutex.lock();
+            defer mutex.unlock();
             closure.pool.allocator.destroy(closure);
         }
     };
-    const held = self.lock.acquire();
-    defer held.release();
+    self.mutex.lock();
+    defer self.mutex.unlock();
     const closure = try self.allocator.create(Closure);
     closure.* = .{
```


```diff
@@ -6,13 +6,13 @@
 const std = @import("std");
 const WaitGroup = @This();
-lock: std.Thread.Mutex = .{},
+mutex: std.Thread.Mutex = .{},
 counter: usize = 0,
 event: std.Thread.ResetEvent,
 pub fn init(self: *WaitGroup) !void {
     self.* = .{
-        .lock = .{},
+        .mutex = .{},
         .counter = 0,
         .event = undefined,
     };
@@ -25,15 +25,15 @@ pub fn deinit(self: *WaitGroup) void {
 }
 pub fn start(self: *WaitGroup) void {
-    const held = self.lock.acquire();
-    defer held.release();
+    self.mutex.lock();
+    defer self.mutex.unlock();
     self.counter += 1;
 }
 pub fn finish(self: *WaitGroup) void {
-    const held = self.lock.acquire();
-    defer held.release();
+    self.mutex.lock();
+    defer self.mutex.unlock();
     self.counter -= 1;
@@ -44,14 +44,14 @@ pub fn finish(self: *WaitGroup) void {
 pub fn wait(self: *WaitGroup) void {
     while (true) {
-        const held = self.lock.acquire();
+        self.mutex.lock();
         if (self.counter == 0) {
-            held.release();
+            self.mutex.unlock();
             return;
         }
-        held.release();
+        self.mutex.unlock();
         self.event.wait();
     }
 }
```


```diff
@@ -422,7 +422,7 @@ const PanicSwitch = struct {
         state.recover_stage = .release_ref_count;
-        _ = panic_mutex.acquire();
+        panic_mutex.lock();
         state.recover_stage = .release_mutex;
@@ -482,7 +482,7 @@ const PanicSwitch = struct {
     noinline fn releaseMutex(state: *volatile PanicState) noreturn {
         state.recover_stage = .abort;
-        panic_mutex.releaseDirect();
+        panic_mutex.unlock();
         goTo(releaseRefCount, .{state});
```