Merge branch 'windows-mutex' of https://github.com/emekoi/zig into emekoi-windows-mutex

Andrew Kelley 2019-02-01 10:21:16 -05:00
commit 8d3eb25e92
3 changed files with 155 additions and 43 deletions

View File

@@ -5,74 +5,138 @@ const AtomicRmwOp = builtin.AtomicRmwOp;
 const assert = std.debug.assert;
 const SpinLock = std.SpinLock;
 const linux = std.os.linux;
+const windows = std.os.windows;
 
 /// Lock may be held only once. If the same thread
 /// tries to acquire the same mutex twice, it deadlocks.
 /// The Linux implementation is based on mutex3 from
 /// https://www.akkadia.org/drepper/futex.pdf
-pub const Mutex = struct {
-    /// 0: unlocked
-    /// 1: locked, no waiters
-    /// 2: locked, one or more waiters
-    linux_lock: @typeOf(linux_lock_init),
-
-    /// TODO better implementation than spin lock
-    spin_lock: @typeOf(spin_lock_init),
-
-    const linux_lock_init = if (builtin.os == builtin.Os.linux) i32(0) else {};
-    const spin_lock_init = if (builtin.os != builtin.Os.linux) SpinLock.init() else {};
-
-    pub const Held = struct {
-        mutex: *Mutex,
-
-        pub fn release(self: Held) void {
-            if (builtin.os == builtin.Os.linux) {
-                const c = @atomicRmw(i32, &self.mutex.linux_lock, AtomicRmwOp.Sub, 1, AtomicOrder.Release);
+pub const Mutex = switch(builtin.os) {
+    builtin.Os.linux => struct {
+        /// 0: unlocked
+        /// 1: locked, no waiters
+        /// 2: locked, one or more waiters
+        lock: i32,
+
+        pub const Held = struct {
+            mutex: *Mutex,
+
+            pub fn release(self: Held) void {
+                const c = @atomicRmw(i32, &self.mutex.lock, AtomicRmwOp.Sub, 1, AtomicOrder.Release);
                 if (c != 1) {
-                    _ = @atomicRmw(i32, &self.mutex.linux_lock, AtomicRmwOp.Xchg, 0, AtomicOrder.Release);
-                    const rc = linux.futex_wake(&self.mutex.linux_lock, linux.FUTEX_WAKE | linux.FUTEX_PRIVATE_FLAG, 1);
+                    _ = @atomicRmw(i32, &self.mutex.lock, AtomicRmwOp.Xchg, 0, AtomicOrder.Release);
+                    const rc = linux.futex_wake(&self.mutex.lock, linux.FUTEX_WAKE | linux.FUTEX_PRIVATE_FLAG, 1);
                     switch (linux.getErrno(rc)) {
                         0 => {},
                         linux.EINVAL => unreachable,
                         else => unreachable,
                     }
                 }
-            } else {
-                SpinLock.Held.release(SpinLock.Held{ .spinlock = &self.mutex.spin_lock });
             }
-        }
-    };
+        };
 
-    pub fn init() Mutex {
-        return Mutex{
-            .linux_lock = linux_lock_init,
-            .spin_lock = spin_lock_init,
-        };
-    }
-
-    pub fn acquire(self: *Mutex) Held {
-        if (builtin.os == builtin.Os.linux) {
-            var c = @cmpxchgWeak(i32, &self.linux_lock, 0, 1, AtomicOrder.Acquire, AtomicOrder.Monotonic) orelse
+        pub fn init() Mutex {
+            return Mutex {
+                .lock = 0,
+            };
+        }
+
+        pub fn deinit(self: *Mutex) void {}
+
+        pub fn acquire(self: *Mutex) Held {
+            var c = @cmpxchgWeak(i32, &self.lock, 0, 1, AtomicOrder.Acquire, AtomicOrder.Monotonic) orelse
                 return Held{ .mutex = self };
             if (c != 2)
-                c = @atomicRmw(i32, &self.linux_lock, AtomicRmwOp.Xchg, 2, AtomicOrder.Acquire);
+                c = @atomicRmw(i32, &self.lock, AtomicRmwOp.Xchg, 2, AtomicOrder.Acquire);
             while (c != 0) {
-                const rc = linux.futex_wait(&self.linux_lock, linux.FUTEX_WAIT | linux.FUTEX_PRIVATE_FLAG, 2, null);
+                const rc = linux.futex_wait(&self.lock, linux.FUTEX_WAIT | linux.FUTEX_PRIVATE_FLAG, 2, null);
                 switch (linux.getErrno(rc)) {
                     0, linux.EINTR, linux.EAGAIN => {},
                     linux.EINVAL => unreachable,
                     else => unreachable,
                 }
-                c = @atomicRmw(i32, &self.linux_lock, AtomicRmwOp.Xchg, 2, AtomicOrder.Acquire);
+                c = @atomicRmw(i32, &self.lock, AtomicRmwOp.Xchg, 2, AtomicOrder.Acquire);
             }
-        } else {
-            _ = self.spin_lock.acquire();
+            return Held{ .mutex = self };
         }
-        return Held{ .mutex = self };
-    }
-};
+    },
+    builtin.Os.windows => struct {
+        lock: windows.CRITICAL_SECTION,
+        init_once: windows.RTL_RUN_ONCE,
+
+        pub const Held = struct {
+            mutex: *Mutex,
+
+            pub fn release(self: Held) void {
+                windows.LeaveCriticalSection(&self.mutex.lock);
+            }
+        };
+
+        pub fn init() Mutex {
+            return Mutex {
+                .lock = undefined,
+                .init_once = windows.INIT_ONCE_STATIC_INIT,
+            };
+        }
+
+        extern fn initCriticalSection(
+            InitOnce: *windows.RTL_RUN_ONCE,
+            Parameter: ?windows.PVOID,
+            Context: ?windows.PVOID
+        ) windows.BOOL {
+            var lock = @ptrCast(
+                *windows.CRITICAL_SECTION,
+                @alignCast(@alignOf(*windows.CRITICAL_SECTION), Context.?)
+            );
+            windows.InitializeCriticalSection(lock);
+            return windows.TRUE;
+        }
+
+        pub fn deinit(self: *Mutex) void {
+            windows.DeleteCriticalSection(&self.lock);
+        }
+
+        pub fn acquire(self: *Mutex) Held {
+            if (windows.InitOnceExecuteOnce(
+                &self.init_once,
+                initCriticalSection,
+                null, @ptrCast(?windows.PVOID, self)
+            ) == windows.FALSE) {
+                unreachable;
+            }
+            windows.EnterCriticalSection(&self.lock);
+            return Held { .mutex = self };
+        }
+    },
+    else => struct {
+        /// TODO better implementation than spin lock
+        lock: SpinLock,
+
+        pub const Held = struct {
+            mutex: *Mutex,
+
+            pub fn release(self: Held) void {
+                SpinLock.Held.release(SpinLock.Held { .spinlock = &self.mutex.lock });
+            }
+        };
+
+        pub fn init() Mutex {
+            return Mutex {
+                .lock = SpinLock.init(),
+            };
+        }
+
+        pub fn deinit(self: *Mutex) void {}
+
+        pub fn acquire(self: *Mutex) Held {
+            _ = self.lock.acquire();
+            return Held { .mutex = self };
+        }
+    },
+};
 
-const Context = struct {
+const TestContext = struct {
     mutex: *Mutex,
     data: i128,
@@ -90,7 +154,9 @@ test "std.Mutex" {
     var a = &fixed_buffer_allocator.allocator;
 
     var mutex = Mutex.init();
-    var context = Context{
+    defer mutex.deinit();
+
+    var context = TestContext{
         .mutex = &mutex,
         .data = 0,
     };
@@ -103,12 +169,12 @@ test "std.Mutex" {
     for (threads) |t|
        t.wait();
 
-    std.debug.assertOrPanic(context.data == thread_count * Context.incr_count);
+    std.debug.assertOrPanic(context.data == thread_count * TestContext.incr_count);
 }
 
-fn worker(ctx: *Context) void {
+fn worker(ctx: *TestContext) void {
     var i: usize = 0;
-    while (i != Context.incr_count) : (i += 1) {
+    while (i != TestContext.incr_count) : (i += 1) {
         const held = ctx.mutex.acquire();
         defer held.release();
         ctx.data += 1;

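The Linux branch keeps the mutex3 protocol from Drepper's paper: the lock word moves between 0 (unlocked), 1 (locked, no waiters), and 2 (locked, contended), and release() only issues a futex_wake after observing contention, so an uncontended lock/unlock pair never enters the kernel. The public API gains deinit(), a no-op everywhere except Windows, where the CRITICAL_SECTION must be deleted. A minimal usage sketch, assuming the std.Mutex API exactly as introduced in this diff (Zig 0.3-era syntax; the bump/shared names are illustrative, not part of the change):

    const std = @import("std");

    var shared: u32 = 0;

    fn bump(mutex: *std.Mutex) void {
        const held = mutex.acquire(); // blocks until the lock is available
        defer held.release(); // runs on every return path out of bump

        shared += 1; // protected by the mutex
    }

    test "Mutex usage sketch" {
        var mutex = std.Mutex.init();
        defer mutex.deinit(); // no-op on Linux; DeleteCriticalSection on Windows

        bump(&mutex);
        std.debug.assert(shared == 1);
    }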
View File

@@ -49,6 +49,7 @@ pub const UNICODE = false;
 pub const WCHAR = u16;
 pub const WORD = u16;
 pub const LARGE_INTEGER = i64;
+pub const LONG = c_long;
 pub const TRUE = 1;
 pub const FALSE = 0;

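LONG is needed by the RTL_CRITICAL_SECTION fields added in the next file. c_long is a faithful mapping because Windows uses the LLP64 model, where LONG stays 32 bits even on 64-bit targets. A comptime sanity check one could add (illustrative only, not part of this commit):

    const std = @import("std");
    const builtin = @import("builtin");
    const windows = std.os.windows;

    comptime {
        if (builtin.os == builtin.Os.windows) {
            // LLP64: LONG is 4 bytes on both 32-bit and 64-bit Windows.
            std.debug.assert(@sizeOf(windows.LONG) == 4);
        }
    }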
View File

@@ -220,3 +220,48 @@ pub const FOREGROUND_BLUE = 1;
 pub const FOREGROUND_GREEN = 2;
 pub const FOREGROUND_RED = 4;
 pub const FOREGROUND_INTENSITY = 8;
+
+pub extern "kernel32" stdcallcc fn InitializeCriticalSection(lpCriticalSection: *CRITICAL_SECTION) void;
+pub extern "kernel32" stdcallcc fn EnterCriticalSection(lpCriticalSection: *CRITICAL_SECTION) void;
+pub extern "kernel32" stdcallcc fn LeaveCriticalSection(lpCriticalSection: *CRITICAL_SECTION) void;
+pub extern "kernel32" stdcallcc fn DeleteCriticalSection(lpCriticalSection: *CRITICAL_SECTION) void;
+
+pub const LIST_ENTRY = extern struct {
+    Flink: *LIST_ENTRY,
+    Blink: *LIST_ENTRY,
+};
+
+pub const RTL_CRITICAL_SECTION_DEBUG = extern struct {
+    Type: WORD,
+    CreatorBackTraceIndex: WORD,
+    CriticalSection: *RTL_CRITICAL_SECTION,
+    ProcessLocksList: LIST_ENTRY,
+    EntryCount: DWORD,
+    ContentionCount: DWORD,
+    Flags: DWORD,
+    CreatorBackTraceIndexHigh: WORD,
+    SpareWORD: WORD,
+};
+
+pub const RTL_CRITICAL_SECTION = extern struct {
+    DebugInfo: *RTL_CRITICAL_SECTION_DEBUG,
+    LockCount: LONG,
+    RecursionCount: LONG,
+    OwningThread: HANDLE,
+    LockSemaphore: HANDLE,
+    SpinCount: ULONG_PTR,
+};
+
+pub const CRITICAL_SECTION = RTL_CRITICAL_SECTION;
+
+pub extern "kernel32" stdcallcc fn InitOnceExecuteOnce(InitOnce: *RTL_RUN_ONCE, InitFn: PINIT_ONCE_FN, Context: ?PVOID, Parameter: ?LPVOID) BOOL;
+
+pub const PINIT_ONCE_FN = ?extern fn(InitOnce: *RTL_RUN_ONCE, Parameter: ?PVOID, Context: ?PVOID) BOOL;
+
+pub const RTL_RUN_ONCE = extern struct {
+    Ptr: ?PVOID,
+};
+
+pub const INIT_ONCE_STATIC_INIT = RTL_RUN_ONCE {
+    .Ptr = null,
+};
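These declarations carry the Windows side of the mutex: CRITICAL_SECTION is the lock itself, and InitOnceExecuteOnce gives acquire() a race-free way to initialize it lazily, since a CRITICAL_SECTION has no static initializer. The callback runs at most once per RTL_RUN_ONCE; every other thread that races in blocks until it has returned. A standalone sketch of the same init-once pattern against the bindings above (ensureInit, initGlobal, and global are hypothetical names; the argument order follows the declaration in this diff):

    const std = @import("std");
    const windows = std.os.windows;

    var once = windows.INIT_ONCE_STATIC_INIT;
    var global: u32 = 0;

    // Invoked at most once per `once`, no matter how many threads race here.
    extern fn initGlobal(
        InitOnce: *windows.RTL_RUN_ONCE,
        Parameter: ?windows.PVOID,
        Context: ?windows.PVOID
    ) windows.BOOL {
        global = 42;
        return windows.TRUE; // returning FALSE would report failed initialization
    }

    fn ensureInit() void {
        if (windows.InitOnceExecuteOnce(&once, initGlobal, null, null) == windows.FALSE) {
            unreachable; // cannot fail when the callback always returns TRUE
        }
    }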