const std = @import("std.zig");
const debug = std.debug;
const assert = debug.assert;
const testing = std.testing;
const mem = std.mem;
const os = std.os;
const builtin = @import("builtin");
const c = std.c;
const maxInt = std.math.maxInt;

pub const LoggingAllocator = @import("heap/logging_allocator.zig").LoggingAllocator;

const Allocator = mem.Allocator;

pub const c_allocator = &c_allocator_state;
var c_allocator_state = Allocator{
    .reallocFn = cRealloc,
    .shrinkFn = cShrink,
};

fn cRealloc(self: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
    assert(new_align <= @alignOf(c_longdouble));
    const old_ptr = if (old_mem.len == 0) null else @ptrCast(*c_void, old_mem.ptr);
    const buf = c.realloc(old_ptr, new_size) orelse return error.OutOfMemory;
    return @ptrCast([*]u8, buf)[0..new_size];
}

fn cShrink(self: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
    const old_ptr = @ptrCast(*c_void, old_mem.ptr);
    const buf = c.realloc(old_ptr, new_size) orelse return old_mem[0..new_size];
    return @ptrCast([*]u8, buf)[0..new_size];
}

/// This allocator makes a syscall directly for every allocation and free.
/// Thread-safe and lock-free.
pub const direct_allocator = &direct_allocator_state;
var direct_allocator_state = Allocator{
    .reallocFn = DirectAllocator.realloc,
    .shrinkFn = DirectAllocator.shrink,
};
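
// Minimal usage sketch for `direct_allocator` (illustrative only; the real
// coverage lives in the tests at the bottom of this file). Every allocation
// here maps fresh pages from the OS and every free returns them.
test "direct_allocator usage sketch" {
    const allocator = direct_allocator;
    const bytes = try allocator.alloc(u8, 100);
    defer allocator.free(bytes);
    bytes[0] = 1;
    testing.expect(bytes.len == 100);
}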

const DirectAllocator = struct {
    fn alloc(allocator: *Allocator, n: usize, alignment: u29) error{OutOfMemory}![]u8 {
        if (n == 0)
            return (([*]u8)(undefined))[0..0];

        if (os.windows.is_the_target) {
            const w = os.windows;

            // Although officially it's at least aligned to a page boundary,
            // Windows is known to reserve pages on a 64K boundary. It's
            // even more likely that the requested alignment is <= 64K than
            // 4K, so we're just allocating blindly and hoping for the best.
            // see https://devblogs.microsoft.com/oldnewthing/?p=42223
            const addr = w.VirtualAlloc(
                null,
                n,
                w.MEM_COMMIT | w.MEM_RESERVE,
                w.PAGE_READWRITE,
            ) catch return error.OutOfMemory;

            // If the allocation is sufficiently aligned, use it.
            if (@ptrToInt(addr) & (alignment - 1) == 0) {
                return @ptrCast([*]u8, addr)[0..n];
            }

            // If it wasn't, actually do an explicitly aligned allocation.
            w.VirtualFree(addr, 0, w.MEM_RELEASE);
            const alloc_size = n + alignment;

            const final_addr = while (true) {
                // Reserve a range of memory large enough to find a sufficiently
                // aligned address.
                const reserved_addr = w.VirtualAlloc(
                    null,
                    alloc_size,
                    w.MEM_RESERVE,
                    w.PAGE_NOACCESS,
                ) catch return error.OutOfMemory;
                const aligned_addr = mem.alignForward(@ptrToInt(reserved_addr), alignment);

                // Release the reserved pages (not actually used).
                w.VirtualFree(reserved_addr, 0, w.MEM_RELEASE);

                // At this point, it is possible that another thread has
                // obtained some memory space that will cause the next
                // VirtualAlloc call to fail. To handle this, we will retry
                // until it succeeds.
                const ptr = w.VirtualAlloc(
                    @intToPtr(*c_void, aligned_addr),
                    n,
                    w.MEM_COMMIT | w.MEM_RESERVE,
                    w.PAGE_READWRITE,
                ) catch continue;

                return @ptrCast([*]u8, ptr)[0..n];
            };

            return @ptrCast([*]u8, final_addr)[0..n];
        }

        const alloc_size = if (alignment <= mem.page_size) n else n + alignment;
        const slice = os.mmap(
            null,
            mem.alignForward(alloc_size, mem.page_size),
            os.PROT_READ | os.PROT_WRITE,
            os.MAP_PRIVATE | os.MAP_ANONYMOUS,
            -1,
            0,
        ) catch return error.OutOfMemory;
        if (alloc_size == n) return slice[0..n];

        const aligned_addr = mem.alignForward(@ptrToInt(slice.ptr), alignment);

        // Unmap the extra bytes that were only requested in order to guarantee
        // that the range of memory we were provided had a proper alignment in
        // it somewhere. The extra bytes could be at the beginning, or end, or both.
        const unused_start_len = aligned_addr - @ptrToInt(slice.ptr);
        if (unused_start_len != 0) {
            os.munmap(slice[0..unused_start_len]);
        }
        const aligned_end_addr = mem.alignForward(aligned_addr + n, mem.page_size);
        const unused_end_len = @ptrToInt(slice.ptr) + slice.len - aligned_end_addr;
        if (unused_end_len != 0) {
            os.munmap(@intToPtr([*]align(mem.page_size) u8, aligned_end_addr)[0..unused_end_len]);
        }

        return @intToPtr([*]u8, aligned_addr)[0..n];
    }

    fn shrink(allocator: *Allocator, old_mem_unaligned: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
        const old_mem = @alignCast(mem.page_size, old_mem_unaligned);
        if (os.windows.is_the_target) {
            const w = os.windows;
            if (new_size == 0) {
                // From the docs:
                // "If the dwFreeType parameter is MEM_RELEASE, this parameter
                // must be 0 (zero). The function frees the entire region that
                // is reserved in the initial allocation call to VirtualAlloc."
                // So we can only use MEM_RELEASE when actually releasing the
                // whole allocation.
                w.VirtualFree(old_mem.ptr, 0, w.MEM_RELEASE);
            } else {
                const base_addr = @ptrToInt(old_mem.ptr);
                const old_addr_end = base_addr + old_mem.len;
                const new_addr_end = base_addr + new_size;
                const new_addr_end_rounded = mem.alignForward(new_addr_end, mem.page_size);
                if (old_addr_end > new_addr_end_rounded) {
                    // For shrinking that is not releasing, we will only
                    // decommit the pages not needed anymore.
                    w.VirtualFree(
                        @intToPtr(*c_void, new_addr_end_rounded),
                        old_addr_end - new_addr_end_rounded,
                        w.MEM_DECOMMIT,
                    );
                }
            }
            return old_mem[0..new_size];
        }
        const base_addr = @ptrToInt(old_mem.ptr);
        const old_addr_end = base_addr + old_mem.len;
        const new_addr_end = base_addr + new_size;
        const new_addr_end_rounded = mem.alignForward(new_addr_end, mem.page_size);
        if (old_addr_end > new_addr_end_rounded) {
            const ptr = @intToPtr([*]align(mem.page_size) u8, new_addr_end_rounded);
            os.munmap(ptr[0 .. old_addr_end - new_addr_end_rounded]);
        }
        return old_mem[0..new_size];
    }

    fn realloc(allocator: *Allocator, old_mem_unaligned: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
        const old_mem = @alignCast(mem.page_size, old_mem_unaligned);
        if (os.windows.is_the_target) {
            if (old_mem.len == 0) {
                return alloc(allocator, new_size, new_align);
            }

            if (new_size <= old_mem.len and new_align <= old_align) {
                return shrink(allocator, old_mem, old_align, new_size, new_align);
            }

            const w = os.windows;
            const base_addr = @ptrToInt(old_mem.ptr);

            if (new_align > old_align and base_addr & (new_align - 1) != 0) {
                // Current allocation doesn't satisfy the new alignment.
                // For now we'll do a new one no matter what, but maybe
                // there is something smarter to do instead.
                const result = try alloc(allocator, new_size, new_align);
                assert(old_mem.len != 0);
                @memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len));
                w.VirtualFree(old_mem.ptr, 0, w.MEM_RELEASE);

                return result;
            }

            const old_addr_end = base_addr + old_mem.len;
            const old_addr_end_rounded = mem.alignForward(old_addr_end, mem.page_size);
            const new_addr_end = base_addr + new_size;
            const new_addr_end_rounded = mem.alignForward(new_addr_end, mem.page_size);
            if (new_addr_end_rounded == old_addr_end_rounded) {
                // The reallocation fits in the already allocated pages.
                return @ptrCast([*]u8, old_mem.ptr)[0..new_size];
            }
            assert(new_addr_end_rounded > old_addr_end_rounded);

            // We need to commit new pages.
            const additional_size = new_addr_end - old_addr_end_rounded;
            const realloc_addr = w.kernel32.VirtualAlloc(
                @intToPtr(*c_void, old_addr_end_rounded),
                additional_size,
                w.MEM_COMMIT | w.MEM_RESERVE,
                w.PAGE_READWRITE,
            ) orelse {
                // Committing new pages at the end of the existing allocation
                // failed, we need to try a new one.
                const new_alloc_mem = try alloc(allocator, new_size, new_align);
                @memcpy(new_alloc_mem.ptr, old_mem.ptr, old_mem.len);
                w.VirtualFree(old_mem.ptr, 0, w.MEM_RELEASE);

                return new_alloc_mem;
            };

            assert(@ptrToInt(realloc_addr) == old_addr_end_rounded);
            return @ptrCast([*]u8, old_mem.ptr)[0..new_size];
        }
        if (new_size <= old_mem.len and new_align <= old_align) {
            return shrink(allocator, old_mem, old_align, new_size, new_align);
        }
        const result = try alloc(allocator, new_size, new_align);
        if (old_mem.len != 0) {
            @memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len));
            os.munmap(old_mem);
        }
        return result;
    }
};

pub const HeapAllocator = switch (builtin.os) {
    .windows => struct {
        allocator: Allocator,
        heap_handle: ?HeapHandle,

        const HeapHandle = os.windows.HANDLE;

        pub fn init() HeapAllocator {
            return HeapAllocator{
                .allocator = Allocator{
                    .reallocFn = realloc,
                    .shrinkFn = shrink,
                },
                .heap_handle = null,
            };
        }

        pub fn deinit(self: *HeapAllocator) void {
            if (self.heap_handle) |heap_handle| {
                os.windows.HeapDestroy(heap_handle);
            }
        }

        fn alloc(allocator: *Allocator, n: usize, alignment: u29) error{OutOfMemory}![]u8 {
            const self = @fieldParentPtr(HeapAllocator, "allocator", allocator);
            if (n == 0)
                return (([*]u8)(undefined))[0..0];

            const amt = n + alignment + @sizeOf(usize);
            const optional_heap_handle = @atomicLoad(?HeapHandle, &self.heap_handle, builtin.AtomicOrder.SeqCst);
            const heap_handle = optional_heap_handle orelse blk: {
                const options = if (builtin.single_threaded) os.windows.HEAP_NO_SERIALIZE else 0;
                const hh = os.windows.kernel32.HeapCreate(options, amt, 0) orelse return error.OutOfMemory;
                const other_hh = @cmpxchgStrong(?HeapHandle, &self.heap_handle, null, hh, builtin.AtomicOrder.SeqCst, builtin.AtomicOrder.SeqCst) orelse break :blk hh;
                os.windows.HeapDestroy(hh);
                break :blk other_hh.?; // can't be null because of the cmpxchg
            };
            const ptr = os.windows.kernel32.HeapAlloc(heap_handle, 0, amt) orelse return error.OutOfMemory;
            const root_addr = @ptrToInt(ptr);
            const adjusted_addr = mem.alignForward(root_addr, alignment);
            const record_addr = adjusted_addr + n;
            @intToPtr(*align(1) usize, record_addr).* = root_addr;
            return @intToPtr([*]u8, adjusted_addr)[0..n];
        }

        fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
            return realloc(allocator, old_mem, old_align, new_size, new_align) catch {
                const old_adjusted_addr = @ptrToInt(old_mem.ptr);
                const old_record_addr = old_adjusted_addr + old_mem.len;
                const root_addr = @intToPtr(*align(1) usize, old_record_addr).*;
                const old_ptr = @intToPtr(*c_void, root_addr);
                const new_record_addr = old_record_addr - new_size + old_mem.len;
                @intToPtr(*align(1) usize, new_record_addr).* = root_addr;
                return old_mem[0..new_size];
            };
        }

        fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
            if (old_mem.len == 0) return alloc(allocator, new_size, new_align);

            const self = @fieldParentPtr(HeapAllocator, "allocator", allocator);
            const old_adjusted_addr = @ptrToInt(old_mem.ptr);
            const old_record_addr = old_adjusted_addr + old_mem.len;
            const root_addr = @intToPtr(*align(1) usize, old_record_addr).*;
            const old_ptr = @intToPtr(*c_void, root_addr);

            if (new_size == 0) {
                os.windows.HeapFree(self.heap_handle.?, 0, old_ptr);
                return old_mem[0..0];
            }

            const amt = new_size + new_align + @sizeOf(usize);
            const new_ptr = os.windows.kernel32.HeapReAlloc(
                self.heap_handle.?,
                0,
                old_ptr,
                amt,
            ) orelse return error.OutOfMemory;
            const offset = old_adjusted_addr - root_addr;
            const new_root_addr = @ptrToInt(new_ptr);
            var new_adjusted_addr = new_root_addr + offset;
            const offset_is_valid = new_adjusted_addr + new_size + @sizeOf(usize) <= new_root_addr + amt;
            const offset_is_aligned = new_adjusted_addr % new_align == 0;
            if (!offset_is_valid or !offset_is_aligned) {
                // If HeapReAlloc didn't happen to move the memory to the new alignment,
                // or the memory starting at the old offset would be outside of the new allocation,
                // then we need to copy the memory to a valid aligned address and use that.
                const new_aligned_addr = mem.alignForward(new_root_addr, new_align);
                @memcpy(@intToPtr([*]u8, new_aligned_addr), @intToPtr([*]u8, new_adjusted_addr), std.math.min(old_mem.len, new_size));
                new_adjusted_addr = new_aligned_addr;
            }
            const new_record_addr = new_adjusted_addr + new_size;
            @intToPtr(*align(1) usize, new_record_addr).* = new_root_addr;
            return @intToPtr([*]u8, new_adjusted_addr)[0..new_size];
        }
    },
    else => @compileError("Unsupported OS"),
};

/// This allocator takes an existing allocator, wraps it, and provides an interface
/// where you can allocate without freeing, and then free it all together.
pub const ArenaAllocator = struct {
    pub allocator: Allocator,

    child_allocator: *Allocator,
    buffer_list: std.SinglyLinkedList([]u8),
    end_index: usize,

    const BufNode = std.SinglyLinkedList([]u8).Node;

    pub fn init(child_allocator: *Allocator) ArenaAllocator {
        return ArenaAllocator{
            .allocator = Allocator{
                .reallocFn = realloc,
                .shrinkFn = shrink,
            },
            .child_allocator = child_allocator,
            .buffer_list = std.SinglyLinkedList([]u8).init(),
            .end_index = 0,
        };
    }

    pub fn deinit(self: *ArenaAllocator) void {
        var it = self.buffer_list.first;
        while (it) |node| {
            // this has to occur before the free because the free frees node
            const next_it = node.next;
            self.child_allocator.free(node.data);
            it = next_it;
        }
    }

    fn createNode(self: *ArenaAllocator, prev_len: usize, minimum_size: usize) !*BufNode {
        const actual_min_size = minimum_size + @sizeOf(BufNode);
        var len = prev_len;
        while (true) {
            len += len / 2;
            len += mem.page_size - @rem(len, mem.page_size);
            if (len >= actual_min_size) break;
        }
        const buf = try self.child_allocator.alignedAlloc(u8, @alignOf(BufNode), len);
        const buf_node_slice = @bytesToSlice(BufNode, buf[0..@sizeOf(BufNode)]);
        const buf_node = &buf_node_slice[0];
        buf_node.* = BufNode{
            .data = buf,
            .next = null,
        };
        self.buffer_list.prepend(buf_node);
        self.end_index = 0;
        return buf_node;
    }

    fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
        const self = @fieldParentPtr(ArenaAllocator, "allocator", allocator);

        var cur_node = if (self.buffer_list.first) |first_node| first_node else try self.createNode(0, n + alignment);
        while (true) {
            const cur_buf = cur_node.data[@sizeOf(BufNode)..];
            const addr = @ptrToInt(cur_buf.ptr) + self.end_index;
            const adjusted_addr = mem.alignForward(addr, alignment);
            const adjusted_index = self.end_index + (adjusted_addr - addr);
            const new_end_index = adjusted_index + n;
            if (new_end_index > cur_buf.len) {
                cur_node = try self.createNode(cur_buf.len, n + alignment);
                continue;
            }
            const result = cur_buf[adjusted_index..new_end_index];
            self.end_index = new_end_index;
            return result;
        }
    }

    fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
        if (new_size <= old_mem.len and new_align <= new_size) {
            // We can't do anything with the memory, so tell the client to keep it.
            return error.OutOfMemory;
        } else {
            const result = try alloc(allocator, new_size, new_align);
            @memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len));
            return result;
        }
    }

    fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
        return old_mem[0..new_size];
    }
};
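
// Minimal usage sketch of the arena pattern described in the doc comment
// above (illustrative only): individual allocations are never freed; one
// `deinit` releases everything that was allocated through the arena.
test "ArenaAllocator usage sketch" {
    var arena = ArenaAllocator.init(direct_allocator);
    defer arena.deinit();

    var i: usize = 0;
    while (i < 10) : (i += 1) {
        const item = try arena.allocator.create(i32);
        item.* = @intCast(i32, i);
    }
    // No per-item destroy/free calls are needed here.
}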

pub const FixedBufferAllocator = struct {
    allocator: Allocator,
    end_index: usize,
    buffer: []u8,

    pub fn init(buffer: []u8) FixedBufferAllocator {
        return FixedBufferAllocator{
            .allocator = Allocator{
                .reallocFn = realloc,
                .shrinkFn = shrink,
            },
            .buffer = buffer,
            .end_index = 0,
        };
    }

    fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
        const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
        const addr = @ptrToInt(self.buffer.ptr) + self.end_index;
        const adjusted_addr = mem.alignForward(addr, alignment);
        const adjusted_index = self.end_index + (adjusted_addr - addr);
        const new_end_index = adjusted_index + n;
        if (new_end_index > self.buffer.len) {
            return error.OutOfMemory;
        }
        const result = self.buffer[adjusted_index..new_end_index];
        self.end_index = new_end_index;

        return result;
    }

    fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
        const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
        assert(old_mem.len <= self.end_index);
        if (old_mem.ptr == self.buffer.ptr + self.end_index - old_mem.len and
            mem.alignForward(@ptrToInt(old_mem.ptr), new_align) == @ptrToInt(old_mem.ptr))
        {
            const start_index = self.end_index - old_mem.len;
            const new_end_index = start_index + new_size;
            if (new_end_index > self.buffer.len) return error.OutOfMemory;
            const result = self.buffer[start_index..new_end_index];
            self.end_index = new_end_index;
            return result;
        } else if (new_size <= old_mem.len and new_align <= old_align) {
            // We can't do anything with the memory, so tell the client to keep it.
            return error.OutOfMemory;
        } else {
            const result = try alloc(allocator, new_size, new_align);
            @memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len));
            return result;
        }
    }

    fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
        return old_mem[0..new_size];
    }

    pub inline fn reset(self: *FixedBufferAllocator) void {
        self.end_index = 0;
    }
};
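
// Minimal usage sketch (illustrative only): a FixedBufferAllocator hands out
// slices of a caller-provided buffer, here a small stack array, and never
// touches the heap.
test "FixedBufferAllocator usage sketch" {
    var buffer: [64]u8 = undefined;
    var fba = FixedBufferAllocator.init(buffer[0..]);

    const slice = try fba.allocator.alloc(u8, 32);
    testing.expect(slice.len == 32);
    // Asking for more than the buffer can hold fails with OutOfMemory.
    testing.expectError(error.OutOfMemory, fba.allocator.alloc(u8, 64));
}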

// FIXME: Exposing LLVM intrinsics is a bug
// See: https://github.com/ziglang/zig/issues/2291
extern fn @"llvm.wasm.memory.size.i32"(u32) u32;
extern fn @"llvm.wasm.memory.grow.i32"(u32, u32) i32;

pub const wasm_allocator = &wasm_allocator_state.allocator;
var wasm_allocator_state = WasmAllocator{
    .allocator = Allocator{
        .reallocFn = WasmAllocator.realloc,
        .shrinkFn = WasmAllocator.shrink,
    },
    .start_ptr = undefined,
    .num_pages = 0,
    .end_index = 0,
};

const WasmAllocator = struct {
    allocator: Allocator,
    start_ptr: [*]u8,
    num_pages: usize,
    end_index: usize,

    comptime {
        if (builtin.arch != .wasm32) {
            @compileError("WasmAllocator is only available for wasm32 arch");
        }
    }

    fn alloc(allocator: *Allocator, size: usize, alignment: u29) ![]u8 {
        const self = @fieldParentPtr(WasmAllocator, "allocator", allocator);

        const addr = @ptrToInt(self.start_ptr) + self.end_index;
        const adjusted_addr = mem.alignForward(addr, alignment);
        const adjusted_index = self.end_index + (adjusted_addr - addr);
        const new_end_index = adjusted_index + size;

        if (new_end_index > self.num_pages * mem.page_size) {
            const required_memory = new_end_index - (self.num_pages * mem.page_size);

            var num_pages: usize = required_memory / mem.page_size;
            if (required_memory % mem.page_size != 0) {
                num_pages += 1;
            }

            const prev_page = @"llvm.wasm.memory.grow.i32"(0, @intCast(u32, num_pages));
            if (prev_page == -1) {
                return error.OutOfMemory;
            }

            self.num_pages += num_pages;
        }

        const result = self.start_ptr[adjusted_index..new_end_index];
        self.end_index = new_end_index;

        return result;
    }

    // Check if memory is the last "item" and is aligned correctly
    fn is_last_item(allocator: *Allocator, memory: []u8, alignment: u29) bool {
        const self = @fieldParentPtr(WasmAllocator, "allocator", allocator);
        return memory.ptr == self.start_ptr + self.end_index - memory.len and mem.alignForward(@ptrToInt(memory.ptr), alignment) == @ptrToInt(memory.ptr);
    }

    fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
        const self = @fieldParentPtr(WasmAllocator, "allocator", allocator);

        // Initialize start_ptr at the first realloc
        if (self.num_pages == 0) {
            self.start_ptr = @intToPtr([*]u8, @intCast(usize, @"llvm.wasm.memory.size.i32"(0)) * mem.page_size);
        }

        if (is_last_item(allocator, old_mem, new_align)) {
            const start_index = self.end_index - old_mem.len;
            const new_end_index = start_index + new_size;

            if (new_end_index > self.num_pages * mem.page_size) {
                _ = try alloc(allocator, new_end_index - self.end_index, new_align);
            }
            const result = self.start_ptr[start_index..new_end_index];

            self.end_index = new_end_index;
            return result;
        } else if (new_size <= old_mem.len and new_align <= old_align) {
            return error.OutOfMemory;
        } else {
            const result = try alloc(allocator, new_size, new_align);
            mem.copy(u8, result, old_mem);
            return result;
        }
    }

    fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
        return old_mem[0..new_size];
    }
};

pub const ThreadSafeFixedBufferAllocator = blk: {
    if (builtin.single_threaded) {
        break :blk FixedBufferAllocator;
    } else {
        // lock free
        break :blk struct {
            allocator: Allocator,
            end_index: usize,
            buffer: []u8,

            pub fn init(buffer: []u8) ThreadSafeFixedBufferAllocator {
                return ThreadSafeFixedBufferAllocator{
                    .allocator = Allocator{
                        .reallocFn = realloc,
                        .shrinkFn = shrink,
                    },
                    .buffer = buffer,
                    .end_index = 0,
                };
            }

            fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
                const self = @fieldParentPtr(ThreadSafeFixedBufferAllocator, "allocator", allocator);
                var end_index = @atomicLoad(usize, &self.end_index, builtin.AtomicOrder.SeqCst);
                while (true) {
                    const addr = @ptrToInt(self.buffer.ptr) + end_index;
                    const adjusted_addr = mem.alignForward(addr, alignment);
                    const adjusted_index = end_index + (adjusted_addr - addr);
                    const new_end_index = adjusted_index + n;
                    if (new_end_index > self.buffer.len) {
                        return error.OutOfMemory;
                    }
                    end_index = @cmpxchgWeak(usize, &self.end_index, end_index, new_end_index, builtin.AtomicOrder.SeqCst, builtin.AtomicOrder.SeqCst) orelse return self.buffer[adjusted_index..new_end_index];
                }
            }

            fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
                if (new_size <= old_mem.len and new_align <= old_align) {
                    // We can't do anything useful with the memory, tell the client to keep it.
                    return error.OutOfMemory;
                } else {
                    const result = try alloc(allocator, new_size, new_align);
                    @memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len));
                    return result;
                }
            }

            fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
                return old_mem[0..new_size];
            }
        };
    }
};

pub fn stackFallback(comptime size: usize, fallback_allocator: *Allocator) StackFallbackAllocator(size) {
    return StackFallbackAllocator(size){
        .buffer = undefined,
        .fallback_allocator = fallback_allocator,
        .fixed_buffer_allocator = undefined,
        .allocator = Allocator{
            .reallocFn = StackFallbackAllocator(size).realloc,
            .shrinkFn = StackFallbackAllocator(size).shrink,
        },
    };
}

pub fn StackFallbackAllocator(comptime size: usize) type {
    return struct {
        const Self = @This();

        buffer: [size]u8,
        allocator: Allocator,
        fallback_allocator: *Allocator,
        fixed_buffer_allocator: FixedBufferAllocator,

        pub fn get(self: *Self) *Allocator {
            self.fixed_buffer_allocator = FixedBufferAllocator.init(self.buffer[0..]);
            return &self.allocator;
        }

        fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
            const self = @fieldParentPtr(Self, "allocator", allocator);
            const in_buffer = @ptrToInt(old_mem.ptr) >= @ptrToInt(&self.buffer) and
                @ptrToInt(old_mem.ptr) < @ptrToInt(&self.buffer) + self.buffer.len;
            if (in_buffer) {
                return FixedBufferAllocator.realloc(
                    &self.fixed_buffer_allocator.allocator,
                    old_mem,
                    old_align,
                    new_size,
                    new_align,
                ) catch {
                    const result = try self.fallback_allocator.reallocFn(
                        self.fallback_allocator,
                        ([*]u8)(undefined)[0..0],
                        undefined,
                        new_size,
                        new_align,
                    );
                    mem.copy(u8, result, old_mem);
                    return result;
                };
            }
            return self.fallback_allocator.reallocFn(
                self.fallback_allocator,
                old_mem,
                old_align,
                new_size,
                new_align,
            );
        }

        fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
            const self = @fieldParentPtr(Self, "allocator", allocator);
            const in_buffer = @ptrToInt(old_mem.ptr) >= @ptrToInt(&self.buffer) and
                @ptrToInt(old_mem.ptr) < @ptrToInt(&self.buffer) + self.buffer.len;
            if (in_buffer) {
                return FixedBufferAllocator.shrink(
                    &self.fixed_buffer_allocator.allocator,
                    old_mem,
                    old_align,
                    new_size,
                    new_align,
                );
            }
            return self.fallback_allocator.shrinkFn(
                self.fallback_allocator,
                old_mem,
                old_align,
                new_size,
                new_align,
            );
        }
    };
}
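
// Minimal usage sketch (illustrative only): `stackFallback` bundles a small
// fixed buffer with a fallback allocator; requests that do not fit in the
// stack buffer are forwarded to the fallback, here `direct_allocator`.
test "stackFallback usage sketch" {
    var stack_with_fallback = stackFallback(64, direct_allocator);
    const allocator = stack_with_fallback.get();

    const small = try allocator.alloc(u8, 16);
    defer allocator.free(small);
    const large = try allocator.alloc(u8, 1024);
    defer allocator.free(large);
    testing.expect(small.len == 16 and large.len == 1024);
}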

test "c_allocator" {
    if (builtin.link_libc) {
        var slice = try c_allocator.alloc(u8, 50);
        defer c_allocator.free(slice);
        slice = try c_allocator.realloc(slice, 100);
    }
}

test "DirectAllocator" {
    const allocator = direct_allocator;
    try testAllocator(allocator);
    try testAllocatorAligned(allocator, 16);
    try testAllocatorLargeAlignment(allocator);
    try testAllocatorAlignedShrink(allocator);

    if (builtin.os == .windows) {
        // Trying really large alignment. As mentioned in the implementation,
        // VirtualAlloc returns 64K aligned addresses. We want to make sure
        // DirectAllocator works beyond that, as it's not tested by
        // `testAllocatorLargeAlignment`.
        const slice = try allocator.alignedAlloc(u8, 1 << 20, 128);
        slice[0] = 0x12;
        slice[127] = 0x34;
        allocator.free(slice);
    }
}

test "HeapAllocator" {
    if (builtin.os == .windows) {
        var heap_allocator = HeapAllocator.init();
        defer heap_allocator.deinit();

        const allocator = &heap_allocator.allocator;
        try testAllocator(allocator);
        try testAllocatorAligned(allocator, 16);
        try testAllocatorLargeAlignment(allocator);
        try testAllocatorAlignedShrink(allocator);
    }
}

test "ArenaAllocator" {
    var arena_allocator = ArenaAllocator.init(direct_allocator);
    defer arena_allocator.deinit();

    try testAllocator(&arena_allocator.allocator);
    try testAllocatorAligned(&arena_allocator.allocator, 16);
    try testAllocatorLargeAlignment(&arena_allocator.allocator);
    try testAllocatorAlignedShrink(&arena_allocator.allocator);
}

var test_fixed_buffer_allocator_memory: [80000 * @sizeOf(u64)]u8 = undefined;
test "FixedBufferAllocator" {
    var fixed_buffer_allocator = FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]);

    try testAllocator(&fixed_buffer_allocator.allocator);
    try testAllocatorAligned(&fixed_buffer_allocator.allocator, 16);
    try testAllocatorLargeAlignment(&fixed_buffer_allocator.allocator);
    try testAllocatorAlignedShrink(&fixed_buffer_allocator.allocator);
}

test "FixedBufferAllocator.reset" {
    var buf: [8]u8 align(@alignOf(usize)) = undefined;
    var fba = FixedBufferAllocator.init(buf[0..]);

    const X = 0xeeeeeeeeeeeeeeee;
    const Y = 0xffffffffffffffff;

    var x = try fba.allocator.create(u64);
    x.* = X;
    testing.expectError(error.OutOfMemory, fba.allocator.create(u64));

    fba.reset();
    var y = try fba.allocator.create(u64);
    y.* = Y;

    // we expect Y to have overwritten X.
    testing.expect(x.* == y.*);
    testing.expect(y.* == Y);
}

test "FixedBufferAllocator Reuse memory on realloc" {
    var small_fixed_buffer: [10]u8 = undefined;
    // check if we re-use the memory
    {
        var fixed_buffer_allocator = FixedBufferAllocator.init(small_fixed_buffer[0..]);

        var slice0 = try fixed_buffer_allocator.allocator.alloc(u8, 5);
        testing.expect(slice0.len == 5);
        var slice1 = try fixed_buffer_allocator.allocator.realloc(slice0, 10);
        testing.expect(slice1.ptr == slice0.ptr);
        testing.expect(slice1.len == 10);
        testing.expectError(error.OutOfMemory, fixed_buffer_allocator.allocator.realloc(slice1, 11));
    }
    // check that we don't re-use the memory if it's not the most recent block
    {
        var fixed_buffer_allocator = FixedBufferAllocator.init(small_fixed_buffer[0..]);

        var slice0 = try fixed_buffer_allocator.allocator.alloc(u8, 2);
        slice0[0] = 1;
        slice0[1] = 2;
        var slice1 = try fixed_buffer_allocator.allocator.alloc(u8, 2);
        var slice2 = try fixed_buffer_allocator.allocator.realloc(slice0, 4);
        testing.expect(slice0.ptr != slice2.ptr);
        testing.expect(slice1.ptr != slice2.ptr);
        testing.expect(slice2[0] == 1);
        testing.expect(slice2[1] == 2);
    }
}

test "ThreadSafeFixedBufferAllocator" {
    var fixed_buffer_allocator = ThreadSafeFixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]);

    try testAllocator(&fixed_buffer_allocator.allocator);
    try testAllocatorAligned(&fixed_buffer_allocator.allocator, 16);
    try testAllocatorLargeAlignment(&fixed_buffer_allocator.allocator);
    try testAllocatorAlignedShrink(&fixed_buffer_allocator.allocator);
}

fn testAllocator(allocator: *mem.Allocator) !void {
    var slice = try allocator.alloc(*i32, 100);
    testing.expect(slice.len == 100);
    for (slice) |*item, i| {
        item.* = try allocator.create(i32);
        item.*.* = @intCast(i32, i);
    }

    slice = try allocator.realloc(slice, 20000);
    testing.expect(slice.len == 20000);

    for (slice[0..100]) |item, i| {
        testing.expect(item.* == @intCast(i32, i));
        allocator.destroy(item);
    }

    slice = allocator.shrink(slice, 50);
    testing.expect(slice.len == 50);
    slice = allocator.shrink(slice, 25);
    testing.expect(slice.len == 25);
    slice = allocator.shrink(slice, 0);
    testing.expect(slice.len == 0);
    slice = try allocator.realloc(slice, 10);
    testing.expect(slice.len == 10);

    allocator.free(slice);
}

fn testAllocatorAligned(allocator: *mem.Allocator, comptime alignment: u29) !void {
    // initial
    var slice = try allocator.alignedAlloc(u8, alignment, 10);
    testing.expect(slice.len == 10);
    // grow
    slice = try allocator.realloc(slice, 100);
    testing.expect(slice.len == 100);
    // shrink
    slice = allocator.shrink(slice, 10);
    testing.expect(slice.len == 10);
    // go to zero
    slice = allocator.shrink(slice, 0);
    testing.expect(slice.len == 0);
    // realloc from zero
    slice = try allocator.realloc(slice, 100);
    testing.expect(slice.len == 100);
    // shrink with shrink
    slice = allocator.shrink(slice, 10);
    testing.expect(slice.len == 10);
    // shrink to zero
    slice = allocator.shrink(slice, 0);
    testing.expect(slice.len == 0);
}

fn testAllocatorLargeAlignment(allocator: *mem.Allocator) mem.Allocator.Error!void {
    // Maybe a platform's page_size is actually the same as or
    // very near usize?
    if (mem.page_size << 2 > maxInt(usize)) return;

    const USizeShift = @IntType(false, std.math.log2(usize.bit_count));
    const large_align = u29(mem.page_size << 2);

    var align_mask: usize = undefined;
    _ = @shlWithOverflow(usize, ~usize(0), USizeShift(@ctz(u29, large_align)), &align_mask);

    var slice = try allocator.alignedAlloc(u8, large_align, 500);
    testing.expect(@ptrToInt(slice.ptr) & align_mask == @ptrToInt(slice.ptr));

    slice = allocator.shrink(slice, 100);
    testing.expect(@ptrToInt(slice.ptr) & align_mask == @ptrToInt(slice.ptr));

    slice = try allocator.realloc(slice, 5000);
    testing.expect(@ptrToInt(slice.ptr) & align_mask == @ptrToInt(slice.ptr));

    slice = allocator.shrink(slice, 10);
    testing.expect(@ptrToInt(slice.ptr) & align_mask == @ptrToInt(slice.ptr));

    slice = try allocator.realloc(slice, 20000);
    testing.expect(@ptrToInt(slice.ptr) & align_mask == @ptrToInt(slice.ptr));

    allocator.free(slice);
}

fn testAllocatorAlignedShrink(allocator: *mem.Allocator) mem.Allocator.Error!void {
    var debug_buffer: [1000]u8 = undefined;
    const debug_allocator = &FixedBufferAllocator.init(&debug_buffer).allocator;

    const alloc_size = mem.page_size * 2 + 50;
    var slice = try allocator.alignedAlloc(u8, 16, alloc_size);
    defer allocator.free(slice);

    var stuff_to_free = std.ArrayList([]align(16) u8).init(debug_allocator);
    // On Windows, VirtualAlloc returns addresses aligned to a 64K boundary,
    // which is 16 pages, hence the 32. This test may require increasing
    // the size of the allocations feeding the `allocator` parameter if they
    // fail, because of this high over-alignment we want to have.
    while (@ptrToInt(slice.ptr) == mem.alignForward(@ptrToInt(slice.ptr), mem.page_size * 32)) {
        try stuff_to_free.append(slice);
        slice = try allocator.alignedAlloc(u8, 16, alloc_size);
    }
    while (stuff_to_free.popOrNull()) |item| {
        allocator.free(item);
    }
    slice[0] = 0x12;
    slice[60] = 0x34;

    // realloc to a smaller size but with a larger alignment
    slice = try allocator.alignedRealloc(slice, mem.page_size * 32, alloc_size / 2);
    testing.expect(slice[0] == 0x12);
    testing.expect(slice[60] == 0x34);
}
|