// zig/lib/std/heap.zig
const std = @import("std.zig");
const builtin = @import("builtin");
const root = @import("root");
const debug = std.debug;
const assert = debug.assert;
const testing = std.testing;
const mem = std.mem;
const os = std.os;
const c = std.c;
const maxInt = std.math.maxInt;
pub const LoggingAllocator = @import("heap/logging_allocator.zig").LoggingAllocator;
pub const loggingAllocator = @import("heap/logging_allocator.zig").loggingAllocator;
pub const ScopedLoggingAllocator = @import("heap/logging_allocator.zig").ScopedLoggingAllocator;
pub const LogToWriterAllocator = @import("heap/log_to_writer_allocator.zig").LogToWriterAllocator;
pub const logToWriterAllocator = @import("heap/log_to_writer_allocator.zig").logToWriterAllocator;
pub const ArenaAllocator = @import("heap/arena_allocator.zig").ArenaAllocator;
pub const GeneralPurposeAllocator = @import("heap/general_purpose_allocator.zig").GeneralPurposeAllocator;
const Allocator = mem.Allocator;
const CAllocator = struct {
comptime {
if (!builtin.link_libc) {
@compileError("C allocator is only available when linking against libc");
}
}
usingnamespace if (@hasDecl(c, "malloc_size"))
struct {
pub const supports_malloc_size = true;
pub const malloc_size = c.malloc_size;
}
else if (@hasDecl(c, "malloc_usable_size"))
struct {
pub const supports_malloc_size = true;
pub const malloc_size = c.malloc_usable_size;
}
else if (@hasDecl(c, "_msize"))
struct {
pub const supports_malloc_size = true;
pub const malloc_size = c._msize;
}
else
struct {
pub const supports_malloc_size = false;
};
pub const supports_posix_memalign = @hasDecl(c, "posix_memalign");
fn getHeader(ptr: [*]u8) *[*]u8 {
return @intToPtr(*[*]u8, @ptrToInt(ptr) - @sizeOf(usize));
}
fn alignedAlloc(len: usize, alignment: usize) ?[*]u8 {
if (supports_posix_memalign) {
            // posix_memalign only accepts alignment values that are a
// multiple of the pointer size
const eff_alignment = std.math.max(alignment, @sizeOf(usize));
var aligned_ptr: ?*anyopaque = undefined;
if (c.posix_memalign(&aligned_ptr, eff_alignment, len) != 0)
return null;
return @ptrCast([*]u8, aligned_ptr);
}
// Thin wrapper around regular malloc, overallocate to account for
        // alignment padding and store the original malloc()'ed pointer before
// the aligned address.
var unaligned_ptr = @ptrCast([*]u8, c.malloc(len + alignment - 1 + @sizeOf(usize)) orelse return null);
const unaligned_addr = @ptrToInt(unaligned_ptr);
const aligned_addr = mem.alignForward(unaligned_addr + @sizeOf(usize), alignment);
var aligned_ptr = unaligned_ptr + (aligned_addr - unaligned_addr);
getHeader(aligned_ptr).* = unaligned_ptr;
return aligned_ptr;
}
fn alignedFree(ptr: [*]u8) void {
if (supports_posix_memalign) {
return c.free(ptr);
}
const unaligned_ptr = getHeader(ptr).*;
c.free(unaligned_ptr);
}
fn alignedAllocSize(ptr: [*]u8) usize {
if (supports_posix_memalign) {
return CAllocator.malloc_size(ptr);
}
const unaligned_ptr = getHeader(ptr).*;
const delta = @ptrToInt(ptr) - @ptrToInt(unaligned_ptr);
return CAllocator.malloc_size(unaligned_ptr) - delta;
}
fn alloc(
_: *anyopaque,
len: usize,
alignment: u29,
len_align: u29,
return_address: usize,
) error{OutOfMemory}![]u8 {
_ = return_address;
assert(len > 0);
assert(std.math.isPowerOfTwo(alignment));
var ptr = alignedAlloc(len, alignment) orelse return error.OutOfMemory;
if (len_align == 0) {
return ptr[0..len];
}
const full_len = init: {
if (CAllocator.supports_malloc_size) {
const s = alignedAllocSize(ptr);
assert(s >= len);
break :init s;
}
break :init len;
};
return ptr[0..mem.alignBackwardAnyAlign(full_len, len_align)];
}
fn resize(
_: *anyopaque,
buf: []u8,
buf_align: u29,
new_len: usize,
len_align: u29,
return_address: usize,
) ?usize {
_ = buf_align;
_ = return_address;
if (new_len <= buf.len) {
return mem.alignAllocLen(buf.len, new_len, len_align);
}
if (CAllocator.supports_malloc_size) {
const full_len = alignedAllocSize(buf.ptr);
if (new_len <= full_len) {
return mem.alignAllocLen(full_len, new_len, len_align);
}
}
return null;
}
fn free(
_: *anyopaque,
buf: []u8,
buf_align: u29,
return_address: usize,
) void {
_ = buf_align;
_ = return_address;
alignedFree(buf.ptr);
}
};
/// Supports the full Allocator interface, including alignment, and exploiting
/// `malloc_usable_size` if available. For an allocator that directly calls
/// `malloc`/`free`, see `raw_c_allocator`.
pub const c_allocator = Allocator{
.ptr = undefined,
.vtable = &c_allocator_vtable,
};
const c_allocator_vtable = Allocator.VTable{
.alloc = CAllocator.alloc,
.resize = CAllocator.resize,
.free = CAllocator.free,
};
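
// Usage sketch (illustrative only, assuming libc is linked): `c_allocator` is a
// plain value and needs no init/deinit.
test "c_allocator usage sketch" {
    if (builtin.link_libc) {
        const slice = try c_allocator.alloc(u8, 64);
        defer c_allocator.free(slice);
        slice[0] = 0xaa;
        try testing.expect(slice.len == 64);
    }
}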
/// Asserts allocations are within `@alignOf(std.c.max_align_t)` and directly calls
/// `malloc`/`free`. Does not attempt to utilize `malloc_usable_size`.
/// This allocator is safe to use as the backing allocator with
/// `ArenaAllocator` for example and is more optimal in such a case
/// than `c_allocator`.
pub const raw_c_allocator = Allocator{
.ptr = undefined,
.vtable = &raw_c_allocator_vtable,
};
const raw_c_allocator_vtable = Allocator.VTable{
.alloc = rawCAlloc,
.resize = rawCResize,
.free = rawCFree,
};
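
// Usage sketch (illustrative only, assuming libc is linked): per the doc comment
// above, `raw_c_allocator` works well as the backing allocator of an
// `ArenaAllocator`, where it is more optimal than `c_allocator`.
test "raw_c_allocator as arena backing sketch" {
    if (builtin.link_libc) {
        var arena = ArenaAllocator.init(raw_c_allocator);
        defer arena.deinit();
        const x = try arena.allocator().create(u64);
        x.* = 42;
        try testing.expect(x.* == 42);
    }
}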
fn rawCAlloc(
_: *anyopaque,
len: usize,
ptr_align: u29,
len_align: u29,
ret_addr: usize,
) Allocator.Error![]u8 {
_ = len_align;
_ = ret_addr;
assert(ptr_align <= @alignOf(std.c.max_align_t));
const ptr = @ptrCast([*]u8, c.malloc(len) orelse return error.OutOfMemory);
return ptr[0..len];
}
fn rawCResize(
_: *anyopaque,
buf: []u8,
old_align: u29,
new_len: usize,
len_align: u29,
ret_addr: usize,
) ?usize {
_ = old_align;
_ = ret_addr;
if (new_len <= buf.len) {
return mem.alignAllocLen(buf.len, new_len, len_align);
}
return null;
}
fn rawCFree(
_: *anyopaque,
buf: []u8,
old_align: u29,
ret_addr: usize,
) void {
_ = old_align;
_ = ret_addr;
c.free(buf.ptr);
}
/// This allocator makes a syscall directly for every allocation and free.
/// Thread-safe and lock-free.
pub const page_allocator = if (builtin.target.isWasm())
Allocator{
.ptr = undefined,
.vtable = &WasmPageAllocator.vtable,
}
else if (builtin.target.os.tag == .freestanding)
root.os.heap.page_allocator
else
Allocator{
.ptr = undefined,
.vtable = &PageAllocator.vtable,
};
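
// Usage sketch (illustrative only): `page_allocator` is a plain value; every
// allocation maps whole pages, so it is best suited to large or long-lived buffers.
test "page_allocator usage sketch" {
    const buf = try page_allocator.alloc(u8, 2 * mem.page_size);
    defer page_allocator.free(buf);
    buf[0] = 1;
    try testing.expect(buf.len == 2 * mem.page_size);
}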
/// Verifies that the adjusted length will still map to the full length
pub fn alignPageAllocLen(full_len: usize, len: usize, len_align: u29) usize {
const aligned_len = mem.alignAllocLen(full_len, len, len_align);
assert(mem.alignForward(aligned_len, mem.page_size) == full_len);
return aligned_len;
}
/// TODO Utilize this on Windows.
pub var next_mmap_addr_hint: ?[*]align(mem.page_size) u8 = null;
const PageAllocator = struct {
const vtable = Allocator.VTable{
.alloc = alloc,
.resize = resize,
.free = free,
};
fn alloc(_: *anyopaque, n: usize, alignment: u29, len_align: u29, ra: usize) error{OutOfMemory}![]u8 {
_ = ra;
assert(n > 0);
const aligned_len = mem.alignForward(n, mem.page_size);
if (builtin.os.tag == .windows) {
const w = os.windows;
            // Although officially it's at least aligned to a page boundary,
// Windows is known to reserve pages on a 64K boundary. It's
// even more likely that the requested alignment is <= 64K than
// 4K, so we're just allocating blindly and hoping for the best.
// see https://devblogs.microsoft.com/oldnewthing/?p=42223
const addr = w.VirtualAlloc(
null,
aligned_len,
w.MEM_COMMIT | w.MEM_RESERVE,
w.PAGE_READWRITE,
) catch return error.OutOfMemory;
// If the allocation is sufficiently aligned, use it.
if (mem.isAligned(@ptrToInt(addr), alignment)) {
return @ptrCast([*]u8, addr)[0..alignPageAllocLen(aligned_len, n, len_align)];
}
            // If it wasn't, actually do an explicitly aligned allocation.
w.VirtualFree(addr, 0, w.MEM_RELEASE);
const alloc_size = n + alignment - mem.page_size;
while (true) {
// Reserve a range of memory large enough to find a sufficiently
// aligned address.
const reserved_addr = w.VirtualAlloc(
null,
alloc_size,
w.MEM_RESERVE,
w.PAGE_NOACCESS,
) catch return error.OutOfMemory;
const aligned_addr = mem.alignForward(@ptrToInt(reserved_addr), alignment);
// Release the reserved pages (not actually used).
w.VirtualFree(reserved_addr, 0, w.MEM_RELEASE);
// At this point, it is possible that another thread has
// obtained some memory space that will cause the next
// VirtualAlloc call to fail. To handle this, we will retry
// until it succeeds.
const ptr = w.VirtualAlloc(
@intToPtr(*anyopaque, aligned_addr),
aligned_len,
w.MEM_COMMIT | w.MEM_RESERVE,
w.PAGE_READWRITE,
) catch continue;
return @ptrCast([*]u8, ptr)[0..alignPageAllocLen(aligned_len, n, len_align)];
}
}
const max_drop_len = alignment - @minimum(alignment, mem.page_size);
const alloc_len = if (max_drop_len <= aligned_len - n)
aligned_len
else
mem.alignForward(aligned_len + max_drop_len, mem.page_size);
const hint = @atomicLoad(@TypeOf(next_mmap_addr_hint), &next_mmap_addr_hint, .Unordered);
const slice = os.mmap(
hint,
alloc_len,
os.PROT.READ | os.PROT.WRITE,
os.MAP.PRIVATE | os.MAP.ANONYMOUS,
-1,
0,
) catch return error.OutOfMemory;
assert(mem.isAligned(@ptrToInt(slice.ptr), mem.page_size));
const result_ptr = mem.alignPointer(slice.ptr, alignment) orelse
return error.OutOfMemory;
// Unmap the extra bytes that were only requested in order to guarantee
// that the range of memory we were provided had a proper alignment in
// it somewhere. The extra bytes could be at the beginning, or end, or both.
const drop_len = @ptrToInt(result_ptr) - @ptrToInt(slice.ptr);
if (drop_len != 0) {
os.munmap(slice[0..drop_len]);
}
// Unmap extra pages
const aligned_buffer_len = alloc_len - drop_len;
if (aligned_buffer_len > aligned_len) {
os.munmap(result_ptr[aligned_len..aligned_buffer_len]);
}
const new_hint = @alignCast(mem.page_size, result_ptr + aligned_len);
_ = @cmpxchgStrong(@TypeOf(next_mmap_addr_hint), &next_mmap_addr_hint, hint, new_hint, .Monotonic, .Monotonic);
return result_ptr[0..alignPageAllocLen(aligned_len, n, len_align)];
}
fn resize(
_: *anyopaque,
buf_unaligned: []u8,
buf_align: u29,
new_size: usize,
len_align: u29,
return_address: usize,
) ?usize {
_ = buf_align;
_ = return_address;
const new_size_aligned = mem.alignForward(new_size, mem.page_size);
if (builtin.os.tag == .windows) {
const w = os.windows;
if (new_size <= buf_unaligned.len) {
const base_addr = @ptrToInt(buf_unaligned.ptr);
const old_addr_end = base_addr + buf_unaligned.len;
const new_addr_end = mem.alignForward(base_addr + new_size, mem.page_size);
if (old_addr_end > new_addr_end) {
// For shrinking that is not releasing, we will only
// decommit the pages not needed anymore.
w.VirtualFree(
@intToPtr(*anyopaque, new_addr_end),
old_addr_end - new_addr_end,
w.MEM_DECOMMIT,
);
}
return alignPageAllocLen(new_size_aligned, new_size, len_align);
}
const old_size_aligned = mem.alignForward(buf_unaligned.len, mem.page_size);
if (new_size_aligned <= old_size_aligned) {
return alignPageAllocLen(new_size_aligned, new_size, len_align);
}
return null;
}
const buf_aligned_len = mem.alignForward(buf_unaligned.len, mem.page_size);
if (new_size_aligned == buf_aligned_len)
return alignPageAllocLen(new_size_aligned, new_size, len_align);
if (new_size_aligned < buf_aligned_len) {
const ptr = @alignCast(mem.page_size, buf_unaligned.ptr + new_size_aligned);
// TODO: if the next_mmap_addr_hint is within the unmapped range, update it
os.munmap(ptr[0 .. buf_aligned_len - new_size_aligned]);
return alignPageAllocLen(new_size_aligned, new_size, len_align);
}
// TODO: call mremap
// TODO: if the next_mmap_addr_hint is within the remapped range, update it
return null;
}
fn free(_: *anyopaque, buf_unaligned: []u8, buf_align: u29, return_address: usize) void {
_ = buf_align;
_ = return_address;
if (builtin.os.tag == .windows) {
os.windows.VirtualFree(buf_unaligned.ptr, 0, os.windows.MEM_RELEASE);
} else {
const buf_aligned_len = mem.alignForward(buf_unaligned.len, mem.page_size);
const ptr = @alignCast(mem.page_size, buf_unaligned.ptr);
os.munmap(ptr[0..buf_aligned_len]);
}
}
};
const WasmPageAllocator = struct {
comptime {
if (!builtin.target.isWasm()) {
@compileError("WasmPageAllocator is only available for wasm32 arch");
}
}
const vtable = Allocator.VTable{
.alloc = alloc,
.resize = resize,
.free = free,
};
const PageStatus = enum(u1) {
used = 0,
free = 1,
pub const none_free: u8 = 0;
};
const FreeBlock = struct {
data: []u128,
const Io = std.packed_int_array.PackedIntIo(u1, .Little);
fn totalPages(self: FreeBlock) usize {
return self.data.len * 128;
}
fn isInitialized(self: FreeBlock) bool {
return self.data.len > 0;
}
fn getBit(self: FreeBlock, idx: usize) PageStatus {
const bit_offset = 0;
return @intToEnum(PageStatus, Io.get(mem.sliceAsBytes(self.data), idx, bit_offset));
}
fn setBits(self: FreeBlock, start_idx: usize, len: usize, val: PageStatus) void {
const bit_offset = 0;
var i: usize = 0;
while (i < len) : (i += 1) {
Io.set(mem.sliceAsBytes(self.data), start_idx + i, bit_offset, @enumToInt(val));
}
}
// Use '0xFFFFFFFF' as a _missing_ sentinel
// This saves ~50 bytes compared to returning a nullable
// We can guarantee that conventional memory never gets this big,
// and wasm32 would not be able to address this memory (32 GB > usize).
// Revisit if this is settled: https://github.com/ziglang/zig/issues/3806
const not_found = std.math.maxInt(usize);
fn useRecycled(self: FreeBlock, num_pages: usize, alignment: u29) usize {
@setCold(true);
for (self.data) |segment, i| {
const spills_into_next = @bitCast(i128, segment) < 0;
const has_enough_bits = @popCount(u128, segment) >= num_pages;
if (!spills_into_next and !has_enough_bits) continue;
var j: usize = i * 128;
while (j < (i + 1) * 128) : (j += 1) {
var count: usize = 0;
while (j + count < self.totalPages() and self.getBit(j + count) == .free) {
count += 1;
const addr = j * mem.page_size;
if (count >= num_pages and mem.isAligned(addr, alignment)) {
self.setBits(j, num_pages, .used);
return j;
}
}
j += count;
}
}
return not_found;
}
fn recycle(self: FreeBlock, start_idx: usize, len: usize) void {
self.setBits(start_idx, len, .free);
}
};
var _conventional_data = [_]u128{0} ** 16;
// Marking `conventional` as const saves ~40 bytes
const conventional = FreeBlock{ .data = &_conventional_data };
var extended = FreeBlock{ .data = &[_]u128{} };
fn extendedOffset() usize {
return conventional.totalPages();
}
fn nPages(memsize: usize) usize {
return mem.alignForward(memsize, mem.page_size) / mem.page_size;
}
fn alloc(_: *anyopaque, len: usize, alignment: u29, len_align: u29, ra: usize) error{OutOfMemory}![]u8 {
_ = ra;
const page_count = nPages(len);
const page_idx = try allocPages(page_count, alignment);
return @intToPtr([*]u8, page_idx * mem.page_size)[0..alignPageAllocLen(page_count * mem.page_size, len, len_align)];
}
fn allocPages(page_count: usize, alignment: u29) !usize {
{
const idx = conventional.useRecycled(page_count, alignment);
if (idx != FreeBlock.not_found) {
return idx;
}
}
const idx = extended.useRecycled(page_count, alignment);
if (idx != FreeBlock.not_found) {
return idx + extendedOffset();
}
const next_page_idx = @wasmMemorySize(0);
const next_page_addr = next_page_idx * mem.page_size;
const aligned_addr = mem.alignForward(next_page_addr, alignment);
const drop_page_count = @divExact(aligned_addr - next_page_addr, mem.page_size);
const result = @wasmMemoryGrow(0, @intCast(u32, drop_page_count + page_count));
if (result <= 0)
return error.OutOfMemory;
assert(result == next_page_idx);
const aligned_page_idx = next_page_idx + drop_page_count;
if (drop_page_count > 0) {
freePages(next_page_idx, aligned_page_idx);
}
return @intCast(usize, aligned_page_idx);
}
fn freePages(start: usize, end: usize) void {
if (start < extendedOffset()) {
conventional.recycle(start, @minimum(extendedOffset(), end) - start);
}
if (end > extendedOffset()) {
var new_end = end;
if (!extended.isInitialized()) {
// Steal the last page from the memory currently being recycled
// TODO: would it be better if we use the first page instead?
new_end -= 1;
extended.data = @intToPtr([*]u128, new_end * mem.page_size)[0 .. mem.page_size / @sizeOf(u128)];
// Since this is the first page being freed and we consume it, assume *nothing* is free.
mem.set(u128, extended.data, PageStatus.none_free);
}
const clamped_start = std.math.max(extendedOffset(), start);
extended.recycle(clamped_start - extendedOffset(), new_end - clamped_start);
}
}
fn resize(
_: *anyopaque,
buf: []u8,
buf_align: u29,
new_len: usize,
len_align: u29,
return_address: usize,
) ?usize {
_ = buf_align;
_ = return_address;
const aligned_len = mem.alignForward(buf.len, mem.page_size);
if (new_len > aligned_len) return null;
const current_n = nPages(aligned_len);
const new_n = nPages(new_len);
if (new_n != current_n) {
const base = nPages(@ptrToInt(buf.ptr));
freePages(base + new_n, base + current_n);
}
return alignPageAllocLen(new_n * mem.page_size, new_len, len_align);
}
fn free(
_: *anyopaque,
buf: []u8,
buf_align: u29,
return_address: usize,
) void {
_ = buf_align;
_ = return_address;
const aligned_len = mem.alignForward(buf.len, mem.page_size);
const current_n = nPages(aligned_len);
const base = nPages(@ptrToInt(buf.ptr));
freePages(base, base + current_n);
}
};
pub const HeapAllocator = switch (builtin.os.tag) {
.windows => struct {
heap_handle: ?HeapHandle,
const HeapHandle = os.windows.HANDLE;
pub fn init() HeapAllocator {
return HeapAllocator{
.heap_handle = null,
};
}
pub fn allocator(self: *HeapAllocator) Allocator {
return Allocator.init(self, alloc, resize, free);
}
pub fn deinit(self: *HeapAllocator) void {
if (self.heap_handle) |heap_handle| {
os.windows.HeapDestroy(heap_handle);
}
}
fn getRecordPtr(buf: []u8) *align(1) usize {
return @intToPtr(*align(1) usize, @ptrToInt(buf.ptr) + buf.len);
}
fn alloc(
self: *HeapAllocator,
n: usize,
ptr_align: u29,
len_align: u29,
return_address: usize,
) error{OutOfMemory}![]u8 {
_ = return_address;
const amt = n + ptr_align - 1 + @sizeOf(usize);
const optional_heap_handle = @atomicLoad(?HeapHandle, &self.heap_handle, .SeqCst);
const heap_handle = optional_heap_handle orelse blk: {
const options = if (builtin.single_threaded) os.windows.HEAP_NO_SERIALIZE else 0;
const hh = os.windows.kernel32.HeapCreate(options, amt, 0) orelse return error.OutOfMemory;
const other_hh = @cmpxchgStrong(?HeapHandle, &self.heap_handle, null, hh, .SeqCst, .SeqCst) orelse break :blk hh;
os.windows.HeapDestroy(hh);
break :blk other_hh.?; // can't be null because of the cmpxchg
};
const ptr = os.windows.kernel32.HeapAlloc(heap_handle, 0, amt) orelse return error.OutOfMemory;
const root_addr = @ptrToInt(ptr);
const aligned_addr = mem.alignForward(root_addr, ptr_align);
const return_len = init: {
if (len_align == 0) break :init n;
const full_len = os.windows.kernel32.HeapSize(heap_handle, 0, ptr);
assert(full_len != std.math.maxInt(usize));
assert(full_len >= amt);
break :init mem.alignBackwardAnyAlign(full_len - (aligned_addr - root_addr) - @sizeOf(usize), len_align);
};
const buf = @intToPtr([*]u8, aligned_addr)[0..return_len];
getRecordPtr(buf).* = root_addr;
return buf;
}
fn resize(
self: *HeapAllocator,
buf: []u8,
buf_align: u29,
new_size: usize,
len_align: u29,
return_address: usize,
) ?usize {
_ = buf_align;
_ = return_address;
const root_addr = getRecordPtr(buf).*;
const align_offset = @ptrToInt(buf.ptr) - root_addr;
const amt = align_offset + new_size + @sizeOf(usize);
const new_ptr = os.windows.kernel32.HeapReAlloc(
self.heap_handle.?,
os.windows.HEAP_REALLOC_IN_PLACE_ONLY,
@intToPtr(*anyopaque, root_addr),
amt,
) orelse return null;
assert(new_ptr == @intToPtr(*anyopaque, root_addr));
const return_len = init: {
if (len_align == 0) break :init new_size;
const full_len = os.windows.kernel32.HeapSize(self.heap_handle.?, 0, new_ptr);
assert(full_len != std.math.maxInt(usize));
assert(full_len >= amt);
break :init mem.alignBackwardAnyAlign(full_len - align_offset, len_align);
};
getRecordPtr(buf.ptr[0..return_len]).* = root_addr;
return return_len;
}
fn free(
self: *HeapAllocator,
buf: []u8,
buf_align: u29,
return_address: usize,
) void {
_ = buf_align;
_ = return_address;
os.windows.HeapFree(self.heap_handle.?, 0, @intToPtr(*anyopaque, getRecordPtr(buf).*));
}
},
else => @compileError("Unsupported OS"),
};
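
// Usage sketch (illustrative only, Windows-only): the heap handle is created
// lazily on the first allocation and released by `deinit`.
//
//     var heap = HeapAllocator.init();
//     defer heap.deinit();
//     const allocator = heap.allocator();
//     const buf = try allocator.alloc(u8, 256);
//     defer allocator.free(buf);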
fn sliceContainsPtr(container: []u8, ptr: [*]u8) bool {
return @ptrToInt(ptr) >= @ptrToInt(container.ptr) and
@ptrToInt(ptr) < (@ptrToInt(container.ptr) + container.len);
}
fn sliceContainsSlice(container: []u8, slice: []u8) bool {
return @ptrToInt(slice.ptr) >= @ptrToInt(container.ptr) and
(@ptrToInt(slice.ptr) + slice.len) <= (@ptrToInt(container.ptr) + container.len);
}
pub const FixedBufferAllocator = struct {
end_index: usize,
buffer: []u8,
pub fn init(buffer: []u8) FixedBufferAllocator {
return FixedBufferAllocator{
.buffer = buffer,
.end_index = 0,
};
}
/// *WARNING* using this at the same time as the interface returned by `threadSafeAllocator` is not thread safe
pub fn allocator(self: *FixedBufferAllocator) Allocator {
return Allocator.init(self, alloc, resize, free);
}
/// Provides a lock free thread safe `Allocator` interface to the underlying `FixedBufferAllocator`
    /// *WARNING* using this at the same time as the interface returned by `allocator` is not thread safe
pub fn threadSafeAllocator(self: *FixedBufferAllocator) Allocator {
return Allocator.init(
self,
threadSafeAlloc,
Allocator.NoResize(FixedBufferAllocator).noResize,
Allocator.NoOpFree(FixedBufferAllocator).noOpFree,
);
}
pub fn ownsPtr(self: *FixedBufferAllocator, ptr: [*]u8) bool {
return sliceContainsPtr(self.buffer, ptr);
}
pub fn ownsSlice(self: *FixedBufferAllocator, slice: []u8) bool {
return sliceContainsSlice(self.buffer, slice);
}
    /// NOTE: this will not work in all cases: if the last allocation had an adjusted_index,
    /// then we won't be able to determine what the last allocation was, because
    /// the alignForward operation done in alloc is not reversible.
pub fn isLastAllocation(self: *FixedBufferAllocator, buf: []u8) bool {
return buf.ptr + buf.len == self.buffer.ptr + self.end_index;
}
fn alloc(self: *FixedBufferAllocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 {
_ = len_align;
_ = ra;
const adjust_off = mem.alignPointerOffset(self.buffer.ptr + self.end_index, ptr_align) orelse
return error.OutOfMemory;
const adjusted_index = self.end_index + adjust_off;
const new_end_index = adjusted_index + n;
if (new_end_index > self.buffer.len) {
return error.OutOfMemory;
}
const result = self.buffer[adjusted_index..new_end_index];
self.end_index = new_end_index;
return result;
}
fn resize(
self: *FixedBufferAllocator,
buf: []u8,
buf_align: u29,
new_size: usize,
len_align: u29,
return_address: usize,
) ?usize {
_ = buf_align;
_ = return_address;
assert(self.ownsSlice(buf)); // sanity check
if (!self.isLastAllocation(buf)) {
if (new_size > buf.len) return null;
return mem.alignAllocLen(buf.len, new_size, len_align);
}
if (new_size <= buf.len) {
const sub = buf.len - new_size;
self.end_index -= sub;
return mem.alignAllocLen(buf.len - sub, new_size, len_align);
}
const add = new_size - buf.len;
if (add + self.end_index > self.buffer.len) return null;
self.end_index += add;
return new_size;
}
fn free(
self: *FixedBufferAllocator,
buf: []u8,
buf_align: u29,
return_address: usize,
) void {
_ = buf_align;
_ = return_address;
assert(self.ownsSlice(buf)); // sanity check
if (self.isLastAllocation(buf)) {
self.end_index -= buf.len;
}
}
fn threadSafeAlloc(self: *FixedBufferAllocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 {
_ = len_align;
_ = ra;
var end_index = @atomicLoad(usize, &self.end_index, .SeqCst);
while (true) {
const adjust_off = mem.alignPointerOffset(self.buffer.ptr + end_index, ptr_align) orelse
return error.OutOfMemory;
const adjusted_index = end_index + adjust_off;
const new_end_index = adjusted_index + n;
if (new_end_index > self.buffer.len) {
return error.OutOfMemory;
}
end_index = @cmpxchgWeak(usize, &self.end_index, end_index, new_end_index, .SeqCst, .SeqCst) orelse return self.buffer[adjusted_index..new_end_index];
}
}
pub fn reset(self: *FixedBufferAllocator) void {
self.end_index = 0;
}
};
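
// Usage sketch (illustrative only): a FixedBufferAllocator hands out slices of a
// caller-provided buffer and `reset` reclaims everything at once; per the doc
// comments above, use `threadSafeAllocator` instead of `allocator` when sharing
// it across threads.
test "FixedBufferAllocator usage sketch" {
    var buf: [256]u8 = undefined;
    var fba = FixedBufferAllocator.init(buf[0..]);
    const a = fba.allocator();
    const first = try a.alloc(u8, 100);
    try testing.expect(first.len == 100);
    fba.reset();
    const second = try a.alloc(u8, 200);
    try testing.expect(second.len == 200);
}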
pub const ThreadSafeFixedBufferAllocator = @compileError("ThreadSafeFixedBufferAllocator has been replaced with `threadSafeAllocator` on FixedBufferAllocator");
pub fn stackFallback(comptime size: usize, fallback_allocator: Allocator) StackFallbackAllocator(size) {
return StackFallbackAllocator(size){
.buffer = undefined,
.fallback_allocator = fallback_allocator,
.fixed_buffer_allocator = undefined,
};
}
pub fn StackFallbackAllocator(comptime size: usize) type {
return struct {
const Self = @This();
buffer: [size]u8,
fallback_allocator: Allocator,
fixed_buffer_allocator: FixedBufferAllocator,
        /// WARNING: This function both fetches a `std.mem.Allocator` interface to this allocator *and* resets the internal buffer allocator
pub fn get(self: *Self) Allocator {
self.fixed_buffer_allocator = FixedBufferAllocator.init(self.buffer[0..]);
return Allocator.init(self, alloc, resize, free);
}
fn alloc(
self: *Self,
len: usize,
ptr_align: u29,
len_align: u29,
return_address: usize,
) error{OutOfMemory}![]u8 {
return FixedBufferAllocator.alloc(&self.fixed_buffer_allocator, len, ptr_align, len_align, return_address) catch
return self.fallback_allocator.rawAlloc(len, ptr_align, len_align, return_address);
}
fn resize(
self: *Self,
buf: []u8,
buf_align: u29,
new_len: usize,
len_align: u29,
return_address: usize,
) ?usize {
if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) {
return FixedBufferAllocator.resize(&self.fixed_buffer_allocator, buf, buf_align, new_len, len_align, return_address);
} else {
return self.fallback_allocator.rawResize(buf, buf_align, new_len, len_align, return_address);
}
}
fn free(
self: *Self,
buf: []u8,
buf_align: u29,
return_address: usize,
) void {
if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) {
return FixedBufferAllocator.free(&self.fixed_buffer_allocator, buf, buf_align, return_address);
} else {
return self.fallback_allocator.rawFree(buf, buf_align, return_address);
}
}
};
}
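
// Usage sketch (illustrative only): small requests are served from the on-stack
// buffer; anything that does not fit falls through to the fallback allocator.
test "stackFallback usage sketch" {
    var sfa = stackFallback(64, page_allocator);
    const a = sfa.get();
    const small = try a.alloc(u8, 16); // fits in the 64-byte stack buffer
    const large = try a.alloc(u8, 1024); // too big, comes from page_allocator
    a.free(large);
    a.free(small);
}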
test "c_allocator" {
if (builtin.link_libc) {
try testAllocator(c_allocator);
try testAllocatorAligned(c_allocator);
try testAllocatorLargeAlignment(c_allocator);
try testAllocatorAlignedShrink(c_allocator);
}
}
test "raw_c_allocator" {
if (builtin.link_libc) {
try testAllocator(raw_c_allocator);
}
}
test "WasmPageAllocator internals" {
if (comptime builtin.target.isWasm()) {
const conventional_memsize = WasmPageAllocator.conventional.totalPages() * mem.page_size;
const initial = try page_allocator.alloc(u8, mem.page_size);
try testing.expect(@ptrToInt(initial.ptr) < conventional_memsize); // If this isn't conventional, the rest of these tests don't make sense. Also we have a serious memory leak in the test suite.
var inplace = try page_allocator.realloc(initial, 1);
try testing.expectEqual(initial.ptr, inplace.ptr);
inplace = try page_allocator.realloc(inplace, 4);
try testing.expectEqual(initial.ptr, inplace.ptr);
page_allocator.free(inplace);
const reuse = try page_allocator.alloc(u8, 1);
try testing.expectEqual(initial.ptr, reuse.ptr);
page_allocator.free(reuse);
// This segment may span conventional and extended which has really complex rules so we're just ignoring it for now.
const padding = try page_allocator.alloc(u8, conventional_memsize);
page_allocator.free(padding);
const extended = try page_allocator.alloc(u8, conventional_memsize);
try testing.expect(@ptrToInt(extended.ptr) >= conventional_memsize);
const use_small = try page_allocator.alloc(u8, 1);
try testing.expectEqual(initial.ptr, use_small.ptr);
page_allocator.free(use_small);
inplace = try page_allocator.realloc(extended, 1);
try testing.expectEqual(extended.ptr, inplace.ptr);
page_allocator.free(inplace);
const reuse_extended = try page_allocator.alloc(u8, conventional_memsize);
try testing.expectEqual(extended.ptr, reuse_extended.ptr);
page_allocator.free(reuse_extended);
}
}
test "PageAllocator" {
const allocator = page_allocator;
try testAllocator(allocator);
try testAllocatorAligned(allocator);
if (!builtin.target.isWasm()) {
try testAllocatorLargeAlignment(allocator);
try testAllocatorAlignedShrink(allocator);
}
if (builtin.os.tag == .windows) {
        // Trying really large alignment. As mentioned in the implementation,
// VirtualAlloc returns 64K aligned addresses. We want to make sure
// PageAllocator works beyond that, as it's not tested by
// `testAllocatorLargeAlignment`.
const slice = try allocator.alignedAlloc(u8, 1 << 20, 128);
slice[0] = 0x12;
slice[127] = 0x34;
allocator.free(slice);
}
{
var buf = try allocator.alloc(u8, mem.page_size + 1);
defer allocator.free(buf);
buf = try allocator.realloc(buf, 1); // shrink past the page boundary
}
}
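// HeapAllocator is backed by the Windows heap API, so this test is gated on
// the OS tag.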
test "HeapAllocator" {
if (builtin.os.tag == .windows) {
var heap_allocator = HeapAllocator.init();
defer heap_allocator.deinit();
const allocator = heap_allocator.allocator();
try testAllocator(allocator);
try testAllocatorAligned(allocator);
try testAllocatorLargeAlignment(allocator);
try testAllocatorAlignedShrink(allocator);
}
}
test "ArenaAllocator" {
var arena_allocator = ArenaAllocator.init(page_allocator);
defer arena_allocator.deinit();
const allocator = arena_allocator.allocator();
try testAllocator(allocator);
try testAllocatorAligned(allocator);
try testAllocatorLargeAlignment(allocator);
try testAllocatorAlignedShrink(allocator);
}
var test_fixed_buffer_allocator_memory: [800000 * @sizeOf(u64)]u8 = undefined;
test "FixedBufferAllocator" {
var fixed_buffer_allocator = mem.validationWrap(FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]));
const allocator = fixed_buffer_allocator.allocator();
try testAllocator(allocator);
try testAllocatorAligned(allocator);
try testAllocatorLargeAlignment(allocator);
try testAllocatorAlignedShrink(allocator);
}
test "FixedBufferAllocator.reset" {
var buf: [8]u8 align(@alignOf(u64)) = undefined;
var fba = FixedBufferAllocator.init(buf[0..]);
const allocator = fba.allocator();
const X = 0xeeeeeeeeeeeeeeee;
const Y = 0xffffffffffffffff;
var x = try allocator.create(u64);
x.* = X;
try testing.expectError(error.OutOfMemory, allocator.create(u64));
fba.reset();
var y = try allocator.create(u64);
y.* = Y;
// we expect Y to have overwritten X.
try testing.expect(x.* == y.*);
try testing.expect(y.* == Y);
}
test "StackFallbackAllocator" {
const fallback_allocator = page_allocator;
var stack_allocator = stackFallback(4096, fallback_allocator);
try testAllocator(stack_allocator.get());
try testAllocatorAligned(stack_allocator.get());
try testAllocatorLargeAlignment(stack_allocator.get());
try testAllocatorAlignedShrink(stack_allocator.get());
}
test "FixedBufferAllocator Reuse memory on realloc" {
var small_fixed_buffer: [10]u8 = undefined;
// check if we re-use the memory
{
var fixed_buffer_allocator = FixedBufferAllocator.init(small_fixed_buffer[0..]);
const allocator = fixed_buffer_allocator.allocator();
var slice0 = try allocator.alloc(u8, 5);
try testing.expect(slice0.len == 5);
var slice1 = try allocator.realloc(slice0, 10);
try testing.expect(slice1.ptr == slice0.ptr);
try testing.expect(slice1.len == 10);
try testing.expectError(error.OutOfMemory, allocator.realloc(slice1, 11));
}
// check that we don't re-use the memory if it's not the most recent block
{
var fixed_buffer_allocator = FixedBufferAllocator.init(small_fixed_buffer[0..]);
const allocator = fixed_buffer_allocator.allocator();
var slice0 = try allocator.alloc(u8, 2);
slice0[0] = 1;
slice0[1] = 2;
var slice1 = try allocator.alloc(u8, 2);
var slice2 = try allocator.realloc(slice0, 4);
try testing.expect(slice0.ptr != slice2.ptr);
try testing.expect(slice1.ptr != slice2.ptr);
try testing.expect(slice2[0] == 1);
try testing.expect(slice2[1] == 2);
}
}
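// Run the allocator test suite through FixedBufferAllocator's thread-safe wrapper.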
test "Thread safe FixedBufferAllocator" {
var fixed_buffer_allocator = FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]);
try testAllocator(fixed_buffer_allocator.threadSafeAllocator());
try testAllocatorAligned(fixed_buffer_allocator.threadSafeAllocator());
try testAllocatorLargeAlignment(fixed_buffer_allocator.threadSafeAllocator());
try testAllocatorAlignedShrink(fixed_buffer_allocator.threadSafeAllocator());
}
/// This one should not try alignments that exceed what C malloc can handle.
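/// Exercises alloc/realloc/shrink/free as well as zero-length allocations
/// and zero-bit types, all through a validation wrapper.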
pub fn testAllocator(base_allocator: mem.Allocator) !void {
var validationAllocator = mem.validationWrap(base_allocator);
const allocator = validationAllocator.allocator();
var slice = try allocator.alloc(*i32, 100);
try testing.expect(slice.len == 100);
for (slice) |*item, i| {
item.* = try allocator.create(i32);
item.*.* = @intCast(i32, i);
}
slice = try allocator.realloc(slice, 20000);
try testing.expect(slice.len == 20000);
for (slice[0..100]) |item, i| {
try testing.expect(item.* == @intCast(i32, i));
allocator.destroy(item);
}
slice = allocator.shrink(slice, 50);
try testing.expect(slice.len == 50);
slice = allocator.shrink(slice, 25);
try testing.expect(slice.len == 25);
slice = allocator.shrink(slice, 0);
try testing.expect(slice.len == 0);
slice = try allocator.realloc(slice, 10);
try testing.expect(slice.len == 10);
allocator.free(slice);
// Zero-length allocation
var empty = try allocator.alloc(u8, 0);
allocator.free(empty);
// Allocation with zero-sized types
const zero_bit_ptr = try allocator.create(u0);
zero_bit_ptr.* = 0;
allocator.destroy(zero_bit_ptr);
const oversize = try allocator.allocAdvanced(u32, null, 5, .at_least);
try testing.expect(oversize.len >= 5);
for (oversize) |*item| {
item.* = 0xDEADBEEF;
}
allocator.free(oversize);
}
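/// Exercises the aligned allocation paths (alignedAlloc, realloc, shrink)
/// with alignments both smaller and larger than the element type's own.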
pub fn testAllocatorAligned(base_allocator: mem.Allocator) !void {
var validationAllocator = mem.validationWrap(base_allocator);
const allocator = validationAllocator.allocator();
// Test a few alignment values, both smaller and bigger than the type's own alignment
inline for ([_]u29{ 1, 2, 4, 8, 16, 32, 64 }) |alignment| {
// initial
var slice = try allocator.alignedAlloc(u8, alignment, 10);
try testing.expect(slice.len == 10);
// grow
slice = try allocator.realloc(slice, 100);
try testing.expect(slice.len == 100);
// shrink
slice = allocator.shrink(slice, 10);
try testing.expect(slice.len == 10);
// go to zero
slice = allocator.shrink(slice, 0);
try testing.expect(slice.len == 0);
// realloc from zero
slice = try allocator.realloc(slice, 100);
try testing.expect(slice.len == 100);
// shrink with shrink
slice = allocator.shrink(slice, 10);
try testing.expect(slice.len == 10);
// shrink to zero
slice = allocator.shrink(slice, 0);
try testing.expect(slice.len == 0);
}
}
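/// Checks that an allocation requested with a multi-page alignment stays
/// aligned across realloc and shrink.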
pub fn testAllocatorLargeAlignment(base_allocator: mem.Allocator) !void {
var validationAllocator = mem.validationWrap(base_allocator);
const allocator = validationAllocator.allocator();
// Maybe a platform's page_size is actually the same as or
// very near maxInt(usize)?
if (mem.page_size << 2 > maxInt(usize)) return;
const USizeShift = std.meta.Int(.unsigned, std.math.log2(std.meta.bitCount(usize)));
const large_align = @as(u29, mem.page_size << 2);
var align_mask: usize = undefined;
_ = @shlWithOverflow(usize, ~@as(usize, 0), @as(USizeShift, @ctz(u29, large_align)), &align_mask);
var slice = try allocator.alignedAlloc(u8, large_align, 500);
try testing.expect(@ptrToInt(slice.ptr) & align_mask == @ptrToInt(slice.ptr));
slice = allocator.shrink(slice, 100);
try testing.expect(@ptrToInt(slice.ptr) & align_mask == @ptrToInt(slice.ptr));
slice = try allocator.realloc(slice, 5000);
try testing.expect(@ptrToInt(slice.ptr) & align_mask == @ptrToInt(slice.ptr));
slice = allocator.shrink(slice, 10);
try testing.expect(@ptrToInt(slice.ptr) & align_mask == @ptrToInt(slice.ptr));
slice = try allocator.realloc(slice, 20000);
try testing.expect(@ptrToInt(slice.ptr) & align_mask == @ptrToInt(slice.ptr));
allocator.free(slice);
}
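/// Shrinks an allocation while requesting a much larger alignment and
/// verifies the contents are preserved.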
pub fn testAllocatorAlignedShrink(base_allocator: mem.Allocator) !void {
var validationAllocator = mem.validationWrap(base_allocator);
const allocator = validationAllocator.allocator();
var debug_buffer: [1000]u8 = undefined;
const debug_allocator = FixedBufferAllocator.init(&debug_buffer).allocator();
const alloc_size = mem.page_size * 2 + 50;
var slice = try allocator.alignedAlloc(u8, 16, alloc_size);
defer allocator.free(slice);
var stuff_to_free = std.ArrayList([]align(16) u8).init(debug_allocator);
// On Windows, VirtualAlloc returns addresses aligned to a 64K boundary,
// which is 16 pages, hence the 32. This test may require increasing
// the size of the allocations feeding the `allocator` parameter if it
// fails, because of the high over-alignment we want to exercise here.
while (@ptrToInt(slice.ptr) == mem.alignForward(@ptrToInt(slice.ptr), mem.page_size * 32)) {
try stuff_to_free.append(slice);
slice = try allocator.alignedAlloc(u8, 16, alloc_size);
}
while (stuff_to_free.popOrNull()) |item| {
allocator.free(item);
}
slice[0] = 0x12;
slice[60] = 0x34;
// realloc to a smaller size but with a larger alignment
slice = try allocator.reallocAdvanced(slice, mem.page_size * 32, alloc_size / 2, .exact);
try testing.expect(slice[0] == 0x12);
try testing.expect(slice[60] == 0x34);
}
test "heap" {
_ = @import("heap/logging_allocator.zig");
_ = @import("heap/log_to_writer_allocator.zig");
}