const std = @import("std.zig");
const builtin = @import("builtin");
const root = @import("root");
const debug = std.debug;
const assert = debug.assert;
const testing = std.testing;
const mem = std.mem;
const os = std.os;
const c = std.c;
const maxInt = std.math.maxInt;

pub const LoggingAllocator = @import("heap/logging_allocator.zig").LoggingAllocator;
pub const loggingAllocator = @import("heap/logging_allocator.zig").loggingAllocator;
pub const ScopedLoggingAllocator = @import("heap/logging_allocator.zig").ScopedLoggingAllocator;
pub const LogToWriterAllocator = @import("heap/log_to_writer_allocator.zig").LogToWriterAllocator;
pub const logToWriterAllocator = @import("heap/log_to_writer_allocator.zig").logToWriterAllocator;
pub const ArenaAllocator = @import("heap/arena_allocator.zig").ArenaAllocator;
pub const GeneralPurposeAllocator = @import("heap/general_purpose_allocator.zig").GeneralPurposeAllocator;

const Allocator = mem.Allocator;

const CAllocator = struct {
    comptime {
        if (!builtin.link_libc) {
            @compileError("C allocator is only available when linking against libc");
        }
    }

    usingnamespace if (@hasDecl(c, "malloc_size"))
        struct {
            pub const supports_malloc_size = true;
            pub const malloc_size = c.malloc_size;
        }
    else if (@hasDecl(c, "malloc_usable_size"))
        struct {
            pub const supports_malloc_size = true;
            pub const malloc_size = c.malloc_usable_size;
        }
    else if (@hasDecl(c, "_msize"))
        struct {
            pub const supports_malloc_size = true;
            pub const malloc_size = c._msize;
        }
    else
        struct {
            pub const supports_malloc_size = false;
        };

    pub const supports_posix_memalign = @hasDecl(c, "posix_memalign");

    fn getHeader(ptr: [*]u8) *[*]u8 {
        return @intToPtr(*[*]u8, @ptrToInt(ptr) - @sizeOf(usize));
    }

    fn alignedAlloc(len: usize, alignment: usize) ?[*]u8 {
        if (supports_posix_memalign) {
            // posix_memalign only accepts alignment values that are a
            // multiple of the pointer size.
            const eff_alignment = std.math.max(alignment, @sizeOf(usize));

            var aligned_ptr: ?*anyopaque = undefined;
            if (c.posix_memalign(&aligned_ptr, eff_alignment, len) != 0)
                return null;

            return @ptrCast([*]u8, aligned_ptr);
        }

        // Thin wrapper around regular malloc, overallocate to account for
        // alignment padding and store the original malloc()'ed pointer before
        // the aligned address.
        var unaligned_ptr = @ptrCast([*]u8, c.malloc(len + alignment - 1 + @sizeOf(usize)) orelse return null);
        const unaligned_addr = @ptrToInt(unaligned_ptr);
        const aligned_addr = mem.alignForward(unaligned_addr + @sizeOf(usize), alignment);
        var aligned_ptr = unaligned_ptr + (aligned_addr - unaligned_addr);
        getHeader(aligned_ptr).* = unaligned_ptr;

        return aligned_ptr;
    }

    fn alignedFree(ptr: [*]u8) void {
        if (supports_posix_memalign) {
            return c.free(ptr);
        }

        const unaligned_ptr = getHeader(ptr).*;
        c.free(unaligned_ptr);
    }

    fn alignedAllocSize(ptr: [*]u8) usize {
        if (supports_posix_memalign) {
            return CAllocator.malloc_size(ptr);
        }

        const unaligned_ptr = getHeader(ptr).*;
        const delta = @ptrToInt(ptr) - @ptrToInt(unaligned_ptr);
        return CAllocator.malloc_size(unaligned_ptr) - delta;
    }

    fn alloc(
        _: *anyopaque,
        len: usize,
        alignment: u29,
        len_align: u29,
        return_address: usize,
    ) error{OutOfMemory}![]u8 {
        _ = return_address;
        assert(len > 0);
        assert(std.math.isPowerOfTwo(alignment));

        var ptr = alignedAlloc(len, alignment) orelse return error.OutOfMemory;
        if (len_align == 0) {
            return ptr[0..len];
        }
        const full_len = init: {
            if (CAllocator.supports_malloc_size) {
                const s = alignedAllocSize(ptr);
                assert(s >= len);
                break :init s;
            }
            break :init len;
        };
        return ptr[0..mem.alignBackwardAnyAlign(full_len, len_align)];
    }

    fn resize(
        _: *anyopaque,
        buf: []u8,
        buf_align: u29,
        new_len: usize,
        len_align: u29,
        return_address: usize,
    ) ?usize {
        _ = buf_align;
        _ = return_address;
        if (new_len <= buf.len) {
            return mem.alignAllocLen(buf.len, new_len, len_align);
        }
        if (CAllocator.supports_malloc_size) {
            const full_len = alignedAllocSize(buf.ptr);
            if (new_len <= full_len) {
                return mem.alignAllocLen(full_len, new_len, len_align);
            }
        }
        return null;
    }

    fn free(
        _: *anyopaque,
        buf: []u8,
        buf_align: u29,
        return_address: usize,
    ) void {
        _ = buf_align;
        _ = return_address;
        alignedFree(buf.ptr);
    }
};

/// Supports the full Allocator interface, including alignment, and exploiting
/// `malloc_usable_size` if available. For an allocator that directly calls
/// `malloc`/`free`, see `raw_c_allocator`.
pub const c_allocator = Allocator{
    .ptr = undefined,
    .vtable = &c_allocator_vtable,
};
const c_allocator_vtable = Allocator.VTable{
    .alloc = CAllocator.alloc,
    .resize = CAllocator.resize,
    .free = CAllocator.free,
};
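
// Minimal usage sketch (assumes libc is linked, hence the guard): `c_allocator`
// behaves like any other `Allocator` value, so a slice can be allocated and
// freed directly through it.
test "c_allocator usage sketch" {
    if (builtin.link_libc) {
        const buf = try c_allocator.alloc(u8, 256);
        defer c_allocator.free(buf);
        try testing.expect(buf.len == 256);
    }
}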

/// Asserts allocations are within `@alignOf(std.c.max_align_t)` and directly calls
/// `malloc`/`free`. Does not attempt to utilize `malloc_usable_size`.
/// This allocator is safe to use as the backing allocator with
/// `ArenaAllocator` for example and is more optimal in such a case
/// than `c_allocator`.
pub const raw_c_allocator = Allocator{
    .ptr = undefined,
    .vtable = &raw_c_allocator_vtable,
};
const raw_c_allocator_vtable = Allocator.VTable{
    .alloc = rawCAlloc,
    .resize = rawCResize,
    .free = rawCFree,
};
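
// Minimal sketch of the pattern suggested by the doc comment above (assumes
// libc is linked, hence the guard): `raw_c_allocator` backs an `ArenaAllocator`,
// so `malloc`/`free` are called directly for the arena's backing buffers while
// the arena hands out the individual allocations.
test "raw_c_allocator backing an ArenaAllocator sketch" {
    if (builtin.link_libc) {
        var arena = ArenaAllocator.init(raw_c_allocator);
        defer arena.deinit();
        const allocator = arena.allocator();

        const buf = try allocator.alloc(u8, 100);
        try testing.expect(buf.len == 100);
    }
}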

fn rawCAlloc(
    _: *anyopaque,
    len: usize,
    ptr_align: u29,
    len_align: u29,
    ret_addr: usize,
) Allocator.Error![]u8 {
    _ = len_align;
    _ = ret_addr;
    assert(ptr_align <= @alignOf(std.c.max_align_t));
    const ptr = @ptrCast([*]u8, c.malloc(len) orelse return error.OutOfMemory);
    return ptr[0..len];
}

fn rawCResize(
    _: *anyopaque,
    buf: []u8,
    old_align: u29,
    new_len: usize,
    len_align: u29,
    ret_addr: usize,
) ?usize {
    _ = old_align;
    _ = ret_addr;
    if (new_len <= buf.len) {
        return mem.alignAllocLen(buf.len, new_len, len_align);
    }
    return null;
}

fn rawCFree(
    _: *anyopaque,
    buf: []u8,
    old_align: u29,
    ret_addr: usize,
) void {
    _ = old_align;
    _ = ret_addr;
    c.free(buf.ptr);
}

/// This allocator makes a syscall directly for every allocation and free.
/// Thread-safe and lock-free.
pub const page_allocator = if (builtin.target.isWasm())
    Allocator{
        .ptr = undefined,
        .vtable = &WasmPageAllocator.vtable,
    }
else if (builtin.target.os.tag == .freestanding)
    root.os.heap.page_allocator
else
    Allocator{
        .ptr = undefined,
        .vtable = &PageAllocator.vtable,
    };
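
// Minimal usage sketch: `page_allocator` is usable directly as a general
// `Allocator`, at the cost of a syscall per allocation and free; on most
// targets each allocation is backed by whole pages.
test "page_allocator usage sketch" {
    const buf = try page_allocator.alloc(u8, 16);
    defer page_allocator.free(buf);
    try testing.expect(buf.len == 16);
}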

/// Verifies that the adjusted length will still map to the full length
pub fn alignPageAllocLen(full_len: usize, len: usize, len_align: u29) usize {
    const aligned_len = mem.alignAllocLen(full_len, len, len_align);
    assert(mem.alignForward(aligned_len, mem.page_size) == full_len);
    return aligned_len;
}

/// TODO Utilize this on Windows.
pub var next_mmap_addr_hint: ?[*]align(mem.page_size) u8 = null;

const PageAllocator = struct {
    const vtable = Allocator.VTable{
        .alloc = alloc,
        .resize = resize,
        .free = free,
    };

    fn alloc(_: *anyopaque, n: usize, alignment: u29, len_align: u29, ra: usize) error{OutOfMemory}![]u8 {
        _ = ra;
        assert(n > 0);
        const aligned_len = mem.alignForward(n, mem.page_size);

        if (builtin.os.tag == .windows) {
            const w = os.windows;

            // Although officially it's at least aligned to page boundary,
            // Windows is known to reserve pages on a 64K boundary. It's
            // even more likely that the requested alignment is <= 64K than
            // 4K, so we're just allocating blindly and hoping for the best.
            // see https://devblogs.microsoft.com/oldnewthing/?p=42223
            const addr = w.VirtualAlloc(
                null,
                aligned_len,
                w.MEM_COMMIT | w.MEM_RESERVE,
                w.PAGE_READWRITE,
            ) catch return error.OutOfMemory;

            // If the allocation is sufficiently aligned, use it.
            if (mem.isAligned(@ptrToInt(addr), alignment)) {
                return @ptrCast([*]u8, addr)[0..alignPageAllocLen(aligned_len, n, len_align)];
            }

            // If it wasn't, actually do an explicitly aligned allocation.
            w.VirtualFree(addr, 0, w.MEM_RELEASE);
            const alloc_size = n + alignment - mem.page_size;

            while (true) {
                // Reserve a range of memory large enough to find a sufficiently
                // aligned address.
                const reserved_addr = w.VirtualAlloc(
                    null,
                    alloc_size,
                    w.MEM_RESERVE,
                    w.PAGE_NOACCESS,
                ) catch return error.OutOfMemory;
                const aligned_addr = mem.alignForward(@ptrToInt(reserved_addr), alignment);

                // Release the reserved pages (not actually used).
                w.VirtualFree(reserved_addr, 0, w.MEM_RELEASE);

                // At this point, it is possible that another thread has
                // obtained some memory space that will cause the next
                // VirtualAlloc call to fail. To handle this, we will retry
                // until it succeeds.
                const ptr = w.VirtualAlloc(
                    @intToPtr(*anyopaque, aligned_addr),
                    aligned_len,
                    w.MEM_COMMIT | w.MEM_RESERVE,
                    w.PAGE_READWRITE,
                ) catch continue;

                return @ptrCast([*]u8, ptr)[0..alignPageAllocLen(aligned_len, n, len_align)];
            }
        }

        const max_drop_len = alignment - @minimum(alignment, mem.page_size);
        const alloc_len = if (max_drop_len <= aligned_len - n)
            aligned_len
        else
            mem.alignForward(aligned_len + max_drop_len, mem.page_size);
        const hint = @atomicLoad(@TypeOf(next_mmap_addr_hint), &next_mmap_addr_hint, .Unordered);
        const slice = os.mmap(
            hint,
            alloc_len,
            os.PROT.READ | os.PROT.WRITE,
            os.MAP.PRIVATE | os.MAP.ANONYMOUS,
            -1,
            0,
        ) catch return error.OutOfMemory;
        assert(mem.isAligned(@ptrToInt(slice.ptr), mem.page_size));

        const result_ptr = mem.alignPointer(slice.ptr, alignment) orelse
            return error.OutOfMemory;

        // Unmap the extra bytes that were only requested in order to guarantee
        // that the range of memory we were provided had a proper alignment in
        // it somewhere. The extra bytes could be at the beginning, or end, or both.
        const drop_len = @ptrToInt(result_ptr) - @ptrToInt(slice.ptr);
        if (drop_len != 0) {
            os.munmap(slice[0..drop_len]);
        }

        // Unmap extra pages
        const aligned_buffer_len = alloc_len - drop_len;
        if (aligned_buffer_len > aligned_len) {
            os.munmap(result_ptr[aligned_len..aligned_buffer_len]);
        }

        const new_hint = @alignCast(mem.page_size, result_ptr + aligned_len);
        _ = @cmpxchgStrong(@TypeOf(next_mmap_addr_hint), &next_mmap_addr_hint, hint, new_hint, .Monotonic, .Monotonic);
        return result_ptr[0..alignPageAllocLen(aligned_len, n, len_align)];
    }

    fn resize(
        _: *anyopaque,
        buf_unaligned: []u8,
        buf_align: u29,
        new_size: usize,
        len_align: u29,
        return_address: usize,
    ) ?usize {
        _ = buf_align;
        _ = return_address;
        const new_size_aligned = mem.alignForward(new_size, mem.page_size);

        if (builtin.os.tag == .windows) {
            const w = os.windows;
            if (new_size <= buf_unaligned.len) {
                const base_addr = @ptrToInt(buf_unaligned.ptr);
                const old_addr_end = base_addr + buf_unaligned.len;
                const new_addr_end = mem.alignForward(base_addr + new_size, mem.page_size);
                if (old_addr_end > new_addr_end) {
                    // For shrinking that is not releasing, we will only
                    // decommit the pages not needed anymore.
                    w.VirtualFree(
                        @intToPtr(*anyopaque, new_addr_end),
                        old_addr_end - new_addr_end,
                        w.MEM_DECOMMIT,
                    );
                }
                return alignPageAllocLen(new_size_aligned, new_size, len_align);
            }
            const old_size_aligned = mem.alignForward(buf_unaligned.len, mem.page_size);
            if (new_size_aligned <= old_size_aligned) {
                return alignPageAllocLen(new_size_aligned, new_size, len_align);
            }
            return null;
        }

        const buf_aligned_len = mem.alignForward(buf_unaligned.len, mem.page_size);
        if (new_size_aligned == buf_aligned_len)
            return alignPageAllocLen(new_size_aligned, new_size, len_align);

        if (new_size_aligned < buf_aligned_len) {
            const ptr = @alignCast(mem.page_size, buf_unaligned.ptr + new_size_aligned);
            // TODO: if the next_mmap_addr_hint is within the unmapped range, update it
            os.munmap(ptr[0 .. buf_aligned_len - new_size_aligned]);
            return alignPageAllocLen(new_size_aligned, new_size, len_align);
        }

        // TODO: call mremap
        // TODO: if the next_mmap_addr_hint is within the remapped range, update it
        return null;
    }

    fn free(_: *anyopaque, buf_unaligned: []u8, buf_align: u29, return_address: usize) void {
        _ = buf_align;
        _ = return_address;

        if (builtin.os.tag == .windows) {
            os.windows.VirtualFree(buf_unaligned.ptr, 0, os.windows.MEM_RELEASE);
        } else {
            const buf_aligned_len = mem.alignForward(buf_unaligned.len, mem.page_size);
            const ptr = @alignCast(mem.page_size, buf_unaligned.ptr);
            os.munmap(ptr[0..buf_aligned_len]);
        }
    }
};

const WasmPageAllocator = struct {
    comptime {
        if (!builtin.target.isWasm()) {
            @compileError("WasmPageAllocator is only available for wasm32 arch");
        }
    }

    const vtable = Allocator.VTable{
        .alloc = alloc,
        .resize = resize,
        .free = free,
    };

    const PageStatus = enum(u1) {
        used = 0,
        free = 1,

        pub const none_free: u8 = 0;
    };

    const FreeBlock = struct {
        data: []u128,

        const Io = std.packed_int_array.PackedIntIo(u1, .Little);

        fn totalPages(self: FreeBlock) usize {
            return self.data.len * 128;
        }

        fn isInitialized(self: FreeBlock) bool {
            return self.data.len > 0;
        }

        fn getBit(self: FreeBlock, idx: usize) PageStatus {
            const bit_offset = 0;
            return @intToEnum(PageStatus, Io.get(mem.sliceAsBytes(self.data), idx, bit_offset));
        }

        fn setBits(self: FreeBlock, start_idx: usize, len: usize, val: PageStatus) void {
            const bit_offset = 0;
            var i: usize = 0;
            while (i < len) : (i += 1) {
                Io.set(mem.sliceAsBytes(self.data), start_idx + i, bit_offset, @enumToInt(val));
            }
        }

        // Use '0xFFFFFFFF' as a _missing_ sentinel
        // This saves ~50 bytes compared to returning a nullable
        // We can guarantee that conventional memory never gets this big,
        // and wasm32 would not be able to address this memory (32 GB > usize).
        // Revisit if this is settled: https://github.com/ziglang/zig/issues/3806
        const not_found = std.math.maxInt(usize);

        fn useRecycled(self: FreeBlock, num_pages: usize, alignment: u29) usize {
            @setCold(true);
            for (self.data) |segment, i| {
                const spills_into_next = @bitCast(i128, segment) < 0;
                const has_enough_bits = @popCount(u128, segment) >= num_pages;
                if (!spills_into_next and !has_enough_bits) continue;

                var j: usize = i * 128;
                while (j < (i + 1) * 128) : (j += 1) {
                    var count: usize = 0;
                    while (j + count < self.totalPages() and self.getBit(j + count) == .free) {
                        count += 1;
                        const addr = j * mem.page_size;
                        if (count >= num_pages and mem.isAligned(addr, alignment)) {
                            self.setBits(j, num_pages, .used);
                            return j;
                        }
                    }
                    j += count;
                }
            }
            return not_found;
        }

        fn recycle(self: FreeBlock, start_idx: usize, len: usize) void {
            self.setBits(start_idx, len, .free);
        }
    };

    var _conventional_data = [_]u128{0} ** 16;
    // Marking `conventional` as const saves ~40 bytes
    const conventional = FreeBlock{ .data = &_conventional_data };
    var extended = FreeBlock{ .data = &[_]u128{} };

    fn extendedOffset() usize {
        return conventional.totalPages();
    }

    fn nPages(memsize: usize) usize {
        return mem.alignForward(memsize, mem.page_size) / mem.page_size;
    }

    fn alloc(_: *anyopaque, len: usize, alignment: u29, len_align: u29, ra: usize) error{OutOfMemory}![]u8 {
        _ = ra;
        const page_count = nPages(len);
        const page_idx = try allocPages(page_count, alignment);
        return @intToPtr([*]u8, page_idx * mem.page_size)[0..alignPageAllocLen(page_count * mem.page_size, len, len_align)];
    }

    fn allocPages(page_count: usize, alignment: u29) !usize {
        {
            const idx = conventional.useRecycled(page_count, alignment);
            if (idx != FreeBlock.not_found) {
                return idx;
            }
        }

        const idx = extended.useRecycled(page_count, alignment);
        if (idx != FreeBlock.not_found) {
            return idx + extendedOffset();
        }

        const next_page_idx = @wasmMemorySize(0);
        const next_page_addr = next_page_idx * mem.page_size;
        const aligned_addr = mem.alignForward(next_page_addr, alignment);
        const drop_page_count = @divExact(aligned_addr - next_page_addr, mem.page_size);
        const result = @wasmMemoryGrow(0, @intCast(u32, drop_page_count + page_count));
        if (result <= 0)
            return error.OutOfMemory;
        assert(result == next_page_idx);
        const aligned_page_idx = next_page_idx + drop_page_count;
        if (drop_page_count > 0) {
            freePages(next_page_idx, aligned_page_idx);
        }
        return @intCast(usize, aligned_page_idx);
    }

    fn freePages(start: usize, end: usize) void {
        if (start < extendedOffset()) {
            conventional.recycle(start, @minimum(extendedOffset(), end) - start);
        }
        if (end > extendedOffset()) {
            var new_end = end;
            if (!extended.isInitialized()) {
                // Steal the last page from the memory currently being recycled
                // TODO: would it be better if we use the first page instead?
                new_end -= 1;
                extended.data = @intToPtr([*]u128, new_end * mem.page_size)[0 .. mem.page_size / @sizeOf(u128)];
                // Since this is the first page being freed and we consume it, assume *nothing* is free.
                mem.set(u128, extended.data, PageStatus.none_free);
            }
            const clamped_start = std.math.max(extendedOffset(), start);
            extended.recycle(clamped_start - extendedOffset(), new_end - clamped_start);
        }
    }

    fn resize(
        _: *anyopaque,
        buf: []u8,
        buf_align: u29,
        new_len: usize,
        len_align: u29,
        return_address: usize,
    ) ?usize {
        _ = buf_align;
        _ = return_address;
        const aligned_len = mem.alignForward(buf.len, mem.page_size);
        if (new_len > aligned_len) return null;
        const current_n = nPages(aligned_len);
        const new_n = nPages(new_len);
        if (new_n != current_n) {
            const base = nPages(@ptrToInt(buf.ptr));
            freePages(base + new_n, base + current_n);
        }
        return alignPageAllocLen(new_n * mem.page_size, new_len, len_align);
    }

    fn free(
        _: *anyopaque,
        buf: []u8,
        buf_align: u29,
        return_address: usize,
    ) void {
        _ = buf_align;
        _ = return_address;
        const aligned_len = mem.alignForward(buf.len, mem.page_size);
        const current_n = nPages(aligned_len);
        const base = nPages(@ptrToInt(buf.ptr));
        freePages(base, base + current_n);
    }
};

pub const HeapAllocator = switch (builtin.os.tag) {
    .windows => struct {
        heap_handle: ?HeapHandle,

        const HeapHandle = os.windows.HANDLE;

        pub fn init() HeapAllocator {
            return HeapAllocator{
                .heap_handle = null,
            };
        }

        pub fn allocator(self: *HeapAllocator) Allocator {
            return Allocator.init(self, alloc, resize, free);
        }

        pub fn deinit(self: *HeapAllocator) void {
            if (self.heap_handle) |heap_handle| {
                os.windows.HeapDestroy(heap_handle);
            }
        }

        fn getRecordPtr(buf: []u8) *align(1) usize {
            return @intToPtr(*align(1) usize, @ptrToInt(buf.ptr) + buf.len);
        }

        fn alloc(
            self: *HeapAllocator,
            n: usize,
            ptr_align: u29,
            len_align: u29,
            return_address: usize,
        ) error{OutOfMemory}![]u8 {
            _ = return_address;

            const amt = n + ptr_align - 1 + @sizeOf(usize);
            const optional_heap_handle = @atomicLoad(?HeapHandle, &self.heap_handle, .SeqCst);
            const heap_handle = optional_heap_handle orelse blk: {
                const options = if (builtin.single_threaded) os.windows.HEAP_NO_SERIALIZE else 0;
                const hh = os.windows.kernel32.HeapCreate(options, amt, 0) orelse return error.OutOfMemory;
                const other_hh = @cmpxchgStrong(?HeapHandle, &self.heap_handle, null, hh, .SeqCst, .SeqCst) orelse break :blk hh;
                os.windows.HeapDestroy(hh);
                break :blk other_hh.?; // can't be null because of the cmpxchg
            };
            const ptr = os.windows.kernel32.HeapAlloc(heap_handle, 0, amt) orelse return error.OutOfMemory;
            const root_addr = @ptrToInt(ptr);
            const aligned_addr = mem.alignForward(root_addr, ptr_align);
            const return_len = init: {
                if (len_align == 0) break :init n;
                const full_len = os.windows.kernel32.HeapSize(heap_handle, 0, ptr);
                assert(full_len != std.math.maxInt(usize));
                assert(full_len >= amt);
                break :init mem.alignBackwardAnyAlign(full_len - (aligned_addr - root_addr) - @sizeOf(usize), len_align);
            };
            const buf = @intToPtr([*]u8, aligned_addr)[0..return_len];
            getRecordPtr(buf).* = root_addr;
            return buf;
        }

        fn resize(
            self: *HeapAllocator,
            buf: []u8,
            buf_align: u29,
            new_size: usize,
            len_align: u29,
            return_address: usize,
        ) ?usize {
            _ = buf_align;
            _ = return_address;

            const root_addr = getRecordPtr(buf).*;
            const align_offset = @ptrToInt(buf.ptr) - root_addr;
            const amt = align_offset + new_size + @sizeOf(usize);
            const new_ptr = os.windows.kernel32.HeapReAlloc(
                self.heap_handle.?,
                os.windows.HEAP_REALLOC_IN_PLACE_ONLY,
                @intToPtr(*anyopaque, root_addr),
                amt,
            ) orelse return null;
            assert(new_ptr == @intToPtr(*anyopaque, root_addr));
            const return_len = init: {
                if (len_align == 0) break :init new_size;
                const full_len = os.windows.kernel32.HeapSize(self.heap_handle.?, 0, new_ptr);
                assert(full_len != std.math.maxInt(usize));
                assert(full_len >= amt);
                break :init mem.alignBackwardAnyAlign(full_len - align_offset, len_align);
            };
            getRecordPtr(buf.ptr[0..return_len]).* = root_addr;
            return return_len;
        }

        fn free(
            self: *HeapAllocator,
            buf: []u8,
            buf_align: u29,
            return_address: usize,
        ) void {
            _ = buf_align;
            _ = return_address;
            os.windows.HeapFree(self.heap_handle.?, 0, @intToPtr(*anyopaque, getRecordPtr(buf).*));
        }
    },
    else => @compileError("Unsupported OS"),
};

fn sliceContainsPtr(container: []u8, ptr: [*]u8) bool {
    return @ptrToInt(ptr) >= @ptrToInt(container.ptr) and
        @ptrToInt(ptr) < (@ptrToInt(container.ptr) + container.len);
}

fn sliceContainsSlice(container: []u8, slice: []u8) bool {
    return @ptrToInt(slice.ptr) >= @ptrToInt(container.ptr) and
        (@ptrToInt(slice.ptr) + slice.len) <= (@ptrToInt(container.ptr) + container.len);
}

pub const FixedBufferAllocator = struct {
    end_index: usize,
    buffer: []u8,

    pub fn init(buffer: []u8) FixedBufferAllocator {
        return FixedBufferAllocator{
            .buffer = buffer,
            .end_index = 0,
        };
    }

    /// *WARNING* using this at the same time as the interface returned by `threadSafeAllocator` is not thread safe
    pub fn allocator(self: *FixedBufferAllocator) Allocator {
        return Allocator.init(self, alloc, resize, free);
    }

    /// Provides a lock free thread safe `Allocator` interface to the underlying `FixedBufferAllocator`
    /// *WARNING* using this at the same time as the interface returned by `allocator` is not thread safe
    pub fn threadSafeAllocator(self: *FixedBufferAllocator) Allocator {
        return Allocator.init(
            self,
            threadSafeAlloc,
            Allocator.NoResize(FixedBufferAllocator).noResize,
            Allocator.NoOpFree(FixedBufferAllocator).noOpFree,
        );
    }

    pub fn ownsPtr(self: *FixedBufferAllocator, ptr: [*]u8) bool {
        return sliceContainsPtr(self.buffer, ptr);
    }

    pub fn ownsSlice(self: *FixedBufferAllocator, slice: []u8) bool {
        return sliceContainsSlice(self.buffer, slice);
    }

    /// NOTE: this will not work in all cases: if the last allocation had an adjusted_index,
    /// then we won't be able to determine what the last allocation was. This is because
    /// the alignForward operation done in alloc is not reversible.
    pub fn isLastAllocation(self: *FixedBufferAllocator, buf: []u8) bool {
        return buf.ptr + buf.len == self.buffer.ptr + self.end_index;
    }

    fn alloc(self: *FixedBufferAllocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 {
        _ = len_align;
        _ = ra;
        const adjust_off = mem.alignPointerOffset(self.buffer.ptr + self.end_index, ptr_align) orelse
            return error.OutOfMemory;
        const adjusted_index = self.end_index + adjust_off;
        const new_end_index = adjusted_index + n;
        if (new_end_index > self.buffer.len) {
            return error.OutOfMemory;
        }
        const result = self.buffer[adjusted_index..new_end_index];
        self.end_index = new_end_index;

        return result;
    }

    fn resize(
        self: *FixedBufferAllocator,
        buf: []u8,
        buf_align: u29,
        new_size: usize,
        len_align: u29,
        return_address: usize,
    ) ?usize {
        _ = buf_align;
        _ = return_address;
        assert(self.ownsSlice(buf)); // sanity check

        if (!self.isLastAllocation(buf)) {
            if (new_size > buf.len) return null;
            return mem.alignAllocLen(buf.len, new_size, len_align);
        }

        if (new_size <= buf.len) {
            const sub = buf.len - new_size;
            self.end_index -= sub;
            return mem.alignAllocLen(buf.len - sub, new_size, len_align);
        }

        const add = new_size - buf.len;
        if (add + self.end_index > self.buffer.len) return null;
        self.end_index += add;
        return new_size;
    }

    fn free(
        self: *FixedBufferAllocator,
        buf: []u8,
        buf_align: u29,
        return_address: usize,
    ) void {
        _ = buf_align;
        _ = return_address;
        assert(self.ownsSlice(buf)); // sanity check

        if (self.isLastAllocation(buf)) {
            self.end_index -= buf.len;
        }
    }

    fn threadSafeAlloc(self: *FixedBufferAllocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 {
        _ = len_align;
        _ = ra;
        var end_index = @atomicLoad(usize, &self.end_index, .SeqCst);
        while (true) {
            const adjust_off = mem.alignPointerOffset(self.buffer.ptr + end_index, ptr_align) orelse
                return error.OutOfMemory;
            const adjusted_index = end_index + adjust_off;
            const new_end_index = adjusted_index + n;
            if (new_end_index > self.buffer.len) {
                return error.OutOfMemory;
            }
            end_index = @cmpxchgWeak(usize, &self.end_index, end_index, new_end_index, .SeqCst, .SeqCst) orelse return self.buffer[adjusted_index..new_end_index];
        }
    }

    pub fn reset(self: *FixedBufferAllocator) void {
        self.end_index = 0;
    }
};
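
// Minimal usage sketch: back a `FixedBufferAllocator` with a stack buffer and
// use the single-threaded `allocator()` interface; per the warnings above, it
// must not be mixed with `threadSafeAllocator()` concurrently.
test "FixedBufferAllocator stack buffer sketch" {
    var buf: [64]u8 = undefined;
    var fba = FixedBufferAllocator.init(&buf);
    const allocator = fba.allocator();

    const slice = try allocator.alloc(u8, 32);
    try testing.expect(slice.len == 32);
    allocator.free(slice);
    fba.reset();
}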

pub const ThreadSafeFixedBufferAllocator = @compileError("ThreadSafeFixedBufferAllocator has been replaced with `threadSafeAllocator` on FixedBufferAllocator");

pub fn stackFallback(comptime size: usize, fallback_allocator: Allocator) StackFallbackAllocator(size) {
    return StackFallbackAllocator(size){
        .buffer = undefined,
        .fallback_allocator = fallback_allocator,
        .fixed_buffer_allocator = undefined,
    };
}

pub fn StackFallbackAllocator(comptime size: usize) type {
    return struct {
        const Self = @This();

        buffer: [size]u8,
        fallback_allocator: Allocator,
        fixed_buffer_allocator: FixedBufferAllocator,

        /// WARNING: This function both fetches a `std.mem.Allocator` interface to this
        /// allocator *and* resets the internal buffer allocator.
        pub fn get(self: *Self) Allocator {
            self.fixed_buffer_allocator = FixedBufferAllocator.init(self.buffer[0..]);
            return Allocator.init(self, alloc, resize, free);
        }

        fn alloc(
            self: *Self,
            len: usize,
            ptr_align: u29,
            len_align: u29,
            return_address: usize,
        ) error{OutOfMemory}![]u8 {
            return FixedBufferAllocator.alloc(&self.fixed_buffer_allocator, len, ptr_align, len_align, return_address) catch
                return self.fallback_allocator.rawAlloc(len, ptr_align, len_align, return_address);
        }

        fn resize(
            self: *Self,
            buf: []u8,
            buf_align: u29,
            new_len: usize,
            len_align: u29,
            return_address: usize,
        ) ?usize {
            if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) {
                return FixedBufferAllocator.resize(&self.fixed_buffer_allocator, buf, buf_align, new_len, len_align, return_address);
            } else {
                return self.fallback_allocator.rawResize(buf, buf_align, new_len, len_align, return_address);
            }
        }

        fn free(
            self: *Self,
            buf: []u8,
            buf_align: u29,
            return_address: usize,
        ) void {
            if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) {
                return FixedBufferAllocator.free(&self.fixed_buffer_allocator, buf, buf_align, return_address);
            } else {
                return self.fallback_allocator.rawFree(buf, buf_align, return_address);
            }
        }
    };
}
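
// Minimal usage sketch: requests that fit in the inline buffer are served from
// it, while anything larger falls through to the given fallback allocator
// (here `page_allocator`). Note that `get()` also resets the internal fixed
// buffer allocator.
test "stackFallback usage sketch" {
    var sfa = stackFallback(256, page_allocator);
    const allocator = sfa.get();

    const small = try allocator.alloc(u8, 64);
    try testing.expect(small.len == 64);
    allocator.free(small);
}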

test "c_allocator" {
    if (builtin.link_libc) {
        try testAllocator(c_allocator);
        try testAllocatorAligned(c_allocator);
        try testAllocatorLargeAlignment(c_allocator);
        try testAllocatorAlignedShrink(c_allocator);
    }
}

test "raw_c_allocator" {
    if (builtin.link_libc) {
        try testAllocator(raw_c_allocator);
    }
}

test "WasmPageAllocator internals" {
    if (comptime builtin.target.isWasm()) {
        const conventional_memsize = WasmPageAllocator.conventional.totalPages() * mem.page_size;
        const initial = try page_allocator.alloc(u8, mem.page_size);
        try testing.expect(@ptrToInt(initial.ptr) < conventional_memsize); // If this isn't conventional, the rest of these tests don't make sense. Also we have a serious memory leak in the test suite.

        var inplace = try page_allocator.realloc(initial, 1);
        try testing.expectEqual(initial.ptr, inplace.ptr);
        inplace = try page_allocator.realloc(inplace, 4);
        try testing.expectEqual(initial.ptr, inplace.ptr);
        page_allocator.free(inplace);

        const reuse = try page_allocator.alloc(u8, 1);
        try testing.expectEqual(initial.ptr, reuse.ptr);
        page_allocator.free(reuse);

        // This segment may span conventional and extended which has really complex rules so we're just ignoring it for now.
        const padding = try page_allocator.alloc(u8, conventional_memsize);
        page_allocator.free(padding);

        const extended = try page_allocator.alloc(u8, conventional_memsize);
        try testing.expect(@ptrToInt(extended.ptr) >= conventional_memsize);

        const use_small = try page_allocator.alloc(u8, 1);
        try testing.expectEqual(initial.ptr, use_small.ptr);
        page_allocator.free(use_small);

        inplace = try page_allocator.realloc(extended, 1);
        try testing.expectEqual(extended.ptr, inplace.ptr);
        page_allocator.free(inplace);

        const reuse_extended = try page_allocator.alloc(u8, conventional_memsize);
        try testing.expectEqual(extended.ptr, reuse_extended.ptr);
        page_allocator.free(reuse_extended);
    }
}

test "PageAllocator" {
    const allocator = page_allocator;
    try testAllocator(allocator);
    try testAllocatorAligned(allocator);
    if (!builtin.target.isWasm()) {
        try testAllocatorLargeAlignment(allocator);
        try testAllocatorAlignedShrink(allocator);
    }

    if (builtin.os.tag == .windows) {
        // Trying really large alignment. As mentioned in the implementation,
        // VirtualAlloc returns 64K aligned addresses. We want to make sure
        // PageAllocator works beyond that, as it's not tested by
        // `testAllocatorLargeAlignment`.
        const slice = try allocator.alignedAlloc(u8, 1 << 20, 128);
        slice[0] = 0x12;
        slice[127] = 0x34;
        allocator.free(slice);
    }
    {
        var buf = try allocator.alloc(u8, mem.page_size + 1);
        defer allocator.free(buf);
        buf = try allocator.realloc(buf, 1); // shrink past the page boundary
    }
}

test "HeapAllocator" {
    if (builtin.os.tag == .windows) {
        var heap_allocator = HeapAllocator.init();
        defer heap_allocator.deinit();
        const allocator = heap_allocator.allocator();

        try testAllocator(allocator);
        try testAllocatorAligned(allocator);
        try testAllocatorLargeAlignment(allocator);
        try testAllocatorAlignedShrink(allocator);
    }
}

test "ArenaAllocator" {
    var arena_allocator = ArenaAllocator.init(page_allocator);
    defer arena_allocator.deinit();
    const allocator = arena_allocator.allocator();

    try testAllocator(allocator);
    try testAllocatorAligned(allocator);
    try testAllocatorLargeAlignment(allocator);
    try testAllocatorAlignedShrink(allocator);
}

var test_fixed_buffer_allocator_memory: [800000 * @sizeOf(u64)]u8 = undefined;

test "FixedBufferAllocator" {
    var fixed_buffer_allocator = mem.validationWrap(FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]));
    const allocator = fixed_buffer_allocator.allocator();

    try testAllocator(allocator);
    try testAllocatorAligned(allocator);
    try testAllocatorLargeAlignment(allocator);
    try testAllocatorAlignedShrink(allocator);
}

test "FixedBufferAllocator.reset" {
    var buf: [8]u8 align(@alignOf(u64)) = undefined;
    var fba = FixedBufferAllocator.init(buf[0..]);
    const allocator = fba.allocator();

    const X = 0xeeeeeeeeeeeeeeee;
    const Y = 0xffffffffffffffff;

    var x = try allocator.create(u64);
    x.* = X;
    try testing.expectError(error.OutOfMemory, allocator.create(u64));

    fba.reset();
    var y = try allocator.create(u64);
    y.* = Y;

    // we expect Y to have overwritten X.
    try testing.expect(x.* == y.*);
    try testing.expect(y.* == Y);
}

test "StackFallbackAllocator" {
    const fallback_allocator = page_allocator;
    var stack_allocator = stackFallback(4096, fallback_allocator);

    try testAllocator(stack_allocator.get());
    try testAllocatorAligned(stack_allocator.get());
    try testAllocatorLargeAlignment(stack_allocator.get());
    try testAllocatorAlignedShrink(stack_allocator.get());
}

test "FixedBufferAllocator Reuse memory on realloc" {
    var small_fixed_buffer: [10]u8 = undefined;
    // check if we re-use the memory
    {
        var fixed_buffer_allocator = FixedBufferAllocator.init(small_fixed_buffer[0..]);
        const allocator = fixed_buffer_allocator.allocator();

        var slice0 = try allocator.alloc(u8, 5);
        try testing.expect(slice0.len == 5);
        var slice1 = try allocator.realloc(slice0, 10);
        try testing.expect(slice1.ptr == slice0.ptr);
        try testing.expect(slice1.len == 10);
        try testing.expectError(error.OutOfMemory, allocator.realloc(slice1, 11));
    }
    // check that we don't re-use the memory if it's not the most recent block
    {
        var fixed_buffer_allocator = FixedBufferAllocator.init(small_fixed_buffer[0..]);
        const allocator = fixed_buffer_allocator.allocator();

        var slice0 = try allocator.alloc(u8, 2);
        slice0[0] = 1;
        slice0[1] = 2;
        var slice1 = try allocator.alloc(u8, 2);
        var slice2 = try allocator.realloc(slice0, 4);
        try testing.expect(slice0.ptr != slice2.ptr);
        try testing.expect(slice1.ptr != slice2.ptr);
        try testing.expect(slice2[0] == 1);
        try testing.expect(slice2[1] == 2);
    }
}

test "Thread safe FixedBufferAllocator" {
    var fixed_buffer_allocator = FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]);

    try testAllocator(fixed_buffer_allocator.threadSafeAllocator());
    try testAllocatorAligned(fixed_buffer_allocator.threadSafeAllocator());
    try testAllocatorLargeAlignment(fixed_buffer_allocator.threadSafeAllocator());
    try testAllocatorAlignedShrink(fixed_buffer_allocator.threadSafeAllocator());
}

/// This one should not try alignments that exceed what C malloc can handle.
pub fn testAllocator(base_allocator: mem.Allocator) !void {
    var validationAllocator = mem.validationWrap(base_allocator);
    const allocator = validationAllocator.allocator();

    var slice = try allocator.alloc(*i32, 100);
    try testing.expect(slice.len == 100);
    for (slice) |*item, i| {
        item.* = try allocator.create(i32);
        item.*.* = @intCast(i32, i);
    }

    slice = try allocator.realloc(slice, 20000);
    try testing.expect(slice.len == 20000);

    for (slice[0..100]) |item, i| {
        try testing.expect(item.* == @intCast(i32, i));
        allocator.destroy(item);
    }

    slice = allocator.shrink(slice, 50);
    try testing.expect(slice.len == 50);
    slice = allocator.shrink(slice, 25);
    try testing.expect(slice.len == 25);
    slice = allocator.shrink(slice, 0);
    try testing.expect(slice.len == 0);
    slice = try allocator.realloc(slice, 10);
    try testing.expect(slice.len == 10);

    allocator.free(slice);

    // Zero-length allocation
    var empty = try allocator.alloc(u8, 0);
    allocator.free(empty);

    // Allocation with zero-sized types
    const zero_bit_ptr = try allocator.create(u0);
    zero_bit_ptr.* = 0;
    allocator.destroy(zero_bit_ptr);

    const oversize = try allocator.allocAdvanced(u32, null, 5, .at_least);
    try testing.expect(oversize.len >= 5);
    for (oversize) |*item| {
        item.* = 0xDEADBEEF;
    }
    allocator.free(oversize);
}

pub fn testAllocatorAligned(base_allocator: mem.Allocator) !void {
    var validationAllocator = mem.validationWrap(base_allocator);
    const allocator = validationAllocator.allocator();

    // Test a few alignment values, smaller and bigger than the type's one
    inline for ([_]u29{ 1, 2, 4, 8, 16, 32, 64 }) |alignment| {
        // initial
        var slice = try allocator.alignedAlloc(u8, alignment, 10);
        try testing.expect(slice.len == 10);
        // grow
        slice = try allocator.realloc(slice, 100);
        try testing.expect(slice.len == 100);
        // shrink
        slice = allocator.shrink(slice, 10);
        try testing.expect(slice.len == 10);
        // go to zero
        slice = allocator.shrink(slice, 0);
        try testing.expect(slice.len == 0);
        // realloc from zero
        slice = try allocator.realloc(slice, 100);
        try testing.expect(slice.len == 100);
        // shrink with shrink
        slice = allocator.shrink(slice, 10);
        try testing.expect(slice.len == 10);
        // shrink to zero
        slice = allocator.shrink(slice, 0);
        try testing.expect(slice.len == 0);
    }
}

pub fn testAllocatorLargeAlignment(base_allocator: mem.Allocator) !void {
    var validationAllocator = mem.validationWrap(base_allocator);
    const allocator = validationAllocator.allocator();

    // Maybe a platform's page_size is actually the same as or
    // very near usize?
    if (mem.page_size << 2 > maxInt(usize)) return;

    const USizeShift = std.meta.Int(.unsigned, std.math.log2(std.meta.bitCount(usize)));
    const large_align = @as(u29, mem.page_size << 2);

    var align_mask: usize = undefined;
    _ = @shlWithOverflow(usize, ~@as(usize, 0), @as(USizeShift, @ctz(u29, large_align)), &align_mask);

    var slice = try allocator.alignedAlloc(u8, large_align, 500);
    try testing.expect(@ptrToInt(slice.ptr) & align_mask == @ptrToInt(slice.ptr));

    slice = allocator.shrink(slice, 100);
    try testing.expect(@ptrToInt(slice.ptr) & align_mask == @ptrToInt(slice.ptr));

    slice = try allocator.realloc(slice, 5000);
    try testing.expect(@ptrToInt(slice.ptr) & align_mask == @ptrToInt(slice.ptr));

    slice = allocator.shrink(slice, 10);
    try testing.expect(@ptrToInt(slice.ptr) & align_mask == @ptrToInt(slice.ptr));

    slice = try allocator.realloc(slice, 20000);
    try testing.expect(@ptrToInt(slice.ptr) & align_mask == @ptrToInt(slice.ptr));

    allocator.free(slice);
}

pub fn testAllocatorAlignedShrink(base_allocator: mem.Allocator) !void {
    var validationAllocator = mem.validationWrap(base_allocator);
    const allocator = validationAllocator.allocator();

    var debug_buffer: [1000]u8 = undefined;
    const debug_allocator = FixedBufferAllocator.init(&debug_buffer).allocator();

    const alloc_size = mem.page_size * 2 + 50;
    var slice = try allocator.alignedAlloc(u8, 16, alloc_size);
    defer allocator.free(slice);

    var stuff_to_free = std.ArrayList([]align(16) u8).init(debug_allocator);
    // On Windows, VirtualAlloc returns addresses aligned to a 64K boundary,
    // which is 16 pages, hence the 32. This test may require increasing
    // the size of the allocations feeding the `allocator` parameter if they
    // fail, because of this high over-alignment we want to have.
    while (@ptrToInt(slice.ptr) == mem.alignForward(@ptrToInt(slice.ptr), mem.page_size * 32)) {
        try stuff_to_free.append(slice);
        slice = try allocator.alignedAlloc(u8, 16, alloc_size);
    }
    while (stuff_to_free.popOrNull()) |item| {
        allocator.free(item);
    }
    slice[0] = 0x12;
    slice[60] = 0x34;

    // realloc to a smaller size but with a larger alignment
    slice = try allocator.reallocAdvanced(slice, mem.page_size * 32, alloc_size / 2, .exact);
    try testing.expect(slice[0] == 0x12);
    try testing.expect(slice[60] == 0x34);
}

test "heap" {
    _ = @import("heap/logging_allocator.zig");
    _ = @import("heap/log_to_writer_allocator.zig");
}