Mirror of https://github.com/godotengine/godot.git, synced 2024-11-10 14:12:51 +00:00
Merge pull request #89495 from akien-mga/pcre2-fix-sljit-compilation-ioscross
pcre2: Remove unused sljit files after last update
This commit is contained in: commit 9c2db0c035
@@ -29,7 +29,7 @@ if env["builtin_pcre2"]:
 "pcre2_extuni.c",
 "pcre2_find_bracket.c",
 "pcre2_jit_compile.c",
-# "pcre2_jit_match.c", "pcre2_jit_misc.c", # these files are included in pcre2_jit_compile.c.
+# "pcre2_jit_match.c", "pcre2_jit_misc.c", # Included in `pcre2_jit_compile.c`.
 "pcre2_maketables.c",
 "pcre2_match.c",
 "pcre2_match_data.c",
@@ -44,6 +44,7 @@ if env["builtin_pcre2"]:
 "pcre2_substring.c",
 "pcre2_tables.c",
 "pcre2_ucd.c",
+# "pcre2_ucptables.c", # Included in `pcre2_tables.c`.
 "pcre2_valid_utf.c",
 "pcre2_xclass.c",
 ]
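For context on the comment that this hunk rewords: upstream PCRE2 does not compile `pcre2_jit_match.c` and `pcre2_jit_misc.c` as standalone translation units; `pcre2_jit_compile.c` pulls them in directly, which is why they stay out of the SCsub source list. A minimal illustrative sketch of that pattern (not the actual PCRE2 code):

    /* pcre2_jit_compile.c (sketch): the JIT match/misc sources are brought in
       as #includes, so listing them separately in the build would define the
       same symbols twice. */
    #include "pcre2_jit_match.c"
    #include "pcre2_jit_misc.c"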
thirdparty/README.md (vendored, 3 changed lines)
@@ -769,9 +769,6 @@ Files extracted from upstream source:
 - `src/sljit/`
 - `AUTHORS` and `LICENCE`
-
-A sljit patch from upstream was backported to fix macOS < 11.0 compilation
-in 10.40, it can be found in the `patches` folder.


 ## recastnavigation

@@ -1,31 +0,0 @@
From de8fc816bc6698ab97316ed954e133e7e5098262 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Carlo=20Marcelo=20Arenas=20Bel=C3=B3n?= <carenas@gmail.com>
Date: Thu, 21 Apr 2022 21:01:12 -0700
Subject: [PATCH] macos: somehow allow building with a target below 11.0

While building for macOS older than 11 in Apple Silicon makes no
sense, some build systems lack the flexibility to set a target per
architecture while aiming to support multi architecture binaries.

Allow an option in those cases by using the slower runtime checks
if the toolchain allows it.

Fixes: PCRE2Project/pcre2#109
---
 thirdparty/pcre2/src/sljit/sljitExecAllocator.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/thirdparty/pcre2/src/sljit/sljitExecAllocator.c b/thirdparty/pcre2/src/sljit/sljitExecAllocator.c
index 92d940ddc2..6359848cd5 100644
--- a/thirdparty/pcre2/src/sljit/sljitExecAllocator.c
+++ b/thirdparty/pcre2/src/sljit/sljitExecAllocator.c
@@ -152,6 +152,9 @@ static SLJIT_INLINE void apple_update_wx_flags(sljit_s32 enable_exec)
 {
 #if MAC_OS_X_VERSION_MIN_REQUIRED >= 110000
 	pthread_jit_write_protect_np(enable_exec);
+#elif defined(__clang__)
+	if (__builtin_available(macOS 11.0, *))
+	pthread_jit_write_protect_np(enable_exec);
 #else
 #error "Must target Big Sur or newer"
 #endif /* BigSur */
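The now-removed patch above relies on clang's `__builtin_available` runtime check, so a binary built with a deployment target older than 11.0 only calls `pthread_jit_write_protect_np` when it actually runs on Big Sur or newer. A minimal standalone sketch of that technique (the helper name `toggle_jit_write_protection` is illustrative, not part of sljit):

    #include <pthread.h>

    /* Flip JIT write protection on Apple Silicon only when the running OS
       supports it; compiles with a pre-11.0 deployment target under clang. */
    static void toggle_jit_write_protection(int enable_exec)
    {
    #if defined(__APPLE__) && defined(__clang__)
    	if (__builtin_available(macOS 11.0, *))
    		pthread_jit_write_protect_np(enable_exec);
    #else
    	(void)enable_exec; /* no-op elsewhere */
    #endif
    }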
thirdparty/pcre2/src/sljit/sljitExecAllocator.c (vendored, 414 lines deleted)
@@ -1,414 +0,0 @@
/*
* Stack-less Just-In-Time compiler
*
* Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are
* permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

/*
This file contains a simple executable memory allocator

It is assumed, that executable code blocks are usually medium (or sometimes
large) memory blocks, and the allocator is not too frequently called (less
optimized than other allocators). Thus, using it as a generic allocator is
not suggested.

How does it work:
Memory is allocated in continuous memory areas called chunks by alloc_chunk()
Chunk format:
[ block ][ block ] ... [ block ][ block terminator ]

All blocks and the block terminator is started with block_header. The block
header contains the size of the previous and the next block. These sizes
can also contain special values.
Block size:
0 - The block is a free_block, with a different size member.
1 - The block is a block terminator.
n - The block is used at the moment, and the value contains its size.
Previous block size:
0 - This is the first block of the memory chunk.
n - The size of the previous block.

Using these size values we can go forward or backward on the block chain.
The unused blocks are stored in a chain list pointed by free_blocks. This
list is useful if we need to find a suitable memory area when the allocator
is called.

When a block is freed, the new free block is connected to its adjacent free
blocks if possible.

[ free block ][ used block ][ free block ]
and "used block" is freed, the three blocks are connected together:
[ one big free block ]
*/

/* --------------------------------------------------------------------- */
/* System (OS) functions */
/* --------------------------------------------------------------------- */

/* 64 KByte. */
#define CHUNK_SIZE (sljit_uw)0x10000u

/*
alloc_chunk / free_chunk :
* allocate executable system memory chunks
* the size is always divisible by CHUNK_SIZE
SLJIT_ALLOCATOR_LOCK / SLJIT_ALLOCATOR_UNLOCK :
* provided as part of sljitUtils
* only the allocator requires this lock, sljit is fully thread safe
as it only uses local variables
*/

#ifdef _WIN32
#define SLJIT_UPDATE_WX_FLAGS(from, to, enable_exec)

static SLJIT_INLINE void* alloc_chunk(sljit_uw size)
{
return VirtualAlloc(NULL, size, MEM_COMMIT | MEM_RESERVE, PAGE_EXECUTE_READWRITE);
}

static SLJIT_INLINE void free_chunk(void *chunk, sljit_uw size)
{
SLJIT_UNUSED_ARG(size);
VirtualFree(chunk, 0, MEM_RELEASE);
}

#else /* POSIX */

#if defined(__APPLE__) && defined(MAP_JIT)
/*
On macOS systems, returns MAP_JIT if it is defined _and_ we're running on a
version where it's OK to have more than one JIT block or where MAP_JIT is
required.
On non-macOS systems, returns MAP_JIT if it is defined.
*/
#include <TargetConditionals.h>
#if TARGET_OS_OSX
#if defined SLJIT_CONFIG_X86 && SLJIT_CONFIG_X86
#ifdef MAP_ANON
#include <sys/utsname.h>
#include <stdlib.h>

#define SLJIT_MAP_JIT (get_map_jit_flag())

static SLJIT_INLINE int get_map_jit_flag()
{
size_t page_size;
void *ptr;
struct utsname name;
static int map_jit_flag = -1;

if (map_jit_flag < 0) {
map_jit_flag = 0;
uname(&name);

/* Kernel version for 10.14.0 (Mojave) or later */
if (atoi(name.release) >= 18) {
page_size = get_page_alignment() + 1;
/* Only use MAP_JIT if a hardened runtime is used */
ptr = mmap(NULL, page_size, PROT_WRITE | PROT_EXEC,
MAP_PRIVATE | MAP_ANON, -1, 0);

if (ptr != MAP_FAILED)
munmap(ptr, page_size);
else
map_jit_flag = MAP_JIT;
}
}
return map_jit_flag;
}
#endif /* MAP_ANON */
#else /* !SLJIT_CONFIG_X86 */
#if !(defined SLJIT_CONFIG_ARM && SLJIT_CONFIG_ARM)
#error "Unsupported architecture"
#endif /* SLJIT_CONFIG_ARM */
#include <AvailabilityMacros.h>
#include <pthread.h>

#define SLJIT_MAP_JIT (MAP_JIT)
#define SLJIT_UPDATE_WX_FLAGS(from, to, enable_exec) \
apple_update_wx_flags(enable_exec)

static SLJIT_INLINE void apple_update_wx_flags(sljit_s32 enable_exec)
{
#if MAC_OS_X_VERSION_MIN_REQUIRED >= 110000
pthread_jit_write_protect_np(enable_exec);
#elif defined(__clang__)
if (__builtin_available(macOS 11.0, *))
pthread_jit_write_protect_np(enable_exec);
#else
#error "Must target Big Sur or newer"
#endif /* BigSur */
}
#endif /* SLJIT_CONFIG_X86 */
#else /* !TARGET_OS_OSX */
#define SLJIT_MAP_JIT (MAP_JIT)
#endif /* TARGET_OS_OSX */
#endif /* __APPLE__ && MAP_JIT */
#ifndef SLJIT_UPDATE_WX_FLAGS
#define SLJIT_UPDATE_WX_FLAGS(from, to, enable_exec)
#endif /* !SLJIT_UPDATE_WX_FLAGS */
#ifndef SLJIT_MAP_JIT
#define SLJIT_MAP_JIT (0)
#endif /* !SLJIT_MAP_JIT */

static SLJIT_INLINE void* alloc_chunk(sljit_uw size)
{
void *retval;
int prot = PROT_READ | PROT_WRITE | PROT_EXEC;
int flags = MAP_PRIVATE;
int fd = -1;

#ifdef PROT_MAX
prot |= PROT_MAX(prot);
#endif

#ifdef MAP_ANON
flags |= MAP_ANON | SLJIT_MAP_JIT;
#else /* !MAP_ANON */
if (SLJIT_UNLIKELY((dev_zero < 0) && open_dev_zero()))
return NULL;

fd = dev_zero;
#endif /* MAP_ANON */

retval = mmap(NULL, size, prot, flags, fd, 0);
if (retval == MAP_FAILED)
return NULL;

#ifdef __FreeBSD__
/* HardenedBSD's mmap lies, so check permissions again */
if (mprotect(retval, size, PROT_READ | PROT_WRITE | PROT_EXEC) < 0) {
munmap(retval, size);
return NULL;
}
#endif /* FreeBSD */

SLJIT_UPDATE_WX_FLAGS(retval, (uint8_t *)retval + size, 0);

return retval;
}

static SLJIT_INLINE void free_chunk(void *chunk, sljit_uw size)
{
munmap(chunk, size);
}

#endif /* windows */

/* --------------------------------------------------------------------- */
/* Common functions */
/* --------------------------------------------------------------------- */

#define CHUNK_MASK (~(CHUNK_SIZE - 1))

struct block_header {
sljit_uw size;
sljit_uw prev_size;
};

struct free_block {
struct block_header header;
struct free_block *next;
struct free_block *prev;
sljit_uw size;
};

#define AS_BLOCK_HEADER(base, offset) \
((struct block_header*)(((sljit_u8*)base) + offset))
#define AS_FREE_BLOCK(base, offset) \
((struct free_block*)(((sljit_u8*)base) + offset))
#define MEM_START(base) ((void*)(((sljit_u8*)base) + sizeof(struct block_header)))
#define ALIGN_SIZE(size) (((size) + sizeof(struct block_header) + 7u) & ~(sljit_uw)7)

static struct free_block* free_blocks;
static sljit_uw allocated_size;
static sljit_uw total_size;

static SLJIT_INLINE void sljit_insert_free_block(struct free_block *free_block, sljit_uw size)
{
free_block->header.size = 0;
free_block->size = size;

free_block->next = free_blocks;
free_block->prev = NULL;
if (free_blocks)
free_blocks->prev = free_block;
free_blocks = free_block;
}

static SLJIT_INLINE void sljit_remove_free_block(struct free_block *free_block)
{
if (free_block->next)
free_block->next->prev = free_block->prev;

if (free_block->prev)
free_block->prev->next = free_block->next;
else {
SLJIT_ASSERT(free_blocks == free_block);
free_blocks = free_block->next;
}
}

SLJIT_API_FUNC_ATTRIBUTE void* sljit_malloc_exec(sljit_uw size)
{
struct block_header *header;
struct block_header *next_header;
struct free_block *free_block;
sljit_uw chunk_size;

SLJIT_ALLOCATOR_LOCK();
if (size < (64 - sizeof(struct block_header)))
size = (64 - sizeof(struct block_header));
size = ALIGN_SIZE(size);

free_block = free_blocks;
while (free_block) {
if (free_block->size >= size) {
chunk_size = free_block->size;
SLJIT_UPDATE_WX_FLAGS(NULL, NULL, 0);
if (chunk_size > size + 64) {
/* We just cut a block from the end of the free block. */
chunk_size -= size;
free_block->size = chunk_size;
header = AS_BLOCK_HEADER(free_block, chunk_size);
header->prev_size = chunk_size;
AS_BLOCK_HEADER(header, size)->prev_size = size;
}
else {
sljit_remove_free_block(free_block);
header = (struct block_header*)free_block;
size = chunk_size;
}
allocated_size += size;
header->size = size;
SLJIT_ALLOCATOR_UNLOCK();
return MEM_START(header);
}
free_block = free_block->next;
}

chunk_size = (size + sizeof(struct block_header) + CHUNK_SIZE - 1) & CHUNK_MASK;
header = (struct block_header*)alloc_chunk(chunk_size);
if (!header) {
SLJIT_ALLOCATOR_UNLOCK();
return NULL;
}

chunk_size -= sizeof(struct block_header);
total_size += chunk_size;

header->prev_size = 0;
if (chunk_size > size + 64) {
/* Cut the allocated space into a free and a used block. */
allocated_size += size;
header->size = size;
chunk_size -= size;

free_block = AS_FREE_BLOCK(header, size);
free_block->header.prev_size = size;
sljit_insert_free_block(free_block, chunk_size);
next_header = AS_BLOCK_HEADER(free_block, chunk_size);
}
else {
/* All space belongs to this allocation. */
allocated_size += chunk_size;
header->size = chunk_size;
next_header = AS_BLOCK_HEADER(header, chunk_size);
}
next_header->size = 1;
next_header->prev_size = chunk_size;
SLJIT_ALLOCATOR_UNLOCK();
return MEM_START(header);
}

SLJIT_API_FUNC_ATTRIBUTE void sljit_free_exec(void* ptr)
{
struct block_header *header;
struct free_block* free_block;

SLJIT_ALLOCATOR_LOCK();
header = AS_BLOCK_HEADER(ptr, -(sljit_sw)sizeof(struct block_header));
allocated_size -= header->size;

/* Connecting free blocks together if possible. */
SLJIT_UPDATE_WX_FLAGS(NULL, NULL, 0);

/* If header->prev_size == 0, free_block will equal to header.
In this case, free_block->header.size will be > 0. */
free_block = AS_FREE_BLOCK(header, -(sljit_sw)header->prev_size);
if (SLJIT_UNLIKELY(!free_block->header.size)) {
free_block->size += header->size;
header = AS_BLOCK_HEADER(free_block, free_block->size);
header->prev_size = free_block->size;
}
else {
free_block = (struct free_block*)header;
sljit_insert_free_block(free_block, header->size);
}

header = AS_BLOCK_HEADER(free_block, free_block->size);
if (SLJIT_UNLIKELY(!header->size)) {
free_block->size += ((struct free_block*)header)->size;
sljit_remove_free_block((struct free_block*)header);
header = AS_BLOCK_HEADER(free_block, free_block->size);
header->prev_size = free_block->size;
}

/* The whole chunk is free. */
if (SLJIT_UNLIKELY(!free_block->header.prev_size && header->size == 1)) {
/* If this block is freed, we still have (allocated_size / 2) free space. */
if (total_size - free_block->size > (allocated_size * 3 / 2)) {
total_size -= free_block->size;
sljit_remove_free_block(free_block);
free_chunk(free_block, free_block->size + sizeof(struct block_header));
}
}

SLJIT_UPDATE_WX_FLAGS(NULL, NULL, 1);
SLJIT_ALLOCATOR_UNLOCK();
}

SLJIT_API_FUNC_ATTRIBUTE void sljit_free_unused_memory_exec(void)
{
struct free_block* free_block;
struct free_block* next_free_block;

SLJIT_ALLOCATOR_LOCK();
SLJIT_UPDATE_WX_FLAGS(NULL, NULL, 0);

free_block = free_blocks;
while (free_block) {
next_free_block = free_block->next;
if (!free_block->header.prev_size &&
AS_BLOCK_HEADER(free_block, free_block->size)->size == 1) {
total_size -= free_block->size;
sljit_remove_free_block(free_block);
free_chunk(free_block, free_block->size + sizeof(struct block_header));
}
free_block = next_free_block;
}

SLJIT_ASSERT((total_size && free_blocks) || (!total_size && !free_blocks));
SLJIT_UPDATE_WX_FLAGS(NULL, NULL, 1);
SLJIT_ALLOCATOR_UNLOCK();
}
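For orientation, the public surface of the allocator removed above is just three entry points. A minimal caller-side sketch (assuming the sljit headers, e.g. `sljitLir.h`, are in scope; the function name `example` is only illustrative):

    #include "sljitLir.h"

    /* Grab an executable block, use it, then release it. */
    static void example(void)
    {
    	void *code = sljit_malloc_exec(256); /* rounded up and carved out of a chunk */
    	if (!code)
    		return;
    	/* ... emit machine code into `code` ... */
    	sljit_free_exec(code);              /* returns the block to the free list */
    	sljit_free_unused_memory_exec();    /* releases wholly free chunks to the OS */
    }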
thirdparty/pcre2/src/sljit/sljitProtExecAllocator.c (vendored, 474 lines deleted)
@@ -1,474 +0,0 @@
/*
* Stack-less Just-In-Time compiler
*
* Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are
* permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

/*
This file contains a simple executable memory allocator

It is assumed, that executable code blocks are usually medium (or sometimes
large) memory blocks, and the allocator is not too frequently called (less
optimized than other allocators). Thus, using it as a generic allocator is
not suggested.

How does it work:
Memory is allocated in continuous memory areas called chunks by alloc_chunk()
Chunk format:
[ block ][ block ] ... [ block ][ block terminator ]

All blocks and the block terminator is started with block_header. The block
header contains the size of the previous and the next block. These sizes
can also contain special values.
Block size:
0 - The block is a free_block, with a different size member.
1 - The block is a block terminator.
n - The block is used at the moment, and the value contains its size.
Previous block size:
0 - This is the first block of the memory chunk.
n - The size of the previous block.

Using these size values we can go forward or backward on the block chain.
The unused blocks are stored in a chain list pointed by free_blocks. This
list is useful if we need to find a suitable memory area when the allocator
is called.

When a block is freed, the new free block is connected to its adjacent free
blocks if possible.

[ free block ][ used block ][ free block ]
and "used block" is freed, the three blocks are connected together:
[ one big free block ]
*/

/* --------------------------------------------------------------------- */
/* System (OS) functions */
/* --------------------------------------------------------------------- */

/* 64 KByte. */
#define CHUNK_SIZE (sljit_uw)0x10000

struct chunk_header {
void *executable;
};

/*
alloc_chunk / free_chunk :
* allocate executable system memory chunks
* the size is always divisible by CHUNK_SIZE
SLJIT_ALLOCATOR_LOCK / SLJIT_ALLOCATOR_UNLOCK :
* provided as part of sljitUtils
* only the allocator requires this lock, sljit is fully thread safe
as it only uses local variables
*/

#ifndef __NetBSD__
#include <sys/stat.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>

#ifndef O_NOATIME
#define O_NOATIME 0
#endif

/* this is a linux extension available since kernel 3.11 */
#ifndef O_TMPFILE
#define O_TMPFILE 020200000
#endif

#ifndef _GNU_SOURCE
char *secure_getenv(const char *name);
int mkostemp(char *template, int flags);
#endif

static SLJIT_INLINE int create_tempfile(void)
{
int fd;
char tmp_name[256];
size_t tmp_name_len = 0;
char *dir;
struct stat st;
#if defined(SLJIT_SINGLE_THREADED) && SLJIT_SINGLE_THREADED
mode_t mode;
#endif

#ifdef HAVE_MEMFD_CREATE
/* this is a GNU extension, make sure to use -D_GNU_SOURCE */
fd = memfd_create("sljit", MFD_CLOEXEC);
if (fd != -1) {
fchmod(fd, 0);
return fd;
}
#endif

dir = secure_getenv("TMPDIR");

if (dir) {
tmp_name_len = strlen(dir);
if (tmp_name_len > 0 && tmp_name_len < sizeof(tmp_name)) {
if ((stat(dir, &st) == 0) && S_ISDIR(st.st_mode))
strcpy(tmp_name, dir);
}
}

#ifdef P_tmpdir
if (!tmp_name_len) {
tmp_name_len = strlen(P_tmpdir);
if (tmp_name_len > 0 && tmp_name_len < sizeof(tmp_name))
strcpy(tmp_name, P_tmpdir);
}
#endif
if (!tmp_name_len) {
strcpy(tmp_name, "/tmp");
tmp_name_len = 4;
}

SLJIT_ASSERT(tmp_name_len > 0 && tmp_name_len < sizeof(tmp_name));

if (tmp_name[tmp_name_len - 1] == '/')
tmp_name[--tmp_name_len] = '\0';

#ifdef __linux__
/*
* the previous trimming might had left an empty string if TMPDIR="/"
* so work around the problem below
*/
fd = open(tmp_name_len ? tmp_name : "/",
O_TMPFILE | O_EXCL | O_RDWR | O_NOATIME | O_CLOEXEC, 0);
if (fd != -1)
return fd;
#endif

if (tmp_name_len + 7 >= sizeof(tmp_name))
return -1;

strcpy(tmp_name + tmp_name_len, "/XXXXXX");
#if defined(SLJIT_SINGLE_THREADED) && SLJIT_SINGLE_THREADED
mode = umask(0777);
#endif
fd = mkostemp(tmp_name, O_CLOEXEC | O_NOATIME);
#if defined(SLJIT_SINGLE_THREADED) && SLJIT_SINGLE_THREADED
umask(mode);
#else
fchmod(fd, 0);
#endif

if (fd == -1)
return -1;

if (unlink(tmp_name)) {
close(fd);
return -1;
}

return fd;
}

static SLJIT_INLINE struct chunk_header* alloc_chunk(sljit_uw size)
{
struct chunk_header *retval;
int fd;

fd = create_tempfile();
if (fd == -1)
return NULL;

if (ftruncate(fd, (off_t)size)) {
close(fd);
return NULL;
}

retval = (struct chunk_header *)mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

if (retval == MAP_FAILED) {
close(fd);
return NULL;
}

retval->executable = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);

if (retval->executable == MAP_FAILED) {
munmap((void *)retval, size);
close(fd);
return NULL;
}

close(fd);
return retval;
}
#else
/*
* MAP_REMAPDUP is a NetBSD extension available sinde 8.0, make sure to
* adjust your feature macros (ex: -D_NETBSD_SOURCE) as needed
*/
static SLJIT_INLINE struct chunk_header* alloc_chunk(sljit_uw size)
{
struct chunk_header *retval;

retval = (struct chunk_header *)mmap(NULL, size,
PROT_READ | PROT_WRITE | PROT_MPROTECT(PROT_EXEC),
MAP_ANON | MAP_SHARED, -1, 0);

if (retval == MAP_FAILED)
return NULL;

retval->executable = mremap(retval, size, NULL, size, MAP_REMAPDUP);
if (retval->executable == MAP_FAILED) {
munmap((void *)retval, size);
return NULL;
}

if (mprotect(retval->executable, size, PROT_READ | PROT_EXEC) == -1) {
munmap(retval->executable, size);
munmap((void *)retval, size);
return NULL;
}

return retval;
}
#endif /* NetBSD */

static SLJIT_INLINE void free_chunk(void *chunk, sljit_uw size)
{
struct chunk_header *header = ((struct chunk_header *)chunk) - 1;

munmap(header->executable, size);
munmap((void *)header, size);
}

/* --------------------------------------------------------------------- */
/* Common functions */
/* --------------------------------------------------------------------- */

#define CHUNK_MASK (~(CHUNK_SIZE - 1))

struct block_header {
sljit_uw size;
sljit_uw prev_size;
sljit_sw executable_offset;
};

struct free_block {
struct block_header header;
struct free_block *next;
struct free_block *prev;
sljit_uw size;
};

#define AS_BLOCK_HEADER(base, offset) \
((struct block_header*)(((sljit_u8*)base) + offset))
#define AS_FREE_BLOCK(base, offset) \
((struct free_block*)(((sljit_u8*)base) + offset))
#define MEM_START(base) ((void*)((base) + 1))
#define ALIGN_SIZE(size) (((size) + sizeof(struct block_header) + 7u) & ~(sljit_uw)7)

static struct free_block* free_blocks;
static sljit_uw allocated_size;
static sljit_uw total_size;

static SLJIT_INLINE void sljit_insert_free_block(struct free_block *free_block, sljit_uw size)
{
free_block->header.size = 0;
free_block->size = size;

free_block->next = free_blocks;
free_block->prev = NULL;
if (free_blocks)
free_blocks->prev = free_block;
free_blocks = free_block;
}

static SLJIT_INLINE void sljit_remove_free_block(struct free_block *free_block)
{
if (free_block->next)
free_block->next->prev = free_block->prev;

if (free_block->prev)
free_block->prev->next = free_block->next;
else {
SLJIT_ASSERT(free_blocks == free_block);
free_blocks = free_block->next;
}
}

SLJIT_API_FUNC_ATTRIBUTE void* sljit_malloc_exec(sljit_uw size)
{
struct chunk_header *chunk_header;
struct block_header *header;
struct block_header *next_header;
struct free_block *free_block;
sljit_uw chunk_size;
sljit_sw executable_offset;

SLJIT_ALLOCATOR_LOCK();
if (size < (64 - sizeof(struct block_header)))
size = (64 - sizeof(struct block_header));
size = ALIGN_SIZE(size);

free_block = free_blocks;
while (free_block) {
if (free_block->size >= size) {
chunk_size = free_block->size;
if (chunk_size > size + 64) {
/* We just cut a block from the end of the free block. */
chunk_size -= size;
free_block->size = chunk_size;
header = AS_BLOCK_HEADER(free_block, chunk_size);
header->prev_size = chunk_size;
header->executable_offset = free_block->header.executable_offset;
AS_BLOCK_HEADER(header, size)->prev_size = size;
}
else {
sljit_remove_free_block(free_block);
header = (struct block_header*)free_block;
size = chunk_size;
}
allocated_size += size;
header->size = size;
SLJIT_ALLOCATOR_UNLOCK();
return MEM_START(header);
}
free_block = free_block->next;
}

chunk_size = sizeof(struct chunk_header) + sizeof(struct block_header);
chunk_size = (chunk_size + size + CHUNK_SIZE - 1) & CHUNK_MASK;

chunk_header = alloc_chunk(chunk_size);
if (!chunk_header) {
SLJIT_ALLOCATOR_UNLOCK();
return NULL;
}

executable_offset = (sljit_sw)((sljit_u8*)chunk_header->executable - (sljit_u8*)chunk_header);

chunk_size -= sizeof(struct chunk_header) + sizeof(struct block_header);
total_size += chunk_size;

header = (struct block_header *)(chunk_header + 1);

header->prev_size = 0;
header->executable_offset = executable_offset;
if (chunk_size > size + 64) {
/* Cut the allocated space into a free and a used block. */
allocated_size += size;
header->size = size;
chunk_size -= size;

free_block = AS_FREE_BLOCK(header, size);
free_block->header.prev_size = size;
free_block->header.executable_offset = executable_offset;
sljit_insert_free_block(free_block, chunk_size);
next_header = AS_BLOCK_HEADER(free_block, chunk_size);
}
else {
/* All space belongs to this allocation. */
allocated_size += chunk_size;
header->size = chunk_size;
next_header = AS_BLOCK_HEADER(header, chunk_size);
}
next_header->size = 1;
next_header->prev_size = chunk_size;
next_header->executable_offset = executable_offset;
SLJIT_ALLOCATOR_UNLOCK();
return MEM_START(header);
}

SLJIT_API_FUNC_ATTRIBUTE void sljit_free_exec(void* ptr)
{
struct block_header *header;
struct free_block* free_block;

SLJIT_ALLOCATOR_LOCK();
header = AS_BLOCK_HEADER(ptr, -(sljit_sw)sizeof(struct block_header));
header = AS_BLOCK_HEADER(header, -header->executable_offset);
allocated_size -= header->size;

/* Connecting free blocks together if possible. */

/* If header->prev_size == 0, free_block will equal to header.
In this case, free_block->header.size will be > 0. */
free_block = AS_FREE_BLOCK(header, -(sljit_sw)header->prev_size);
if (SLJIT_UNLIKELY(!free_block->header.size)) {
free_block->size += header->size;
header = AS_BLOCK_HEADER(free_block, free_block->size);
header->prev_size = free_block->size;
}
else {
free_block = (struct free_block*)header;
sljit_insert_free_block(free_block, header->size);
}

header = AS_BLOCK_HEADER(free_block, free_block->size);
if (SLJIT_UNLIKELY(!header->size)) {
free_block->size += ((struct free_block*)header)->size;
sljit_remove_free_block((struct free_block*)header);
header = AS_BLOCK_HEADER(free_block, free_block->size);
header->prev_size = free_block->size;
}

/* The whole chunk is free. */
if (SLJIT_UNLIKELY(!free_block->header.prev_size && header->size == 1)) {
/* If this block is freed, we still have (allocated_size / 2) free space. */
if (total_size - free_block->size > (allocated_size * 3 / 2)) {
total_size -= free_block->size;
sljit_remove_free_block(free_block);
free_chunk(free_block, free_block->size +
sizeof(struct chunk_header) +
sizeof(struct block_header));
}
}

SLJIT_ALLOCATOR_UNLOCK();
}

SLJIT_API_FUNC_ATTRIBUTE void sljit_free_unused_memory_exec(void)
{
struct free_block* free_block;
struct free_block* next_free_block;

SLJIT_ALLOCATOR_LOCK();

free_block = free_blocks;
while (free_block) {
next_free_block = free_block->next;
if (!free_block->header.prev_size &&
AS_BLOCK_HEADER(free_block, free_block->size)->size == 1) {
total_size -= free_block->size;
sljit_remove_free_block(free_block);
free_chunk(free_block, free_block->size +
sizeof(struct chunk_header) +
sizeof(struct block_header));
}
free_block = next_free_block;
}

SLJIT_ASSERT((total_size && free_blocks) || (!total_size && !free_blocks));
SLJIT_ALLOCATOR_UNLOCK();
}

SLJIT_API_FUNC_ATTRIBUTE sljit_sw sljit_exec_offset(void* ptr)
{
return ((struct block_header *)(ptr))[-1].executable_offset;
}
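Unlike the default allocator above, this removed variant maps each chunk twice (one writable view, one executable view) and records the distance between the two mappings; callers translate a writable pointer into its executable alias via `sljit_exec_offset`. A hedged caller-side sketch (the helper name `executable_view` is illustrative only):

    #include "sljitLir.h"

    /* Translate the writable pointer returned by sljit_malloc_exec() into its
       executable alias, using the offset recorded in the block header. */
    static void *executable_view(void *writable)
    {
    	return (void *)((sljit_u8 *)writable + sljit_exec_offset(writable));
    }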
thirdparty/pcre2/src/sljit/sljitWXExecAllocator.c (vendored, 204 lines deleted)
@@ -1,204 +0,0 @@
/*
* Stack-less Just-In-Time compiler
*
* Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are
* permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
* SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

/*
This file contains a simple W^X executable memory allocator for POSIX
like systems and Windows

In *NIX, MAP_ANON is required (that is considered a feature) so make
sure to set the right availability macros for your system or the code
will fail to build.

If your system doesn't support mapping of anonymous pages (ex: IRIX) it
is also likely that it doesn't need this allocator and should be using
the standard one instead.

It allocates a separate map for each code block and may waste a lot of
memory, because whatever was requested, will be rounded up to the page
size (minimum 4KB, but could be even bigger).

It changes the page permissions (RW <-> RX) as needed and therefore, if you
will be updating the code after it has been generated, need to make sure to
block any concurrent execution, or could result in a SIGBUS, that could
even manifest itself at a different address than the one that was being
modified.

Only use if you are unable to use the regular allocator because of security
restrictions and adding exceptions to your application or the system are
not possible.
*/

#define SLJIT_UPDATE_WX_FLAGS(from, to, enable_exec) \
sljit_update_wx_flags((from), (to), (enable_exec))

#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>

#ifdef __NetBSD__
#define SLJIT_PROT_WX PROT_MPROTECT(PROT_EXEC)
#define check_se_protected(ptr, size) (0)
#else /* POSIX */
#if !(defined SLJIT_SINGLE_THREADED && SLJIT_SINGLE_THREADED)
#include <pthread.h>
#define SLJIT_SE_LOCK() pthread_mutex_lock(&se_lock)
#define SLJIT_SE_UNLOCK() pthread_mutex_unlock(&se_lock)
#endif /* !SLJIT_SINGLE_THREADED */

#define check_se_protected(ptr, size) generic_se_protected(ptr, size)

static SLJIT_INLINE int generic_se_protected(void *ptr, sljit_uw size)
{
if (SLJIT_LIKELY(!mprotect(ptr, size, PROT_EXEC)))
return mprotect(ptr, size, PROT_READ | PROT_WRITE);

return -1;
}
#endif /* NetBSD */

#ifndef SLJIT_SE_LOCK
#define SLJIT_SE_LOCK()
#endif
#ifndef SLJIT_SE_UNLOCK
#define SLJIT_SE_UNLOCK()
#endif
#ifndef SLJIT_PROT_WX
#define SLJIT_PROT_WX 0
#endif

SLJIT_API_FUNC_ATTRIBUTE void* sljit_malloc_exec(sljit_uw size)
{
#if !(defined SLJIT_SINGLE_THREADED && SLJIT_SINGLE_THREADED) \
&& !defined(__NetBSD__)
static pthread_mutex_t se_lock = PTHREAD_MUTEX_INITIALIZER;
#endif
static int se_protected = !SLJIT_PROT_WX;
int prot = PROT_READ | PROT_WRITE | SLJIT_PROT_WX;
sljit_uw* ptr;

if (SLJIT_UNLIKELY(se_protected < 0))
return NULL;

#ifdef PROT_MAX
prot |= PROT_MAX(PROT_READ | PROT_WRITE | PROT_EXEC);
#endif

size += sizeof(sljit_uw);
ptr = (sljit_uw*)mmap(NULL, size, prot, MAP_PRIVATE | MAP_ANON, -1, 0);

if (ptr == MAP_FAILED)
return NULL;

if (SLJIT_UNLIKELY(se_protected > 0)) {
SLJIT_SE_LOCK();
se_protected = check_se_protected(ptr, size);
SLJIT_SE_UNLOCK();
if (SLJIT_UNLIKELY(se_protected < 0)) {
munmap((void *)ptr, size);
return NULL;
}
}

*ptr++ = size;
return ptr;
}

#undef SLJIT_PROT_WX
#undef SLJIT_SE_UNLOCK
#undef SLJIT_SE_LOCK

SLJIT_API_FUNC_ATTRIBUTE void sljit_free_exec(void* ptr)
{
sljit_uw *start_ptr = ((sljit_uw*)ptr) - 1;
munmap((void*)start_ptr, *start_ptr);
}

static void sljit_update_wx_flags(void *from, void *to, sljit_s32 enable_exec)
{
sljit_uw page_mask = (sljit_uw)get_page_alignment();
sljit_uw start = (sljit_uw)from;
sljit_uw end = (sljit_uw)to;
int prot = PROT_READ | (enable_exec ? PROT_EXEC : PROT_WRITE);

SLJIT_ASSERT(start < end);

start &= ~page_mask;
end = (end + page_mask) & ~page_mask;

mprotect((void*)start, end - start, prot);
}

#else /* windows */

SLJIT_API_FUNC_ATTRIBUTE void* sljit_malloc_exec(sljit_uw size)
{
sljit_uw *ptr;

size += sizeof(sljit_uw);
ptr = (sljit_uw*)VirtualAlloc(NULL, size,
MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);

if (!ptr)
return NULL;

*ptr++ = size;

return ptr;
}

SLJIT_API_FUNC_ATTRIBUTE void sljit_free_exec(void* ptr)
{
sljit_uw start = (sljit_uw)ptr - sizeof(sljit_uw);
#if defined(SLJIT_DEBUG) && SLJIT_DEBUG
sljit_uw page_mask = (sljit_uw)get_page_alignment();

SLJIT_ASSERT(!(start & page_mask));
#endif
VirtualFree((void*)start, 0, MEM_RELEASE);
}

static void sljit_update_wx_flags(void *from, void *to, sljit_s32 enable_exec)
{
DWORD oldprot;
sljit_uw page_mask = (sljit_uw)get_page_alignment();
sljit_uw start = (sljit_uw)from;
sljit_uw end = (sljit_uw)to;
DWORD prot = enable_exec ? PAGE_EXECUTE : PAGE_READWRITE;

SLJIT_ASSERT(start < end);

start &= ~page_mask;
end = (end + page_mask) & ~page_mask;

VirtualProtect((void*)start, end - start, prot, &oldprot);
}

#endif /* !windows */

SLJIT_API_FUNC_ATTRIBUTE void sljit_free_unused_memory_exec(void)
{
/* This allocator does not keep unused memory for future allocations. */
}
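This W^X variant never keeps a block writable and executable at the same time: `sljit_update_wx_flags` flips whole pages between RW and RX around every code update. A hedged sketch of the calling pattern it expects (the function name `patch_generated_code` and its arguments are placeholders):

    /* Around an in-place patch of already generated code: make the pages
       writable, modify them, then flip them back to executable. */
    static void patch_generated_code(void *code, sljit_uw code_size)
    {
    	SLJIT_UPDATE_WX_FLAGS(code, (sljit_u8 *)code + code_size, 0); /* pages become RW */
    	/* ... rewrite instructions through `code` ... */
    	SLJIT_UPDATE_WX_FLAGS(code, (sljit_u8 *)code + code_size, 1); /* pages become RX */
    }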