Merge pull request #94137 from Craig-Stoneham/master
Improve template class conditionals with constexpr (code style)
Commit: 8019cdb444
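For context, this is the pattern the commit applies throughout: PagedAllocator, RID_Alloc and SortArray each carry a compile-time bool template parameter (thread_safe, THREAD_SAFE, Validate). With a plain `if`, both branches are still instantiated and eliminating the dead one is left to the optimizer; with `if constexpr`, the untaken branch is discarded during template instantiation. A minimal sketch of the idea — the Buffer class and std::mutex below are illustrative stand-ins, not Godot code:

#include <mutex>
#include <vector>

// Illustrative stand-in (not Godot code): a container whose locking is chosen
// by a bool template parameter, mirroring thread_safe / THREAD_SAFE in the diff.
template <typename T, bool thread_safe = false>
class Buffer {
	std::vector<T> data;
	std::mutex mutex; // Only ever touched when thread_safe is true.

public:
	void push(const T &p_value) {
		if constexpr (thread_safe) {
			mutex.lock(); // Discarded at instantiation time for Buffer<T, false>.
		}
		data.push_back(p_value);
		if constexpr (thread_safe) {
			mutex.unlock();
		}
	}
};

int main() {
	Buffer<int, false> local; // No locking code is generated for this instantiation.
	Buffer<int, true> shared; // Locking code is kept for this one.
	local.push(1);
	shared.push(2);
	return 0;
}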
@@ -55,7 +55,7 @@ class PagedAllocator {
 public:
 	template <typename... Args>
 	T *alloc(Args &&...p_args) {
-		if (thread_safe) {
+		if constexpr (thread_safe) {
 			spin_lock.lock();
 		}
 		if (unlikely(allocs_available == 0)) {
@@ -76,7 +76,7 @@ public:

 		allocs_available--;
 		T *alloc = available_pool[allocs_available >> page_shift][allocs_available & page_mask];
-		if (thread_safe) {
+		if constexpr (thread_safe) {
 			spin_lock.unlock();
 		}
 		memnew_placement(alloc, T(p_args...));
@@ -84,13 +84,13 @@ public:
 	}

 	void free(T *p_mem) {
-		if (thread_safe) {
+		if constexpr (thread_safe) {
 			spin_lock.lock();
 		}
 		p_mem->~T();
 		available_pool[allocs_available >> page_shift][allocs_available & page_mask] = p_mem;
 		allocs_available++;
-		if (thread_safe) {
+		if constexpr (thread_safe) {
 			spin_lock.unlock();
 		}
 	}
@@ -120,28 +120,28 @@ private:

 public:
 	void reset(bool p_allow_unfreed = false) {
-		if (thread_safe) {
+		if constexpr (thread_safe) {
 			spin_lock.lock();
 		}
 		_reset(p_allow_unfreed);
-		if (thread_safe) {
+		if constexpr (thread_safe) {
 			spin_lock.unlock();
 		}
 	}

 	bool is_configured() const {
-		if (thread_safe) {
+		if constexpr (thread_safe) {
 			spin_lock.lock();
 		}
 		bool result = page_size > 0;
-		if (thread_safe) {
+		if constexpr (thread_safe) {
 			spin_lock.unlock();
 		}
 		return result;
 	}

 	void configure(uint32_t p_page_size) {
-		if (thread_safe) {
+		if constexpr (thread_safe) {
 			spin_lock.lock();
 		}
 		ERR_FAIL_COND(page_pool != nullptr); // Safety check.
@@ -149,7 +149,7 @@ public:
 		page_size = nearest_power_of_2_templated(p_page_size);
 		page_mask = page_size - 1;
 		page_shift = get_shift_from_power_of_2(page_size);
-		if (thread_safe) {
+		if constexpr (thread_safe) {
 			spin_lock.unlock();
 		}
 	}
@@ -161,7 +161,7 @@ public:
 	}

 	~PagedAllocator() {
-		if (thread_safe) {
+		if constexpr (thread_safe) {
 			spin_lock.lock();
 		}
 		bool leaked = allocs_available < pages_allocated * page_size;
@@ -172,7 +172,7 @@ public:
 		} else {
 			_reset(false);
 		}
-		if (thread_safe) {
+		if constexpr (thread_safe) {
 			spin_lock.unlock();
 		}
 	}
@@ -82,7 +82,7 @@ class RID_Alloc : public RID_AllocBase {
 	mutable SpinLock spin_lock;

 	_FORCE_INLINE_ RID _allocate_rid() {
-		if (THREAD_SAFE) {
+		if constexpr (THREAD_SAFE) {
 			spin_lock.lock();
 		}

@@ -128,7 +128,7 @@ class RID_Alloc : public RID_AllocBase {

 		alloc_count++;

-		if (THREAD_SAFE) {
+		if constexpr (THREAD_SAFE) {
 			spin_lock.unlock();
 		}

@@ -156,14 +156,14 @@ public:
 		if (p_rid == RID()) {
 			return nullptr;
 		}
-		if (THREAD_SAFE) {
+		if constexpr (THREAD_SAFE) {
 			spin_lock.lock();
 		}

 		uint64_t id = p_rid.get_id();
 		uint32_t idx = uint32_t(id & 0xFFFFFFFF);
 		if (unlikely(idx >= max_alloc)) {
-			if (THREAD_SAFE) {
+			if constexpr (THREAD_SAFE) {
 				spin_lock.unlock();
 			}
 			return nullptr;
@@ -176,14 +176,14 @@ public:

 		if (unlikely(p_initialize)) {
 			if (unlikely(!(validator_chunks[idx_chunk][idx_element] & 0x80000000))) {
-				if (THREAD_SAFE) {
+				if constexpr (THREAD_SAFE) {
 					spin_lock.unlock();
 				}
 				ERR_FAIL_V_MSG(nullptr, "Initializing already initialized RID");
 			}

 			if (unlikely((validator_chunks[idx_chunk][idx_element] & 0x7FFFFFFF) != validator)) {
-				if (THREAD_SAFE) {
+				if constexpr (THREAD_SAFE) {
 					spin_lock.unlock();
 				}
 				ERR_FAIL_V_MSG(nullptr, "Attempting to initialize the wrong RID");
@@ -192,7 +192,7 @@ public:
 			validator_chunks[idx_chunk][idx_element] &= 0x7FFFFFFF; //initialized

 		} else if (unlikely(validator_chunks[idx_chunk][idx_element] != validator)) {
-			if (THREAD_SAFE) {
+			if constexpr (THREAD_SAFE) {
 				spin_lock.unlock();
 			}
 			if ((validator_chunks[idx_chunk][idx_element] & 0x80000000) && validator_chunks[idx_chunk][idx_element] != 0xFFFFFFFF) {
@@ -203,7 +203,7 @@ public:

 		T *ptr = &chunks[idx_chunk][idx_element];

-		if (THREAD_SAFE) {
+		if constexpr (THREAD_SAFE) {
 			spin_lock.unlock();
 		}

@@ -221,14 +221,14 @@ public:
 	}

 	_FORCE_INLINE_ bool owns(const RID &p_rid) const {
-		if (THREAD_SAFE) {
+		if constexpr (THREAD_SAFE) {
 			spin_lock.lock();
 		}

 		uint64_t id = p_rid.get_id();
 		uint32_t idx = uint32_t(id & 0xFFFFFFFF);
 		if (unlikely(idx >= max_alloc)) {
-			if (THREAD_SAFE) {
+			if constexpr (THREAD_SAFE) {
 				spin_lock.unlock();
 			}
 			return false;
@@ -241,7 +241,7 @@ public:

 		bool owned = (validator != 0x7FFFFFFF) && (validator_chunks[idx_chunk][idx_element] & 0x7FFFFFFF) == validator;

-		if (THREAD_SAFE) {
+		if constexpr (THREAD_SAFE) {
 			spin_lock.unlock();
 		}

@@ -249,14 +249,14 @@ public:
 	}

 	_FORCE_INLINE_ void free(const RID &p_rid) {
-		if (THREAD_SAFE) {
+		if constexpr (THREAD_SAFE) {
 			spin_lock.lock();
 		}

 		uint64_t id = p_rid.get_id();
 		uint32_t idx = uint32_t(id & 0xFFFFFFFF);
 		if (unlikely(idx >= max_alloc)) {
-			if (THREAD_SAFE) {
+			if constexpr (THREAD_SAFE) {
 				spin_lock.unlock();
 			}
 			ERR_FAIL();
@@ -267,12 +267,12 @@ public:

 		uint32_t validator = uint32_t(id >> 32);
 		if (unlikely(validator_chunks[idx_chunk][idx_element] & 0x80000000)) {
-			if (THREAD_SAFE) {
+			if constexpr (THREAD_SAFE) {
 				spin_lock.unlock();
 			}
 			ERR_FAIL_MSG("Attempted to free an uninitialized or invalid RID.");
 		} else if (unlikely(validator_chunks[idx_chunk][idx_element] != validator)) {
-			if (THREAD_SAFE) {
+			if constexpr (THREAD_SAFE) {
 				spin_lock.unlock();
 			}
 			ERR_FAIL();
@@ -284,7 +284,7 @@ public:
 		alloc_count--;
 		free_list_chunks[alloc_count / elements_in_chunk][alloc_count % elements_in_chunk] = idx;

-		if (THREAD_SAFE) {
+		if constexpr (THREAD_SAFE) {
 			spin_lock.unlock();
 		}
 	}
@@ -293,7 +293,7 @@ public:
 		return alloc_count;
 	}
 	void get_owned_list(List<RID> *p_owned) const {
-		if (THREAD_SAFE) {
+		if constexpr (THREAD_SAFE) {
 			spin_lock.lock();
 		}
 		for (size_t i = 0; i < max_alloc; i++) {
@@ -302,14 +302,14 @@ public:
 				p_owned->push_back(_make_from_id((validator << 32) | i));
 			}
 		}
-		if (THREAD_SAFE) {
+		if constexpr (THREAD_SAFE) {
 			spin_lock.unlock();
 		}
 	}

 	//used for fast iteration in the elements or RIDs
 	void fill_owned_buffer(RID *p_rid_buffer) const {
-		if (THREAD_SAFE) {
+		if constexpr (THREAD_SAFE) {
 			spin_lock.lock();
 		}
 		uint32_t idx = 0;
@@ -320,7 +320,7 @@ public:
 				idx++;
 			}
 		}
-		if (THREAD_SAFE) {
+		if constexpr (THREAD_SAFE) {
 			spin_lock.unlock();
 		}
 	}
@@ -174,14 +174,14 @@ public:

 		while (true) {
 			while (compare(p_array[p_first], p_pivot)) {
-				if (Validate) {
+				if constexpr (Validate) {
 					ERR_BAD_COMPARE(p_first == unmodified_last - 1);
 				}
 				p_first++;
 			}
 			p_last--;
 			while (compare(p_pivot, p_array[p_last])) {
-				if (Validate) {
+				if constexpr (Validate) {
 					ERR_BAD_COMPARE(p_last == unmodified_first);
 				}
 				p_last--;
@@ -251,7 +251,7 @@ public:
 	inline void unguarded_linear_insert(int64_t p_last, T p_value, T *p_array) const {
 		int64_t next = p_last - 1;
 		while (compare(p_value, p_array[next])) {
-			if (Validate) {
+			if constexpr (Validate) {
 				ERR_BAD_COMPARE(next == 0);
 			}
 			p_array[p_last] = p_array[next];
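The same reasoning covers the SortArray hunks above: with `if constexpr (Validate)`, the ERR_BAD_COMPARE checks only exist in instantiations that ask for validation, instead of relying on the optimizer to drop a constant-false branch. A rough sketch of that use of the pattern, using a hypothetical checked_min helper and a plain assert rather than Godot's SortArray and ERR_BAD_COMPARE:

#include <cassert>
#include <cstddef>

// Hypothetical helper (not Godot API): the bounds assertion is only
// instantiated when Validate is true.
template <typename T, bool Validate = true>
T checked_min(const T *p_array, size_t p_len) {
	if constexpr (Validate) {
		assert(p_len > 0 && "checked_min() called on an empty array");
	}
	T best = p_array[0];
	for (size_t i = 1; i < p_len; i++) {
		if (p_array[i] < best) {
			best = p_array[i];
		}
	}
	return best;
}

int main() {
	int values[] = { 3, 1, 2 };
	return checked_min<int, false>(values, 3); // Validation compiled out; returns 1.
}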