XArray updates for 5.7-rc1

commit 193bc55b6d: Merge tag 'xarray-5.7' of git://git.infradead.org/users/willy/linux-dax

Pull XArray updates from Matthew Wilcox:

 - Fix two bugs which affected multi-index entries larger than 2^26
   indices
 - Fix some documentation
 - Remove unused IDA macros
 - Add a small optimisation for tiny configurations
 - Fix a bug which could cause an RCU walker to terminate a marked walk
   early

* tag 'xarray-5.7' of git://git.infradead.org/users/willy/linux-dax:
  xarray: Fix early termination of xas_for_each_marked
  radix tree test suite: Support kmem_cache alignment
  XArray: Optimise xas_sibling() if !CONFIG_XARRAY_MULTI
  ida: remove abandoned macros
  XArray: Fix incorrect comment in header file
  XArray: Fix xas_pause for large multi-index entries
  XArray: Fix xa_find_next for large multi-index entries
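For context on the first bullet: a multi-index ("multi-order") entry occupies a power-of-two aligned range of indices. A sketch of the affected case, using the test suite's xa_store_order() helper (the index and order chosen here are illustrative, not taken from the patch):

    /* One order-28 entry covers indices [0, 1 << 28) -- well past the
     * 2^26 boundary where xa_find_next() and xas_pause() went wrong. */
    DEFINE_XARRAY(array);
    unsigned long index = 1;

    xa_store_order(&array, 0, 28, xa_mk_index(0), GFP_KERNEL);
    /* Before the fixes, a search or a paused walk starting inside such
     * an entry could miscompute the next index to visit. */
    xa_find_after(&array, &index, ULONG_MAX, XA_PRESENT);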
include/linux/xarray.h
@@ -32,8 +32,8 @@
  * The following internal entries have a special meaning:
  *
  * 0-62: Sibling entries
- * 256: Zero entry
- * 257: Retry entry
+ * 256: Retry entry
+ * 257: Zero entry
  *
  * Errors are also represented as internal entries, but use the negative
  * space (-4094 to -2). They're never stored in the slots array; only
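The swapped comment lines now match the actual definitions: internal entries are pointers with the bottom two bits set to 0b10, and the retry/zero values are 256 and 257 respectively (quoted from memory of xarray.h; verify against the tree):

    static inline void *xa_mk_internal(unsigned long v)
    {
            return (void *)((v << 2) | 2);
    }

    #define XA_RETRY_ENTRY  xa_mk_internal(256)
    #define XA_ZERO_ENTRY   xa_mk_internal(257)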
@@ -1648,6 +1648,7 @@ static inline void *xas_next_marked(struct xa_state *xas, unsigned long max,
 				     xa_mark_t mark)
 {
 	struct xa_node *node = xas->xa_node;
+	void *entry;
 	unsigned int offset;
 
 	if (unlikely(xas_not_node(node) || node->shift))
@@ -1659,7 +1660,10 @@ static inline void *xas_next_marked(struct xa_state *xas, unsigned long max,
 		return NULL;
 	if (offset == XA_CHUNK_SIZE)
 		return xas_find_marked(xas, max, mark);
-	return xa_entry(xas->xa, node, offset);
+	entry = xa_entry(xas->xa, node, offset);
+	if (!entry)
+		return xas_find_marked(xas, max, mark);
+	return entry;
 }
 
 /*
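Why a bare NULL return was a bug: xas_for_each_marked() uses the returned entry as its loop condition (the macro from xarray.h, quoted from memory):

    #define xas_for_each_marked(xas, entry, max, mark) \
            for (entry = xas_find_marked(xas, max, mark); entry; \
                 entry = xas_next_marked(xas, max, mark))

So if xas_next_marked() landed on a slot whose contents had just been erased, it returned NULL and the walk terminated, even though later indices still carried the mark. Falling back to xas_find_marked() resumes the search instead.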
lib/radix-tree.c
@@ -55,14 +55,6 @@ struct kmem_cache *radix_tree_node_cachep;
 					  RADIX_TREE_MAP_SHIFT))
 #define IDR_PRELOAD_SIZE	(IDR_MAX_PATH * 2 - 1)
 
-/*
- * The IDA is even shorter since it uses a bitmap at the last level.
- */
-#define IDA_INDEX_BITS		(8 * sizeof(int) - 1 - ilog2(IDA_BITMAP_BITS))
-#define IDA_MAX_PATH		(DIV_ROUND_UP(IDA_INDEX_BITS, \
-						RADIX_TREE_MAP_SHIFT))
-#define IDA_PRELOAD_SIZE	(IDA_MAX_PATH * 2 - 1)
-
 /*
  * Per-cpu pool of preloaded nodes
  */
lib/test_xarray.c
@@ -12,6 +12,9 @@
 static unsigned int tests_run;
 static unsigned int tests_passed;
 
+static const unsigned int order_limit =
+		IS_ENABLED(CONFIG_XARRAY_MULTI) ? BITS_PER_LONG : 1;
+
 #ifndef XA_DEBUG
 # ifdef __KERNEL__
 void xa_dump(const struct xarray *xa) { }
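A standalone illustration of what order_limit bounds (plain C; the names are local to this sketch): an order-k entry spans 2^k indices, and without CONFIG_XARRAY_MULTI only order-0 entries exist, so the multi-order test loops collapse to a single iteration.

    #include <stdio.h>

    #define BITS_PER_LONG (8 * (int)sizeof(long))

    int main(void)
    {
            int multi = 1; /* stand-in for IS_ENABLED(CONFIG_XARRAY_MULTI) */
            unsigned int order_limit = multi ? BITS_PER_LONG : 1;
            unsigned int order;

            for (order = 0; order < order_limit && order < 32; order += 8)
                    printf("order %2u covers %lu indices\n", order, 1UL << order);
            return 0;
    }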
@@ -959,6 +962,20 @@ static noinline void check_multi_find_2(struct xarray *xa)
 	}
 }
 
+static noinline void check_multi_find_3(struct xarray *xa)
+{
+	unsigned int order;
+
+	for (order = 5; order < order_limit; order++) {
+		unsigned long index = 1UL << (order - 5);
+
+		XA_BUG_ON(xa, !xa_empty(xa));
+		xa_store_order(xa, 0, order - 4, xa_mk_index(0), GFP_KERNEL);
+		XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT));
+		xa_erase_index(xa, 0);
+	}
+}
+
 static noinline void check_find_1(struct xarray *xa)
 {
 	unsigned long i, j, k;
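To see what one iteration of check_multi_find_3() exercises, take order = 9 (a hypothetical walkthrough in the same test-suite idiom; xa_store_order() and xa_erase_index() are the suite's helpers):

    /* order = 9: store an order-5 entry at 0, covering indices [0, 32) */
    xa_store_order(xa, 0, 5, xa_mk_index(0), GFP_KERNEL);

    /* Search strictly after index 16, which lies inside that entry.
     * The entry covering 16 must not be reported again, and nothing
     * else is stored, so the result must be NULL. Before the fix,
     * xa_find_after() could wrongly return the same entry. */
    index = 1UL << 4;
    XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT));
    xa_erase_index(xa, 0);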
@@ -1081,6 +1098,7 @@ static noinline void check_find(struct xarray *xa)
 	for (i = 2; i < 10; i++)
 		check_multi_find_1(xa, i);
 	check_multi_find_2(xa);
+	check_multi_find_3(xa);
 }
 
 /* See find_swap_entry() in mm/shmem.c */
@@ -1138,6 +1156,42 @@ static noinline void check_find_entry(struct xarray *xa)
 	XA_BUG_ON(xa, !xa_empty(xa));
 }
 
+static noinline void check_pause(struct xarray *xa)
+{
+	XA_STATE(xas, xa, 0);
+	void *entry;
+	unsigned int order;
+	unsigned long index = 1;
+	unsigned int count = 0;
+
+	for (order = 0; order < order_limit; order++) {
+		XA_BUG_ON(xa, xa_store_order(xa, index, order,
+					xa_mk_index(index), GFP_KERNEL));
+		index += 1UL << order;
+	}
+
+	rcu_read_lock();
+	xas_for_each(&xas, entry, ULONG_MAX) {
+		XA_BUG_ON(xa, entry != xa_mk_index(1UL << count));
+		count++;
+	}
+	rcu_read_unlock();
+	XA_BUG_ON(xa, count != order_limit);
+
+	count = 0;
+	xas_set(&xas, 0);
+	rcu_read_lock();
+	xas_for_each(&xas, entry, ULONG_MAX) {
+		XA_BUG_ON(xa, entry != xa_mk_index(1UL << count));
+		count++;
+		xas_pause(&xas);
+	}
+	rcu_read_unlock();
+	XA_BUG_ON(xa, count != order_limit);
+
+	xa_destroy(xa);
+}
+
 static noinline void check_move_tiny(struct xarray *xa)
 {
 	XA_STATE(xas, xa, 0);
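check_pause() calls xas_pause() after every entry, which is the stress version of the usual pattern: pause, drop the lock, and resume without revisiting or skipping entries. A sketch of typical use, assumed from the XArray documentation's conventions rather than taken from this patch:

    XA_STATE(xas, &array, 0);
    void *entry;

    rcu_read_lock();
    xas_for_each(&xas, entry, ULONG_MAX) {
            if (xas_retry(&xas, entry))
                    continue;
            /* ... process entry ... */
            if (need_resched()) {
                    xas_pause(&xas);        /* remember where we were */
                    rcu_read_unlock();
                    cond_resched();
                    rcu_read_lock();        /* walk resumes at the next index */
            }
    }
    rcu_read_unlock();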
@@ -1646,6 +1700,7 @@ static int xarray_checks(void)
 	check_xa_alloc();
 	check_find(&array);
 	check_find_entry(&array);
+	check_pause(&array);
 	check_account(&array);
 	check_destroy(&array);
 	check_move(&array);
lib/xarray.c
@@ -970,7 +970,7 @@ void xas_pause(struct xa_state *xas)
 
 	xas->xa_node = XAS_RESTART;
 	if (node) {
-		unsigned int offset = xas->xa_offset;
+		unsigned long offset = xas->xa_offset;
 		while (++offset < XA_CHUNK_SIZE) {
 			if (!xa_is_sibling(xa_entry(xas->xa, node, offset)))
 				break;
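The one-word type change matters because of C integer promotion: with a 32-bit offset, the index adjustment that follows this loop is computed in 32 bits before being added to the 64-bit xa_index, so for nodes with a large shift (the big multi-index entries from the commit message) the shifted value wraps and the walk resumes at the wrong index. A minimal standalone demonstration on an LP64 system:

    #include <stdio.h>

    int main(void)
    {
            unsigned int narrow = 5;        /* like the old 32-bit offset */
            unsigned long wide = 5;         /* like the fixed unsigned long */
            unsigned int shift = 30;        /* a large node->shift */

            /* Shift happens in 32 bits: bits above bit 31 are lost. */
            unsigned long bad = narrow << shift;
            /* Shift happens in 64 bits: the full value survives. */
            unsigned long good = wide << shift;

            printf("32-bit: %#lx, 64-bit: %#lx\n", bad, good);
            return 0;
    }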
@@ -1208,6 +1208,8 @@ void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
 		}
 
 		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
+		if (!entry && !(xa_track_free(xas->xa) && mark == XA_FREE_MARK))
+			continue;
 		if (!xa_is_node(entry))
 			return entry;
 		xas->xa_node = xa_to_node(entry);
@@ -1836,10 +1838,11 @@ static bool xas_sibling(struct xa_state *xas)
 	struct xa_node *node = xas->xa_node;
 	unsigned long mask;
 
-	if (!node)
+	if (!IS_ENABLED(CONFIG_XARRAY_MULTI) || !node)
 		return false;
 	mask = (XA_CHUNK_SIZE << node->shift) - 1;
-	return (xas->xa_index & mask) > (xas->xa_offset << node->shift);
+	return (xas->xa_index & mask) >
+		((unsigned long)xas->xa_offset << node->shift);
 }
 
 /**
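Two independent improvements share this hunk: the cast widens the shift to 64 bits (the same promotion pitfall fixed in xas_pause() above), and the IS_ENABLED() test makes the condition compile-time false on !CONFIG_XARRAY_MULTI kernels, so the compiler reduces xas_sibling() to "return false" and discards the dead body. A userspace sketch of that elision pattern (all names local to the sketch):

    #include <stdbool.h>

    /* Stand-in for IS_ENABLED(CONFIG_XARRAY_MULTI); in the kernel this
     * is a compile-time constant derived from the config. */
    #define MULTI_ENABLED 0

    static bool is_sibling(unsigned long index, unsigned char offset,
                           unsigned int shift)
    {
            unsigned long mask;

            if (!MULTI_ENABLED)
                    return false;   /* everything below is dead code */
            mask = (64UL << shift) - 1;     /* 64 stands in for XA_CHUNK_SIZE */
            return (index & mask) > ((unsigned long)offset << shift);
    }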
tools/testing/radix-tree/Makefile
@@ -7,8 +7,8 @@ LDLIBS+= -lpthread -lurcu
 TARGETS = main idr-test multiorder xarray
 CORE_OFILES := xarray.o radix-tree.o idr.o linux.o test.o find_bit.o bitmap.o
 OFILES = main.o $(CORE_OFILES) regression1.o regression2.o regression3.o \
-	 regression4.o \
-	 tag_check.o multiorder.o idr-test.o iteration_check.o benchmark.o
+	 regression4.o tag_check.o multiorder.o idr-test.o iteration_check.o \
+	 iteration_check_2.o benchmark.o
 
 ifndef SHIFT
 SHIFT=3
tools/testing/radix-tree/iteration_check_2.c (new file, 87 lines)
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * iteration_check_2.c: Check that deleting a tagged entry doesn't cause
+ * an RCU walker to finish early.
+ * Copyright (c) 2020 Oracle
+ * Author: Matthew Wilcox <willy@infradead.org>
+ */
+#include <pthread.h>
+#include "test.h"
+
+static volatile bool test_complete;
+
+static void *iterator(void *arg)
+{
+	XA_STATE(xas, arg, 0);
+	void *entry;
+
+	rcu_register_thread();
+
+	while (!test_complete) {
+		xas_set(&xas, 0);
+		rcu_read_lock();
+		xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
+			;
+		rcu_read_unlock();
+		assert(xas.xa_index >= 100);
+	}
+
+	rcu_unregister_thread();
+	return NULL;
+}
+
+static void *throbber(void *arg)
+{
+	struct xarray *xa = arg;
+
+	rcu_register_thread();
+
+	while (!test_complete) {
+		int i;
+
+		for (i = 0; i < 100; i++) {
+			xa_store(xa, i, xa_mk_value(i), GFP_KERNEL);
+			xa_set_mark(xa, i, XA_MARK_0);
+		}
+		for (i = 0; i < 100; i++)
+			xa_erase(xa, i);
+	}
+
+	rcu_unregister_thread();
+	return NULL;
+}
+
+void iteration_test2(unsigned test_duration)
+{
+	pthread_t threads[2];
+	DEFINE_XARRAY(array);
+	int i;
+
+	printv(1, "Running iteration test 2 for %d seconds\n", test_duration);
+
+	test_complete = false;
+
+	xa_store(&array, 100, xa_mk_value(100), GFP_KERNEL);
+	xa_set_mark(&array, 100, XA_MARK_0);
+
+	if (pthread_create(&threads[0], NULL, iterator, &array)) {
+		perror("create iterator thread");
+		exit(1);
+	}
+	if (pthread_create(&threads[1], NULL, throbber, &array)) {
+		perror("create throbber thread");
+		exit(1);
+	}
+
+	sleep(test_duration);
+	test_complete = true;
+
+	for (i = 0; i < 2; i++) {
+		if (pthread_join(threads[i], NULL)) {
+			perror("pthread_join");
+			exit(1);
+		}
+	}
+
+	xa_destroy(&array);
+}
tools/testing/radix-tree/linux.c
@@ -19,37 +19,44 @@ int test_verbose;
 
 struct kmem_cache {
 	pthread_mutex_t lock;
-	int size;
+	unsigned int size;
+	unsigned int align;
 	int nr_objs;
 	void *objs;
 	void (*ctor)(void *);
 };
 
-void *kmem_cache_alloc(struct kmem_cache *cachep, int flags)
+void *kmem_cache_alloc(struct kmem_cache *cachep, int gfp)
 {
-	struct radix_tree_node *node;
+	void *p;
 
-	if (!(flags & __GFP_DIRECT_RECLAIM))
+	if (!(gfp & __GFP_DIRECT_RECLAIM))
 		return NULL;
 
 	pthread_mutex_lock(&cachep->lock);
 	if (cachep->nr_objs) {
+		struct radix_tree_node *node = cachep->objs;
 		cachep->nr_objs--;
-		node = cachep->objs;
 		cachep->objs = node->parent;
 		pthread_mutex_unlock(&cachep->lock);
 		node->parent = NULL;
+		p = node;
 	} else {
 		pthread_mutex_unlock(&cachep->lock);
-		node = malloc(cachep->size);
+		if (cachep->align)
+			posix_memalign(&p, cachep->align, cachep->size);
+		else
+			p = malloc(cachep->size);
 		if (cachep->ctor)
-			cachep->ctor(node);
+			cachep->ctor(p);
+		else if (gfp & __GFP_ZERO)
+			memset(p, 0, cachep->size);
 	}
 
 	uatomic_inc(&nr_allocated);
 	if (kmalloc_verbose)
-		printf("Allocating %p from slab\n", node);
-	return node;
+		printf("Allocating %p from slab\n", p);
+	return p;
 }
 
 void kmem_cache_free(struct kmem_cache *cachep, void *objp)
@@ -59,7 +66,7 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 	if (kmalloc_verbose)
 		printf("Freeing %p to slab\n", objp);
 	pthread_mutex_lock(&cachep->lock);
-	if (cachep->nr_objs > 10) {
+	if (cachep->nr_objs > 10 || cachep->align) {
 		memset(objp, POISON_FREE, cachep->size);
 		free(objp);
 	} else {
@@ -98,13 +105,14 @@ void kfree(void *p)
 }
 
 struct kmem_cache *
-kmem_cache_create(const char *name, size_t size, size_t offset,
-	unsigned long flags, void (*ctor)(void *))
+kmem_cache_create(const char *name, unsigned int size, unsigned int align,
+		unsigned int flags, void (*ctor)(void *))
 {
 	struct kmem_cache *ret = malloc(sizeof(*ret));
 
 	pthread_mutex_init(&ret->lock, NULL);
 	ret->size = size;
+	ret->align = align;
 	ret->nr_objs = 0;
 	ret->objs = NULL;
 	ret->ctor = ctor;
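The shim now honours the alignment passed to kmem_cache_create() by allocating through posix_memalign(). A minimal standalone check of that standard POSIX call (align must be a power of two and a multiple of sizeof(void *)):

    #define _POSIX_C_SOURCE 200112L
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            void *p = NULL;
            size_t align = 128, size = 200;

            if (posix_memalign(&p, align, size) != 0)
                    return 1;
            /* Remainder is 0: the pointer is 128-byte aligned. */
            printf("%p %% %zu = %zu\n", p, align,
                   (size_t)((uintptr_t)p % align));
            free(p);
            return 0;
    }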
tools/testing/radix-tree/linux/slab.h
@@ -20,8 +20,8 @@ static inline void *kzalloc(size_t size, gfp_t gfp)
 void *kmem_cache_alloc(struct kmem_cache *cachep, int flags);
 void kmem_cache_free(struct kmem_cache *cachep, void *objp);
 
-struct kmem_cache *
-kmem_cache_create(const char *name, size_t size, size_t offset,
-	unsigned long flags, void (*ctor)(void *));
+struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
+			unsigned int align, unsigned int flags,
+			void (*ctor)(void *));
 
 #endif /* SLAB_H */
tools/testing/radix-tree/main.c
@@ -311,6 +311,7 @@ int main(int argc, char **argv)
 	regression4_test();
 	iteration_test(0, 10 + 90 * long_run);
 	iteration_test(7, 10 + 90 * long_run);
+	iteration_test2(10 + 90 * long_run);
 	single_thread_tests(long_run);
 
 	/* Free any remaining preallocated nodes */
tools/testing/radix-tree/test.h
@@ -34,6 +34,7 @@ void xarray_tests(void);
 void tag_check(void);
 void multiorder_checks(void);
 void iteration_test(unsigned order, unsigned duration);
+void iteration_test2(unsigned duration);
 void benchmark(void);
 void idr_checks(void);
 void ida_tests(void);