RDMA/rxe: Let pools support both keys and indices
Allow both indices and keys to exist for objects in pools. Previously
you were limited to one or the other.

This is required for later implementing rxe memory windows.

Link: https://lore.kernel.org/r/20201216231550.27224-3-rpearson@hpe.com
Signed-off-by: Bob Pearson <rpearson@hpe.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 1d11c1b7f9
commit c06ee3a014
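Before the diff: the core of the change is that the per-pool lookup state is split into an index sub-struct (allocation bitmap plus red-black tree) and a key sub-struct (red-black tree plus key offset/size), and each pool entry gets its own rb-tree node for each, so one object can be indexed and keyed at the same time. Below is a rough userspace-only sketch of that layout; rb_node and rb_root are stand-ins for the kernel types, the struct names are illustrative, and the field list is abbreviated (see the struct rxe_pool_entry and struct rxe_pool hunks at the end of the diff for the real definitions):

/* Userspace sketch only -- not the kernel code. */
#include <stddef.h>
#include <stdio.h>

struct rb_node { struct rb_node *left, *right; };   /* stand-in for the kernel type */
struct rb_root { struct rb_node *node; };

struct pool_entry {
        struct rb_node key_node;        /* links the entry into the pool's key tree */
        struct rb_node index_node;      /* links the entry into the pool's index tree */
        unsigned int index;
};

struct pool {
        struct {                        /* only used if indexed */
                struct rb_root tree;
                unsigned long *table;   /* bitmap of allocated indices */
                unsigned int last, max_index, min_index;
        } index;

        struct {                        /* only used if keyed */
                struct rb_root tree;
                size_t key_offset;      /* where the key lives inside an object */
                size_t key_size;
        } key;
};

int main(void)
{
        /* A single entry now carries both tree links. */
        printf("key_node at offset %zu, index_node at offset %zu\n",
               offsetof(struct pool_entry, key_node),
               offsetof(struct pool_entry, index_node));
        return 0;
}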
@@ -94,18 +94,18 @@ static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
 		goto out;
 	}
 
-	pool->max_index = max;
-	pool->min_index = min;
+	pool->index.max_index = max;
+	pool->index.min_index = min;
 
 	size = BITS_TO_LONGS(max - min + 1) * sizeof(long);
-	pool->table = kmalloc(size, GFP_KERNEL);
-	if (!pool->table) {
+	pool->index.table = kmalloc(size, GFP_KERNEL);
+	if (!pool->index.table) {
 		err = -ENOMEM;
 		goto out;
 	}
 
-	pool->table_size = size;
-	bitmap_zero(pool->table, max - min + 1);
+	pool->index.table_size = size;
+	bitmap_zero(pool->index.table, max - min + 1);
 
 out:
 	return err;
@@ -127,7 +127,8 @@ int rxe_pool_init(
 	pool->max_elem = max_elem;
 	pool->elem_size = ALIGN(size, RXE_POOL_ALIGN);
 	pool->flags = rxe_type_info[type].flags;
-	pool->tree = RB_ROOT;
+	pool->index.tree = RB_ROOT;
+	pool->key.tree = RB_ROOT;
 	pool->cleanup = rxe_type_info[type].cleanup;
 
 	atomic_set(&pool->num_elem, 0);
@@ -145,8 +146,8 @@ int rxe_pool_init(
 	}
 
 	if (rxe_type_info[type].flags & RXE_POOL_KEY) {
-		pool->key_offset = rxe_type_info[type].key_offset;
-		pool->key_size = rxe_type_info[type].key_size;
+		pool->key.key_offset = rxe_type_info[type].key_offset;
+		pool->key.key_size = rxe_type_info[type].key_size;
 	}
 
 	pool->state = RXE_POOL_STATE_VALID;
@@ -160,7 +161,7 @@ static void rxe_pool_release(struct kref *kref)
 	struct rxe_pool *pool = container_of(kref, struct rxe_pool, ref_cnt);
 
 	pool->state = RXE_POOL_STATE_INVALID;
-	kfree(pool->table);
+	kfree(pool->index.table);
 }
 
 static void rxe_pool_put(struct rxe_pool *pool)
@@ -185,27 +186,27 @@ void rxe_pool_cleanup(struct rxe_pool *pool)
 static u32 alloc_index(struct rxe_pool *pool)
 {
 	u32 index;
-	u32 range = pool->max_index - pool->min_index + 1;
+	u32 range = pool->index.max_index - pool->index.min_index + 1;
 
-	index = find_next_zero_bit(pool->table, range, pool->last);
+	index = find_next_zero_bit(pool->index.table, range, pool->index.last);
 	if (index >= range)
-		index = find_first_zero_bit(pool->table, range);
+		index = find_first_zero_bit(pool->index.table, range);
 
 	WARN_ON_ONCE(index >= range);
-	set_bit(index, pool->table);
-	pool->last = index;
-	return index + pool->min_index;
+	set_bit(index, pool->index.table);
+	pool->index.last = index;
+	return index + pool->index.min_index;
 }
 
 static void insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new)
 {
-	struct rb_node **link = &pool->tree.rb_node;
+	struct rb_node **link = &pool->index.tree.rb_node;
 	struct rb_node *parent = NULL;
 	struct rxe_pool_entry *elem;
 
 	while (*link) {
 		parent = *link;
-		elem = rb_entry(parent, struct rxe_pool_entry, node);
+		elem = rb_entry(parent, struct rxe_pool_entry, index_node);
 
 		if (elem->index == new->index) {
 			pr_warn("element already exists!\n");
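alloc_index() is behaviourally unchanged by this hunk; only the fields it touches move under pool->index. For reference, the round-robin policy it implements is sketched below as a standalone program. The byte-per-bit "bitmap", the helper names and the constants are illustrative stand-ins for the kernel's find_next_zero_bit()/find_first_zero_bit()/set_bit(), not the real API:

#include <stdio.h>

#define RANGE 8 /* plays the role of max_index - min_index + 1 */

/* Simplified stand-in for find_next_zero_bit(): first clear slot at or
 * after 'start', or 'size' if none. */
static unsigned int next_zero_bit(const unsigned char *bits, unsigned int size,
                                  unsigned int start)
{
        for (unsigned int i = start; i < size; i++)
                if (!bits[i])
                        return i;
        return size;
}

/* Same policy as rxe's alloc_index(): resume scanning after the last
 * allocation and wrap to the start of the bitmap when the tail is full.
 * Like the kernel code, this assumes at least one slot is still free. */
static unsigned int alloc_index(unsigned char *bits, unsigned int *last,
                                unsigned int min_index)
{
        unsigned int index = next_zero_bit(bits, RANGE, *last);

        if (index >= RANGE)
                index = next_zero_bit(bits, RANGE, 0);

        bits[index] = 1;        /* set_bit() in the kernel */
        *last = index;
        return index + min_index;
}

int main(void)
{
        unsigned char bits[RANGE] = { 0 };
        unsigned int last = 0;

        for (int i = 0; i < 5; i++)
                printf("allocated index %u\n", alloc_index(bits, &last, 0x100));
        return 0;
}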
@@ -218,25 +219,25 @@ static void insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new)
 		link = &(*link)->rb_right;
 	}
 
-	rb_link_node(&new->node, parent, link);
-	rb_insert_color(&new->node, &pool->tree);
+	rb_link_node(&new->index_node, parent, link);
+	rb_insert_color(&new->index_node, &pool->index.tree);
 out:
 	return;
 }
 
 static void insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new)
 {
-	struct rb_node **link = &pool->tree.rb_node;
+	struct rb_node **link = &pool->key.tree.rb_node;
 	struct rb_node *parent = NULL;
 	struct rxe_pool_entry *elem;
 	int cmp;
 
 	while (*link) {
 		parent = *link;
-		elem = rb_entry(parent, struct rxe_pool_entry, node);
+		elem = rb_entry(parent, struct rxe_pool_entry, key_node);
 
-		cmp = memcmp((u8 *)elem + pool->key_offset,
-			     (u8 *)new + pool->key_offset, pool->key_size);
+		cmp = memcmp((u8 *)elem + pool->key.key_offset,
+			     (u8 *)new + pool->key.key_offset, pool->key.key_size);
 
 		if (cmp == 0) {
 			pr_warn("key already exists!\n");
@@ -249,8 +250,8 @@ static void insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new)
 		link = &(*link)->rb_right;
 	}
 
-	rb_link_node(&new->node, parent, link);
-	rb_insert_color(&new->node, &pool->tree);
+	rb_link_node(&new->key_node, parent, link);
+	rb_insert_color(&new->key_node, &pool->key.tree);
 out:
 	return;
 }
@@ -262,7 +263,7 @@ void rxe_add_key(void *arg, void *key)
 	unsigned long flags;
 
 	write_lock_irqsave(&pool->pool_lock, flags);
-	memcpy((u8 *)elem + pool->key_offset, key, pool->key_size);
+	memcpy((u8 *)elem + pool->key.key_offset, key, pool->key.key_size);
 	insert_key(pool, elem);
 	write_unlock_irqrestore(&pool->pool_lock, flags);
 }
@@ -274,7 +275,7 @@ void rxe_drop_key(void *arg)
 	unsigned long flags;
 
 	write_lock_irqsave(&pool->pool_lock, flags);
-	rb_erase(&elem->node, &pool->tree);
+	rb_erase(&elem->key_node, &pool->key.tree);
 	write_unlock_irqrestore(&pool->pool_lock, flags);
 }
 
@@ -297,8 +298,8 @@ void rxe_drop_index(void *arg)
 	unsigned long flags;
 
 	write_lock_irqsave(&pool->pool_lock, flags);
-	clear_bit(elem->index - pool->min_index, pool->table);
-	rb_erase(&elem->node, &pool->tree);
+	clear_bit(elem->index - pool->index.min_index, pool->index.table);
+	rb_erase(&elem->index_node, &pool->index.tree);
 	write_unlock_irqrestore(&pool->pool_lock, flags);
 }
 
@@ -402,10 +403,10 @@ void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
 	if (pool->state != RXE_POOL_STATE_VALID)
 		goto out;
 
-	node = pool->tree.rb_node;
+	node = pool->index.tree.rb_node;
 
 	while (node) {
-		elem = rb_entry(node, struct rxe_pool_entry, node);
+		elem = rb_entry(node, struct rxe_pool_entry, index_node);
 
 		if (elem->index > index)
 			node = node->rb_left;
@@ -434,13 +435,13 @@ void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
 	if (pool->state != RXE_POOL_STATE_VALID)
 		goto out;
 
-	node = pool->tree.rb_node;
+	node = pool->key.tree.rb_node;
 
 	while (node) {
-		elem = rb_entry(node, struct rxe_pool_entry, node);
+		elem = rb_entry(node, struct rxe_pool_entry, key_node);
 
-		cmp = memcmp((u8 *)elem + pool->key_offset,
-			     key, pool->key_size);
+		cmp = memcmp((u8 *)elem + pool->key.key_offset,
+			     key, pool->key.key_size);
 
 		if (cmp > 0)
 			node = node->rb_left;
@@ -56,8 +56,11 @@ struct rxe_pool_entry {
 	struct kref ref_cnt;
 	struct list_head list;
 
-	/* only used if indexed or keyed */
-	struct rb_node node;
+	/* only used if keyed */
+	struct rb_node key_node;
+
+	/* only used if indexed */
+	struct rb_node index_node;
 	u32 index;
 };
 
@@ -74,15 +77,22 @@ struct rxe_pool {
 	unsigned int max_elem;
 	atomic_t num_elem;
 
-	/* only used if indexed or keyed */
-	struct rb_root tree;
-	unsigned long *table;
-	size_t table_size;
-	u32 max_index;
-	u32 min_index;
-	u32 last;
-	size_t key_offset;
-	size_t key_size;
+	/* only used if indexed */
+	struct {
+		struct rb_root tree;
+		unsigned long *table;
+		size_t table_size;
+		u32 last;
+		u32 max_index;
+		u32 min_index;
+	} index;
+
+	/* only used if keyed */
+	struct {
+		struct rb_root tree;
+		size_t key_offset;
+		size_t key_size;
+	} key;
 };
 
 /* initialize a pool of objects with given limit on
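Taken together, the two header hunks above are what the rest of the series builds on: a single object can now sit in the pool's index tree and its key tree at the same time. A hypothetical caller might look like the fragment below. This is a sketch, not code from this patch: the struct name and helper functions here are illustrative, the pelem-first layout and rxe_add_index() are assumed from the surrounding rxe code, and error and reference-count handling by the lookup helpers is omitted.

/* Hypothetical object that is both indexed and keyed. */
struct example_obj {
	struct rxe_pool_entry pelem;	/* assumed first member, as the void * pool
					 * helpers expect */
	u8 key[8];
};

static void example_insert(struct example_obj *obj, void *key)
{
	rxe_add_index(obj);	/* allocates an index, inserts into pool->index.tree */
	rxe_add_key(obj, key);	/* copies the key, inserts into pool->key.tree */
}

static struct example_obj *example_lookup(struct rxe_pool *pool,
					  u32 index, void *key)
{
	/* After this patch the same object is reachable both ways. */
	struct example_obj *by_index = rxe_pool_get_index(pool, index);
	struct example_obj *by_key = rxe_pool_get_key(pool, key);

	return by_index ? by_index : by_key;
}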