io_uring/rsrc: merge nodes and io_rsrc_put
struct io_rsrc_node carries a number of resources represented by struct io_rsrc_put. That was handy before for sync overhead amortisation, but all that complexity is gone and nodes are simple and lightweight. Let's allocate a separate node for each resource.

Nodes and io_rsrc_put are not much different in size, and the former are cached, so node allocation should work better. That also removes some overhead for nested iteration in io_rsrc_node_ref_zero() / __io_rsrc_put_work().

Another reason for the patch is that it greatly reduces complexity by moving io_rsrc_node_switch[_start]() inside io_queue_rsrc_removal(), so users don't have to care about it.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/c7d3a45b30cc14cd93700a710dd112edc703db98.1681822823.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
commit c376644fb9 (parent 63fea89027)
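For orientation before the diff itself: the io_uring/rsrc.h hunks further down boil down to the shape change sketched here. This is a simplified, self-contained model (plain C, stand-in types, no kernel headers), not the kernel's exact definitions.

struct list_head { struct list_head *next, *prev; };

/* One queued resource (a file or a buffer) waiting to be put. */
struct io_rsrc_put {
    struct list_head list;      /* only needed for the old chaining scheme */
    unsigned long long tag;
    void *rsrc;
};

/*
 * Before: one node amortised many puts. The first put lived inline in
 * 'item', any further ones were kmalloc'ed and chained on 'item_list'.
 */
struct io_rsrc_node_before {
    struct list_head node;
    int refs;
    struct list_head item_list;
    struct io_rsrc_put item;
    int inline_items;
};

/*
 * After: every queued resource gets its own node (nodes come from a cache,
 * so allocation is cheap), the extra-item list disappears, and a node only
 * ever carries the single inline 'item'.
 */
struct io_rsrc_node_after {
    int refs;
    int inline_items;           /* now only ever 0 or 1 */
    struct list_head node;
    struct io_rsrc_put item;
};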
io_uring/filetable.c:

@@ -80,10 +80,6 @@ static int io_install_fixed_file(struct io_ring_ctx *ctx, struct file *file,
    if (file_slot->file_ptr) {
        struct file *old_file;

        ret = io_rsrc_node_switch_start(ctx);
        if (ret)
            return ret;

        old_file = (struct file *)(file_slot->file_ptr & FFS_MASK);
        ret = io_queue_rsrc_removal(ctx->file_data, slot_index, old_file);
        if (ret)

@@ -91,7 +87,6 @@ static int io_install_fixed_file(struct io_ring_ctx *ctx, struct file *file,

        file_slot->file_ptr = 0;
        io_file_bitmap_clear(&ctx->file_table, slot_index);
        io_rsrc_node_switch(ctx, ctx->file_data);
    }

    ret = io_scm_file_account(ctx, file);

@@ -152,9 +147,6 @@ int io_fixed_fd_remove(struct io_ring_ctx *ctx, unsigned int offset)
        return -ENXIO;
    if (offset >= ctx->nr_user_files)
        return -EINVAL;
    ret = io_rsrc_node_switch_start(ctx);
    if (ret)
        return ret;

    offset = array_index_nospec(offset, ctx->nr_user_files);
    file_slot = io_fixed_file_slot(&ctx->file_table, offset);

@@ -168,7 +160,6 @@ int io_fixed_fd_remove(struct io_ring_ctx *ctx, unsigned int offset)

    file_slot->file_ptr = 0;
    io_file_bitmap_clear(&ctx->file_table, offset);
    io_rsrc_node_switch(ctx, ctx->file_data);
    return 0;
}
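Taken together, the io_install_fixed_file() and io_fixed_fd_remove() hunks show the caller-side effect of the patch: the explicit node-switch bracketing around io_queue_rsrc_removal() goes away. A condensed before/after of the calling pattern, pieced together from the hunks above rather than copied from a checked-out tree:

/* Before: callers prepared a spare node, queued the removal, then switched. */
ret = io_rsrc_node_switch_start(ctx);       /* make sure a backup node exists */
if (ret)
    return ret;
ret = io_queue_rsrc_removal(ctx->file_data, slot_index, old_file);
/* ... error handling and the rest of the update ... */
io_rsrc_node_switch(ctx, ctx->file_data);   /* retire the node holding the put */

/* After: a single call; the node handling now lives inside the helper. */
ret = io_queue_rsrc_removal(ctx->file_data, slot_index, old_file);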
io_uring/rsrc.c:

@@ -153,17 +153,10 @@ static void io_rsrc_put_work_one(struct io_rsrc_data *rsrc_data,
static void __io_rsrc_put_work(struct io_rsrc_node *ref_node)
{
    struct io_rsrc_data *rsrc_data = ref_node->rsrc_data;
    struct io_rsrc_put *prsrc, *tmp;

    if (ref_node->inline_items)
    if (likely(ref_node->inline_items))
        io_rsrc_put_work_one(rsrc_data, &ref_node->item);

    list_for_each_entry_safe(prsrc, tmp, &ref_node->item_list, list) {
        list_del(&prsrc->list);
        io_rsrc_put_work_one(rsrc_data, prsrc);
        kfree(prsrc);
    }

    io_rsrc_node_destroy(rsrc_data->ctx, ref_node);
}

@@ -206,53 +199,29 @@ struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx)
    }

    ref_node->rsrc_data = NULL;
    ref_node->refs = 1;
    INIT_LIST_HEAD(&ref_node->node);
    INIT_LIST_HEAD(&ref_node->item_list);
    ref_node->inline_items = 0;
    ref_node->refs = 1;
    return ref_node;
}

void io_rsrc_node_switch(struct io_ring_ctx *ctx,
                         struct io_rsrc_data *data_to_kill)
    __must_hold(&ctx->uring_lock)
{
    struct io_rsrc_node *node = ctx->rsrc_node;
    struct io_rsrc_node *backup = io_rsrc_node_alloc(ctx);

    if (WARN_ON_ONCE(!backup))
        return;

    node->rsrc_data = data_to_kill;
    list_add_tail(&node->node, &ctx->rsrc_ref_list);
    /* put master ref */
    io_put_rsrc_node(ctx, node);
    ctx->rsrc_node = backup;
}

int __io_rsrc_node_switch_start(struct io_ring_ctx *ctx)
{
    struct io_rsrc_node *node = kzalloc(sizeof(*node), GFP_KERNEL);

    if (!node)
        return -ENOMEM;
    io_alloc_cache_put(&ctx->rsrc_node_cache, &node->cache);
    return 0;
}

__cold static int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
                                      struct io_ring_ctx *ctx)
{
    struct io_rsrc_node *backup;
    DEFINE_WAIT(we);
    int ret;

    /* As we may drop ->uring_lock, other task may have started quiesce */
    /* As We may drop ->uring_lock, other task may have started quiesce */
    if (data->quiesce)
        return -ENXIO;
    ret = io_rsrc_node_switch_start(ctx);
    if (ret)
        return ret;
    io_rsrc_node_switch(ctx, data);

    backup = io_rsrc_node_alloc(ctx);
    if (!backup)
        return -ENOMEM;
    ctx->rsrc_node->rsrc_data = data;
    list_add_tail(&ctx->rsrc_node->node, &ctx->rsrc_ref_list);
    io_put_rsrc_node(ctx, ctx->rsrc_node);
    ctx->rsrc_node = backup;

    if (list_empty(&ctx->rsrc_ref_list))
        return 0;
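The io_rsrc_ref_quiesce() portion of the hunk above interleaves the old prologue, which went through io_rsrc_node_switch_start() and io_rsrc_node_switch(), with the new one. As a reading aid, the new prologue open-codes the switch roughly as follows (reconstructed from the lines shown above; spacing and surrounding code may differ from the actual tree):

    backup = io_rsrc_node_alloc(ctx);
    if (!backup)
        return -ENOMEM;
    ctx->rsrc_node->rsrc_data = data;                   /* table being quiesced */
    list_add_tail(&ctx->rsrc_node->node, &ctx->rsrc_ref_list);
    io_put_rsrc_node(ctx, ctx->rsrc_node);              /* drop the master ref */
    ctx->rsrc_node = backup;                            /* install a fresh node */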
@@ -382,7 +351,6 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
    struct file *file;
    int fd, i, err = 0;
    unsigned int done;
    bool needs_switch = false;

    if (!ctx->file_data)
        return -ENXIO;

@@ -414,7 +382,6 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
                break;
            file_slot->file_ptr = 0;
            io_file_bitmap_clear(&ctx->file_table, i);
            needs_switch = true;
        }
        if (fd != -1) {
            file = fget(fd);

@@ -445,9 +412,6 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
            io_file_bitmap_set(&ctx->file_table, i);
        }
    }

    if (needs_switch)
        io_rsrc_node_switch(ctx, data);
    return done ? done : err;
}

@@ -458,7 +422,6 @@ static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
    u64 __user *tags = u64_to_user_ptr(up->tags);
    struct iovec iov, __user *iovs = u64_to_user_ptr(up->data);
    struct page *last_hpage = NULL;
    bool needs_switch = false;
    __u32 done;
    int i, err;

@@ -498,15 +461,11 @@ static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
                break;
            }
            ctx->user_bufs[i] = ctx->dummy_ubuf;
            needs_switch = true;
        }

        ctx->user_bufs[i] = imu;
        *io_get_tag_slot(ctx->buf_data, i) = tag;
    }

    if (needs_switch)
        io_rsrc_node_switch(ctx, ctx->buf_data);
    return done ? done : err;
}

@@ -515,15 +474,11 @@ static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
                                     unsigned nr_args)
{
    __u32 tmp;
    int err;

    lockdep_assert_held(&ctx->uring_lock);

    if (check_add_overflow(up->offset, nr_args, &tmp))
        return -EOVERFLOW;
    err = io_rsrc_node_switch_start(ctx);
    if (err)
        return err;

    switch (type) {
    case IORING_RSRC_FILE:
@@ -685,21 +640,21 @@ int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx, void *rsrc)
    struct io_ring_ctx *ctx = data->ctx;
    struct io_rsrc_node *node = ctx->rsrc_node;
    u64 *tag_slot = io_get_tag_slot(data, idx);
    struct io_rsrc_put *prsrc;

    if (!node->inline_items) {
        prsrc = &node->item;
        node->inline_items++;
    } else {
        prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
        if (!prsrc)
            return -ENOMEM;
        list_add(&prsrc->list, &node->item_list);
    ctx->rsrc_node = io_rsrc_node_alloc(ctx);
    if (unlikely(!ctx->rsrc_node)) {
        ctx->rsrc_node = node;
        return -ENOMEM;
    }

    prsrc->tag = *tag_slot;
    node->item.rsrc = rsrc;
    node->item.tag = *tag_slot;
    node->inline_items = 1;
    *tag_slot = 0;
    prsrc->rsrc = rsrc;

    node->rsrc_data = data;
    list_add_tail(&node->node, &ctx->rsrc_ref_list);
    io_put_rsrc_node(ctx, node);
    return 0;
}
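The io_queue_rsrc_removal() hunk above again mixes both versions line by line. Reconstructed from the new-side lines visible there (so blank lines and exact layout may not match the tree), the function after this patch reads roughly as follows:

int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx, void *rsrc)
{
    struct io_ring_ctx *ctx = data->ctx;
    struct io_rsrc_node *node = ctx->rsrc_node;
    u64 *tag_slot = io_get_tag_slot(data, idx);

    /* hand out a fresh node before retiring the current one */
    ctx->rsrc_node = io_rsrc_node_alloc(ctx);
    if (unlikely(!ctx->rsrc_node)) {
        ctx->rsrc_node = node;
        return -ENOMEM;
    }

    /* the retired node carries exactly one resource */
    node->item.rsrc = rsrc;
    node->item.tag = *tag_slot;
    node->inline_items = 1;
    *tag_slot = 0;

    /* what callers previously did via io_rsrc_node_switch() */
    node->rsrc_data = data;
    list_add_tail(&node->node, &ctx->rsrc_ref_list);
    io_put_rsrc_node(ctx, node);
    return 0;
}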
io_uring/rsrc.h:

@@ -18,7 +18,6 @@ enum {
};

struct io_rsrc_put {
    struct list_head list;
    u64 tag;
    union {
        void *rsrc;

@@ -43,17 +42,10 @@ struct io_rsrc_node {
        struct io_cache_entry cache;
        struct io_rsrc_data *rsrc_data;
    };
    struct list_head node;
    int refs;

    /*
     * Keeps a list of struct io_rsrc_put to be completed. Each entry
     * represents one rsrc (e.g. file or buffer), but all of them should've
     * came from the same table and so are of the same type.
     */
    struct list_head item_list;
    struct io_rsrc_put item;
    int inline_items;
    struct list_head node;
    struct io_rsrc_put item;
};

struct io_mapped_ubuf {

@@ -68,11 +60,8 @@ void io_rsrc_put_tw(struct callback_head *cb);
void io_rsrc_node_ref_zero(struct io_rsrc_node *node);
void io_rsrc_put_work(struct work_struct *work);
void io_rsrc_node_destroy(struct io_ring_ctx *ctx, struct io_rsrc_node *ref_node);
int __io_rsrc_node_switch_start(struct io_ring_ctx *ctx);
struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx);
int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx, void *rsrc);
void io_rsrc_node_switch(struct io_ring_ctx *ctx,
                         struct io_rsrc_data *data_to_kill);

int io_import_fixed(int ddir, struct iov_iter *iter,
                    struct io_mapped_ubuf *imu,

@@ -109,13 +98,6 @@ static inline int io_scm_file_account(struct io_ring_ctx *ctx,
    return __io_scm_file_account(ctx, file);
}

static inline int io_rsrc_node_switch_start(struct io_ring_ctx *ctx)
{
    if (unlikely(io_alloc_cache_empty(&ctx->rsrc_node_cache)))
        return __io_rsrc_node_switch_start(ctx);
    return 0;
}

int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
                             unsigned nr_args);
int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
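Putting the struct io_rsrc_node hunk back together: refs, the inline_items marker and the single inline item stay, while item_list and its explanatory comment go away. A reconstruction from the lines shown above (member order as it appears there; the real header uses tab-aligned fields):

struct io_rsrc_node {
    union {
        struct io_cache_entry cache;
        struct io_rsrc_data *rsrc_data;
    };
    int refs;
    int inline_items;
    struct list_head node;
    struct io_rsrc_put item;
};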