mirror of https://github.com/torvalds/linux.git
io_uring: enable LOOKUP_CACHED path resolution for filename lookups
Instead of being pessimistic and assuming that path lookup will block, use
LOOKUP_CACHED to attempt just a cached lookup. This ensures that the fast
path is always done inline, and we only punt to async context if IO is
needed to satisfy the lookup.

For forced nonblock open attempts, mark the file O_NONBLOCK over the actual
->open() call as well. We can safely clear this again before doing
fd_install(), so it'll never be user visible that we fiddled with it.

This greatly improves the performance of file open where the dentry is
already cached:

Cached      5.10-git        5.10-git+LOOKUP_CACHED  Speedup
---------------------------------------------------------------
33%         1,014,975       900,474                 1.1x
89%         545,466         292,937                 1.9x
100%        435,636         151,475                 2.9x

The more cache hot we are, the more the inline LOOKUP_CACHED optimization
helps. This is unsurprising and expected, as a thread offload becomes a
more dominant part of the total overhead. If we look at io_uring tracing,
doing an IORING_OP_OPENAT on a file that isn't in the dentry cache will
yield:

275.550481: io_uring_create: ring 00000000ddda6278, fd 3 sq size 8, cq size 16, flags 0
275.550491: io_uring_submit_sqe: ring 00000000ddda6278, op 18, data 0x0, non block 1, sq_thread 0
275.550498: io_uring_queue_async_work: ring 00000000ddda6278, request 00000000c0267d17, flags 69760, normal queue, work 000000003d683991
275.550502: io_uring_cqring_wait: ring 00000000ddda6278, min_events 1
275.550556: io_uring_complete: ring 00000000ddda6278, user_data 0x0, result 4

which shows a failed nonblock lookup, then a punt to the worker, and then
completion with fd == 4. This takes 65 usec in total. Re-running the same
test case again:

281.253956: io_uring_create: ring 0000000008207252, fd 3 sq size 8, cq size 16, flags 0
281.253967: io_uring_submit_sqe: ring 0000000008207252, op 18, data 0x0, non block 1, sq_thread 0
281.253973: io_uring_complete: ring 0000000008207252, user_data 0x0, result 4

shows the same request completing inline, also returning fd == 4. This
takes 6 usec.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 3a81fd0204
parent b2d86c7cec
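For context on the diff below, the use case being optimized is a plain async
open submitted from userspace. A minimal sketch using liburing's
io_uring_prep_openat2() helper follows; it is illustrative only (the queue
depth of 8 and the /etc/hostname path are arbitrary choices, and it assumes
liburing and a kernel that supports IORING_OP_OPENAT2). With this patch, an
open whose dentry is already cached is expected to complete inline rather
than being punted to io-wq.

/* Illustrative sketch: a single async open via IORING_OP_OPENAT2.
 * Assumes liburing is installed; queue depth and path are arbitrary.
 * On a kernel with this change, a dentry-cache hit should complete
 * inline instead of going through the io-wq worker pool.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct open_how how;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	/* Plain read-only open, no special resolve flags */
	memset(&how, 0, sizeof(how));
	how.flags = O_RDONLY;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_openat2(sqe, AT_FDCWD, "/etc/hostname", &how);

	io_uring_submit(&ring);
	if (io_uring_wait_cqe(&ring, &cqe) == 0) {
		if (cqe->res >= 0) {
			printf("opened fd %d\n", cqe->res);
			close(cqe->res);
		} else {
			printf("open failed: %d\n", cqe->res);
		}
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}

With the io_uring tracepoints enabled as in the commit message, the cached
case should show io_uring_submit_sqe followed directly by io_uring_complete,
with no io_uring_queue_async_work event in between.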
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -489,7 +489,6 @@ struct io_sr_msg {
 struct io_open {
 	struct file *file;
 	int dfd;
-	bool ignore_nonblock;
 	struct filename *filename;
 	struct open_how how;
 	unsigned long nofile;
@@ -4054,7 +4053,6 @@ static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 		return ret;
 	}
 	req->open.nofile = rlimit(RLIMIT_NOFILE);
-	req->open.ignore_nonblock = false;
 	req->flags |= REQ_F_NEED_CLEANUP;
 	return 0;
 }
@@ -4096,39 +4094,48 @@ static int io_openat2(struct io_kiocb *req, bool force_nonblock)
 {
 	struct open_flags op;
 	struct file *file;
+	bool nonblock_set;
+	bool resolve_nonblock;
 	int ret;
 
-	if (force_nonblock && !req->open.ignore_nonblock)
-		return -EAGAIN;
-
 	ret = build_open_flags(&req->open.how, &op);
 	if (ret)
 		goto err;
+	nonblock_set = op.open_flag & O_NONBLOCK;
+	resolve_nonblock = req->open.how.resolve & RESOLVE_CACHED;
+	if (force_nonblock) {
+		/*
+		 * Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE open,
+		 * it'll always -EAGAIN
+		 */
+		if (req->open.how.flags & (O_TRUNC | O_CREAT | O_TMPFILE))
+			return -EAGAIN;
+		op.lookup_flags |= LOOKUP_CACHED;
+		op.open_flag |= O_NONBLOCK;
+	}
 
 	ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile);
 	if (ret < 0)
 		goto err;
 
 	file = do_filp_open(req->open.dfd, req->open.filename, &op);
+	/* only retry if RESOLVE_CACHED wasn't already set by application */
+	if ((!resolve_nonblock && force_nonblock) && file == ERR_PTR(-EAGAIN)) {
+		/*
+		 * We could hang on to this 'fd', but seems like marginal
+		 * gain for something that is now known to be a slower path.
+		 * So just put it, and we'll get a new one when we retry.
+		 */
+		put_unused_fd(ret);
+		return -EAGAIN;
+	}
+
 	if (IS_ERR(file)) {
 		put_unused_fd(ret);
 		ret = PTR_ERR(file);
-		/*
-		 * A work-around to ensure that /proc/self works that way
-		 * that it should - if we get -EOPNOTSUPP back, then assume
-		 * that proc_self_get_link() failed us because we're in async
-		 * context. We should be safe to retry this from the task
-		 * itself with force_nonblock == false set, as it should not
-		 * block on lookup. Would be nice to know this upfront and
-		 * avoid the async dance, but doesn't seem feasible.
-		 */
-		if (ret == -EOPNOTSUPP && io_wq_current_is_worker()) {
-			req->open.ignore_nonblock = true;
-			refcount_inc(&req->refs);
-			io_req_task_queue(req);
-			return 0;
-		}
 	} else {
+		if (force_nonblock && !nonblock_set)
+			file->f_flags &= ~O_NONBLOCK;
 		fsnotify_open(file);
 		fd_install(ret, file);
 	}
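A note on the "only retry if RESOLVE_CACHED wasn't already set by application"
branch above: RESOLVE_CACHED is the same resolve flag userspace can pass in
struct open_how, so if the application itself asked for a cached-only lookup,
io_uring surfaces the -EAGAIN instead of retrying the open from a worker. A
minimal sketch of that userspace side via the raw openat2(2) syscall follows
(glibc has no wrapper, hence syscall(); it assumes SYS_openat2 and
RESOLVE_CACHED are present in your headers, i.e. roughly 5.12-era or newer,
and the path is illustrative only).

/* Sketch of the userspace-visible side of RESOLVE_CACHED: an openat2(2)
 * call that only succeeds from the dentry cache. -EAGAIN means the lookup
 * would need IO, and the caller decides how to fall back.
 * Assumes SYS_openat2 and RESOLVE_CACHED exist in the installed headers.
 */
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/openat2.h>

static int openat2_cached(int dfd, const char *path)
{
	struct open_how how;

	memset(&how, 0, sizeof(how));
	how.flags = O_RDONLY;
	how.resolve = RESOLVE_CACHED;
	return syscall(SYS_openat2, dfd, path, &how, sizeof(how));
}

int main(void)
{
	int fd = openat2_cached(AT_FDCWD, "/etc/hostname");

	if (fd >= 0) {
		printf("cached open succeeded, fd %d\n", fd);
		close(fd);
	} else if (errno == EAGAIN) {
		/* Not fully cached: retry without RESOLVE_CACHED, or punt
		 * the blocking open to a thread of our own choosing. */
		printf("lookup would block, falling back\n");
	} else {
		perror("openat2");
	}
	return 0;
}

Per the retry condition in the diff, passing the same flag through
IORING_OP_OPENAT2 means a would-block lookup is reported as -EAGAIN in the
CQE rather than being retried from async context.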