io_uring/net: combine fail handlers

Merge io_send_zc_fail() into io_sendrecv_fail(); this saves a few lines of
code and some headache for the following patch.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/e0eba1d577413aef5602cd45f588b9230207082d.1663668091.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
Pavel Begunkov 2022-09-21 12:17:53 +01:00 committed by Jens Axboe
parent b0e9b5517e
commit c4c0009e0b
3 changed files with 17 additions and 17 deletions

View File

@ -192,6 +192,7 @@ int io_send_prep_async(struct io_kiocb *req)
io = io_msg_alloc_async_prep(req); io = io_msg_alloc_async_prep(req);
if (!io) if (!io)
return -ENOMEM; return -ENOMEM;
io->free_iov = NULL;
ret = move_addr_to_kernel(zc->addr, zc->addr_len, &io->addr); ret = move_addr_to_kernel(zc->addr, zc->addr_len, &io->addr);
return ret; return ret;
} }
@ -208,6 +209,7 @@ static int io_setup_async_addr(struct io_kiocb *req,
io = io_msg_alloc_async(req, issue_flags); io = io_msg_alloc_async(req, issue_flags);
if (!io) if (!io)
return -ENOMEM; return -ENOMEM;
io->free_iov = NULL;
memcpy(&io->addr, addr_storage, sizeof(io->addr)); memcpy(&io->addr, addr_storage, sizeof(io->addr));
return -EAGAIN; return -EAGAIN;
} }
@ -1119,26 +1121,25 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
void io_sendrecv_fail(struct io_kiocb *req) void io_sendrecv_fail(struct io_kiocb *req)
{ {
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
struct io_async_msghdr *io;
int res = req->cqe.res; int res = req->cqe.res;
if (req->flags & REQ_F_PARTIAL_IO) if (req->flags & REQ_F_PARTIAL_IO)
res = sr->done_io; res = sr->done_io;
io_req_set_res(req, res, req->cqe.flags); if ((req->flags & REQ_F_NEED_CLEANUP) &&
} req->opcode == IORING_OP_SEND_ZC) {
/* preserve notification for partial I/O */
void io_send_zc_fail(struct io_kiocb *req) if (res < 0)
{ sr->notif->flags |= REQ_F_CQE_SKIP;
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg); io_notif_flush(sr->notif);
int res = req->cqe.res; sr->notif = NULL;
if (req->flags & REQ_F_PARTIAL_IO) {
if (req->flags & REQ_F_NEED_CLEANUP) {
io_notif_flush(sr->notif);
sr->notif = NULL;
req->flags &= ~REQ_F_NEED_CLEANUP;
}
res = sr->done_io;
} }
if (req_has_async_data(req)) {
io = req->async_data;
kfree(io->free_iov);
io->free_iov = NULL;
}
req->flags &= ~REQ_F_NEED_CLEANUP;
io_req_set_res(req, res, req->cqe.flags); io_req_set_res(req, res, req->cqe.flags);
} }

View File

@ -59,7 +59,6 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags);
int io_send_zc(struct io_kiocb *req, unsigned int issue_flags); int io_send_zc(struct io_kiocb *req, unsigned int issue_flags);
int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe); int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
void io_send_zc_cleanup(struct io_kiocb *req); void io_send_zc_cleanup(struct io_kiocb *req);
void io_send_zc_fail(struct io_kiocb *req);
void io_netmsg_cache_free(struct io_cache_entry *entry); void io_netmsg_cache_free(struct io_cache_entry *entry);
#else #else

View File

@ -500,7 +500,7 @@ const struct io_op_def io_op_defs[] = {
.issue = io_send_zc, .issue = io_send_zc,
.prep_async = io_send_prep_async, .prep_async = io_send_prep_async,
.cleanup = io_send_zc_cleanup, .cleanup = io_send_zc_cleanup,
.fail = io_send_zc_fail, .fail = io_sendrecv_fail,
#else #else
.prep = io_eopnotsupp_prep, .prep = io_eopnotsupp_prep,
#endif #endif