Mirror of https://github.com/torvalds/linux.git, synced 2024-11-15 08:31:55 +00:00.
f4a1254f2a
Only the current owner of a request is allowed to write into req->flags.
Hence, the cancellation path should never touch it. Add a new field
instead of the flag, move it into the 3rd cache line because it should
always be initialised. poll_refs can move further as polling is an
involved process anyway.
It's a minimal patch, in the future we can and should find a better
place for it and remove now unused REQ_F_CANCEL_SEQ.
Fixes: 521223d7c2
("io_uring/cancel: don't default to setting req->work.cancel_seq")
Cc: stable@vger.kernel.org
Reported-by: Li Shi <sl1589472800@gmail.com>
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/6827b129f8f0ad76fa9d1f0a773de938b240ffab.1718323430.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
39 lines · 962 B · C
// SPDX-License-Identifier: GPL-2.0
#ifndef IORING_CANCEL_H
#define IORING_CANCEL_H

#include <linux/io_uring_types.h>

struct io_cancel_data {
|
|
struct io_ring_ctx *ctx;
|
|
union {
|
|
u64 data;
|
|
struct file *file;
|
|
};
|
|
u8 opcode;
|
|
u32 flags;
|
|
int seq;
|
|
};
|
|
|
|
int io_async_cancel_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
|
|
int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags);
|
|
|
|
int io_try_cancel(struct io_uring_task *tctx, struct io_cancel_data *cd,
|
|
unsigned int issue_flags);
|
|
void init_hash_table(struct io_hash_table *table, unsigned size);
|
|
|
|
int io_sync_cancel(struct io_ring_ctx *ctx, void __user *arg);
|
|
bool io_cancel_req_match(struct io_kiocb *req, struct io_cancel_data *cd);
|
|
|
|
static inline bool io_cancel_match_sequence(struct io_kiocb *req, int sequence)
|
|
{
|
|
if (req->cancel_seq_set && sequence == req->work.cancel_seq)
|
|
return true;
|
|
|
|
req->cancel_seq_set = true;
|
|
req->work.cancel_seq = sequence;
|
|
return false;
|
|
}

#endif