io_uring: change the poll type to be 32-bits

Poll events should be 32 bits wide to cover EPOLLEXCLUSIVE.

Explicitly word-swap the poll32_events on big-endian machines to make sure
the ABI is not changed.  We call this feature IORING_FEAT_POLL_32BITS;
applications that want to use EPOLLEXCLUSIVE should check for the feature
bit first.

Signed-off-by: Jiufei Xue <jiufei.xue@linux.alibaba.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Author:    Jiufei Xue, 2020-06-17 17:53:55 +08:00
Committer: Jens Axboe
Commit:    5769a351b8 (parent 48778464bb)
3 changed files with 17 additions and 6 deletions
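
Editor's note: a minimal userspace sketch (not part of this commit) of the feature check the message asks for, using the raw io_uring_setup(2) syscall. It assumes kernel headers that already carry IORING_FEAT_POLL_32BITS and a libc that defines __NR_io_uring_setup.

#include <linux/io_uring.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct io_uring_params p;
	int fd;

	/* io_uring_setup(2) fills p->features on return. */
	memset(&p, 0, sizeof(p));
	fd = syscall(__NR_io_uring_setup, 4, &p);
	if (fd < 0) {
		perror("io_uring_setup");
		return 1;
	}
	if (p.features & IORING_FEAT_POLL_32BITS)
		printf("kernel accepts 32-bit poll events (EPOLLEXCLUSIVE usable)\n");
	else
		printf("old kernel: only 16-bit poll events supported\n");
	close(fd);
	return 0;
}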

fs/io_uring.c

@@ -4589,7 +4589,7 @@ static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
 static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	struct io_poll_iocb *poll = &req->poll;
-	u16 events;
+	u32 events;
 
 	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
 		return -EINVAL;
@@ -4598,7 +4598,10 @@ static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
 	if (!poll->file)
 		return -EBADF;
 
-	events = READ_ONCE(sqe->poll_events);
+	events = READ_ONCE(sqe->poll32_events);
+#ifdef __BIG_ENDIAN
+	events = swahw32(events);
+#endif
 	poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP;
 
 	io_get_req_task(req);
@@ -7928,7 +7931,8 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
 	p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
 			IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
-			IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL;
+			IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
+			IORING_FEAT_POLL_32BITS;
 
 	if (copy_to_user(params, p, sizeof(*p))) {
 		ret = -EFAULT;
@@ -8217,7 +8221,8 @@ static int __init io_uring_init(void)
 	BUILD_BUG_SQE_ELEM(28, /* compat */   int, rw_flags);
 	BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
 	BUILD_BUG_SQE_ELEM(28, __u32,  fsync_flags);
-	BUILD_BUG_SQE_ELEM(28, __u16,  poll_events);
+	BUILD_BUG_SQE_ELEM(28, /* compat */ __u16,  poll_events);
+	BUILD_BUG_SQE_ELEM(28, __u32,  poll32_events);
 	BUILD_BUG_SQE_ELEM(28, __u32,  sync_range_flags);
 	BUILD_BUG_SQE_ELEM(28, __u32,  msg_flags);
 	BUILD_BUG_SQE_ELEM(28, __u32,  timeout_flags);
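
Editor's note: the halfword swap preserves the old ABI because, on a big-endian machine, the legacy 16-bit poll_events field occupies the most-significant half of the 32-bit word the kernel now loads. A standalone illustration of that argument (plain C, not kernel code), with swap_halfwords standing in for the kernel's swahw32():

#include <stdint.h>
#include <stdio.h>

/* Swap the two 16-bit halves of a 32-bit word, like the kernel's swahw32(). */
static uint32_t swap_halfwords(uint32_t x)
{
	return (x << 16) | (x >> 16);
}

int main(void)
{
	/* Mirrors the overlapping poll_events/poll32_events members of the SQE. */
	union {
		uint16_t poll_events;	/* old ABI: 16-bit field */
		uint32_t poll32_events;	/* new ABI: 32-bit field, same offset */
	} sqe = { .poll32_events = 0 };
	uint16_t legacy_mask = 0x0005;	/* POLLIN | POLLOUT on common ABIs */
	uint32_t events;

	/* A legacy application stores only the 16-bit mask. */
	sqe.poll_events = legacy_mask;

	/* The patched kernel always loads the 32-bit field... */
	events = sqe.poll32_events;
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	/*
	 * ...and on big endian swaps halfwords, because the legacy 16-bit
	 * value sits in the upper half of that word there.
	 */
	events = swap_halfwords(events);
#endif
	printf("kernel sees 0x%04x, legacy app wrote 0x%04x\n",
	       (unsigned)(events & 0xffffu), (unsigned)legacy_mask);
	return 0;
}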

include/uapi/linux/io_uring.h

@@ -31,7 +31,8 @@ struct io_uring_sqe {
 	union {
 		__kernel_rwf_t	rw_flags;
 		__u32		fsync_flags;
-		__u16		poll_events;
+		__u16		poll_events;	/* compatibility */
+		__u32		poll32_events;	/* word-reversed for BE */
 		__u32		sync_range_flags;
 		__u32		msg_flags;
 		__u32		timeout_flags;
@@ -248,6 +249,7 @@ struct io_uring_params {
 #define IORING_FEAT_RW_CUR_POS		(1U << 3)
 #define IORING_FEAT_CUR_PERSONALITY	(1U << 4)
 #define IORING_FEAT_FAST_POLL		(1U << 5)
+#define IORING_FEAT_POLL_32BITS		(1U << 6)
 
 /*
  * io_uring_register(2) opcodes and arguments
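
Editor's note: the widened union member exists because EPOLLEXCLUSIVE is bit 28 of the epoll mask, which the old __u16 poll_events cannot carry. A tiny check against the glibc definition (illustration only, needs a reasonably recent <sys/epoll.h>):

#include <stdio.h>
#include <sys/epoll.h>

/* Bit 28 does not fit in a 16-bit field, hence poll32_events. */
_Static_assert(EPOLLEXCLUSIVE > 0xffffu,
	       "EPOLLEXCLUSIVE needs more than 16 bits");

int main(void)
{
	printf("EPOLLEXCLUSIVE = 0x%x\n", (unsigned)EPOLLEXCLUSIVE);
	return 0;
}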

tools/io_uring/liburing.h

@@ -10,6 +10,7 @@ extern "C" {
 #include <string.h>
 #include "../../include/uapi/linux/io_uring.h"
 #include <inttypes.h>
+#include <linux/swab.h>
 #include "barrier.h"
 
 /*
@@ -145,11 +146,14 @@ static inline void io_uring_prep_write_fixed(struct io_uring_sqe *sqe, int fd,
 }
 
 static inline void io_uring_prep_poll_add(struct io_uring_sqe *sqe, int fd,
-					  short poll_mask)
+					  unsigned poll_mask)
 {
 	memset(sqe, 0, sizeof(*sqe));
 	sqe->opcode = IORING_OP_POLL_ADD;
 	sqe->fd = fd;
+#if __BYTE_ORDER == __BIG_ENDIAN
+	poll_mask = __swahw32(poll_mask);
+#endif
 	sqe->poll_events = poll_mask;
 }
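
Editor's note: the in-tree helper above still stores into the 16-bit poll_events field, which is enough for classic poll(2) masks. How an application could fill the new 32-bit field instead is sketched below; prep_poll_add32 is a hypothetical name, and <linux/io_uring.h> is assumed to already contain this patch.

#include <endian.h>
#include <linux/io_uring.h>
#include <linux/swab.h>
#include <string.h>

static void prep_poll_add32(struct io_uring_sqe *sqe, int fd, unsigned mask)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_POLL_ADD;
	sqe->fd = fd;
#if __BYTE_ORDER == __BIG_ENDIAN
	mask = __swahw32(mask);		/* the kernel's swahw32() undoes this */
#endif
	sqe->poll32_events = mask;
}

A caller would pair this with the usual submission path, for example prep_poll_add32(sqe, sockfd, EPOLLIN | EPOLLEXCLUSIVE), but only after seeing IORING_FEAT_POLL_32BITS in io_uring_params.features as sketched earlier.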