vfs: do bulk POLL* -> EPOLL* replacement

This is the mindless scripted replacement of kernel use of POLL*
variables as described by Al, done by this script:

    for V in IN OUT PRI ERR RDNORM RDBAND WRNORM WRBAND HUP RDHUP NVAL MSG; do
        L=`git grep -l -w POLL$V | grep -v '^t' | grep -v /um/ | grep -v '^sa' | grep -v '/poll.h$'|grep -v '^D'`
        for f in $L; do sed -i "-es/^\([^\"]*\)\(\<POLL$V\>\)/\\1E\\2/" $f; done
    done

with de-mangling cleanups yet to come.
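
Since the one-liners are dense, here is the same selection and
substitution spelled out for a single name (V=IN).  The comments are my
reading of the patterns -- the script itself doesn't spell this out --
so treat them as annotation, not as part of Al's recipe:

    # Pick the files to rewrite: whole-word users of POLLIN, minus what
    # must keep the real POLL* values -- '^t' and '^sa' presumably filter
    # tools/ and samples/ (userspace), '/um/' filters arch/um (it goes
    # through the host's poll()), '^D' Documentation/, and '/poll.h$'
    # spares the headers that define POLL* in the first place.
    git grep -l -w POLLIN | grep -v '^t' | grep -v /um/ \
        | grep -v '^sa' | grep -v '/poll.h$' | grep -v '^D'

    # The substitution  s/^\([^"]*\)\(\<POLLIN\>\)/\1E\2/  works per line:
    #   \([^"]*\)    the prefix may not contain a double quote, so a name
    #                sitting inside or after a string literal is left alone
    #   \<POLLIN\>   whole word only, so POLLING and EPOLLIN don't match
    #   \1E\2        put the prefix back and glue an E onto the name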

NOTE! On almost all architectures, the EPOLL* constants have the same
values as the POLL* constants do.  But the keyword here is "almost".
For various bad reasons they aren't the same, and epoll() doesn't
actually work quite correctly in some cases due to this on Sparc et al.
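
As a concrete illustration of the mismatch (the values below are my
recollection of the generic and Sparc uapi poll.h headers plus
eventpoll.h, quoted only to show the shape of the problem, not part of
this patch):

    /* include/uapi/asm-generic/poll.h -- what most architectures use */
    #define POLLWRNORM      0x0100
    #define POLLWRBAND      0x0200

    /* arch/sparc/include/uapi/asm/poll.h -- historically different */
    #define POLLWRNORM      POLLOUT         /* 0x0004 */
    #define POLLWRBAND      256             /* 0x0100: generic POLLWRNORM! */

    /*
     * epoll has a single ABI everywhere, so EPOLLWRNORM is always
     * 0x00000100.  A Sparc driver returning POLLWRNORM therefore hands
     * epoll the wrong bit, which is the sort of breakage being fixed.
     */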

The next patch from Al will sort out the final differences, and we
should be all done.

Scripted-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit a9a08845e9 (parent ee5daa1361)
Linus Torvalds, 2018-02-11 14:34:03 -08:00
297 changed files with 913 additions and 913 deletions

kernel/events/core.c

@@ -4524,7 +4524,7 @@ static __poll_t perf_poll(struct file *file, poll_table *wait)
 {
         struct perf_event *event = file->private_data;
         struct ring_buffer *rb;
-        __poll_t events = POLLHUP;
+        __poll_t events = EPOLLHUP;
         poll_wait(file, &event->waitq, wait);

kernel/events/ring_buffer.c

@@ -19,7 +19,7 @@
 static void perf_output_wakeup(struct perf_output_handle *handle)
 {
-        atomic_set(&handle->rb->poll, POLLIN);
+        atomic_set(&handle->rb->poll, EPOLLIN);
         handle->event->pending_wakeup = 1;
         irq_work_queue(&handle->event->pending);

kernel/printk/printk.c

@@ -930,7 +930,7 @@ static __poll_t devkmsg_poll(struct file *file, poll_table *wait)
         __poll_t ret = 0;
         if (!user)
-                return POLLERR|POLLNVAL;
+                return EPOLLERR|EPOLLNVAL;
         poll_wait(file, &log_wait, wait);
@@ -938,9 +938,9 @@ static __poll_t devkmsg_poll(struct file *file, poll_table *wait)
         if (user->seq < log_next_seq) {
                 /* return error when data has vanished underneath us */
                 if (user->seq < log_first_seq)
-                        ret = POLLIN|POLLRDNORM|POLLERR|POLLPRI;
+                        ret = EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI;
                 else
-                        ret = POLLIN|POLLRDNORM;
+                        ret = EPOLLIN|EPOLLRDNORM;
         }
         logbuf_unlock_irq();

kernel/relay.c

@@ -924,12 +924,12 @@ static __poll_t relay_file_poll(struct file *filp, poll_table *wait)
         struct rchan_buf *buf = filp->private_data;
         if (buf->finalized)
-                return POLLERR;
+                return EPOLLERR;
         if (filp->f_mode & FMODE_READ) {
                 poll_wait(filp, &buf->read_wait, wait);
                 if (!relay_buf_empty(buf))
-                        mask |= POLLIN | POLLRDNORM;
+                        mask |= EPOLLIN | EPOLLRDNORM;
         }
         return mask;

kernel/time/posix-clock.c

@@ -74,7 +74,7 @@ static __poll_t posix_clock_poll(struct file *fp, poll_table *wait)
         __poll_t result = 0;
         if (!clk)
-                return POLLERR;
+                return EPOLLERR;
         if (clk->ops.poll)
                 result = clk->ops.poll(clk, fp, wait);

kernel/trace/ring_buffer.c

@@ -627,7 +627,7 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
  * as data is added to any of the @buffer's cpu buffers. Otherwise
  * it will wait for data to be added to a specific cpu buffer.
  *
- * Returns POLLIN | POLLRDNORM if data exists in the buffers,
+ * Returns EPOLLIN | EPOLLRDNORM if data exists in the buffers,
  * zero otherwise.
  */
 __poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
@@ -665,7 +665,7 @@ __poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
         if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
             (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
-                return POLLIN | POLLRDNORM;
+                return EPOLLIN | EPOLLRDNORM;
         return 0;
 }

kernel/trace/trace.c

@@ -5623,13 +5623,13 @@ trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_tabl
         /* Iterators are static, they should be filled or empty */
         if (trace_buffer_iter(iter, iter->cpu_file))
-                return POLLIN | POLLRDNORM;
+                return EPOLLIN | EPOLLRDNORM;
         if (tr->trace_flags & TRACE_ITER_BLOCK)
                 /*
                  * Always select as readable when in blocking mode
                  */
-                return POLLIN | POLLRDNORM;
+                return EPOLLIN | EPOLLRDNORM;
         else
                 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
                                              filp, poll_table);