2005-04-16 22:20:36 +00:00
|
|
|
/*
|
|
|
|
* linux/fs/pipe.c
|
|
|
|
*
|
|
|
|
* Copyright (C) 1991, 1992, 1999 Linus Torvalds
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/mm.h>
|
|
|
|
#include <linux/file.h>
|
|
|
|
#include <linux/poll.h>
|
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/fs.h>
|
|
|
|
#include <linux/mount.h>
|
|
|
|
#include <linux/pipe_fs_i.h>
|
|
|
|
#include <linux/uio.h>
|
|
|
|
#include <linux/highmem.h>
|
2006-03-30 13:15:30 +00:00
|
|
|
#include <linux/pagemap.h>
|
2007-02-07 06:48:00 +00:00
|
|
|
#include <linux/audit.h>
|
2008-05-07 03:42:38 +00:00
|
|
|
#include <linux/syscalls.h>
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
#include <asm/uaccess.h>
|
|
|
|
#include <asm/ioctls.h>
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We use a start+len construction, which provides full use of the
|
|
|
|
* allocated memory.
|
|
|
|
* -- Florian Coosmann (FGC)
|
|
|
|
*
|
|
|
|
* Reads with count = 0 should always return 0.
|
|
|
|
* -- Julian Bradfield 1999-06-07.
|
|
|
|
*
|
|
|
|
* FIFOs and Pipes now generate SIGIO for both readers and writers.
|
|
|
|
* -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
|
|
|
|
*
|
|
|
|
* pipe_read & write cleanup
|
|
|
|
* -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
|
|
|
|
*/
|
|
|
|
|
2009-04-14 17:48:41 +00:00
|
|
|
/*
 * Lock the pipe's backing inode mutex with an explicit lockdep subclass,
 * so that nested pipe locking (see pipe_double_lock()) does not trigger
 * false lockdep reports.  A pipe with no backing inode needs no locking.
 */
static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
{
	if (pipe->inode)
		mutex_lock_nested(&pipe->inode->i_mutex, subclass);
}
|
|
|
|
|
|
|
|
/*
 * pipe_lock - take the pipe's mutex (outer/parent lockdep class).
 */
void pipe_lock(struct pipe_inode_info *pipe)
{
	/*
	 * pipe_lock() nests non-pipe inode locks (for writing to a file)
	 */
	pipe_lock_nested(pipe, I_MUTEX_PARENT);
}
EXPORT_SYMBOL(pipe_lock);
|
|
|
|
|
|
|
|
/*
 * pipe_unlock - release the mutex taken by pipe_lock()/pipe_lock_nested().
 * A no-op for pipes without a backing inode, matching pipe_lock_nested().
 */
void pipe_unlock(struct pipe_inode_info *pipe)
{
	if (pipe->inode)
		mutex_unlock(&pipe->inode->i_mutex);
}
EXPORT_SYMBOL(pipe_unlock);
|
|
|
|
|
|
|
|
/*
 * pipe_double_lock - lock two pipes without deadlocking.
 *
 * Always acquires in ascending address order so two tasks locking the
 * same pair of pipes cannot deadlock; lockdep subclasses mark the first
 * as parent and the second as child.
 */
void pipe_double_lock(struct pipe_inode_info *pipe1,
		      struct pipe_inode_info *pipe2)
{
	BUG_ON(pipe1 == pipe2);

	if (pipe1 < pipe2) {
		pipe_lock_nested(pipe1, I_MUTEX_PARENT);
		pipe_lock_nested(pipe2, I_MUTEX_CHILD);
	} else {
		pipe_lock_nested(pipe2, I_MUTEX_PARENT);
		pipe_lock_nested(pipe1, I_MUTEX_CHILD);
	}
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/* Drop the inode semaphore and wait for a pipe event, atomically */
void pipe_wait(struct pipe_inode_info *pipe)
{
	DEFINE_WAIT(wait);

	/*
	 * Pipes are system-local resources, so sleeping on them
	 * is considered a noninteractive wait:
	 */
	prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE);
	/*
	 * Queue ourselves on the waitqueue *before* dropping the pipe
	 * lock, so a wakeup issued between unlock and schedule() is not
	 * lost (it just makes schedule() return immediately).
	 */
	pipe_unlock(pipe);
	schedule();
	finish_wait(&pipe->wait, &wait);
	pipe_lock(pipe);
}
|
|
|
|
|
2006-01-14 21:20:43 +00:00
|
|
|
/*
 * Copy @len bytes of user data described by @iov into kernel buffer @to.
 *
 * The iovec entries are consumed in place: iov_base/iov_len are advanced
 * as data is copied, so the caller can continue from where a successful
 * copy left off.  With @atomic set, uses __copy_from_user_inatomic() and
 * may fail (-EFAULT) where the plain path would fault pages in.
 *
 * NOTE(review): on -EFAULT, iovec entries copied before the failing
 * segment have already been advanced.  Callers that retry the same
 * length with the same iov (the redo paths in pipe_write) therefore
 * resume from the advanced position — verify this is the intent.
 */
static int
pipe_iov_copy_from_user(void *to, struct iovec *iov, unsigned long len,
			int atomic)
{
	unsigned long copy;

	while (len > 0) {
		/* skip zero-length segments */
		while (!iov->iov_len)
			iov++;
		copy = min_t(unsigned long, len, iov->iov_len);

		if (atomic) {
			if (__copy_from_user_inatomic(to, iov->iov_base, copy))
				return -EFAULT;
		} else {
			if (copy_from_user(to, iov->iov_base, copy))
				return -EFAULT;
		}
		to += copy;
		len -= copy;
		iov->iov_base += copy;
		iov->iov_len -= copy;
	}
	return 0;
}
|
|
|
|
|
2006-01-14 21:20:43 +00:00
|
|
|
/*
 * Copy @len bytes from kernel buffer @from out to the user memory
 * described by @iov.  Mirror image of pipe_iov_copy_from_user(): iovec
 * entries are advanced in place as they are filled, and @atomic selects
 * __copy_to_user_inatomic() (may fail with -EFAULT without faulting
 * pages in).
 *
 * NOTE(review): as with the _from_user variant, a -EFAULT return leaves
 * earlier iovec entries already advanced; retrying callers resume from
 * the advanced position.
 */
static int
pipe_iov_copy_to_user(struct iovec *iov, const void *from, unsigned long len,
		      int atomic)
{
	unsigned long copy;

	while (len > 0) {
		/* skip zero-length segments */
		while (!iov->iov_len)
			iov++;
		copy = min_t(unsigned long, len, iov->iov_len);

		if (atomic) {
			if (__copy_to_user_inatomic(iov->iov_base, from, copy))
				return -EFAULT;
		} else {
			if (copy_to_user(iov->iov_base, from, copy))
				return -EFAULT;
		}
		from += copy;
		len -= copy;
		iov->iov_base += copy;
		iov->iov_len -= copy;
	}
	return 0;
}
|
|
|
|
|
2006-05-01 18:02:05 +00:00
|
|
|
/*
|
|
|
|
* Attempt to pre-fault in the user memory, so we can use atomic copies.
|
|
|
|
* Returns the number of bytes not faulted in.
|
|
|
|
*/
|
|
|
|
static int iov_fault_in_pages_write(struct iovec *iov, unsigned long len)
|
|
|
|
{
|
|
|
|
while (!iov->iov_len)
|
|
|
|
iov++;
|
|
|
|
|
|
|
|
while (len > 0) {
|
|
|
|
unsigned long this_len;
|
|
|
|
|
|
|
|
this_len = min_t(unsigned long, len, iov->iov_len);
|
|
|
|
if (fault_in_pages_writeable(iov->iov_base, this_len))
|
|
|
|
break;
|
|
|
|
|
|
|
|
len -= this_len;
|
|
|
|
iov++;
|
|
|
|
}
|
|
|
|
|
|
|
|
return len;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Pre-fault in the user memory, so we can use atomic copies.
|
|
|
|
*/
|
|
|
|
static void iov_fault_in_pages_read(struct iovec *iov, unsigned long len)
|
|
|
|
{
|
|
|
|
while (!iov->iov_len)
|
|
|
|
iov++;
|
|
|
|
|
|
|
|
while (len > 0) {
|
|
|
|
unsigned long this_len;
|
|
|
|
|
|
|
|
this_len = min_t(unsigned long, len, iov->iov_len);
|
|
|
|
fault_in_pages_readable(iov->iov_base, this_len);
|
|
|
|
len -= this_len;
|
|
|
|
iov++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2006-04-11 11:57:45 +00:00
|
|
|
/*
 * Release callback for anonymous pipe buffers.
 */
static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * If nobody else uses this page, and we don't already have a
	 * temporary page, let's keep track of it as a one-deep
	 * allocation cache. (Otherwise just release our reference to it)
	 */
	if (page_count(page) == 1 && !pipe->tmp_page)
		pipe->tmp_page = page;
	else
		page_cache_release(page);
}
|
|
|
|
|
2007-06-12 18:51:32 +00:00
|
|
|
/**
 * generic_pipe_buf_map - virtually map a pipe buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer that should be mapped
 * @atomic:	whether to use an atomic map
 *
 * Description:
 *	This function returns a kernel virtual address mapping for the
 *	pipe_buffer passed in @buf. If @atomic is set, an atomic map is provided
 *	and the caller has to be careful not to fault before calling
 *	the unmap function.
 *
 *	Note that this function occupies KM_USER0 if @atomic != 0.
 */
void *generic_pipe_buf_map(struct pipe_inode_info *pipe,
			   struct pipe_buffer *buf, int atomic)
{
	if (atomic) {
		/* Flag remembered so generic_pipe_buf_unmap() picks kunmap_atomic */
		buf->flags |= PIPE_BUF_FLAG_ATOMIC;
		return kmap_atomic(buf->page, KM_USER0);
	}

	return kmap(buf->page);
}
|
|
|
|
|
2007-06-12 18:51:32 +00:00
|
|
|
/**
 * generic_pipe_buf_unmap - unmap a previously mapped pipe buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer that should be unmapped
 * @map_data:	the data that the mapping function returned
 *
 * Description:
 *	This function undoes the mapping that ->map() provided.
 *	The PIPE_BUF_FLAG_ATOMIC flag set by generic_pipe_buf_map() selects
 *	the matching unmap primitive and is cleared here.
 */
void generic_pipe_buf_unmap(struct pipe_inode_info *pipe,
			    struct pipe_buffer *buf, void *map_data)
{
	if (buf->flags & PIPE_BUF_FLAG_ATOMIC) {
		buf->flags &= ~PIPE_BUF_FLAG_ATOMIC;
		kunmap_atomic(map_data, KM_USER0);
	} else
		kunmap(buf->page);
}
|
|
|
|
|
2007-06-12 18:51:32 +00:00
|
|
|
/**
 * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to attempt to steal
 *
 * Description:
 *	This function attempts to steal the &struct page attached to
 *	@buf. If successful, this function returns 0 and returns with
 *	the page locked. The caller may then reuse the page for whatever
 *	he wishes; the typical use is insertion into a different file
 *	page cache.
 *
 *	Returns 1 (failure) when someone else still holds a reference.
 */
int generic_pipe_buf_steal(struct pipe_inode_info *pipe,
			   struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * A reference of one is golden, that means that the owner of this
	 * page is the only one holding a reference to it. lock the page
	 * and return OK.
	 */
	if (page_count(page) == 1) {
		lock_page(page);
		return 0;
	}

	return 1;
}
|
|
|
|
|
2007-06-12 18:51:32 +00:00
|
|
|
/**
 * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to get a reference to
 *
 * Description:
 *	This function grabs an extra reference to @buf. It's used in
 *	in the tee() system call, when we duplicate the buffers in one
 *	pipe into another.
 */
void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
	page_cache_get(buf->page);
}
|
|
|
|
|
2007-06-12 18:51:32 +00:00
|
|
|
/**
 * generic_pipe_buf_confirm - verify contents of the pipe buffer
 * @info:	the pipe that the buffer belongs to
 * @buf:	the buffer to confirm
 *
 * Description:
 *	This function does nothing, because the generic pipe code uses
 *	pages that are always good when inserted into the pipe.
 */
int generic_pipe_buf_confirm(struct pipe_inode_info *info,
			     struct pipe_buffer *buf)
{
	return 0;
}
|
|
|
|
|
2009-05-07 13:37:36 +00:00
|
|
|
/**
 * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to put a reference to
 *
 * Description:
 *	This function releases a reference to @buf.
 */
void generic_pipe_buf_release(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	page_cache_release(buf->page);
}
|
|
|
|
|
2006-12-13 08:34:04 +00:00
|
|
|
/*
 * Operations for anonymous (pipe(2)) pipe buffers.  Mostly the generic
 * helpers; only ->release differs, to feed the one-page tmp_page cache.
 * can_merge = 1 lets pipe_write() append small writes to the last buffer.
 */
static const struct pipe_buf_operations anon_pipe_buf_ops = {
	.can_merge = 1,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = anon_pipe_buf_release,
	.steal = generic_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};
|
|
|
|
|
|
|
|
static ssize_t
|
2006-10-01 06:28:47 +00:00
|
|
|
pipe_read(struct kiocb *iocb, const struct iovec *_iov,
|
|
|
|
unsigned long nr_segs, loff_t pos)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2006-10-01 06:28:47 +00:00
|
|
|
struct file *filp = iocb->ki_filp;
|
2006-12-08 10:36:35 +00:00
|
|
|
struct inode *inode = filp->f_path.dentry->d_inode;
|
2006-04-11 11:53:33 +00:00
|
|
|
struct pipe_inode_info *pipe;
|
2005-04-16 22:20:36 +00:00
|
|
|
int do_wakeup;
|
|
|
|
ssize_t ret;
|
|
|
|
struct iovec *iov = (struct iovec *)_iov;
|
|
|
|
size_t total_len;
|
|
|
|
|
|
|
|
total_len = iov_length(iov, nr_segs);
|
|
|
|
/* Null read succeeds. */
|
|
|
|
if (unlikely(total_len == 0))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
do_wakeup = 0;
|
|
|
|
ret = 0;
|
2006-04-11 11:53:10 +00:00
|
|
|
mutex_lock(&inode->i_mutex);
|
2006-04-11 11:53:33 +00:00
|
|
|
pipe = inode->i_pipe;
|
2005-04-16 22:20:36 +00:00
|
|
|
for (;;) {
|
2006-04-11 11:53:33 +00:00
|
|
|
int bufs = pipe->nrbufs;
|
2005-04-16 22:20:36 +00:00
|
|
|
if (bufs) {
|
2006-04-11 11:53:33 +00:00
|
|
|
int curbuf = pipe->curbuf;
|
|
|
|
struct pipe_buffer *buf = pipe->bufs + curbuf;
|
2006-12-13 08:34:04 +00:00
|
|
|
const struct pipe_buf_operations *ops = buf->ops;
|
2005-04-16 22:20:36 +00:00
|
|
|
void *addr;
|
|
|
|
size_t chars = buf->len;
|
2006-05-01 18:02:05 +00:00
|
|
|
int error, atomic;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
if (chars > total_len)
|
|
|
|
chars = total_len;
|
|
|
|
|
2007-06-14 11:10:48 +00:00
|
|
|
error = ops->confirm(pipe, buf);
|
2006-05-01 17:59:03 +00:00
|
|
|
if (error) {
|
2006-03-30 13:15:30 +00:00
|
|
|
if (!ret)
|
2006-05-01 17:59:03 +00:00
|
|
|
error = ret;
|
2006-03-30 13:15:30 +00:00
|
|
|
break;
|
|
|
|
}
|
2006-05-01 17:59:03 +00:00
|
|
|
|
2006-05-01 18:02:05 +00:00
|
|
|
atomic = !iov_fault_in_pages_write(iov, chars);
|
|
|
|
redo:
|
|
|
|
addr = ops->map(pipe, buf, atomic);
|
|
|
|
error = pipe_iov_copy_to_user(iov, addr + buf->offset, chars, atomic);
|
|
|
|
ops->unmap(pipe, buf, addr);
|
2005-04-16 22:20:36 +00:00
|
|
|
if (unlikely(error)) {
|
2006-05-01 18:02:05 +00:00
|
|
|
/*
|
|
|
|
* Just retry with the slow path if we failed.
|
|
|
|
*/
|
|
|
|
if (atomic) {
|
|
|
|
atomic = 0;
|
|
|
|
goto redo;
|
|
|
|
}
|
2006-04-11 11:57:45 +00:00
|
|
|
if (!ret)
|
2006-05-01 18:02:05 +00:00
|
|
|
ret = error;
|
2005-04-16 22:20:36 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
ret += chars;
|
|
|
|
buf->offset += chars;
|
|
|
|
buf->len -= chars;
|
|
|
|
if (!buf->len) {
|
|
|
|
buf->ops = NULL;
|
2006-04-11 11:53:33 +00:00
|
|
|
ops->release(pipe, buf);
|
2005-04-16 22:20:36 +00:00
|
|
|
curbuf = (curbuf + 1) & (PIPE_BUFFERS-1);
|
2006-04-11 11:53:33 +00:00
|
|
|
pipe->curbuf = curbuf;
|
|
|
|
pipe->nrbufs = --bufs;
|
2005-04-16 22:20:36 +00:00
|
|
|
do_wakeup = 1;
|
|
|
|
}
|
|
|
|
total_len -= chars;
|
|
|
|
if (!total_len)
|
|
|
|
break; /* common path: read succeeded */
|
|
|
|
}
|
|
|
|
if (bufs) /* More to do? */
|
|
|
|
continue;
|
2006-04-11 11:53:33 +00:00
|
|
|
if (!pipe->writers)
|
2005-04-16 22:20:36 +00:00
|
|
|
break;
|
2006-04-11 11:53:33 +00:00
|
|
|
if (!pipe->waiting_writers) {
|
2005-04-16 22:20:36 +00:00
|
|
|
/* syscall merging: Usually we must not sleep
|
|
|
|
* if O_NONBLOCK is set, or if we got some data.
|
|
|
|
* But if a writer sleeps in kernel space, then
|
|
|
|
* we can wait for that data without violating POSIX.
|
|
|
|
*/
|
|
|
|
if (ret)
|
|
|
|
break;
|
|
|
|
if (filp->f_flags & O_NONBLOCK) {
|
|
|
|
ret = -EAGAIN;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (signal_pending(current)) {
|
2006-04-11 11:57:45 +00:00
|
|
|
if (!ret)
|
|
|
|
ret = -ERESTARTSYS;
|
2005-04-16 22:20:36 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (do_wakeup) {
|
2006-04-11 11:53:33 +00:00
|
|
|
wake_up_interruptible_sync(&pipe->wait);
|
|
|
|
kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
2006-04-11 11:53:33 +00:00
|
|
|
pipe_wait(pipe);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
2006-04-11 11:53:10 +00:00
|
|
|
mutex_unlock(&inode->i_mutex);
|
2006-04-11 11:57:45 +00:00
|
|
|
|
|
|
|
/* Signal writers asynchronously that there is more room. */
|
2005-04-16 22:20:36 +00:00
|
|
|
if (do_wakeup) {
|
2007-10-15 15:00:19 +00:00
|
|
|
wake_up_interruptible_sync(&pipe->wait);
|
2006-04-11 11:53:33 +00:00
|
|
|
kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
if (ret > 0)
|
|
|
|
file_accessed(filp);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Write the user iovec into a pipe.
 *
 * Runs under the pipe's i_mutex.  First tries to merge a sub-page tail
 * write into the last buffer (if its ops allow merging), then appends
 * whole pages, allocating via the one-deep tmp_page cache.  Raises
 * SIGPIPE/-EPIPE when there are no readers; blocks (unless O_NONBLOCK)
 * while the ring is full.
 *
 * NOTE(review): in the merge path, a ->confirm() failure jumps to "out"
 * with ret still 0, so the error code is not reported to the caller —
 * compare with the equivalent path in pipe_read; verify intended.
 * NOTE(review): the redo1/redo2 retry after a failed atomic copy reuses
 * an iov that pipe_iov_copy_from_user() may have partially advanced.
 */
static ssize_t
pipe_write(struct kiocb *iocb, const struct iovec *_iov,
	    unsigned long nr_segs, loff_t ppos)
{
	struct file *filp = iocb->ki_filp;
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct pipe_inode_info *pipe;
	ssize_t ret;
	int do_wakeup;
	struct iovec *iov = (struct iovec *)_iov;
	size_t total_len;
	ssize_t chars;

	total_len = iov_length(iov, nr_segs);
	/* Null write succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	do_wakeup = 0;
	ret = 0;
	mutex_lock(&inode->i_mutex);
	pipe = inode->i_pipe;

	if (!pipe->readers) {
		send_sig(SIGPIPE, current, 0);
		ret = -EPIPE;
		goto out;
	}

	/* We try to merge small writes */
	chars = total_len & (PAGE_SIZE-1); /* size of the last buffer */
	if (pipe->nrbufs && chars != 0) {
		int lastbuf = (pipe->curbuf + pipe->nrbufs - 1) &
							(PIPE_BUFFERS-1);
		struct pipe_buffer *buf = pipe->bufs + lastbuf;
		const struct pipe_buf_operations *ops = buf->ops;
		int offset = buf->offset + buf->len;

		if (ops->can_merge && offset + chars <= PAGE_SIZE) {
			int error, atomic = 1;
			void *addr;

			error = ops->confirm(pipe, buf);
			if (error)
				goto out;

			iov_fault_in_pages_read(iov, chars);
redo1:
			addr = ops->map(pipe, buf, atomic);
			error = pipe_iov_copy_from_user(offset + addr, iov,
							chars, atomic);
			ops->unmap(pipe, buf, addr);
			ret = error;
			do_wakeup = 1;
			if (error) {
				/* fall back to the faulting copy path */
				if (atomic) {
					atomic = 0;
					goto redo1;
				}
				goto out;
			}
			buf->len += chars;
			total_len -= chars;
			ret = chars;
			if (!total_len)
				goto out;
		}
	}

	for (;;) {
		int bufs;

		/* reader side may vanish while we sleep */
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}
		bufs = pipe->nrbufs;
		if (bufs < PIPE_BUFFERS) {
			int newbuf = (pipe->curbuf + bufs) & (PIPE_BUFFERS-1);
			struct pipe_buffer *buf = pipe->bufs + newbuf;
			struct page *page = pipe->tmp_page;
			char *src;
			int error, atomic = 1;

			if (!page) {
				page = alloc_page(GFP_HIGHUSER);
				if (unlikely(!page)) {
					ret = ret ? : -ENOMEM;
					break;
				}
				pipe->tmp_page = page;
			}
			/* Always wake up, even if the copy fails. Otherwise
			 * we lock up (O_NONBLOCK-)readers that sleep due to
			 * syscall merging.
			 * FIXME! Is this really true?
			 */
			do_wakeup = 1;
			chars = PAGE_SIZE;
			if (chars > total_len)
				chars = total_len;

			iov_fault_in_pages_read(iov, chars);
redo2:
			if (atomic)
				src = kmap_atomic(page, KM_USER0);
			else
				src = kmap(page);

			error = pipe_iov_copy_from_user(src, iov, chars,
							atomic);
			if (atomic)
				kunmap_atomic(src, KM_USER0);
			else
				kunmap(page);

			if (unlikely(error)) {
				if (atomic) {
					atomic = 0;
					goto redo2;
				}
				if (!ret)
					ret = error;
				break;
			}
			ret += chars;

			/* Insert it into the buffer array */
			buf->page = page;
			buf->ops = &anon_pipe_buf_ops;
			buf->offset = 0;
			buf->len = chars;
			pipe->nrbufs = ++bufs;
			pipe->tmp_page = NULL;

			total_len -= chars;
			if (!total_len)
				break;
		}
		if (bufs < PIPE_BUFFERS)
			continue;
		if (filp->f_flags & O_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}
		if (do_wakeup) {
			/* notify readers before sleeping on a full pipe */
			wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
			do_wakeup = 0;
		}
		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}
out:
	mutex_unlock(&inode->i_mutex);
	if (do_wakeup) {
		wake_up_interruptible_sync(&pipe->wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}
	if (ret > 0)
		file_update_time(filp);
	return ret;
}
|
|
|
|
|
|
|
|
/* ->read for the write-only end of a pipe: always fails. */
static ssize_t
bad_pipe_r(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
{
	return -EBADF;
}
|
|
|
|
|
|
|
|
/* ->write for the read-only end of a pipe: always fails. */
static ssize_t
bad_pipe_w(struct file *filp, const char __user *buf, size_t count,
	   loff_t *ppos)
{
	return -EBADF;
}
|
|
|
|
|
2008-02-08 12:21:23 +00:00
|
|
|
static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2006-12-08 10:36:35 +00:00
|
|
|
struct inode *inode = filp->f_path.dentry->d_inode;
|
2006-04-11 11:53:33 +00:00
|
|
|
struct pipe_inode_info *pipe;
|
2005-04-16 22:20:36 +00:00
|
|
|
int count, buf, nrbufs;
|
|
|
|
|
|
|
|
switch (cmd) {
|
|
|
|
case FIONREAD:
|
2006-04-11 11:53:10 +00:00
|
|
|
mutex_lock(&inode->i_mutex);
|
2006-04-11 11:53:33 +00:00
|
|
|
pipe = inode->i_pipe;
|
2005-04-16 22:20:36 +00:00
|
|
|
count = 0;
|
2006-04-11 11:53:33 +00:00
|
|
|
buf = pipe->curbuf;
|
|
|
|
nrbufs = pipe->nrbufs;
|
2005-04-16 22:20:36 +00:00
|
|
|
while (--nrbufs >= 0) {
|
2006-04-11 11:53:33 +00:00
|
|
|
count += pipe->bufs[buf].len;
|
2005-04-16 22:20:36 +00:00
|
|
|
buf = (buf+1) & (PIPE_BUFFERS-1);
|
|
|
|
}
|
2006-04-11 11:53:10 +00:00
|
|
|
mutex_unlock(&inode->i_mutex);
|
2006-04-11 11:53:33 +00:00
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
return put_user(count, (int __user *)arg);
|
|
|
|
default:
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* No kernel lock held - fine */
/*
 * Poll a pipe end: POLLIN/POLLRDNORM when data is queued, POLLHUP when
 * all writers are gone, POLLOUT/POLLWRNORM when a buffer slot is free,
 * POLLERR when all readers are gone.  nrbufs is read without the mutex
 * (a single racy read is acceptable here, per the comment below).
 */
static unsigned int
pipe_poll(struct file *filp, poll_table *wait)
{
	unsigned int mask;
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct pipe_inode_info *pipe = inode->i_pipe;
	int nrbufs;

	poll_wait(filp, &pipe->wait, wait);

	/* Reading only -- no need for acquiring the semaphore. */
	nrbufs = pipe->nrbufs;
	mask = 0;
	if (filp->f_mode & FMODE_READ) {
		mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
		/* f_version vs. w_counter detects writers that came and went */
		if (!pipe->writers && filp->f_version != pipe->w_counter)
			mask |= POLLHUP;
	}

	if (filp->f_mode & FMODE_WRITE) {
		mask |= (nrbufs < PIPE_BUFFERS) ? POLLOUT | POLLWRNORM : 0;
		/*
		 * Most Unices do not set POLLERR for FIFOs but on Linux they
		 * behave exactly like pipes for poll().
		 */
		if (!pipe->readers)
			mask |= POLLERR;
	}

	return mask;
}
|
|
|
|
|
|
|
|
/*
 * Common release path: drop @decr reader refs and @decw writer refs.
 * Frees the pipe when both counts hit zero; otherwise wakes the other
 * side so blocked peers notice the hangup.
 */
static int
pipe_release(struct inode *inode, int decr, int decw)
{
	struct pipe_inode_info *pipe;

	mutex_lock(&inode->i_mutex);
	pipe = inode->i_pipe;
	pipe->readers -= decr;
	pipe->writers -= decw;

	if (!pipe->readers && !pipe->writers) {
		free_pipe_info(inode);
	} else {
		wake_up_interruptible_sync(&pipe->wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	mutex_unlock(&inode->i_mutex);

	return 0;
}
|
|
|
|
|
|
|
|
/* ->fasync for the read end: (un)register on the readers' fasync list. */
static int
pipe_read_fasync(int fd, struct file *filp, int on)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	int retval;

	mutex_lock(&inode->i_mutex);
	retval = fasync_helper(fd, filp, on, &inode->i_pipe->fasync_readers);
	mutex_unlock(&inode->i_mutex);

	return retval;
}
|
|
|
|
|
|
|
|
|
|
|
|
/* ->fasync for the write end: (un)register on the writers' fasync list. */
static int
pipe_write_fasync(int fd, struct file *filp, int on)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	int retval;

	mutex_lock(&inode->i_mutex);
	retval = fasync_helper(fd, filp, on, &inode->i_pipe->fasync_writers);
	mutex_unlock(&inode->i_mutex);

	return retval;
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
 * ->fasync for a read/write pipe end: register on both lists, and roll
 * back the reader registration if the writer one fails so the two lists
 * stay consistent.
 */
static int
pipe_rdwr_fasync(int fd, struct file *filp, int on)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct pipe_inode_info *pipe = inode->i_pipe;
	int retval;

	mutex_lock(&inode->i_mutex);
	retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
	if (retval >= 0) {
		retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
		if (retval < 0) /* this can happen only if on == T */
			fasync_helper(-1, filp, 0, &pipe->fasync_readers);
	}
	mutex_unlock(&inode->i_mutex);
	return retval;
}
|
|
|
|
|
|
|
|
|
|
|
|
/* ->release for the read end: drop one reader reference. */
static int
pipe_read_release(struct inode *inode, struct file *filp)
{
	return pipe_release(inode, 1, 0);
}
|
|
|
|
|
|
|
|
/* ->release for the write end: drop one writer reference. */
static int
pipe_write_release(struct inode *inode, struct file *filp)
{
	return pipe_release(inode, 0, 1);
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
pipe_rdwr_release(struct inode *inode, struct file *filp)
|
|
|
|
{
|
|
|
|
int decr, decw;
|
|
|
|
|
|
|
|
decr = (filp->f_mode & FMODE_READ) != 0;
|
|
|
|
decw = (filp->f_mode & FMODE_WRITE) != 0;
|
|
|
|
return pipe_release(inode, decr, decw);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * ->open for the read end: take a reader reference under i_mutex.
 * Fails with -ENOENT if the pipe backing is already gone.
 */
static int
pipe_read_open(struct inode *inode, struct file *filp)
{
	int ret = -ENOENT;

	mutex_lock(&inode->i_mutex);

	if (inode->i_pipe) {
		ret = 0;
		inode->i_pipe->readers++;
	}

	mutex_unlock(&inode->i_mutex);

	return ret;
}
|
|
|
|
|
|
|
|
/*
 * ->open for the write end: take a writer reference under i_mutex.
 * Fails with -ENOENT if the pipe backing is already gone.
 */
static int
pipe_write_open(struct inode *inode, struct file *filp)
{
	int ret = -ENOENT;

	mutex_lock(&inode->i_mutex);

	if (inode->i_pipe) {
		ret = 0;
		inode->i_pipe->writers++;
	}

	mutex_unlock(&inode->i_mutex);

	return ret;
}
|
|
|
|
|
|
|
|
/*
 * ->open for a read/write end: take reader and/or writer references
 * according to f_mode, under i_mutex.  Fails with -ENOENT if the pipe
 * backing is already gone.
 */
static int
pipe_rdwr_open(struct inode *inode, struct file *filp)
{
	int ret = -ENOENT;

	mutex_lock(&inode->i_mutex);

	if (inode->i_pipe) {
		ret = 0;
		if (filp->f_mode & FMODE_READ)
			inode->i_pipe->readers++;
		if (filp->f_mode & FMODE_WRITE)
			inode->i_pipe->writers++;
	}

	mutex_unlock(&inode->i_mutex);

	return ret;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The file_operations structs are not static because they
|
|
|
|
* are also used in linux/fs/fifo.c to do operations on FIFOs.
|
2008-07-01 12:16:09 +00:00
|
|
|
*
|
|
|
|
* Pipes reuse fifos' file_operations structs.
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
2008-07-01 12:16:09 +00:00
|
|
|
/*
 * File operations for the read end of a pipe or FIFO.
 * Writing through this end is rejected via bad_pipe_w.
 */
const struct file_operations read_pipefifo_fops = {
	.llseek		= no_llseek,
	.read		= do_sync_read,
	.aio_read	= pipe_read,
	.write		= bad_pipe_w,
	.poll		= pipe_poll,
	.unlocked_ioctl	= pipe_ioctl,
	.open		= pipe_read_open,
	.release	= pipe_read_release,
	.fasync		= pipe_read_fasync,
};
|
|
|
|
|
2008-07-01 12:16:09 +00:00
|
|
|
/*
 * File operations for the write end of a pipe or FIFO.
 * Reading through this end is rejected via bad_pipe_r.
 */
const struct file_operations write_pipefifo_fops = {
	.llseek		= no_llseek,
	.read		= bad_pipe_r,
	.write		= do_sync_write,
	.aio_write	= pipe_write,
	.poll		= pipe_poll,
	.unlocked_ioctl	= pipe_ioctl,
	.open		= pipe_write_open,
	.release	= pipe_write_release,
	.fasync		= pipe_write_fasync,
};
|
|
|
|
|
2008-07-01 12:16:09 +00:00
|
|
|
/*
 * File operations for a pipe/FIFO opened for both reading and
 * writing (also used as the i_fop of a fresh pipe inode).
 */
const struct file_operations rdwr_pipefifo_fops = {
	.llseek		= no_llseek,
	.read		= do_sync_read,
	.aio_read	= pipe_read,
	.write		= do_sync_write,
	.aio_write	= pipe_write,
	.poll		= pipe_poll,
	.unlocked_ioctl	= pipe_ioctl,
	.open		= pipe_rdwr_open,
	.release	= pipe_rdwr_release,
	.fasync		= pipe_rdwr_fasync,
};
|
|
|
|
|
2006-04-10 13:18:35 +00:00
|
|
|
struct pipe_inode_info * alloc_pipe_info(struct inode *inode)
|
|
|
|
{
|
2006-04-11 11:53:33 +00:00
|
|
|
struct pipe_inode_info *pipe;
|
2006-04-10 13:18:35 +00:00
|
|
|
|
2006-04-11 11:53:33 +00:00
|
|
|
pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL);
|
|
|
|
if (pipe) {
|
|
|
|
init_waitqueue_head(&pipe->wait);
|
|
|
|
pipe->r_counter = pipe->w_counter = 1;
|
|
|
|
pipe->inode = inode;
|
2006-04-10 13:18:35 +00:00
|
|
|
}
|
|
|
|
|
2006-04-11 11:53:33 +00:00
|
|
|
return pipe;
|
2006-04-10 13:18:35 +00:00
|
|
|
}
|
|
|
|
|
2006-04-11 11:53:33 +00:00
|
|
|
void __free_pipe_info(struct pipe_inode_info *pipe)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < PIPE_BUFFERS; i++) {
|
2006-04-11 11:53:33 +00:00
|
|
|
struct pipe_buffer *buf = pipe->bufs + i;
|
2005-04-16 22:20:36 +00:00
|
|
|
if (buf->ops)
|
2006-04-11 11:53:33 +00:00
|
|
|
buf->ops->release(pipe, buf);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
2006-04-11 11:53:33 +00:00
|
|
|
if (pipe->tmp_page)
|
|
|
|
__free_page(pipe->tmp_page);
|
|
|
|
kfree(pipe);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2006-04-11 11:52:07 +00:00
|
|
|
/*
 * Detach and destroy the pipe attached to @inode.
 * Clears i_pipe so later opens see the pipe as gone (-ENOENT).
 */
void free_pipe_info(struct inode *inode)
{
	__free_pipe_info(inode->i_pipe);
	inode->i_pipe = NULL;
}
|
|
|
|
|
2006-03-26 09:37:24 +00:00
|
|
|
/* The kern_mount()ed pipefs instance all pipe inodes live on. */
static struct vfsmount *pipe_mnt __read_mostly;
|
2006-04-11 11:57:45 +00:00
|
|
|
|
2007-05-08 07:26:18 +00:00
|
|
|
/*
 * pipefs_dname() is called from d_path(): pipe dentries have no
 * real name, so synthesize the familiar "pipe:[<ino>]" form that
 * shows up in /proc/<pid>/fd symlinks.
 */
static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
				dentry->d_inode->i_ino);
}
|
|
|
|
|
2009-02-20 06:02:22 +00:00
|
|
|
/* Only d_dname is needed: pipefs dentries are anonymous. */
static const struct dentry_operations pipefs_dentry_operations = {
	.d_dname	= pipefs_dname,
};
|
|
|
|
|
|
|
|
/*
 * Allocate a fresh pipefs inode with a pipe attached.
 *
 * The new pipe starts with one reader and one writer (the anonymous
 * pipe(2) case) and uses the read/write file_operations. Returns the
 * inode, or NULL if either the inode or the pipe allocation failed.
 */
static struct inode * get_pipe_inode(void)
{
	struct inode *inode = new_inode(pipe_mnt->mnt_sb);
	struct pipe_inode_info *pipe;

	if (!inode)
		goto fail_inode;

	pipe = alloc_pipe_info(inode);
	if (!pipe)
		goto fail_iput;
	inode->i_pipe = pipe;

	pipe->readers = pipe->writers = 1;
	inode->i_fop = &rdwr_pipefifo_fops;

	/*
	 * Mark the inode dirty from the very beginning,
	 * that way it will never be moved to the dirty
	 * list because "mark_inode_dirty()" will think
	 * that it already _is_ on the dirty list.
	 */
	inode->i_state = I_DIRTY;
	inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;

	return inode;

fail_iput:
	iput(inode);

fail_inode:
	return NULL;
}
|
|
|
|
|
2008-07-24 04:29:40 +00:00
|
|
|
/*
 * Create the write-side struct file of a new pipe.
 *
 * Builds the whole chain: pipe inode -> anonymous pipefs dentry ->
 * struct file opened O_WRONLY (plus O_NONBLOCK from @flags).
 * Returns the file, or ERR_PTR(-ENFILE/-ENOMEM) on failure.
 *
 * Error unwinding is ordered carefully: once d_instantiate() has
 * bound the inode to the dentry, path_put() owns the inode drop
 * (err_dentry); before that we must iput() it ourselves (err_inode).
 * free_pipe_info() runs first in both paths so the inode is never
 * released with a live pipe attached.
 */
struct file *create_write_pipe(int flags)
{
	int err;
	struct inode *inode;
	struct file *f;
	struct path path;
	struct qstr name = { .name = "" };	/* pipefs dentries are nameless */

	err = -ENFILE;
	inode = get_pipe_inode();
	if (!inode)
		goto err;

	err = -ENOMEM;
	path.dentry = d_alloc(pipe_mnt->mnt_sb->s_root, &name);
	if (!path.dentry)
		goto err_inode;
	path.mnt = mntget(pipe_mnt);

	path.dentry->d_op = &pipefs_dentry_operations;
	d_instantiate(path.dentry, inode);

	err = -ENFILE;
	f = alloc_file(&path, FMODE_WRITE, &write_pipefifo_fops);
	if (!f)
		goto err_dentry;
	f->f_mapping = inode->i_mapping;

	f->f_flags = O_WRONLY | (flags & O_NONBLOCK);
	f->f_version = 0;

	return f;

 err_dentry:
	free_pipe_info(inode);
	path_put(&path);	/* drops dentry, mount, and the inode */
	return ERR_PTR(err);

 err_inode:
	free_pipe_info(inode);
	iput(inode);		/* inode not yet owned by a dentry */
 err:
	return ERR_PTR(err);
}
|
|
|
|
|
|
|
|
/*
 * Undo create_write_pipe() for a file that was never installed into
 * an fd table: destroy the pipe, drop the path references, and free
 * the struct file itself with put_filp().
 */
void free_write_pipe(struct file *f)
{
	free_pipe_info(f->f_dentry->d_inode);
	path_put(&f->f_path);
	put_filp(f);
}
|
|
|
|
|
2008-07-24 04:29:40 +00:00
|
|
|
/*
 * Create the read-side struct file of a pipe, sharing the dentry,
 * mount and inode of the already-created write side @wrf.
 *
 * Returns the new file opened O_RDONLY (plus O_NONBLOCK from
 * @flags), or ERR_PTR(-ENFILE).
 */
struct file *create_read_pipe(struct file *wrf, int flags)
{
	/* Grab pipe from the writer */
	struct file *f = alloc_file(&wrf->f_path, FMODE_READ,
				    &read_pipefifo_fops);
	if (!f)
		return ERR_PTR(-ENFILE);

	/* take our own reference on the shared dentry/mount pair */
	path_get(&wrf->f_path);
	f->f_flags = O_RDONLY | (flags & O_NONBLOCK);

	return f;
}
|
|
|
|
|
2008-07-24 04:29:30 +00:00
|
|
|
/*
 * Core of pipe(2)/pipe2(2): create both ends of a pipe and install
 * them into the caller's fd table.
 *
 * @fd:    out-parameter; fd[0] receives the read end, fd[1] the
 *         write end.
 * @flags: only O_CLOEXEC and O_NONBLOCK are accepted; anything else
 *         yields -EINVAL.
 *
 * Returns 0 on success or a negative errno. On failure nothing is
 * left installed: the goto chain unwinds, in reverse order of
 * acquisition, whichever of {fdr, read file, write file} already
 * exists.
 */
int do_pipe_flags(int *fd, int flags)
{
	struct file *fw, *fr;
	int error;
	int fdw, fdr;

	if (flags & ~(O_CLOEXEC | O_NONBLOCK))
		return -EINVAL;

	fw = create_write_pipe(flags);
	if (IS_ERR(fw))
		return PTR_ERR(fw);
	fr = create_read_pipe(fw, flags);
	error = PTR_ERR(fr);
	if (IS_ERR(fr))
		goto err_write_pipe;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_read_pipe;
	fdr = error;

	error = get_unused_fd_flags(flags);
	if (error < 0)
		goto err_fdr;
	fdw = error;

	audit_fd_pair(fdr, fdw);
	fd_install(fdr, fr);
	fd_install(fdw, fw);
	fd[0] = fdr;
	fd[1] = fdw;

	return 0;

 err_fdr:
	put_unused_fd(fdr);
 err_read_pipe:
	path_put(&fr->f_path);
	put_filp(fr);
 err_write_pipe:
	free_write_pipe(fw);
	return error;
}
|
|
|
|
|
2008-05-03 19:10:37 +00:00
|
|
|
/*
|
|
|
|
* sys_pipe() is the normal C calling standard for creating
|
|
|
|
* a pipe. It's not the way Unix traditionally does this, though.
|
|
|
|
*/
|
2009-01-14 13:14:34 +00:00
|
|
|
/*
 * pipe2(2): create a pipe and copy the two descriptors out to
 * userspace. If the copy_to_user() fails, both descriptors are
 * closed again and -EFAULT is returned.
 */
SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
{
	int error;
	int fd[2];

	error = do_pipe_flags(fd, flags);
	if (error)
		return error;

	if (copy_to_user(fildes, fd, sizeof(fd))) {
		sys_close(fd[0]);
		sys_close(fd[1]);
		return -EFAULT;
	}

	return 0;
}
|
|
|
|
|
2009-01-14 13:14:35 +00:00
|
|
|
/* pipe(2) is simply pipe2(2) with no flags. */
SYSCALL_DEFINE1(pipe, int __user *, fildes)
{
	return sys_pipe2(fildes, 0);
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
|
|
|
|
* pipefs should _never_ be mounted by userland - too much of security hassle,
|
|
|
|
* no real gain from having the whole whorehouse mounted. So we don't need
|
|
|
|
* any operations on the root directory. However, we need a non-trivial
|
|
|
|
* d_name - pipe: will go nicely and kill the special-casing in procfs.
|
|
|
|
*/
|
[PATCH] VFS: Permit filesystem to override root dentry on mount
Extend the get_sb() filesystem operation to take an extra argument that
permits the VFS to pass in the target vfsmount that defines the mountpoint.
The filesystem is then required to manually set the superblock and root dentry
pointers. For most filesystems, this should be done with simple_set_mnt()
which will set the superblock pointer and then set the root dentry to the
superblock's s_root (as per the old default behaviour).
The get_sb() op now returns an integer as there's now no need to return the
superblock pointer.
This patch permits a superblock to be implicitly shared amongst several mount
points, such as can be done with NFS to avoid potential inode aliasing. In
such a case, simple_set_mnt() would not be called, and instead the mnt_root
and mnt_sb would be set directly.
The patch also makes the following changes:
(*) the get_sb_*() convenience functions in the core kernel now take a vfsmount
pointer argument and return an integer, so most filesystems have to change
very little.
(*) If one of the convenience function is not used, then get_sb() should
normally call simple_set_mnt() to instantiate the vfsmount. This will
always return 0, and so can be tail-called from get_sb().
(*) generic_shutdown_super() now calls shrink_dcache_sb() to clean up the
dcache upon superblock destruction rather than shrink_dcache_anon().
This is required because the superblock may now have multiple trees that
aren't actually bound to s_root, but that still need to be cleaned up. The
currently called functions assume that the whole tree is rooted at s_root,
and that anonymous dentries are not the roots of trees which results in
dentries being left unculled.
However, with the way NFS superblock sharing are currently set to be
implemented, these assumptions are violated: the root of the filesystem is
simply a dummy dentry and inode (the real inode for '/' may well be
inaccessible), and all the vfsmounts are rooted on anonymous[*] dentries
with child trees.
[*] Anonymous until discovered from another tree.
(*) The documentation has been adjusted, including the additional bit of
changing ext2_* into foo_* in the documentation.
[akpm@osdl.org: convert ipath_fs, do other stuff]
Signed-off-by: David Howells <dhowells@redhat.com>
Acked-by: Al Viro <viro@zeniv.linux.org.uk>
Cc: Nathan Scott <nathans@sgi.com>
Cc: Roland Dreier <rolandd@cisco.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-06-23 09:02:57 +00:00
|
|
|
/*
 * Mount callback for pipefs: a trivial in-kernel pseudo filesystem,
 * set up via get_sb_pseudo() with the "pipe:" name prefix.
 */
static int pipefs_get_sb(struct file_system_type *fs_type,
			 int flags, const char *dev_name, void *data,
			 struct vfsmount *mnt)
{
	return get_sb_pseudo(fs_type, "pipe:", NULL, PIPEFS_MAGIC, mnt);
}
|
|
|
|
|
|
|
|
/* The pipefs filesystem type; kernel-internal, never user-mounted. */
static struct file_system_type pipe_fs_type = {
	.name		= "pipefs",
	.get_sb		= pipefs_get_sb,
	.kill_sb	= kill_anon_super,
};
|
|
|
|
|
|
|
|
/*
 * Register pipefs and create the internal mount (pipe_mnt) that all
 * pipe inodes live on. On mount failure the filesystem registration
 * is rolled back and the error is propagated.
 */
static int __init init_pipe_fs(void)
{
	int err;

	err = register_filesystem(&pipe_fs_type);
	if (err)
		return err;

	pipe_mnt = kern_mount(&pipe_fs_type);
	if (!IS_ERR(pipe_mnt))
		return 0;

	err = PTR_ERR(pipe_mnt);
	unregister_filesystem(&pipe_fs_type);
	return err;
}
|
|
|
|
|
|
|
|
/* Tear down what init_pipe_fs() set up. */
static void __exit exit_pipe_fs(void)
{
	unregister_filesystem(&pipe_fs_type);
	mntput(pipe_mnt);
}
|
|
|
|
|
|
|
|
/* Bring pipefs up early in boot (fs_initcall level). */
fs_initcall(init_pipe_fs);
module_exit(exit_pipe_fs);
|