mirror of https://github.com/torvalds/linux.git
fuse update for 5.1
-----BEGIN PGP SIGNATURE-----

iHUEABYIAB0WIQSQHSd0lITzzeNWNm3h3BK/laaZPAUCXIdqOwAKCRDh3BK/laaZ
PFRlAP0RZr7vDfGcZTXGApcIr63YDjzi8Gg1/Jhd0jrzLbKcdAD+P0d6bupWWwOl
yGjVxY9LkXNJiTI2Q+Equ7AgMYvDcQk=
=Lvcr
-----END PGP SIGNATURE-----

Merge tag 'fuse-update-5.1' of git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/fuse

Pull fuse updates from Miklos Szeredi:
 "Scalability and performance improvements, as well as minor bug fixes
  and cleanups"

* tag 'fuse-update-5.1' of git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/fuse: (25 commits)
  fuse: cache readdir calls if filesystem opts out of opendir
  fuse: support clients that don't implement 'opendir'
  fuse: lift bad inode checks into callers
  fuse: multiplex cached/direct_io file operations
  fuse add copy_file_range to direct io fops
  fuse: use iov_iter based generic splice helpers
  fuse: Switch to using async direct IO for FOPEN_DIRECT_IO
  fuse: use atomic64_t for khctr
  fuse: clean up aborted
  fuse: Protect ff->reserved_req via corresponding fi->lock
  fuse: Protect fi->nlookup with fi->lock
  fuse: Introduce fi->lock to protect write related fields
  fuse: Convert fc->attr_version into atomic64_t
  fuse: Add fuse_inode argument to fuse_prepare_release()
  fuse: Verify userspace asks to requeue interrupt that we really sent
  fuse: Do some refactoring in fuse_dev_do_write()
  fuse: Wake up req->waitq of only if not background
  fuse: Optimize request_end() by not taking fiq->waitq.lock
  fuse: Kill fasync only if interrupt is queued in queue_interrupt()
  fuse: Remove stale comment in end_requests()
  ...
This commit is contained in:
commit dfee9c257b
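The common thread across these patches is moving per-inode state out from under the global fc->lock: write-related fields gain a new per-inode fi->lock, while plain counters such as attr_version and khctr become atomic64_t and need no lock at all. A rough user-space sketch of that pattern, using hypothetical names, C11 atomics, and a pthread mutex as stand-ins for the kernel's atomic64_t and spinlock_t (a sketch only, not the kernel code):

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for struct fuse_conn: the version counter is a lock-free atomic. */
struct conn {
    atomic_uint_fast64_t attr_version;
};

/* Stand-in for struct fuse_inode: its own lock guards only its own fields. */
struct inode_state {
    pthread_mutex_t lock;
    uint64_t attr_version;
    uint64_t size;
};

/* Mirrors the new pattern: bump the global counter atomically, then update
 * per-inode state under the per-inode lock only (no global lock involved). */
static void update_size(struct conn *c, struct inode_state *ino, uint64_t size)
{
    uint64_t ver = atomic_fetch_add(&c->attr_version, 1) + 1;

    pthread_mutex_lock(&ino->lock);
    ino->attr_version = ver;
    ino->size = size;
    pthread_mutex_unlock(&ino->lock);
}

int main(void)
{
    struct conn c;
    struct inode_state ino = { .lock = PTHREAD_MUTEX_INITIALIZER };

    atomic_init(&c.attr_version, 1);
    update_size(&c, &ino, 4096);
    printf("attr_version=%llu size=%llu\n",
           (unsigned long long)ino.attr_version,
           (unsigned long long)ino.size);
    return 0;
}

In the kernel diff below the same shape appears as atomic64_inc_return(&fc->attr_version) performed under spin_lock(&fi->lock).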
@@ -35,7 +35,9 @@ static ssize_t fuse_conn_abort_write(struct file *file, const char __user *buf,
 {
     struct fuse_conn *fc = fuse_ctl_file_conn_get(file);
     if (fc) {
-        fuse_abort_conn(fc, true);
+        if (fc->abort_err)
+            fc->aborted = true;
+        fuse_abort_conn(fc);
         fuse_conn_put(fc);
     }
     return count;
@@ -141,10 +141,11 @@ static int cuse_open(struct inode *inode, struct file *file)

 static int cuse_release(struct inode *inode, struct file *file)
 {
+    struct fuse_inode *fi = get_fuse_inode(inode);
     struct fuse_file *ff = file->private_data;
     struct fuse_conn *fc = ff->fc;

-    fuse_sync_release(ff, file->f_flags);
+    fuse_sync_release(fi, ff, file->f_flags);
     fuse_conn_put(fc);

     return 0;
@@ -407,7 +408,7 @@ err_unlock:
 err_region:
     unregister_chrdev_region(devt, 1);
 err:
-    fuse_abort_conn(fc, false);
+    fuse_abort_conn(fc);
     goto out;
 }

@@ -586,7 +587,7 @@ static ssize_t cuse_class_abort_store(struct device *dev,
 {
     struct cuse_conn *cc = dev_get_drvdata(dev);

-    fuse_abort_conn(&cc->fc, false);
+    fuse_abort_conn(&cc->fc);
     return count;
 }
 static DEVICE_ATTR(abort, 0200, NULL, cuse_class_abort_store);

fs/fuse/dev.c | 115
@@ -251,17 +251,18 @@ static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
                      struct file *file)
 {
     struct fuse_req *req = NULL;
+    struct fuse_inode *fi = get_fuse_inode(file_inode(file));
     struct fuse_file *ff = file->private_data;

     do {
         wait_event(fc->reserved_req_waitq, ff->reserved_req);
-        spin_lock(&fc->lock);
+        spin_lock(&fi->lock);
         if (ff->reserved_req) {
             req = ff->reserved_req;
             ff->reserved_req = NULL;
             req->stolen_file = get_file(file);
         }
-        spin_unlock(&fc->lock);
+        spin_unlock(&fi->lock);
     } while (!req);

     return req;
@@ -273,16 +274,17 @@ static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
 static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
 {
     struct file *file = req->stolen_file;
+    struct fuse_inode *fi = get_fuse_inode(file_inode(file));
     struct fuse_file *ff = file->private_data;

     WARN_ON(req->max_pages);
-    spin_lock(&fc->lock);
+    spin_lock(&fi->lock);
     memset(req, 0, sizeof(*req));
     fuse_request_init(req, NULL, NULL, 0);
     BUG_ON(ff->reserved_req);
     ff->reserved_req = req;
     wake_up_all(&fc->reserved_req_waitq);
-    spin_unlock(&fc->lock);
+    spin_unlock(&fi->lock);
     fput(file);
 }

@@ -431,10 +433,16 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)

     if (test_and_set_bit(FR_FINISHED, &req->flags))
         goto put_request;
-
-    spin_lock(&fiq->waitq.lock);
-    list_del_init(&req->intr_entry);
-    spin_unlock(&fiq->waitq.lock);
+    /*
+     * test_and_set_bit() implies smp_mb() between bit
+     * changing and below intr_entry check. Pairs with
+     * smp_mb() from queue_interrupt().
+     */
+    if (!list_empty(&req->intr_entry)) {
+        spin_lock(&fiq->waitq.lock);
+        list_del_init(&req->intr_entry);
+        spin_unlock(&fiq->waitq.lock);
+    }
     WARN_ON(test_bit(FR_PENDING, &req->flags));
     WARN_ON(test_bit(FR_SENT, &req->flags));
     if (test_bit(FR_BACKGROUND, &req->flags)) {
@@ -462,27 +470,43 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
         fc->active_background--;
         flush_bg_queue(fc);
         spin_unlock(&fc->bg_lock);
+    } else {
+        /* Wake up waiter sleeping in request_wait_answer() */
+        wake_up(&req->waitq);
     }
-    wake_up(&req->waitq);

     if (req->end)
         req->end(fc, req);
 put_request:
     fuse_put_request(fc, req);
 }

-static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
+static int queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
 {
     spin_lock(&fiq->waitq.lock);
-    if (test_bit(FR_FINISHED, &req->flags)) {
+    /* Check for we've sent request to interrupt this req */
+    if (unlikely(!test_bit(FR_INTERRUPTED, &req->flags))) {
         spin_unlock(&fiq->waitq.lock);
-        return;
+        return -EINVAL;
     }
+
     if (list_empty(&req->intr_entry)) {
         list_add_tail(&req->intr_entry, &fiq->interrupts);
+        /*
+         * Pairs with smp_mb() implied by test_and_set_bit()
+         * from request_end().
+         */
+        smp_mb();
+        if (test_bit(FR_FINISHED, &req->flags)) {
+            list_del_init(&req->intr_entry);
+            spin_unlock(&fiq->waitq.lock);
+            return 0;
+        }
         wake_up_locked(&fiq->waitq);
+        kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
     }
     spin_unlock(&fiq->waitq.lock);
-    kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
+    return 0;
 }

 static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
@@ -1306,7 +1330,7 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
         goto err_unlock;

     if (!fiq->connected) {
-        err = (fc->aborted && fc->abort_err) ? -ECONNABORTED : -ENODEV;
+        err = fc->aborted ? -ECONNABORTED : -ENODEV;
         goto err_unlock;
     }

@@ -1353,7 +1377,7 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
     spin_lock(&fpq->lock);
     clear_bit(FR_LOCKED, &req->flags);
     if (!fpq->connected) {
-        err = (fc->aborted && fc->abort_err) ? -ECONNABORTED : -ENODEV;
+        err = fc->aborted ? -ECONNABORTED : -ENODEV;
         goto out_end;
     }
     if (err) {
@@ -1900,16 +1924,17 @@ static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
     struct fuse_req *req;
     struct fuse_out_header oh;

+    err = -EINVAL;
     if (nbytes < sizeof(struct fuse_out_header))
-        return -EINVAL;
+        goto out;

     err = fuse_copy_one(cs, &oh, sizeof(oh));
     if (err)
-        goto err_finish;
+        goto copy_finish;

     err = -EINVAL;
     if (oh.len != nbytes)
-        goto err_finish;
+        goto copy_finish;

     /*
      * Zero oh.unique indicates unsolicited notification message
@@ -1917,41 +1942,40 @@ static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
      */
     if (!oh.unique) {
         err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
-        return err ? err : nbytes;
+        goto out;
     }

     err = -EINVAL;
     if (oh.error <= -1000 || oh.error > 0)
-        goto err_finish;
+        goto copy_finish;

     spin_lock(&fpq->lock);
-    err = -ENOENT;
-    if (!fpq->connected)
-        goto err_unlock_pq;
+    req = NULL;
+    if (fpq->connected)
+        req = request_find(fpq, oh.unique & ~FUSE_INT_REQ_BIT);

-    req = request_find(fpq, oh.unique & ~FUSE_INT_REQ_BIT);
-    if (!req)
-        goto err_unlock_pq;
+    err = -ENOENT;
+    if (!req) {
+        spin_unlock(&fpq->lock);
+        goto copy_finish;
+    }

     /* Is it an interrupt reply ID? */
     if (oh.unique & FUSE_INT_REQ_BIT) {
         __fuse_get_request(req);
         spin_unlock(&fpq->lock);

-        err = -EINVAL;
-        if (nbytes != sizeof(struct fuse_out_header)) {
-            fuse_put_request(fc, req);
-            goto err_finish;
-        }
-
-        if (oh.error == -ENOSYS)
+        err = 0;
+        if (nbytes != sizeof(struct fuse_out_header))
+            err = -EINVAL;
+        else if (oh.error == -ENOSYS)
             fc->no_interrupt = 1;
         else if (oh.error == -EAGAIN)
-            queue_interrupt(&fc->iq, req);
+            err = queue_interrupt(&fc->iq, req);
+
         fuse_put_request(fc, req);

-        fuse_copy_finish(cs);
-        return nbytes;
+        goto copy_finish;
     }

     clear_bit(FR_SENT, &req->flags);
@@ -1977,14 +2001,12 @@ static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
     spin_unlock(&fpq->lock);

     request_end(fc, req);
-
+out:
     return err ? err : nbytes;

-err_unlock_pq:
-    spin_unlock(&fpq->lock);
-err_finish:
+copy_finish:
     fuse_copy_finish(cs);
-    return err;
+    goto out;
 }

 static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from)
@@ -2109,11 +2131,7 @@ static __poll_t fuse_dev_poll(struct file *file, poll_table *wait)
     return mask;
 }

-/*
- * Abort all requests on the given list (pending or processing)
- *
- * This function releases and reacquires fc->lock
- */
+/* Abort all requests on the given list (pending or processing) */
 static void end_requests(struct fuse_conn *fc, struct list_head *head)
 {
     while (!list_empty(head)) {
@@ -2159,7 +2177,7 @@ static void end_polls(struct fuse_conn *fc)
  * is OK, the request will in that case be removed from the list before we touch
  * it.
  */
-void fuse_abort_conn(struct fuse_conn *fc, bool is_abort)
+void fuse_abort_conn(struct fuse_conn *fc)
 {
     struct fuse_iqueue *fiq = &fc->iq;

@@ -2175,7 +2193,6 @@ void fuse_abort_conn(struct fuse_conn *fc, bool is_abort)
         fc->connected = 0;
         spin_unlock(&fc->bg_lock);

-        fc->aborted = is_abort;
         fuse_set_initialized(fc);
         list_for_each_entry(fud, &fc->devices, entry) {
             struct fuse_pqueue *fpq = &fud->pq;
@@ -2253,7 +2270,7 @@ int fuse_dev_release(struct inode *inode, struct file *file)
         /* Are we the last open device? */
         if (atomic_dec_and_test(&fc->dev_count)) {
             WARN_ON(fc->iq.fasync != NULL);
-            fuse_abort_conn(fc, false);
+            fuse_abort_conn(fc);
         }
         fuse_dev_free(fud);
     }

@@ -149,21 +149,6 @@ static void fuse_lookup_init(struct fuse_conn *fc, struct fuse_args *args,
     args->out.args[0].value = outarg;
 }

-u64 fuse_get_attr_version(struct fuse_conn *fc)
-{
-    u64 curr_version;
-
-    /*
-     * The spin lock isn't actually needed on 64bit archs, but we
-     * don't yet care too much about such optimizations.
-     */
-    spin_lock(&fc->lock);
-    curr_version = fc->attr_version;
-    spin_unlock(&fc->lock);
-
-    return curr_version;
-}
-
 /*
  * Check whether the dentry is still valid
  *
@@ -222,9 +207,9 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
             fuse_queue_forget(fc, forget, outarg.nodeid, 1);
             goto invalid;
         }
-        spin_lock(&fc->lock);
+        spin_lock(&fi->lock);
         fi->nlookup++;
-        spin_unlock(&fc->lock);
+        spin_unlock(&fi->lock);
     }
     kfree(forget);
     if (ret == -ENOMEM)
@@ -400,6 +385,7 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
     struct fuse_create_in inarg;
     struct fuse_open_out outopen;
     struct fuse_entry_out outentry;
+    struct fuse_inode *fi;
     struct fuse_file *ff;

     /* Userspace expects S_IFREG in create mode */
@@ -451,7 +437,7 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
               &outentry.attr, entry_attr_timeout(&outentry), 0);
     if (!inode) {
         flags &= ~(O_CREAT | O_EXCL | O_TRUNC);
-        fuse_sync_release(ff, flags);
+        fuse_sync_release(NULL, ff, flags);
         fuse_queue_forget(fc, forget, outentry.nodeid, 1);
         err = -ENOMEM;
         goto out_err;
@@ -462,7 +448,8 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
     fuse_dir_changed(dir);
     err = finish_open(file, entry, generic_file_open);
     if (err) {
-        fuse_sync_release(ff, flags);
+        fi = get_fuse_inode(inode);
+        fuse_sync_release(fi, ff, flags);
     } else {
         file->private_data = ff;
         fuse_finish_open(inode, file);
@@ -671,8 +658,8 @@ static int fuse_unlink(struct inode *dir, struct dentry *entry)
         struct inode *inode = d_inode(entry);
         struct fuse_inode *fi = get_fuse_inode(inode);

-        spin_lock(&fc->lock);
-        fi->attr_version = ++fc->attr_version;
+        spin_lock(&fi->lock);
+        fi->attr_version = atomic64_inc_return(&fc->attr_version);
         /*
          * If i_nlink == 0 then unlink doesn't make sense, yet this can
          * happen if userspace filesystem is careless. It would be
@@ -681,7 +668,7 @@ static int fuse_unlink(struct inode *dir, struct dentry *entry)
          */
         if (inode->i_nlink > 0)
             drop_nlink(inode);
-        spin_unlock(&fc->lock);
+        spin_unlock(&fi->lock);
         fuse_invalidate_attr(inode);
         fuse_dir_changed(dir);
         fuse_invalidate_entry_cache(entry);
@@ -825,10 +812,10 @@ static int fuse_link(struct dentry *entry, struct inode *newdir,
     if (!err) {
         struct fuse_inode *fi = get_fuse_inode(inode);

-        spin_lock(&fc->lock);
-        fi->attr_version = ++fc->attr_version;
+        spin_lock(&fi->lock);
+        fi->attr_version = atomic64_inc_return(&fc->attr_version);
         inc_nlink(inode);
-        spin_unlock(&fc->lock);
+        spin_unlock(&fi->lock);
         fuse_invalidate_attr(inode);
         fuse_update_ctime(inode);
     } else if (err == -EINTR) {
@@ -1356,15 +1343,14 @@ static void iattr_to_fattr(struct fuse_conn *fc, struct iattr *iattr,
  */
 void fuse_set_nowrite(struct inode *inode)
 {
-    struct fuse_conn *fc = get_fuse_conn(inode);
     struct fuse_inode *fi = get_fuse_inode(inode);

     BUG_ON(!inode_is_locked(inode));

-    spin_lock(&fc->lock);
+    spin_lock(&fi->lock);
     BUG_ON(fi->writectr < 0);
     fi->writectr += FUSE_NOWRITE;
-    spin_unlock(&fc->lock);
+    spin_unlock(&fi->lock);
     wait_event(fi->page_waitq, fi->writectr == FUSE_NOWRITE);
 }

@@ -1385,11 +1371,11 @@ static void __fuse_release_nowrite(struct inode *inode)

 void fuse_release_nowrite(struct inode *inode)
 {
-    struct fuse_conn *fc = get_fuse_conn(inode);
     struct fuse_inode *fi = get_fuse_inode(inode);

-    spin_lock(&fc->lock);
+    spin_lock(&fi->lock);
     __fuse_release_nowrite(inode);
-    spin_unlock(&fc->lock);
+    spin_unlock(&fi->lock);
 }

 static void fuse_setattr_fill(struct fuse_conn *fc, struct fuse_args *args,
@@ -1524,7 +1510,7 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
         goto error;
     }

-    spin_lock(&fc->lock);
+    spin_lock(&fi->lock);
     /* the kernel maintains i_mtime locally */
     if (trust_local_cmtime) {
         if (attr->ia_valid & ATTR_MTIME)
@@ -1542,10 +1528,10 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
         i_size_write(inode, outarg.attr.size);

     if (is_truncate) {
-        /* NOTE: this may release/reacquire fc->lock */
+        /* NOTE: this may release/reacquire fi->lock */
         __fuse_release_nowrite(inode);
     }
-    spin_unlock(&fc->lock);
+    spin_unlock(&fi->lock);

     /*
      * Only call invalidate_inode_pages2() after removing
fs/fuse/file.c | 342
@@ -19,8 +19,6 @@
 #include <linux/falloc.h>
 #include <linux/uio.h>

-static const struct file_operations fuse_direct_io_file_operations;
-
 static int fuse_send_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
               int opcode, struct fuse_open_out *outargp)
 {
@@ -64,9 +62,7 @@ struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
     RB_CLEAR_NODE(&ff->polled_node);
     init_waitqueue_head(&ff->poll_wait);

-    spin_lock(&fc->lock);
-    ff->kh = ++fc->khctr;
-    spin_unlock(&fc->lock);
+    ff->kh = atomic64_inc_return(&fc->khctr);

     return ff;
 }
@@ -94,7 +90,7 @@ static void fuse_file_put(struct fuse_file *ff, bool sync, bool isdir)
     if (refcount_dec_and_test(&ff->count)) {
         struct fuse_req *req = ff->reserved_req;

-        if (ff->fc->no_open && !isdir) {
+        if (isdir ? ff->fc->no_opendir : ff->fc->no_open) {
             /*
              * Drop the release request when client does not
              * implement 'open'
@@ -128,8 +124,9 @@ int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
         return -ENOMEM;

     ff->fh = 0;
-    ff->open_flags = FOPEN_KEEP_CACHE; /* Default for no-open */
-    if (!fc->no_open || isdir) {
+    /* Default for no-open */
+    ff->open_flags = FOPEN_KEEP_CACHE | (isdir ? FOPEN_CACHE_DIR : 0);
+    if (isdir ? !fc->no_opendir : !fc->no_open) {
         struct fuse_open_out outarg;
         int err;

@@ -138,11 +135,14 @@ int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
             ff->fh = outarg.fh;
             ff->open_flags = outarg.open_flags;

-        } else if (err != -ENOSYS || isdir) {
+        } else if (err != -ENOSYS) {
             fuse_file_free(ff);
             return err;
         } else {
-            fc->no_open = 1;
+            if (isdir)
+                fc->no_opendir = 1;
+            else
+                fc->no_open = 1;
         }
     }

@@ -159,17 +159,16 @@ EXPORT_SYMBOL_GPL(fuse_do_open);
 static void fuse_link_write_file(struct file *file)
 {
     struct inode *inode = file_inode(file);
-    struct fuse_conn *fc = get_fuse_conn(inode);
     struct fuse_inode *fi = get_fuse_inode(inode);
     struct fuse_file *ff = file->private_data;
     /*
      * file may be written through mmap, so chain it onto the
      * inodes's write_file list
      */
-    spin_lock(&fc->lock);
+    spin_lock(&fi->lock);
     if (list_empty(&ff->write_entry))
         list_add(&ff->write_entry, &fi->write_files);
-    spin_unlock(&fc->lock);
+    spin_unlock(&fi->lock);
 }

 void fuse_finish_open(struct inode *inode, struct file *file)
@@ -177,8 +176,6 @@ void fuse_finish_open(struct inode *inode, struct file *file)
     struct fuse_file *ff = file->private_data;
     struct fuse_conn *fc = get_fuse_conn(inode);

-    if (ff->open_flags & FOPEN_DIRECT_IO)
-        file->f_op = &fuse_direct_io_file_operations;
     if (!(ff->open_flags & FOPEN_KEEP_CACHE))
         invalidate_inode_pages2(inode->i_mapping);
     if (ff->open_flags & FOPEN_NONSEEKABLE)
@@ -186,10 +183,10 @@ void fuse_finish_open(struct inode *inode, struct file *file)
     if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) {
         struct fuse_inode *fi = get_fuse_inode(inode);

-        spin_lock(&fc->lock);
-        fi->attr_version = ++fc->attr_version;
+        spin_lock(&fi->lock);
+        fi->attr_version = atomic64_inc_return(&fc->attr_version);
         i_size_write(inode, 0);
-        spin_unlock(&fc->lock);
+        spin_unlock(&fi->lock);
         fuse_invalidate_attr(inode);
         if (fc->writeback_cache)
             file_update_time(file);
@@ -224,14 +221,20 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
     return err;
 }

-static void fuse_prepare_release(struct fuse_file *ff, int flags, int opcode)
+static void fuse_prepare_release(struct fuse_inode *fi, struct fuse_file *ff,
+                 int flags, int opcode)
 {
     struct fuse_conn *fc = ff->fc;
     struct fuse_req *req = ff->reserved_req;
     struct fuse_release_in *inarg = &req->misc.release.in;

+    /* Inode is NULL on error path of fuse_create_open() */
+    if (likely(fi)) {
+        spin_lock(&fi->lock);
+        list_del(&ff->write_entry);
+        spin_unlock(&fi->lock);
+    }
     spin_lock(&fc->lock);
-    list_del(&ff->write_entry);
     if (!RB_EMPTY_NODE(&ff->polled_node))
         rb_erase(&ff->polled_node, &fc->polled_files);
     spin_unlock(&fc->lock);
@@ -249,11 +252,12 @@ static void fuse_prepare_release(struct fuse_file *ff, int flags, int opcode)

 void fuse_release_common(struct file *file, bool isdir)
 {
+    struct fuse_inode *fi = get_fuse_inode(file_inode(file));
     struct fuse_file *ff = file->private_data;
     struct fuse_req *req = ff->reserved_req;
     int opcode = isdir ? FUSE_RELEASEDIR : FUSE_RELEASE;

-    fuse_prepare_release(ff, file->f_flags, opcode);
+    fuse_prepare_release(fi, ff, file->f_flags, opcode);

     if (ff->flock) {
         struct fuse_release_in *inarg = &req->misc.release.in;
@@ -295,10 +299,10 @@ static int fuse_release(struct inode *inode, struct file *file)
     return 0;
 }

-void fuse_sync_release(struct fuse_file *ff, int flags)
+void fuse_sync_release(struct fuse_inode *fi, struct fuse_file *ff, int flags)
 {
     WARN_ON(refcount_read(&ff->count) > 1);
-    fuse_prepare_release(ff, flags, FUSE_RELEASE);
+    fuse_prepare_release(fi, ff, flags, FUSE_RELEASE);
     /*
      * iput(NULL) is a no-op and since the refcount is 1 and everything's
      * synchronous, we are fine with not doing igrab() here"
@@ -329,6 +333,24 @@ u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
     return (u64) v0 + ((u64) v1 << 32);
 }

+static struct fuse_req *fuse_find_writeback(struct fuse_inode *fi,
+                        pgoff_t idx_from, pgoff_t idx_to)
+{
+    struct fuse_req *req;
+
+    list_for_each_entry(req, &fi->writepages, writepages_entry) {
+        pgoff_t curr_index;
+
+        WARN_ON(get_fuse_inode(req->inode) != fi);
+        curr_index = req->misc.write.in.offset >> PAGE_SHIFT;
+        if (idx_from < curr_index + req->num_pages &&
+            curr_index <= idx_to) {
+            return req;
+        }
+    }
+    return NULL;
+}
+
 /*
  * Check if any page in a range is under writeback
  *
@@ -338,24 +360,12 @@ u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
 static bool fuse_range_is_writeback(struct inode *inode, pgoff_t idx_from,
                     pgoff_t idx_to)
 {
-    struct fuse_conn *fc = get_fuse_conn(inode);
     struct fuse_inode *fi = get_fuse_inode(inode);
-    struct fuse_req *req;
-    bool found = false;
+    bool found;

-    spin_lock(&fc->lock);
-    list_for_each_entry(req, &fi->writepages, writepages_entry) {
-        pgoff_t curr_index;
-
-        BUG_ON(req->inode != inode);
-        curr_index = req->misc.write.in.offset >> PAGE_SHIFT;
-        if (idx_from < curr_index + req->num_pages &&
-            curr_index <= idx_to) {
-            found = true;
-            break;
-        }
-    }
-    spin_unlock(&fc->lock);
+    spin_lock(&fi->lock);
+    found = fuse_find_writeback(fi, idx_from, idx_to);
+    spin_unlock(&fi->lock);

     return found;
 }
@@ -598,9 +608,9 @@ static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
         struct fuse_conn *fc = get_fuse_conn(inode);
         struct fuse_inode *fi = get_fuse_inode(inode);

-        spin_lock(&fc->lock);
-        fi->attr_version = ++fc->attr_version;
-        spin_unlock(&fc->lock);
+        spin_lock(&fi->lock);
+        fi->attr_version = atomic64_inc_return(&fc->attr_version);
+        spin_unlock(&fi->lock);
     }

     io->iocb->ki_complete(io->iocb, res, 0);
@@ -675,13 +685,13 @@ static void fuse_read_update_size(struct inode *inode, loff_t size,
     struct fuse_conn *fc = get_fuse_conn(inode);
     struct fuse_inode *fi = get_fuse_inode(inode);

-    spin_lock(&fc->lock);
+    spin_lock(&fi->lock);
     if (attr_ver == fi->attr_version && size < inode->i_size &&
         !test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
-        fi->attr_version = ++fc->attr_version;
+        fi->attr_version = atomic64_inc_return(&fc->attr_version);
         i_size_write(inode, size);
     }
-    spin_unlock(&fc->lock);
+    spin_unlock(&fi->lock);
 }

 static void fuse_short_read(struct fuse_req *req, struct inode *inode,
@@ -919,7 +929,7 @@ out:
     return err;
 }

-static ssize_t fuse_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
+static ssize_t fuse_cache_read_iter(struct kiocb *iocb, struct iov_iter *to)
 {
     struct inode *inode = iocb->ki_filp->f_mapping->host;
     struct fuse_conn *fc = get_fuse_conn(inode);
@@ -996,13 +1006,13 @@ bool fuse_write_update_size(struct inode *inode, loff_t pos)
     struct fuse_inode *fi = get_fuse_inode(inode);
     bool ret = false;

-    spin_lock(&fc->lock);
-    fi->attr_version = ++fc->attr_version;
+    spin_lock(&fi->lock);
+    fi->attr_version = atomic64_inc_return(&fc->attr_version);
     if (pos > inode->i_size) {
         i_size_write(inode, pos);
         ret = true;
     }
-    spin_unlock(&fc->lock);
+    spin_unlock(&fi->lock);

     return ret;
 }
@@ -1125,9 +1135,6 @@ static ssize_t fuse_perform_write(struct kiocb *iocb,
     int err = 0;
     ssize_t res = 0;

-    if (is_bad_inode(inode))
-        return -EIO;
-
     if (inode->i_size < pos + iov_iter_count(ii))
         set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

@@ -1173,7 +1180,7 @@ static ssize_t fuse_perform_write(struct kiocb *iocb,
     return res > 0 ? res : err;
 }

-static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
+static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
     struct file *file = iocb->ki_filp;
     struct address_space *mapping = file->f_mapping;
@@ -1416,9 +1423,6 @@ static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
     ssize_t res;
     struct inode *inode = file_inode(io->iocb->ki_filp);

-    if (is_bad_inode(inode))
-        return -EIO;
-
     res = fuse_direct_io(io, iter, ppos, 0);

     fuse_invalidate_atime(inode);
@@ -1426,10 +1430,21 @@ static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
     return res;
 }

+static ssize_t fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter);
+
 static ssize_t fuse_direct_read_iter(struct kiocb *iocb, struct iov_iter *to)
 {
-    struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);
-    return __fuse_direct_read(&io, to, &iocb->ki_pos);
+    ssize_t res;
+
+    if (!is_sync_kiocb(iocb) && iocb->ki_flags & IOCB_DIRECT) {
+        res = fuse_direct_IO(iocb, to);
+    } else {
+        struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);
+
+        res = __fuse_direct_read(&io, to, &iocb->ki_pos);
+    }
+
+    return res;
 }

 static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
@@ -1438,14 +1453,17 @@ static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
     struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);
     ssize_t res;

-    if (is_bad_inode(inode))
-        return -EIO;
-
     /* Don't allow parallel writes to the same file */
     inode_lock(inode);
     res = generic_write_checks(iocb, from);
-    if (res > 0)
-        res = fuse_direct_io(&io, from, &iocb->ki_pos, FUSE_DIO_WRITE);
+    if (res > 0) {
+        if (!is_sync_kiocb(iocb) && iocb->ki_flags & IOCB_DIRECT) {
+            res = fuse_direct_IO(iocb, from);
+        } else {
+            res = fuse_direct_io(&io, from, &iocb->ki_pos,
+                         FUSE_DIO_WRITE);
+        }
+    }
     fuse_invalidate_attr(inode);
     if (res > 0)
         fuse_write_update_size(inode, iocb->ki_pos);
@@ -1454,6 +1472,34 @@ static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
     return res;
 }

+static ssize_t fuse_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+    struct file *file = iocb->ki_filp;
+    struct fuse_file *ff = file->private_data;
+
+    if (is_bad_inode(file_inode(file)))
+        return -EIO;
+
+    if (!(ff->open_flags & FOPEN_DIRECT_IO))
+        return fuse_cache_read_iter(iocb, to);
+    else
+        return fuse_direct_read_iter(iocb, to);
+}
+
+static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
+{
+    struct file *file = iocb->ki_filp;
+    struct fuse_file *ff = file->private_data;
+
+    if (is_bad_inode(file_inode(file)))
+        return -EIO;
+
+    if (!(ff->open_flags & FOPEN_DIRECT_IO))
+        return fuse_cache_write_iter(iocb, from);
+    else
+        return fuse_direct_write_iter(iocb, from);
+}
+
 static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
 {
     int i;
@@ -1481,20 +1527,18 @@ static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
     wake_up(&fi->page_waitq);
 }

-/* Called under fc->lock, may release and reacquire it */
+/* Called under fi->lock, may release and reacquire it */
 static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req,
                 loff_t size)
-__releases(fc->lock)
-__acquires(fc->lock)
+__releases(fi->lock)
+__acquires(fi->lock)
 {
+    struct fuse_req *aux, *next;
     struct fuse_inode *fi = get_fuse_inode(req->inode);
     struct fuse_write_in *inarg = &req->misc.write.in;
     __u64 data_size = req->num_pages * PAGE_SIZE;
     bool queued;

-    if (!fc->connected)
-        goto out_free;
-
     if (inarg->offset + data_size <= size) {
         inarg->size = data_size;
     } else if (inarg->offset < size) {
@@ -1505,28 +1549,40 @@ __acquires(fc->lock)
     }

     req->in.args[1].size = inarg->size;
-    fi->writectr++;
     queued = fuse_request_queue_background(fc, req);
-    WARN_ON(!queued);
+    /* Fails on broken connection only */
+    if (unlikely(!queued))
+        goto out_free;
+
+    fi->writectr++;
     return;

 out_free:
     fuse_writepage_finish(fc, req);
-    spin_unlock(&fc->lock);
+    spin_unlock(&fi->lock);
+
+    /* After fuse_writepage_finish() aux request list is private */
+    for (aux = req->misc.write.next; aux; aux = next) {
+        next = aux->misc.write.next;
+        aux->misc.write.next = NULL;
+        fuse_writepage_free(fc, aux);
+        fuse_put_request(fc, aux);
+    }
+
     fuse_writepage_free(fc, req);
     fuse_put_request(fc, req);
-    spin_lock(&fc->lock);
+    spin_lock(&fi->lock);
 }

 /*
  * If fi->writectr is positive (no truncate or fsync going on) send
  * all queued writepage requests.
  *
- * Called with fc->lock
+ * Called with fi->lock
  */
 void fuse_flush_writepages(struct inode *inode)
-__releases(fc->lock)
-__acquires(fc->lock)
+__releases(fi->lock)
+__acquires(fi->lock)
 {
     struct fuse_conn *fc = get_fuse_conn(inode);
     struct fuse_inode *fi = get_fuse_inode(inode);
@@ -1546,7 +1602,7 @@ static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_req *req)
     struct fuse_inode *fi = get_fuse_inode(inode);

     mapping_set_error(inode->i_mapping, req->out.h.error);
-    spin_lock(&fc->lock);
+    spin_lock(&fi->lock);
     while (req->misc.write.next) {
         struct fuse_conn *fc = get_fuse_conn(inode);
         struct fuse_write_in *inarg = &req->misc.write.in;
@@ -1583,7 +1639,7 @@ static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_req *req)
     }
     fi->writectr--;
     fuse_writepage_finish(fc, req);
-    spin_unlock(&fc->lock);
+    spin_unlock(&fi->lock);
     fuse_writepage_free(fc, req);
 }

@@ -1592,13 +1648,13 @@ static struct fuse_file *__fuse_write_file_get(struct fuse_conn *fc,
 {
     struct fuse_file *ff = NULL;

-    spin_lock(&fc->lock);
+    spin_lock(&fi->lock);
     if (!list_empty(&fi->write_files)) {
         ff = list_entry(fi->write_files.next, struct fuse_file,
                 write_entry);
         fuse_file_get(ff);
     }
-    spin_unlock(&fc->lock);
+    spin_unlock(&fi->lock);

     return ff;
 }
@@ -1669,11 +1725,11 @@ static int fuse_writepage_locked(struct page *page)
     inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
     inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP);

-    spin_lock(&fc->lock);
+    spin_lock(&fi->lock);
     list_add(&req->writepages_entry, &fi->writepages);
     list_add_tail(&req->list, &fi->queued_writes);
     fuse_flush_writepages(inode);
-    spin_unlock(&fc->lock);
+    spin_unlock(&fi->lock);

     end_page_writeback(page);

@@ -1722,21 +1778,27 @@ static void fuse_writepages_send(struct fuse_fill_wb_data *data)
 {
     struct fuse_req *req = data->req;
     struct inode *inode = data->inode;
-    struct fuse_conn *fc = get_fuse_conn(inode);
+    struct fuse_inode *fi = get_fuse_inode(inode);
     int num_pages = req->num_pages;
     int i;

     req->ff = fuse_file_get(data->ff);
-    spin_lock(&fc->lock);
+    spin_lock(&fi->lock);
     list_add_tail(&req->list, &fi->queued_writes);
     fuse_flush_writepages(inode);
-    spin_unlock(&fc->lock);
+    spin_unlock(&fi->lock);

     for (i = 0; i < num_pages; i++)
         end_page_writeback(data->orig_pages[i]);
 }

+/*
+ * First recheck under fi->lock if the offending offset is still under
+ * writeback. If yes, then iterate auxiliary write requests, to see if there's
+ * one already added for a page at this offset. If there's none, then insert
+ * this new request onto the auxiliary list, otherwise reuse the existing one by
+ * copying the new page contents over to the old temporary page.
+ */
 static bool fuse_writepage_in_flight(struct fuse_req *new_req,
                      struct page *page)
 {
@@ -1744,57 +1806,50 @@ static bool fuse_writepage_in_flight(struct fuse_req *new_req,
     struct fuse_inode *fi = get_fuse_inode(new_req->inode);
     struct fuse_req *tmp;
     struct fuse_req *old_req;
-    bool found = false;
-    pgoff_t curr_index;

-    BUG_ON(new_req->num_pages != 0);
+    WARN_ON(new_req->num_pages != 0);

-    spin_lock(&fc->lock);
+    spin_lock(&fi->lock);
     list_del(&new_req->writepages_entry);
-    list_for_each_entry(old_req, &fi->writepages, writepages_entry) {
-        BUG_ON(old_req->inode != new_req->inode);
-        curr_index = old_req->misc.write.in.offset >> PAGE_SHIFT;
-        if (curr_index <= page->index &&
-            page->index < curr_index + old_req->num_pages) {
-            found = true;
-            break;
-        }
-    }
-    if (!found) {
+    old_req = fuse_find_writeback(fi, page->index, page->index);
+    if (!old_req) {
         list_add(&new_req->writepages_entry, &fi->writepages);
-        goto out_unlock;
+        spin_unlock(&fi->lock);
+        return false;
     }

     new_req->num_pages = 1;
-    for (tmp = old_req; tmp != NULL; tmp = tmp->misc.write.next) {
-        BUG_ON(tmp->inode != new_req->inode);
+    for (tmp = old_req->misc.write.next; tmp; tmp = tmp->misc.write.next) {
+        pgoff_t curr_index;
+
+        WARN_ON(tmp->inode != new_req->inode);
         curr_index = tmp->misc.write.in.offset >> PAGE_SHIFT;
-        if (tmp->num_pages == 1 &&
-            curr_index == page->index) {
-            old_req = tmp;
+        if (curr_index == page->index) {
+            WARN_ON(tmp->num_pages != 1);
+            WARN_ON(!test_bit(FR_PENDING, &tmp->flags));
+            swap(tmp->pages[0], new_req->pages[0]);
+            break;
         }
     }

-    if (old_req->num_pages == 1 && test_bit(FR_PENDING, &old_req->flags)) {
-        struct backing_dev_info *bdi = inode_to_bdi(page->mapping->host);
+    if (!tmp) {
+        new_req->misc.write.next = old_req->misc.write.next;
+        old_req->misc.write.next = new_req;
+    }

-        copy_highpage(old_req->pages[0], page);
-        spin_unlock(&fc->lock);
+    spin_unlock(&fi->lock);
+
+    if (tmp) {
+        struct backing_dev_info *bdi = inode_to_bdi(new_req->inode);

         dec_wb_stat(&bdi->wb, WB_WRITEBACK);
         dec_node_page_state(new_req->pages[0], NR_WRITEBACK_TEMP);
         wb_writeout_inc(&bdi->wb);
         fuse_writepage_free(fc, new_req);
         fuse_request_free(new_req);
-        goto out;
-    } else {
-        new_req->misc.write.next = old_req->misc.write.next;
-        old_req->misc.write.next = new_req;
-    }
-out_unlock:
-    spin_unlock(&fc->lock);
-out:
-    return found;
+    }
+
+    return true;
 }

 static int fuse_writepages_fill(struct page *page,
@@ -1803,6 +1858,7 @@ static int fuse_writepages_fill(struct page *page,
     struct fuse_fill_wb_data *data = _data;
     struct fuse_req *req = data->req;
     struct inode *inode = data->inode;
+    struct fuse_inode *fi = get_fuse_inode(inode);
     struct fuse_conn *fc = get_fuse_conn(inode);
     struct page *tmp_page;
     bool is_writeback;
@@ -1873,9 +1929,9 @@ static int fuse_writepages_fill(struct page *page,
         req->end = fuse_writepage_end;
         req->inode = inode;

-        spin_lock(&fc->lock);
+        spin_lock(&fi->lock);
         list_add(&req->writepages_entry, &fi->writepages);
-        spin_unlock(&fc->lock);
+        spin_unlock(&fi->lock);

         data->req = req;
     }
@@ -1898,12 +1954,12 @@ static int fuse_writepages_fill(struct page *page,
     data->orig_pages[req->num_pages] = page;

     /*
-     * Protected by fc->lock against concurrent access by
+     * Protected by fi->lock against concurrent access by
      * fuse_page_is_writeback().
      */
-    spin_lock(&fc->lock);
+    spin_lock(&fi->lock);
     req->num_pages++;
-    spin_unlock(&fc->lock);
+    spin_unlock(&fi->lock);

 out_unlock:
     unlock_page(page);
@@ -2087,6 +2143,18 @@ static const struct vm_operations_struct fuse_file_vm_ops = {

 static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
 {
+    struct fuse_file *ff = file->private_data;
+
+    if (ff->open_flags & FOPEN_DIRECT_IO) {
+        /* Can't provide the coherency needed for MAP_SHARED */
+        if (vma->vm_flags & VM_MAYSHARE)
+            return -ENODEV;
+
+        invalidate_inode_pages2(file->f_mapping);
+
+        return generic_file_mmap(file, vma);
+    }
+
     if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
         fuse_link_write_file(file);

@@ -2095,17 +2163,6 @@ static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
     return 0;
 }

-static int fuse_direct_mmap(struct file *file, struct vm_area_struct *vma)
-{
-    /* Can't provide the coherency needed for MAP_SHARED */
-    if (vma->vm_flags & VM_MAYSHARE)
-        return -ENODEV;
-
-    invalidate_inode_pages2(file->f_mapping);
-
-    return generic_file_mmap(file, vma);
-}
-
 static int convert_fuse_file_lock(struct fuse_conn *fc,
                   const struct fuse_file_lock *ffl,
                   struct file_lock *fl)
@@ -3114,6 +3171,7 @@ static const struct file_operations fuse_file_operations = {
     .lock           = fuse_file_lock,
     .flock          = fuse_file_flock,
     .splice_read    = generic_file_splice_read,
+    .splice_write   = iter_file_splice_write,
     .unlocked_ioctl = fuse_file_ioctl,
     .compat_ioctl   = fuse_file_compat_ioctl,
     .poll           = fuse_file_poll,
@@ -3121,24 +3179,6 @@ static const struct file_operations fuse_file_operations = {
     .copy_file_range = fuse_copy_file_range,
 };

-static const struct file_operations fuse_direct_io_file_operations = {
-    .llseek         = fuse_file_llseek,
-    .read_iter      = fuse_direct_read_iter,
-    .write_iter     = fuse_direct_write_iter,
-    .mmap           = fuse_direct_mmap,
-    .open           = fuse_open,
-    .flush          = fuse_flush,
-    .release        = fuse_release,
-    .fsync          = fuse_fsync,
-    .lock           = fuse_file_lock,
-    .flock          = fuse_file_flock,
-    .unlocked_ioctl = fuse_file_ioctl,
-    .compat_ioctl   = fuse_file_compat_ioctl,
-    .poll           = fuse_file_poll,
-    .fallocate      = fuse_file_fallocate,
-    /* no splice_read */
-};
-
 static const struct address_space_operations fuse_file_aops = {
     .readpage       = fuse_readpage,
     .writepage      = fuse_writepage,
@@ -96,7 +96,7 @@ struct fuse_inode {
     union {
         /* Write related fields (regular file only) */
         struct {
-            /* Files usable in writepage. Protected by fc->lock */
+            /* Files usable in writepage. Protected by fi->lock */
             struct list_head write_files;

             /* Writepages pending on truncate or fsync */
@@ -144,6 +144,9 @@ struct fuse_inode {

     /** Lock for serializing lookup and readdir for back compatibility*/
     struct mutex mutex;
+
+    /** Lock to protect write related fields */
+    spinlock_t lock;
 };

 /** FUSE inode state bits */
@@ -163,7 +166,10 @@ struct fuse_file {
     /** Fuse connection for this file */
     struct fuse_conn *fc;

-    /** Request reserved for flush and release */
+    /*
+     * Request reserved for flush and release.
+     * Modified under relative fuse_inode::lock.
+     */
     struct fuse_req *reserved_req;

     /** Kernel file handle guaranteed to be unique */
@@ -538,7 +544,7 @@ struct fuse_conn {
     struct fuse_iqueue iq;

     /** The next unique kernel file handle */
-    u64 khctr;
+    atomic64_t khctr;

     /** rbtree of fuse_files waiting for poll events indexed by ph */
     struct rb_root polled_files;
@@ -624,6 +630,9 @@ struct fuse_conn {
     /** Is open/release not implemented by fs? */
     unsigned no_open:1;

+    /** Is opendir/releasedir not implemented by fs? */
+    unsigned no_opendir:1;
+
     /** Is fsync not implemented by fs? */
     unsigned no_fsync:1;

@@ -730,7 +739,7 @@ struct fuse_conn {
     struct fuse_req *destroy_req;

     /** Version counter for attribute changes */
-    u64 attr_version;
+    atomic64_t attr_version;

     /** Called on final put */
     void (*release)(struct fuse_conn *);
@@ -770,6 +779,11 @@ static inline int invalid_nodeid(u64 nodeid)
     return !nodeid || nodeid == FUSE_ROOT_ID;
 }

+static inline u64 fuse_get_attr_version(struct fuse_conn *fc)
+{
+    return atomic64_read(&fc->attr_version);
+}
+
 /** Device operations */
 extern const struct file_operations fuse_dev_operations;

@@ -817,7 +831,7 @@ struct fuse_file *fuse_file_alloc(struct fuse_conn *fc);
 void fuse_file_free(struct fuse_file *ff);
 void fuse_finish_open(struct inode *inode, struct file *file);

-void fuse_sync_release(struct fuse_file *ff, int flags);
+void fuse_sync_release(struct fuse_inode *fi, struct fuse_file *ff, int flags);

 /**
  * Send RELEASE or RELEASEDIR request
@@ -936,7 +950,7 @@ void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req);
 bool fuse_request_queue_background(struct fuse_conn *fc, struct fuse_req *req);

 /* Abort all requests */
-void fuse_abort_conn(struct fuse_conn *fc, bool is_abort);
+void fuse_abort_conn(struct fuse_conn *fc);
 void fuse_wait_aborted(struct fuse_conn *fc);

 /**
@@ -1000,8 +1014,6 @@ void fuse_flush_writepages(struct inode *inode);
 void fuse_set_nowrite(struct inode *inode);
 void fuse_release_nowrite(struct inode *inode);

-u64 fuse_get_attr_version(struct fuse_conn *fc);
-
 /**
  * File-system tells the kernel to invalidate cache for the given node id.
  */
@@ -97,6 +97,7 @@ static struct inode *fuse_alloc_inode(struct super_block *sb)
     fi->orig_ino = 0;
     fi->state = 0;
     mutex_init(&fi->mutex);
+    spin_lock_init(&fi->lock);
     fi->forget = fuse_alloc_forget();
     if (!fi->forget) {
         kmem_cache_free(fuse_inode_cachep, inode);
@@ -163,7 +164,9 @@ void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
     struct fuse_conn *fc = get_fuse_conn(inode);
     struct fuse_inode *fi = get_fuse_inode(inode);

-    fi->attr_version = ++fc->attr_version;
+    lockdep_assert_held(&fi->lock);
+
+    fi->attr_version = atomic64_inc_return(&fc->attr_version);
     fi->i_time = attr_valid;
     WRITE_ONCE(fi->inval_mask, 0);

@@ -209,10 +212,10 @@ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
     loff_t oldsize;
     struct timespec64 old_mtime;

-    spin_lock(&fc->lock);
+    spin_lock(&fi->lock);
     if ((attr_version != 0 && fi->attr_version > attr_version) ||
         test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
-        spin_unlock(&fc->lock);
+        spin_unlock(&fi->lock);
         return;
     }

@@ -227,7 +230,7 @@ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
      */
     if (!is_wb || !S_ISREG(inode->i_mode))
         i_size_write(inode, attr->size);
-    spin_unlock(&fc->lock);
+    spin_unlock(&fi->lock);

     if (!is_wb && S_ISREG(inode->i_mode)) {
         bool inval = false;
@@ -322,9 +325,9 @@ struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
     }

     fi = get_fuse_inode(inode);
-    spin_lock(&fc->lock);
+    spin_lock(&fi->lock);
     fi->nlookup++;
-    spin_unlock(&fc->lock);
+    spin_unlock(&fi->lock);
     fuse_change_attributes(inode, attr, attr_valid, attr_version);

     return inode;
@@ -376,7 +379,7 @@ void fuse_unlock_inode(struct inode *inode, bool locked)

 static void fuse_umount_begin(struct super_block *sb)
 {
-    fuse_abort_conn(get_fuse_conn_super(sb), false);
+    fuse_abort_conn(get_fuse_conn_super(sb));
 }

 static void fuse_send_destroy(struct fuse_conn *fc)
@@ -619,12 +622,12 @@ void fuse_conn_init(struct fuse_conn *fc, struct user_namespace *user_ns)
     atomic_set(&fc->num_waiting, 0);
     fc->max_background = FUSE_DEFAULT_MAX_BACKGROUND;
     fc->congestion_threshold = FUSE_DEFAULT_CONGESTION_THRESHOLD;
-    fc->khctr = 0;
+    atomic64_set(&fc->khctr, 0);
     fc->polled_files = RB_ROOT;
     fc->blocked = 0;
     fc->initialized = 0;
     fc->connected = 1;
-    fc->attr_version = 1;
+    atomic64_set(&fc->attr_version, 1);
     get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key));
     fc->pid_ns = get_pid_ns(task_active_pid_ns(current));
     fc->user_ns = get_user_ns(user_ns);
@@ -969,7 +972,8 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
         FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO |
         FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT |
         FUSE_PARALLEL_DIROPS | FUSE_HANDLE_KILLPRIV | FUSE_POSIX_ACL |
-        FUSE_ABORT_ERROR | FUSE_MAX_PAGES | FUSE_CACHE_SYMLINKS;
+        FUSE_ABORT_ERROR | FUSE_MAX_PAGES | FUSE_CACHE_SYMLINKS |
+        FUSE_NO_OPENDIR_SUPPORT;
     req->in.h.opcode = FUSE_INIT;
     req->in.numargs = 1;
     req->in.args[0].size = sizeof(*arg);
@@ -1242,7 +1246,7 @@ static void fuse_sb_destroy(struct super_block *sb)
     if (fc) {
         fuse_send_destroy(fc);

-        fuse_abort_conn(fc, false);
+        fuse_abort_conn(fc);
         fuse_wait_aborted(fc);

         down_write(&fc->killsb);
@@ -213,9 +213,9 @@ retry:
     }

     fi = get_fuse_inode(inode);
-    spin_lock(&fc->lock);
+    spin_lock(&fi->lock);
     fi->nlookup++;
-    spin_unlock(&fc->lock);
+    spin_unlock(&fi->lock);

     forget_all_cached_acls(inode);
     fuse_change_attributes(inode, &o->attr,
@@ -122,6 +122,9 @@
  *  - add FOPEN_CACHE_DIR
  *  - add FUSE_MAX_PAGES, add max_pages to init_out
  *  - add FUSE_CACHE_SYMLINKS
+ *
+ *  7.29
+ *  - add FUSE_NO_OPENDIR_SUPPORT flag
  */

 #ifndef _LINUX_FUSE_H
@@ -157,7 +160,7 @@
 #define FUSE_KERNEL_VERSION 7

 /** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 28
+#define FUSE_KERNEL_MINOR_VERSION 29

 /** The node ID of the root inode */
 #define FUSE_ROOT_ID 1
@@ -259,6 +262,7 @@ struct fuse_file_lock {
  * FUSE_ABORT_ERROR: reading the device after abort returns ECONNABORTED
  * FUSE_MAX_PAGES: init_out.max_pages contains the max number of req pages
  * FUSE_CACHE_SYMLINKS: cache READLINK responses
+ * FUSE_NO_OPENDIR_SUPPORT: kernel supports zero-message opendir
  */
 #define FUSE_ASYNC_READ     (1 << 0)
 #define FUSE_POSIX_LOCKS    (1 << 1)
@@ -284,6 +288,7 @@ struct fuse_file_lock {
 #define FUSE_ABORT_ERROR    (1 << 21)
 #define FUSE_MAX_PAGES      (1 << 22)
 #define FUSE_CACHE_SYMLINKS (1 << 23)
+#define FUSE_NO_OPENDIR_SUPPORT (1 << 24)

 /**
  * CUSE INIT request/reply flags
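One more structural change worth calling out: instead of installing a separate fuse_direct_io_file_operations at open time, the read and write entry points now dispatch per call on FOPEN_DIRECT_IO (see the new fuse_file_read_iter/fuse_file_write_iter in the fs/fuse/file.c hunks above). A simplified, self-contained sketch of that dispatch shape, using made-up types and handler names rather than the kernel API:

#include <stdio.h>

#define FOPEN_DIRECT_IO 0x1   /* illustrative flag value, defined locally */

struct file_ctx {
    unsigned open_flags;
};

static long cache_read(struct file_ctx *f, char *buf, long len)
{
    (void)f; (void)buf;
    puts("read via page-cache path");
    return len;
}

static long direct_read(struct file_ctx *f, char *buf, long len)
{
    (void)f; (void)buf;
    puts("read via direct-io path");
    return len;
}

/* Single entry point choosing the backend per call, like the multiplexed
 * fuse_file_read_iter() replacing the swapped-in file_operations table. */
static long file_read(struct file_ctx *f, char *buf, long len)
{
    if (f->open_flags & FOPEN_DIRECT_IO)
        return direct_read(f, buf, len);
    return cache_read(f, buf, len);
}

int main(void)
{
    char buf[16];
    struct file_ctx cached = { .open_flags = 0 };
    struct file_ctx direct = { .open_flags = FOPEN_DIRECT_IO };

    file_read(&cached, buf, sizeof(buf));
    file_read(&direct, buf, sizeof(buf));
    return 0;
}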