commit 1ce9d792e8
Merge tag 'ceph-for-5.19-rc7' of https://github.com/ceph/ceph-client

Pull ceph fix from Ilya Dryomov:

 "A folio locking fixup that Xiubo and David cooperated on, marked for
  stable. Most of it is in netfs but I picked it up into ceph tree on
  agreement with David"

* tag 'ceph-for-5.19-rc7' of https://github.com/ceph/ceph-client:
  netfs: do not unlock and put the folio twice
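The change below switches the ->check_write_begin() hook from taking struct folio *folio to struct folio **foliop, so that when the hook unlocks and puts the folio itself it can clear the caller's pointer and the caller will not release the folio a second time. The snippet that follows is a minimal, self-contained userspace model of that convention, not kernel code; check_before_write() and struct resource are invented names used purely for illustration.

/*
 * Userspace model of the "callee may release the resource and signals
 * that by clearing the caller's pointer" convention used by the fix.
 */
#include <stdio.h>
#include <stdlib.h>

struct resource {
	int id;
};

/*
 * Stand-in for ->check_write_begin(): receives a pointer to the caller's
 * resource pointer.  If it releases the resource itself, it clears the
 * pointer so the caller knows not to release it again.
 */
static int check_before_write(struct resource **resp, int conflict)
{
	if (conflict) {
		free(*resp);		/* release it ourselves... */
		*resp = NULL;		/* ...and tell the caller */
		return 0;		/* 0 + NULL pointer means "please retry" */
	}
	return 0;			/* 0 + pointer still set means "go ahead" */
}

int main(void)
{
	struct resource *res;
	int attempt = 0;
	int ret;

retry:
	res = malloc(sizeof(*res));
	if (!res)
		return 1;
	res->id = ++attempt;

	/* Simulate a conflict on the first attempt only. */
	ret = check_before_write(&res, attempt == 1);
	if (ret < 0)
		goto error;
	if (!res)
		goto retry;		/* hook consumed the resource: grab a new one */

	printf("writing with resource %d\n", res->id);
	free(res);
	return 0;

error:
	/* Release only if the hook did not already do it (the point of the fix). */
	if (res)
		free(res);
	return 1;
}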
--- a/Documentation/filesystems/netfs_library.rst
+++ b/Documentation/filesystems/netfs_library.rst
@@ -301,7 +301,7 @@ through which it can issue requests and negotiate::
 		void (*issue_read)(struct netfs_io_subrequest *subreq);
 		bool (*is_still_valid)(struct netfs_io_request *rreq);
 		int (*check_write_begin)(struct file *file, loff_t pos, unsigned len,
-					  struct folio *folio, void **_fsdata);
+					  struct folio **foliop, void **_fsdata);
 		void (*done)(struct netfs_io_request *rreq);
 	};
 
@@ -381,8 +381,10 @@ The operations are as follows:
    allocated/grabbed the folio to be modified to allow the filesystem to flush
    conflicting state before allowing it to be modified.
 
-   It should return 0 if everything is now fine, -EAGAIN if the folio should be
-   regrabbed and any other error code to abort the operation.
+   It may unlock and discard the folio it was given and set the caller's folio
+   pointer to NULL. It should return 0 if everything is now fine (``*foliop``
+   left set) or the op should be retried (``*foliop`` cleared) and any other
+   error code to abort the operation.
 
  * ``done``
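To make the documented rules concrete, here is a sketch of what a filesystem's ->check_write_begin() might look like under the new signature. This is a hypothetical example, not code from any in-tree filesystem; my_has_conflict() and my_flush_conflicts() are invented placeholders for whatever conflict handling the filesystem actually needs (compare the ceph hunk further down).

static int my_check_write_begin(struct file *file, loff_t pos, unsigned len,
				struct folio **foliop, void **_fsdata)
{
	struct inode *inode = file_inode(file);

	if (!my_has_conflict(inode, pos, len))
		return 0;			/* *foliop left set: go ahead */

	/*
	 * Drop the folio ourselves and signal that to netfs by clearing the
	 * caller's pointer.  Returning 0 with *foliop == NULL asks the caller
	 * to grab the folio again and retry; any error aborts the operation.
	 */
	folio_unlock(*foliop);
	folio_put(*foliop);
	*foliop = NULL;

	return my_flush_conflicts(inode, pos, len);
}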
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -375,7 +375,7 @@ static int afs_begin_cache_operation(struct netfs_io_request *rreq)
 }
 
 static int afs_check_write_begin(struct file *file, loff_t pos, unsigned len,
-				 struct folio *folio, void **_fsdata)
+				 struct folio **foliop, void **_fsdata)
 {
 	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
 
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -63,7 +63,7 @@
 	(CONGESTION_ON_THRESH(congestion_kb) >> 2))
 
 static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned int len,
-					struct folio *folio, void **_fsdata);
+					struct folio **foliop, void **_fsdata);
 
 static inline struct ceph_snap_context *page_snap_context(struct page *page)
 {
@@ -1288,18 +1288,19 @@ ceph_find_incompatible(struct page *page)
 }
 
 static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned int len,
-					struct folio *folio, void **_fsdata)
+					struct folio **foliop, void **_fsdata)
 {
 	struct inode *inode = file_inode(file);
 	struct ceph_inode_info *ci = ceph_inode(inode);
 	struct ceph_snap_context *snapc;
 
-	snapc = ceph_find_incompatible(folio_page(folio, 0));
+	snapc = ceph_find_incompatible(folio_page(*foliop, 0));
 	if (snapc) {
 		int r;
 
-		folio_unlock(folio);
-		folio_put(folio);
+		folio_unlock(*foliop);
+		folio_put(*foliop);
+		*foliop = NULL;
 		if (IS_ERR(snapc))
 			return PTR_ERR(snapc);
 
--- a/fs/netfs/buffered_read.c
+++ b/fs/netfs/buffered_read.c
@@ -319,8 +319,9 @@ zero_out:
  * conflicting writes once the folio is grabbed and locked. It is passed a
  * pointer to the fsdata cookie that gets returned to the VM to be passed to
  * write_end. It is permitted to sleep. It should return 0 if the request
- * should go ahead; unlock the folio and return -EAGAIN to cause the folio to
- * be regot; or return an error.
+ * should go ahead or it may return an error. It may also unlock and put the
+ * folio, provided it sets ``*foliop`` to NULL, in which case a return of 0
+ * will cause the folio to be re-got and the process to be retried.
  *
  * The calling netfs must initialise a netfs context contiguous to the vfs
  * inode before calling this.
@@ -348,13 +349,13 @@ retry:
 
 	if (ctx->ops->check_write_begin) {
 		/* Allow the netfs (eg. ceph) to flush conflicts. */
-		ret = ctx->ops->check_write_begin(file, pos, len, folio, _fsdata);
+		ret = ctx->ops->check_write_begin(file, pos, len, &folio, _fsdata);
 		if (ret < 0) {
 			trace_netfs_failure(NULL, NULL, ret, netfs_fail_check_write_begin);
-			if (ret == -EAGAIN)
-				goto retry;
 			goto error;
 		}
+		if (!folio)
+			goto retry;
 	}
 
 	if (folio_test_uptodate(folio))
@@ -416,8 +417,10 @@ have_folio_no_wait:
 error_put:
 	netfs_put_request(rreq, false, netfs_rreq_trace_put_failed);
 error:
-	folio_unlock(folio);
-	folio_put(folio);
+	if (folio) {
+		folio_unlock(folio);
+		folio_put(folio);
+	}
 	_leave(" = %d", ret);
 	return ret;
 }
--- a/include/linux/netfs.h
+++ b/include/linux/netfs.h
@@ -214,7 +214,7 @@ struct netfs_request_ops {
 	void (*issue_read)(struct netfs_io_subrequest *subreq);
 	bool (*is_still_valid)(struct netfs_io_request *rreq);
 	int (*check_write_begin)(struct file *file, loff_t pos, unsigned len,
-				 struct folio *folio, void **_fsdata);
+				 struct folio **foliop, void **_fsdata);
 	void (*done)(struct netfs_io_request *rreq);
 };
 
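For completeness, a filesystem adopting the updated prototype wires the hook into its netfs_request_ops table much as before; only the callback's signature changes. The table below is hypothetical (the my_* handlers, including my_check_write_begin from the earlier sketch, are placeholders), using the fields shown in the hunk above.

static const struct netfs_request_ops my_netfs_ops = {
	.issue_read		= my_issue_read,
	.is_still_valid		= my_is_still_valid,
	/* Must now match the **foliop prototype from the hunk above. */
	.check_write_begin	= my_check_write_begin,
	.done			= my_done,
};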