/*
 * Ceph cache definitions.
 *
 * Copyright (C) 2013 by Adfin Solutions, Inc. All Rights Reserved.
 * Written by Milosz Tanski (milosz@adfin.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA 02111-1301 USA
 *
 */

#include "super.h"
#include "cache.h"

struct ceph_aux_inode {
        u64 version;
        struct timespec mtime;
        loff_t size;
};

struct fscache_netfs ceph_cache_netfs = {
        .name = "ceph",
        .version = 0,
};

static DEFINE_MUTEX(ceph_fscache_lock);
static LIST_HEAD(ceph_fscache_list);

struct ceph_fscache_entry {
        struct list_head list;
        struct fscache_cookie *fscache;
        struct ceph_fsid fsid;
        size_t uniq_len;
        char uniquifier[0];
};

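/*
 * Index key for the per-filesystem cookie: the cluster fsid, optionally
 * followed by the "fsc=..." mount-option uniquifier, so that multiple mounts
 * of the same cluster can keep separate caches.
 */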
static uint16_t ceph_fscache_session_get_key(const void *cookie_netfs_data,
                                             void *buffer, uint16_t maxbuf)
{
        const struct ceph_fs_client* fsc = cookie_netfs_data;
        const char *fscache_uniq = fsc->mount_options->fscache_uniq;
        uint16_t fsid_len, uniq_len;

        fsid_len = sizeof(fsc->client->fsid);
        uniq_len = fscache_uniq ? strlen(fscache_uniq) : 0;
        if (fsid_len + uniq_len > maxbuf)
                return 0;

        memcpy(buffer, &fsc->client->fsid, fsid_len);
        if (uniq_len)
                memcpy(buffer + fsid_len, fscache_uniq, uniq_len);

        return fsid_len + uniq_len;
}

static const struct fscache_cookie_def ceph_fscache_fsid_object_def = {
        .name = "CEPH.fsid",
        .type = FSCACHE_COOKIE_TYPE_INDEX,
        .get_key = ceph_fscache_session_get_key,
};

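/* Register and unregister the "ceph" netfs definition with FS-Cache. */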
int ceph_fscache_register(void)
{
        return fscache_register_netfs(&ceph_cache_netfs);
}

void ceph_fscache_unregister(void)
{
        fscache_unregister_netfs(&ceph_cache_netfs);
}

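/*
 * Acquire the per-filesystem fscache cookie.  A global list of registered
 * fsid/uniquifier pairs is kept so that two superblocks can never share a
 * cookie; a clash is reported and refused with -EBUSY.
 */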
int ceph_fscache_register_fs(struct ceph_fs_client* fsc)
{
        const struct ceph_fsid *fsid = &fsc->client->fsid;
        const char *fscache_uniq = fsc->mount_options->fscache_uniq;
        size_t uniq_len = fscache_uniq ? strlen(fscache_uniq) : 0;
        struct ceph_fscache_entry *ent;
        int err = 0;

        mutex_lock(&ceph_fscache_lock);
        list_for_each_entry(ent, &ceph_fscache_list, list) {
                if (memcmp(&ent->fsid, fsid, sizeof(*fsid)))
                        continue;
                if (ent->uniq_len != uniq_len)
                        continue;
                if (uniq_len && memcmp(ent->uniquifier, fscache_uniq, uniq_len))
                        continue;

                pr_err("fscache cookie already registered for fsid %pU\n", fsid);
                pr_err("  use fsc=%%s mount option to specify a uniquifier\n");
                err = -EBUSY;
                goto out_unlock;
        }

        ent = kzalloc(sizeof(*ent) + uniq_len, GFP_KERNEL);
        if (!ent) {
                err = -ENOMEM;
                goto out_unlock;
        }

        fsc->fscache = fscache_acquire_cookie(ceph_cache_netfs.primary_index,
                                              &ceph_fscache_fsid_object_def,
                                              fsc, true);

        if (fsc->fscache) {
                memcpy(&ent->fsid, fsid, sizeof(*fsid));
                if (uniq_len > 0) {
                        memcpy(&ent->uniquifier, fscache_uniq, uniq_len);
                        ent->uniq_len = uniq_len;
                }
                ent->fscache = fsc->fscache;
                list_add_tail(&ent->list, &ceph_fscache_list);
        } else {
                kfree(ent);
                pr_err("unable to register fscache cookie for fsid %pU\n",
                       fsid);
                /* all other fs ignore this error */
        }
out_unlock:
        mutex_unlock(&ceph_fscache_lock);
        return err;
}

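/*
 * Object key for an inode cookie: the ceph vino (inode number plus snapshot
 * id), looked up under the per-filesystem index cookie.
 */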
static uint16_t ceph_fscache_inode_get_key(const void *cookie_netfs_data,
                                           void *buffer, uint16_t maxbuf)
{
        const struct ceph_inode_info* ci = cookie_netfs_data;
        uint16_t klen;

        /* use ceph virtual inode (id + snapshot) */
        klen = sizeof(ci->i_vino);
        if (klen > maxbuf)
                return 0;

        memcpy(buffer, &ci->i_vino, klen);
        return klen;
}

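/*
 * Auxiliary (coherency) data stored alongside each inode cookie: the inode
 * version, mtime and size, as defined by struct ceph_aux_inode.
 */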
static uint16_t ceph_fscache_inode_get_aux(const void *cookie_netfs_data,
                                           void *buffer, uint16_t bufmax)
{
        struct ceph_aux_inode aux;
        const struct ceph_inode_info* ci = cookie_netfs_data;
        const struct inode* inode = &ci->vfs_inode;

        memset(&aux, 0, sizeof(aux));
        aux.version = ci->i_version;
        aux.mtime = inode->i_mtime;
        aux.size = i_size_read(inode);

        memcpy(buffer, &aux, sizeof(aux));

        return sizeof(aux);
}

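/* Report the inode's current size to FS-Cache. */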
static void ceph_fscache_inode_get_attr(const void *cookie_netfs_data,
                                        uint64_t *size)
{
        const struct ceph_inode_info* ci = cookie_netfs_data;
        *size = i_size_read(&ci->vfs_inode);
}

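/*
 * Compare the cached auxiliary data with the inode's current version, mtime
 * and size; any mismatch marks the backing object obsolete.
 */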
static enum fscache_checkaux ceph_fscache_inode_check_aux(
        void *cookie_netfs_data, const void *data, uint16_t dlen)
{
        struct ceph_aux_inode aux;
        struct ceph_inode_info* ci = cookie_netfs_data;
        struct inode* inode = &ci->vfs_inode;

        if (dlen != sizeof(aux))
                return FSCACHE_CHECKAUX_OBSOLETE;

        memset(&aux, 0, sizeof(aux));
        aux.version = ci->i_version;
        aux.mtime = inode->i_mtime;
        aux.size = i_size_read(inode);

        if (memcmp(data, &aux, sizeof(aux)) != 0)
                return FSCACHE_CHECKAUX_OBSOLETE;

        dout("ceph inode 0x%p cached okay", ci);
        return FSCACHE_CHECKAUX_OKAY;
}

static const struct fscache_cookie_def ceph_fscache_inode_object_def = {
        .name = "CEPH.inode",
        .type = FSCACHE_COOKIE_TYPE_DATAFILE,
        .get_key = ceph_fscache_inode_get_key,
        .get_attr = ceph_fscache_inode_get_attr,
        .get_aux = ceph_fscache_inode_get_aux,
        .check_aux = ceph_fscache_inode_check_aux,
};

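/*
 * Acquire a cookie for a regular file.  The cookie is created disabled;
 * ceph_fscache_file_set_cookie() decides per open file whether caching may
 * actually be enabled.
 */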
void ceph_fscache_register_inode_cookie(struct inode *inode)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);

        /* No caching for this filesystem */
        if (!fsc->fscache)
                return;

        /* Only cache regular files; writability is checked at open time */
        if (!S_ISREG(inode->i_mode))
                return;

        inode_lock_nested(inode, I_MUTEX_CHILD);
        if (!ci->fscache) {
                ci->fscache = fscache_acquire_cookie(fsc->fscache,
                                                     &ceph_fscache_inode_object_def,
                                                     ci, false);
        }
        inode_unlock(inode);
}

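/* Drop an inode's cookie: uncache any remaining pages and relinquish it. */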
void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci)
{
        struct fscache_cookie* cookie;

        if ((cookie = ci->fscache) == NULL)
                return;

        ci->fscache = NULL;

        fscache_uncache_all_inode_pages(cookie, &ci->vfs_inode);
        fscache_relinquish_cookie(cookie, 0);
}

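/* Caching may only be enabled while nobody holds the inode open for write. */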
static bool ceph_fscache_can_enable(void *data)
{
        struct inode *inode = data;
        return !inode_is_open_for_write(inode);
}

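/*
 * Enable or disable the inode's cookie for an open file: a writer disables
 * caching and uncaches any pages, otherwise enablement is attempted, with
 * ceph_fscache_can_enable() re-checking for writers under the cookie's
 * enablement lock.
 */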
void ceph_fscache_file_set_cookie(struct inode *inode, struct file *filp)
{
        struct ceph_inode_info *ci = ceph_inode(inode);

        if (!fscache_cookie_valid(ci->fscache))
                return;

        if (inode_is_open_for_write(inode)) {
                dout("fscache_file_set_cookie %p %p disabling cache\n",
                     inode, filp);
                fscache_disable_cookie(ci->fscache, false);
                fscache_uncache_all_inode_pages(ci->fscache, inode);
        } else {
                fscache_enable_cookie(ci->fscache, ceph_fscache_can_enable,
                                      inode);
                if (fscache_cookie_enabled(ci->fscache)) {
                        dout("fscache_file_set_cookie %p %p enabling cache\n",
                             inode, filp);
                }
        }
}

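/* Read completion: mark the page up to date on success and unlock it. */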
static void ceph_readpage_from_fscache_complete(struct page *page, void *data, int error)
{
        if (!error)
                SetPageUptodate(page);

        unlock_page(page);
}

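/*
 * Cached data is only trusted while the fscache generation matches the
 * inode's read-cache generation.
 */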
static inline bool cache_valid(struct ceph_inode_info *ci)
{
        return ci->i_fscache_gen == ci->i_rdcache_gen;
}

/*
 * Attempt to read a page from the cache.
 *
 * This is called from the readpage_nounlock context with the page locked.
 * Do not unlock the page here: when a read is successfully submitted, the
 * page is unlocked by ceph_readpage_from_fscache_complete(); otherwise the
 * page is left locked for the caller.
 */
int ceph_readpage_from_fscache(struct inode *inode, struct page *page)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        int ret;

        if (!cache_valid(ci))
                return -ENOBUFS;

        ret = fscache_read_or_alloc_page(ci->fscache, page,
                                         ceph_readpage_from_fscache_complete, NULL,
                                         GFP_KERNEL);

        switch (ret) {
        case 0: /* Page found */
                dout("page read submitted\n");
                return 0;
        case -ENOBUFS: /* Page was not found, and can't be */
        case -ENODATA: /* Page was not found */
                dout("page/inode not in cache\n");
                return ret;
        default:
                dout("%s: unknown error ret = %i\n", __func__, ret);
                return ret;
        }
}

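/*
 * Attempt to read a set of pages from the cache.  Pages found in the cache
 * are read asynchronously and completed via ceph_readpage_from_fscache_complete();
 * the rest remain on @pages for the caller to fetch from the OSDs.
 */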
int ceph_readpages_from_fscache(struct inode *inode,
                                struct address_space *mapping,
                                struct list_head *pages,
                                unsigned *nr_pages)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        int ret;

        if (!cache_valid(ci))
                return -ENOBUFS;

        ret = fscache_read_or_alloc_pages(ci->fscache, mapping, pages, nr_pages,
                                          ceph_readpage_from_fscache_complete,
                                          NULL, mapping_gfp_mask(mapping));

        switch (ret) {
        case 0: /* All pages found */
                dout("all-page read submitted\n");
                return 0;
        case -ENOBUFS: /* Some pages were not found, and can't be */
        case -ENODATA: /* Some pages were not found */
                dout("page/inode not in cache\n");
                return ret;
        default:
                dout("%s: unknown error ret = %i\n", __func__, ret);
                return ret;
        }
}

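/*
 * Push a page into the cache.  Pages not marked for caching are skipped, and
 * if the store cannot be queued the page is uncached again.
 */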
void ceph_readpage_to_fscache(struct inode *inode, struct page *page)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        int ret;

        if (!PageFsCache(page))
                return;

        if (!cache_valid(ci))
                return;

        ret = fscache_write_page(ci->fscache, page, GFP_KERNEL);
        if (ret)
                fscache_uncache_page(ci->fscache, page);
}

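/*
 * Page invalidation: wait for any cache write to the page to finish, then
 * remove the page from the cache.
 */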
void ceph_invalidate_fscache_page(struct inode* inode, struct page *page)
{
        struct ceph_inode_info *ci = ceph_inode(inode);

        if (!PageFsCache(page))
                return;

        fscache_wait_on_page_write(ci->fscache, page);
        fscache_uncache_page(ci->fscache, page);
}

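/*
 * Tear down the per-filesystem cookie: remove its entry from the global
 * registration list, relinquish the cookie and clear fsc->fscache.
 */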
void ceph_fscache_unregister_fs(struct ceph_fs_client* fsc)
{
        if (fscache_cookie_valid(fsc->fscache)) {
                struct ceph_fscache_entry *ent;
                bool found = false;

                mutex_lock(&ceph_fscache_lock);
                list_for_each_entry(ent, &ceph_fscache_list, list) {
                        if (ent->fscache == fsc->fscache) {
                                list_del(&ent->list);
                                kfree(ent);
                                found = true;
                                break;
                        }
                }
                WARN_ON_ONCE(!found);
                mutex_unlock(&ceph_fscache_lock);

                __fscache_relinquish_cookie(fsc->fscache, 0);
        }
        fsc->fscache = NULL;
}

/*
 * caller should hold CEPH_CAP_FILE_{RD,CACHE}
 */
void ceph_fscache_revalidate_cookie(struct ceph_inode_info *ci)
{
        if (cache_valid(ci))
                return;

        /* reuse i_truncate_mutex. There should be no pending
         * truncate while the caller holds CEPH_CAP_FILE_RD */
        mutex_lock(&ci->i_truncate_mutex);
        if (!cache_valid(ci)) {
                if (fscache_check_consistency(ci->fscache))
                        fscache_invalidate(ci->fscache);
                spin_lock(&ci->i_ceph_lock);
                ci->i_fscache_gen = ci->i_rdcache_gen;
                spin_unlock(&ci->i_ceph_lock);
        }
        mutex_unlock(&ci->i_truncate_mutex);
}