#include <linux/ceph/ceph_debug.h>

#include <linux/fs.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/utsname.h>
#include <linux/ratelimit.h>

#include "super.h"
#include "mds_client.h"

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/pagelist.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/debugfs.h>

/*
 * A cluster of MDS (metadata server) daemons is responsible for
 * managing the file system namespace (the directory hierarchy and
 * inodes) and for coordinating shared access to storage.  Metadata is
 * partitioned hierarchically across a number of servers, and that
 * partition varies over time as the cluster adjusts the distribution
 * in order to balance load.
 *
 * The MDS client is primarily responsible for managing synchronous
 * metadata requests for operations like open, unlink, and so forth.
 * If there is an MDS failure, we find out about it when we (possibly
 * request and) receive a new MDS map, and can resubmit affected
 * requests.
 *
 * For the most part, though, we take advantage of a lossless
 * communications channel to the MDS, and do not need to worry about
 * timing out or resubmitting requests.
 *
 * We maintain a stateful "session" with each MDS we interact with.
 * Within each session, we send periodic heartbeat messages to ensure
 * any capabilities or leases we have been issued remain valid.  If
 * the session times out and goes stale, our leases and capabilities
 * are no longer valid.
 */
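
/*
 * State carried while building an MDS reconnect message (field roles
 * inferred from their names and types): nr_caps counts the caps being
 * reconnected, pagelist accumulates the encoded payload, and
 * msg_version selects the wire format.
 */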
struct ceph_reconnect_state {
        int nr_caps;
        struct ceph_pagelist *pagelist;
        unsigned msg_version;
};

static void __wake_requests(struct ceph_mds_client *mdsc,
                            struct list_head *head);

static const struct ceph_connection_operations mds_con_ops;

/*
 * mds reply parsing
 */

/*
 * parse individual inode info
 */
static int parse_reply_info_in(void **p, void *end,
                               struct ceph_mds_reply_info_in *info,
                               u64 features)
{
        int err = -EIO;

        info->in = *p;
        *p += sizeof(struct ceph_mds_reply_inode) +
                sizeof(*info->in->fragtree.splits) *
                le32_to_cpu(info->in->fragtree.nsplits);

        ceph_decode_32_safe(p, end, info->symlink_len, bad);
        ceph_decode_need(p, end, info->symlink_len, bad);
        info->symlink = *p;
        *p += info->symlink_len;

        if (features & CEPH_FEATURE_DIRLAYOUTHASH)
                ceph_decode_copy_safe(p, end, &info->dir_layout,
                                      sizeof(info->dir_layout), bad);
        else
                memset(&info->dir_layout, 0, sizeof(info->dir_layout));

        ceph_decode_32_safe(p, end, info->xattr_len, bad);
        ceph_decode_need(p, end, info->xattr_len, bad);
        info->xattr_data = *p;
        *p += info->xattr_len;

        if (features & CEPH_FEATURE_MDS_INLINE_DATA) {
                ceph_decode_64_safe(p, end, info->inline_version, bad);
                ceph_decode_32_safe(p, end, info->inline_len, bad);
                ceph_decode_need(p, end, info->inline_len, bad);
                info->inline_data = *p;
                *p += info->inline_len;
        } else
                info->inline_version = CEPH_INLINE_NONE;

        info->pool_ns_len = 0;
        info->pool_ns_data = NULL;
        if (features & CEPH_FEATURE_FS_FILE_LAYOUT_V2) {
                ceph_decode_32_safe(p, end, info->pool_ns_len, bad);
                if (info->pool_ns_len > 0) {
                        ceph_decode_need(p, end, info->pool_ns_len, bad);
                        info->pool_ns_data = *p;
                        *p += info->pool_ns_len;
                }
        }

        return 0;
bad:
        return err;
}

/*
 * parse a normal reply, which may contain a (dir+)dentry and/or a
 * target inode.
 */
static int parse_reply_info_trace(void **p, void *end,
                                  struct ceph_mds_reply_info_parsed *info,
                                  u64 features)
{
        int err;

        if (info->head->is_dentry) {
                err = parse_reply_info_in(p, end, &info->diri, features);
                if (err < 0)
                        goto out_bad;

                if (unlikely(*p + sizeof(*info->dirfrag) > end))
                        goto bad;
                info->dirfrag = *p;
                *p += sizeof(*info->dirfrag) +
                        sizeof(u32)*le32_to_cpu(info->dirfrag->ndist);
                if (unlikely(*p > end))
                        goto bad;

                ceph_decode_32_safe(p, end, info->dname_len, bad);
                ceph_decode_need(p, end, info->dname_len, bad);
                info->dname = *p;
                *p += info->dname_len;
                info->dlease = *p;
                *p += sizeof(*info->dlease);
        }

        if (info->head->is_target) {
                err = parse_reply_info_in(p, end, &info->targeti, features);
                if (err < 0)
                        goto out_bad;
        }

        if (unlikely(*p != end))
                goto bad;
        return 0;

bad:
        err = -EIO;
out_bad:
        pr_err("problem parsing mds trace %d\n", err);
        return err;
}

/*
 * parse readdir results
 */
static int parse_reply_info_dir(void **p, void *end,
                                struct ceph_mds_reply_info_parsed *info,
                                u64 features)
{
        u32 num, i = 0;
        int err;

        info->dir_dir = *p;
        if (*p + sizeof(*info->dir_dir) > end)
                goto bad;
        *p += sizeof(*info->dir_dir) +
                sizeof(u32)*le32_to_cpu(info->dir_dir->ndist);
        if (*p > end)
                goto bad;

        ceph_decode_need(p, end, sizeof(num) + 2, bad);
        num = ceph_decode_32(p);
        {
                u16 flags = ceph_decode_16(p);
                info->dir_end = !!(flags & CEPH_READDIR_FRAG_END);
                info->dir_complete = !!(flags & CEPH_READDIR_FRAG_COMPLETE);
                info->hash_order = !!(flags & CEPH_READDIR_HASH_ORDER);
                info->offset_hash = !!(flags & CEPH_READDIR_OFFSET_HASH);
        }
        if (num == 0)
                goto done;

        BUG_ON(!info->dir_entries);
        if ((unsigned long)(info->dir_entries + num) >
            (unsigned long)info->dir_entries + info->dir_buf_size) {
                pr_err("dir contents are larger than expected\n");
                WARN_ON(1);
                goto bad;
        }

        info->dir_nr = num;
        while (num) {
                struct ceph_mds_reply_dir_entry *rde = info->dir_entries + i;
                /* dentry */
                ceph_decode_need(p, end, sizeof(u32)*2, bad);
                rde->name_len = ceph_decode_32(p);
                ceph_decode_need(p, end, rde->name_len, bad);
                rde->name = *p;
                *p += rde->name_len;
                dout("parsed dir dname '%.*s'\n", rde->name_len, rde->name);
                rde->lease = *p;
                *p += sizeof(struct ceph_mds_reply_lease);

                /* inode */
                err = parse_reply_info_in(p, end, &rde->inode, features);
                if (err < 0)
                        goto out_bad;
                /* ceph_readdir_prepopulate() will update it */
                rde->offset = 0;
                i++;
                num--;
        }

done:
        if (*p != end)
                goto bad;
        return 0;

bad:
        err = -EIO;
out_bad:
        pr_err("problem parsing dir contents %d\n", err);
        return err;
}

/*
 * parse fcntl F_GETLK results
 */
static int parse_reply_info_filelock(void **p, void *end,
                                     struct ceph_mds_reply_info_parsed *info,
                                     u64 features)
{
        if (*p + sizeof(*info->filelock_reply) > end)
                goto bad;

        info->filelock_reply = *p;
        *p += sizeof(*info->filelock_reply);

        if (unlikely(*p != end))
                goto bad;
        return 0;

bad:
        return -EIO;
}

/*
 * parse create results
 */
static int parse_reply_info_create(void **p, void *end,
                                   struct ceph_mds_reply_info_parsed *info,
                                   u64 features)
{
        if (features & CEPH_FEATURE_REPLY_CREATE_INODE) {
                if (*p == end) {
                        info->has_create_ino = false;
                } else {
                        info->has_create_ino = true;
                        info->ino = ceph_decode_64(p);
                }
        }

        if (unlikely(*p != end))
                goto bad;
        return 0;

bad:
        return -EIO;
}

/*
 * parse extra results
 */
static int parse_reply_info_extra(void **p, void *end,
                                  struct ceph_mds_reply_info_parsed *info,
                                  u64 features)
{
        u32 op = le32_to_cpu(info->head->op);

        if (op == CEPH_MDS_OP_GETFILELOCK)
                return parse_reply_info_filelock(p, end, info, features);
        else if (op == CEPH_MDS_OP_READDIR || op == CEPH_MDS_OP_LSSNAP)
                return parse_reply_info_dir(p, end, info, features);
        else if (op == CEPH_MDS_OP_CREATE)
                return parse_reply_info_create(p, end, info, features);
        else
                return -EIO;
}

/*
 * parse entire mds reply
 */
static int parse_reply_info(struct ceph_msg *msg,
                            struct ceph_mds_reply_info_parsed *info,
                            u64 features)
{
        void *p, *end;
        u32 len;
        int err;

        info->head = msg->front.iov_base;
        p = msg->front.iov_base + sizeof(struct ceph_mds_reply_head);
        end = p + msg->front.iov_len - sizeof(struct ceph_mds_reply_head);

        /* trace */
        ceph_decode_32_safe(&p, end, len, bad);
        if (len > 0) {
                ceph_decode_need(&p, end, len, bad);
                err = parse_reply_info_trace(&p, p+len, info, features);
                if (err < 0)
                        goto out_bad;
        }

        /* extra */
        ceph_decode_32_safe(&p, end, len, bad);
        if (len > 0) {
                ceph_decode_need(&p, end, len, bad);
                err = parse_reply_info_extra(&p, p+len, info, features);
                if (err < 0)
                        goto out_bad;
        }

        /* snap blob */
        ceph_decode_32_safe(&p, end, len, bad);
        info->snapblob_len = len;
        info->snapblob = p;
        p += len;

        if (p != end)
                goto bad;
        return 0;

bad:
        err = -EIO;
out_bad:
        pr_err("mds parse_reply err %d\n", err);
        return err;
}
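
/*
 * Free the readdir entry buffer attached to a parsed reply, if any.
 * It is released with free_pages(), matching a page-order allocation
 * made elsewhere (outside this function).
 */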
static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info)
{
        if (!info->dir_entries)
                return;
        free_pages((unsigned long)info->dir_entries, get_order(info->dir_buf_size));
}

/*
 * sessions
 */
const char *ceph_session_state_name(int s)
{
        switch (s) {
        case CEPH_MDS_SESSION_NEW: return "new";
        case CEPH_MDS_SESSION_OPENING: return "opening";
        case CEPH_MDS_SESSION_OPEN: return "open";
        case CEPH_MDS_SESSION_HUNG: return "hung";
        case CEPH_MDS_SESSION_CLOSING: return "closing";
        case CEPH_MDS_SESSION_RESTARTING: return "restarting";
        case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting";
        case CEPH_MDS_SESSION_REJECTED: return "rejected";
        default: return "???";
        }
}
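
/*
 * Take a session reference, but only if the refcount is non-zero,
 * i.e. the session is not already on its way to destruction.
 */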
static struct ceph_mds_session *get_session(struct ceph_mds_session *s)
{
        if (refcount_inc_not_zero(&s->s_ref)) {
                dout("mdsc get_session %p %d -> %d\n", s,
                     refcount_read(&s->s_ref)-1, refcount_read(&s->s_ref));
                return s;
        } else {
                dout("mdsc get_session %p 0 -- FAIL", s);
                return NULL;
        }
}
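
/*
 * Drop a session reference.  The final put also destroys the auth
 * authorizer (if any) and frees the session.
 */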
void ceph_put_mds_session(struct ceph_mds_session *s)
{
        dout("mdsc put_session %p %d -> %d\n", s,
             refcount_read(&s->s_ref), refcount_read(&s->s_ref)-1);
        if (refcount_dec_and_test(&s->s_ref)) {
                if (s->s_auth.authorizer)
                        ceph_auth_destroy_authorizer(s->s_auth.authorizer);
                kfree(s);
        }
}

/*
 * called under mdsc->mutex
 */
struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc,
                                                   int mds)
{
        struct ceph_mds_session *session;

        if (mds >= mdsc->max_sessions || mdsc->sessions[mds] == NULL)
                return NULL;
        session = mdsc->sessions[mds];
        dout("lookup_mds_session %p %d\n", session,
             refcount_read(&session->s_ref));
        get_session(session);
        return session;
}
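
/*
 * Check whether a session is registered for the given mds.  Like the
 * other double-underscore helpers here, this presumably expects
 * mdsc->mutex to be held by the caller.
 */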
static bool __have_session(struct ceph_mds_client *mdsc, int mds)
{
        if (mds >= mdsc->max_sessions)
                return false;
        return mdsc->sessions[mds];
}
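
/*
 * Verify that a session pointer is still the one registered in
 * mdsc->sessions for its rank; returns -ENOENT if it has been
 * unregistered in the meantime.
 */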
static int __verify_registered_session(struct ceph_mds_client *mdsc,
                                       struct ceph_mds_session *s)
{
        if (s->s_mds >= mdsc->max_sessions ||
            mdsc->sessions[s->s_mds] != s)
                return -ENOENT;
        return 0;
}

/*
 * create+register a new session for given mds.
 * called under mdsc->mutex.
 */
static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
                                                 int mds)
{
        struct ceph_mds_session *s;

        if (mds >= mdsc->mdsmap->m_num_mds)
                return ERR_PTR(-EINVAL);

        s = kzalloc(sizeof(*s), GFP_NOFS);
        if (!s)
                return ERR_PTR(-ENOMEM);
        s->s_mdsc = mdsc;
        s->s_mds = mds;
        s->s_state = CEPH_MDS_SESSION_NEW;
        s->s_ttl = 0;
        s->s_seq = 0;
        mutex_init(&s->s_mutex);

        ceph_con_init(&s->s_con, s, &mds_con_ops, &mdsc->fsc->client->msgr);

        spin_lock_init(&s->s_gen_ttl_lock);
        s->s_cap_gen = 0;
        s->s_cap_ttl = jiffies - 1;

        spin_lock_init(&s->s_cap_lock);
        s->s_renew_requested = 0;
        s->s_renew_seq = 0;
        INIT_LIST_HEAD(&s->s_caps);
        s->s_nr_caps = 0;
        s->s_trim_caps = 0;
        refcount_set(&s->s_ref, 1);
        INIT_LIST_HEAD(&s->s_waiting);
        INIT_LIST_HEAD(&s->s_unsafe);
        s->s_num_cap_releases = 0;
        s->s_cap_reconnect = 0;
        s->s_cap_iterator = NULL;
        INIT_LIST_HEAD(&s->s_cap_releases);
        INIT_LIST_HEAD(&s->s_cap_flushing);

        dout("register_session mds%d\n", mds);
        if (mds >= mdsc->max_sessions) {
                int newmax = 1 << get_count_order(mds+1);
                struct ceph_mds_session **sa;

                dout("register_session realloc to %d\n", newmax);
                sa = kcalloc(newmax, sizeof(void *), GFP_NOFS);
                if (sa == NULL)
                        goto fail_realloc;
                if (mdsc->sessions) {
                        memcpy(sa, mdsc->sessions,
                               mdsc->max_sessions * sizeof(void *));
                        kfree(mdsc->sessions);
                }
                mdsc->sessions = sa;
                mdsc->max_sessions = newmax;
        }
        mdsc->sessions[mds] = s;
        atomic_inc(&mdsc->num_sessions);
        refcount_inc(&s->s_ref);  /* one ref to sessions[], one to caller */

        ceph_con_open(&s->s_con, CEPH_ENTITY_TYPE_MDS, mds,
                      ceph_mdsmap_get_addr(mdsc->mdsmap, mds));

        return s;

fail_realloc:
        kfree(s);
        return ERR_PTR(-ENOMEM);
}

/*
 * called under mdsc->mutex
 */
static void __unregister_session(struct ceph_mds_client *mdsc,
                                 struct ceph_mds_session *s)
{
        dout("__unregister_session mds%d %p\n", s->s_mds, s);
        BUG_ON(mdsc->sessions[s->s_mds] != s);
        mdsc->sessions[s->s_mds] = NULL;
        ceph_con_close(&s->s_con);
        ceph_put_mds_session(s);
        atomic_dec(&mdsc->num_sessions);
}

/*
 * drop session refs in request.
 *
 * should be last request ref, or hold mdsc->mutex
 */
static void put_request_session(struct ceph_mds_request *req)
{
        if (req->r_session) {
                ceph_put_mds_session(req->r_session);
                req->r_session = NULL;
        }
}
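
/*
 * kref release callback: tears down a request once the last
 * reference is dropped (typically via ceph_mdsc_put_request()).
 */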
void ceph_mdsc_release_request(struct kref *kref)
{
        struct ceph_mds_request *req = container_of(kref,
                                                    struct ceph_mds_request,
                                                    r_kref);
        destroy_reply_info(&req->r_reply_info);
        if (req->r_request)
                ceph_msg_put(req->r_request);
        if (req->r_reply)
                ceph_msg_put(req->r_reply);
        if (req->r_inode) {
                ceph_put_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
                iput(req->r_inode);
        }
        if (req->r_parent)
                ceph_put_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
        iput(req->r_target_inode);
        if (req->r_dentry)
                dput(req->r_dentry);
        if (req->r_old_dentry)
                dput(req->r_old_dentry);
        if (req->r_old_dentry_dir) {
                /*
                 * track (and drop pins for) r_old_dentry_dir
                 * separately, since r_old_dentry's d_parent may have
                 * changed between the dir mutex being dropped and
                 * this request being freed.
                 */
                ceph_put_cap_refs(ceph_inode(req->r_old_dentry_dir),
                                  CEPH_CAP_PIN);
                iput(req->r_old_dentry_dir);
        }
        kfree(req->r_path1);
        kfree(req->r_path2);
        if (req->r_pagelist)
                ceph_pagelist_release(req->r_pagelist);
        put_request_session(req);
        ceph_unreserve_caps(req->r_mdsc, &req->r_caps_reservation);
        kfree(req);
}
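
/*
 * DEFINE_RB_FUNCS generates the static lookup_request(),
 * insert_request() and erase_request() helpers used below, keyed on
 * r_tid and linked through r_node (the macro comes from the shared
 * ceph library headers).
 */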
DEFINE_RB_FUNCS(request, struct ceph_mds_request, r_tid, r_node)

/*
 * lookup request, bump ref if found.
 *
 * called under mdsc->mutex.
 */
static struct ceph_mds_request *
lookup_get_request(struct ceph_mds_client *mdsc, u64 tid)
{
        struct ceph_mds_request *req;

        req = lookup_request(&mdsc->request_tree, tid);
        if (req)
                ceph_mdsc_get_request(req);

        return req;
}

/*
 * Register an in-flight request, and assign a tid.  Link to the
 * directory we are modifying (if any).
 *
 * Called under mdsc->mutex.
 */
static void __register_request(struct ceph_mds_client *mdsc,
                               struct ceph_mds_request *req,
                               struct inode *dir)
{
        req->r_tid = ++mdsc->last_tid;
        if (req->r_num_caps)
                ceph_reserve_caps(mdsc, &req->r_caps_reservation,
                                  req->r_num_caps);
        dout("__register_request %p tid %lld\n", req, req->r_tid);
        ceph_mdsc_get_request(req);
        insert_request(&mdsc->request_tree, req);

        req->r_uid = current_fsuid();
        req->r_gid = current_fsgid();

        if (mdsc->oldest_tid == 0 && req->r_op != CEPH_MDS_OP_SETFILELOCK)
                mdsc->oldest_tid = req->r_tid;

        if (dir) {
                ihold(dir);
                req->r_unsafe_dir = dir;
        }
}
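
/*
 * Undo __register_request(): pull the request off the unsafe lists
 * and out of the tid tree, advancing oldest_tid past it if needed.
 * Like its registration counterpart, this runs under mdsc->mutex.
 */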
static void __unregister_request(struct ceph_mds_client *mdsc,
                                 struct ceph_mds_request *req)
{
        dout("__unregister_request %p tid %lld\n", req, req->r_tid);

        /* Never leave an unregistered request on an unsafe list! */
        list_del_init(&req->r_unsafe_item);

        if (req->r_tid == mdsc->oldest_tid) {
                struct rb_node *p = rb_next(&req->r_node);
                mdsc->oldest_tid = 0;
                while (p) {
                        struct ceph_mds_request *next_req =
                                rb_entry(p, struct ceph_mds_request, r_node);
                        if (next_req->r_op != CEPH_MDS_OP_SETFILELOCK) {
                                mdsc->oldest_tid = next_req->r_tid;
                                break;
                        }
                        p = rb_next(p);
                }
        }

        erase_request(&mdsc->request_tree, req);

        if (req->r_unsafe_dir &&
            test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
                struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir);
                spin_lock(&ci->i_unsafe_lock);
                list_del_init(&req->r_unsafe_dir_item);
                spin_unlock(&ci->i_unsafe_lock);
        }
        if (req->r_target_inode &&
            test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
                struct ceph_inode_info *ci = ceph_inode(req->r_target_inode);
                spin_lock(&ci->i_unsafe_lock);
                list_del_init(&req->r_unsafe_target_item);
                spin_unlock(&ci->i_unsafe_lock);
        }

        if (req->r_unsafe_dir) {
                iput(req->r_unsafe_dir);
                req->r_unsafe_dir = NULL;
        }

        complete_all(&req->r_safe_completion);

        ceph_mdsc_put_request(req);
}

/*
 * Walk back up the dentry tree until we hit a dentry representing a
 * non-snapshot inode. We do this using the rcu_read_lock (which must be held
 * when calling this) to ensure that the objects won't disappear while we're
 * working with them. Once we hit a candidate dentry, we attempt to take a
 * reference to it, and return that as the result.
 */
static struct inode *get_nonsnap_parent(struct dentry *dentry)
{
        struct inode *inode = NULL;

        while (dentry && !IS_ROOT(dentry)) {
                inode = d_inode_rcu(dentry);
                if (!inode || ceph_snap(inode) == CEPH_NOSNAP)
                        break;
                dentry = dentry->d_parent;
        }
        if (inode)
                inode = igrab(inode);
        return inode;
}

/*
 * Choose mds to send request to next.  If there is a hint set in the
 * request (e.g., due to a prior forward hint from the mds), use that.
 * Otherwise, consult frag tree and/or caps to identify the
 * appropriate mds.  If all else fails, choose randomly.
 *
 * Called under mdsc->mutex.
 */
static int __choose_mds(struct ceph_mds_client *mdsc,
                        struct ceph_mds_request *req)
{
        struct inode *inode;
        struct ceph_inode_info *ci;
        struct ceph_cap *cap;
        int mode = req->r_direct_mode;
        int mds = -1;
        u32 hash = req->r_direct_hash;
        bool is_hash = test_bit(CEPH_MDS_R_DIRECT_IS_HASH, &req->r_req_flags);

        /*
         * is there a specific mds we should try?  ignore hint if we have
         * no session and the mds is not up (active or recovering).
         */
        if (req->r_resend_mds >= 0 &&
            (__have_session(mdsc, req->r_resend_mds) ||
             ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) {
                dout("choose_mds using resend_mds mds%d\n",
                     req->r_resend_mds);
                return req->r_resend_mds;
        }

        if (mode == USE_RANDOM_MDS)
                goto random;

        inode = NULL;
        if (req->r_inode) {
                inode = req->r_inode;
                ihold(inode);
        } else if (req->r_dentry) {
                /* ignore race with rename; old or new d_parent is okay */
                struct dentry *parent;
                struct inode *dir;

                rcu_read_lock();
                parent = req->r_dentry->d_parent;
                dir = req->r_parent ? : d_inode_rcu(parent);

                if (!dir || dir->i_sb != mdsc->fsc->sb) {
                        /* not this fs or parent went negative */
                        inode = d_inode(req->r_dentry);
                        if (inode)
                                ihold(inode);
                } else if (ceph_snap(dir) != CEPH_NOSNAP) {
                        /* direct snapped/virtual snapdir requests
                         * based on parent dir inode */
                        inode = get_nonsnap_parent(parent);
                        dout("__choose_mds using nonsnap parent %p\n", inode);
                } else {
                        /* dentry target */
                        inode = d_inode(req->r_dentry);
                        if (!inode || mode == USE_AUTH_MDS) {
                                /* dir + name */
                                inode = igrab(dir);
                                hash = ceph_dentry_hash(dir, req->r_dentry);
                                is_hash = true;
                        } else {
                                ihold(inode);
                        }
                }
                rcu_read_unlock();
        }

        dout("__choose_mds %p is_hash=%d (%d) mode %d\n", inode, (int)is_hash,
             (int)hash, mode);
        if (!inode)
                goto random;
        ci = ceph_inode(inode);

        if (is_hash && S_ISDIR(inode->i_mode)) {
                struct ceph_inode_frag frag;
                int found;

                ceph_choose_frag(ci, hash, &frag, &found);
                if (found) {
                        if (mode == USE_ANY_MDS && frag.ndist > 0) {
                                u8 r;

                                /* choose a random replica */
                                get_random_bytes(&r, 1);
                                r %= frag.ndist;
                                mds = frag.dist[r];
                                dout("choose_mds %p %llx.%llx "
                                     "frag %u mds%d (%d/%d)\n",
                                     inode, ceph_vinop(inode),
                                     frag.frag, mds,
                                     (int)r, frag.ndist);
                                if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
                                    CEPH_MDS_STATE_ACTIVE)
                                        goto out;
                        }

                        /* since this file/dir wasn't known to be
                         * replicated, then we want to look for the
                         * authoritative mds. */
                        mode = USE_AUTH_MDS;
                        if (frag.mds >= 0) {
                                /* choose auth mds */
                                mds = frag.mds;
                                dout("choose_mds %p %llx.%llx "
                                     "frag %u mds%d (auth)\n",
                                     inode, ceph_vinop(inode), frag.frag, mds);
                                if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
                                    CEPH_MDS_STATE_ACTIVE)
                                        goto out;
                        }
                }
        }

        spin_lock(&ci->i_ceph_lock);
        cap = NULL;
        if (mode == USE_AUTH_MDS)
                cap = ci->i_auth_cap;
        if (!cap && !RB_EMPTY_ROOT(&ci->i_caps))
                cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node);
        if (!cap) {
                spin_unlock(&ci->i_ceph_lock);
                iput(inode);
                goto random;
        }
        mds = cap->session->s_mds;
        dout("choose_mds %p %llx.%llx mds%d (%scap %p)\n",
             inode, ceph_vinop(inode), mds,
             cap == ci->i_auth_cap ? "auth " : "", cap);
        spin_unlock(&ci->i_ceph_lock);
out:
        iput(inode);
        return mds;

random:
        mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap);
        dout("choose_mds chose random mds%d\n", mds);
        return mds;
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
 * session messages
 */
static struct ceph_msg *create_session_msg(u32 op, u64 seq)
{
	struct ceph_msg *msg;
	struct ceph_mds_session_head *h;

	msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), GFP_NOFS,
			   false);
	if (!msg) {
		pr_err("create_session_msg ENOMEM creating msg\n");
		return NULL;
	}
	h = msg->front.iov_base;
	h->op = cpu_to_le32(op);
	h->seq = cpu_to_le64(seq);

	return msg;
}

/*
 * session message, specialization for CEPH_SESSION_REQUEST_OPEN
 * to include additional client metadata fields.
 */
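/*
 * The metadata is encoded as the userspace map<string, string> format:
 * a u32 entry count, then for each entry a length-prefixed key and a
 * length-prefixed value (u32 length followed by the raw bytes).
 */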
static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u64 seq)
{
	struct ceph_msg *msg;
	struct ceph_mds_session_head *h;
	int i = -1;
	int metadata_bytes = 0;
	int metadata_key_count = 0;
	struct ceph_options *opt = mdsc->fsc->client->options;
	struct ceph_mount_options *fsopt = mdsc->fsc->mount_options;
	void *p;

	const char* metadata[][2] = {
		{"hostname", utsname()->nodename},
		{"kernel_version", utsname()->release},
		{"entity_id", opt->name ? : ""},
		{"root", fsopt->server_path ? : "/"},
		{NULL, NULL}
	};

	/* Calculate serialized length of metadata */
	metadata_bytes = 4;  /* map length */
	for (i = 0; metadata[i][0] != NULL; ++i) {
		metadata_bytes += 8 + strlen(metadata[i][0]) +
			strlen(metadata[i][1]);
		metadata_key_count++;
	}

	/* Allocate the message */
	msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h) + metadata_bytes,
			   GFP_NOFS, false);
	if (!msg) {
		pr_err("create_session_open_msg ENOMEM creating msg\n");
		return NULL;
	}
	h = msg->front.iov_base;
	h->op = cpu_to_le32(CEPH_SESSION_REQUEST_OPEN);
	h->seq = cpu_to_le64(seq);

	/*
	 * Serialize client metadata into waiting buffer space, using
	 * the format that userspace expects for map<string, string>
	 *
	 * ClientSession messages with metadata are v2
	 */
	msg->hdr.version = cpu_to_le16(2);
	msg->hdr.compat_version = cpu_to_le16(1);

	/* The write pointer, following the session_head structure */
	p = msg->front.iov_base + sizeof(*h);

	/* Number of entries in the map */
	ceph_encode_32(&p, metadata_key_count);

	/* Two length-prefixed strings for each entry in the map */
	for (i = 0; metadata[i][0] != NULL; ++i) {
		size_t const key_len = strlen(metadata[i][0]);
		size_t const val_len = strlen(metadata[i][1]);

		ceph_encode_32(&p, key_len);
		memcpy(p, metadata[i][0], key_len);
		p += key_len;
		ceph_encode_32(&p, val_len);
		memcpy(p, metadata[i][1], val_len);
		p += val_len;
	}

	return msg;
}

/*
 * send session open request.
 *
 * called under mdsc->mutex
 */
static int __open_session(struct ceph_mds_client *mdsc,
			  struct ceph_mds_session *session)
{
	struct ceph_msg *msg;
	int mstate;
	int mds = session->s_mds;

	/* wait for mds to go active? */
	mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
	dout("open_session to mds%d (%s)\n", mds,
	     ceph_mds_state_name(mstate));
	session->s_state = CEPH_MDS_SESSION_OPENING;
	session->s_renew_requested = jiffies;

	/* send connect message */
	msg = create_session_open_msg(mdsc, session->s_seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 0;
}

/*
 * open sessions for any export targets for the given mds
 *
 * called under mdsc->mutex
 */
static struct ceph_mds_session *
__open_export_target_session(struct ceph_mds_client *mdsc, int target)
{
	struct ceph_mds_session *session;

	session = __ceph_lookup_mds_session(mdsc, target);
	if (!session) {
		session = register_session(mdsc, target);
		if (IS_ERR(session))
			return session;
	}
	if (session->s_state == CEPH_MDS_SESSION_NEW ||
	    session->s_state == CEPH_MDS_SESSION_CLOSING)
		__open_session(mdsc, session);

	return session;
}

struct ceph_mds_session *
ceph_mdsc_open_export_target_session(struct ceph_mds_client *mdsc, int target)
{
	struct ceph_mds_session *session;

	dout("open_export_target_session to mds%d\n", target);

	mutex_lock(&mdsc->mutex);
	session = __open_export_target_session(mdsc, target);
	mutex_unlock(&mdsc->mutex);

	return session;
}

static void __open_export_target_sessions(struct ceph_mds_client *mdsc,
					  struct ceph_mds_session *session)
{
	struct ceph_mds_info *mi;
	struct ceph_mds_session *ts;
	int i, mds = session->s_mds;

	if (mds >= mdsc->mdsmap->m_num_mds)
		return;

	mi = &mdsc->mdsmap->m_info[mds];
	dout("open_export_target_sessions for mds%d (%d targets)\n",
	     session->s_mds, mi->num_export_targets);

	for (i = 0; i < mi->num_export_targets; i++) {
		ts = __open_export_target_session(mdsc, mi->export_targets[i]);
		if (!IS_ERR(ts))
			ceph_put_mds_session(ts);
	}
}

void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc,
					   struct ceph_mds_session *session)
{
	mutex_lock(&mdsc->mutex);
	__open_export_target_sessions(mdsc, session);
	mutex_unlock(&mdsc->mutex);
}

/*
 * session caps
 */

/* caller holds s_cap_lock, we drop it */
static void cleanup_cap_releases(struct ceph_mds_client *mdsc,
				 struct ceph_mds_session *session)
	__releases(session->s_cap_lock)
{
	LIST_HEAD(tmp_list);
	list_splice_init(&session->s_cap_releases, &tmp_list);
	session->s_num_cap_releases = 0;
	spin_unlock(&session->s_cap_lock);

	dout("cleanup_cap_releases mds%d\n", session->s_mds);
	while (!list_empty(&tmp_list)) {
		struct ceph_cap *cap;
		/* zero out the in-progress message */
		cap = list_first_entry(&tmp_list,
				       struct ceph_cap, session_caps);
		list_del(&cap->session_caps);
		ceph_put_cap(mdsc, cap);
	}
}

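/*
 * Clean up requests that were pinned to this session: unregister any
 * unsafe requests outright, and zero r_attempts on the rest so that
 * kick_requests() will resend them once the session is reestablished.
 */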
static void cleanup_session_requests(struct ceph_mds_client *mdsc,
				     struct ceph_mds_session *session)
{
	struct ceph_mds_request *req;
	struct rb_node *p;

	dout("cleanup_session_requests mds%d\n", session->s_mds);
	mutex_lock(&mdsc->mutex);
	while (!list_empty(&session->s_unsafe)) {
		req = list_first_entry(&session->s_unsafe,
				       struct ceph_mds_request, r_unsafe_item);
		pr_warn_ratelimited(" dropping unsafe request %llu\n",
				    req->r_tid);
		__unregister_request(mdsc, req);
	}
	/* zero r_attempts, so kick_requests() will re-send requests */
	p = rb_first(&mdsc->request_tree);
	while (p) {
		req = rb_entry(p, struct ceph_mds_request, r_node);
		p = rb_next(p);
		if (req->r_session &&
		    req->r_session->s_mds == session->s_mds)
			req->r_attempts = 0;
	}
	mutex_unlock(&mdsc->mutex);
}

/*
 * Helper to safely iterate over all caps associated with a session, with
 * special care taken to handle a racing __ceph_remove_cap().
 *
 * Caller must hold session s_mutex.
 */
static int iterate_session_caps(struct ceph_mds_session *session,
				int (*cb)(struct inode *, struct ceph_cap *,
					  void *), void *arg)
{
	struct list_head *p;
	struct ceph_cap *cap;
	struct inode *inode, *last_inode = NULL;
	struct ceph_cap *old_cap = NULL;
	int ret;

	dout("iterate_session_caps %p mds%d\n", session, session->s_mds);
	spin_lock(&session->s_cap_lock);
	p = session->s_caps.next;
	while (p != &session->s_caps) {
		cap = list_entry(p, struct ceph_cap, session_caps);
		inode = igrab(&cap->ci->vfs_inode);
		if (!inode) {
			p = p->next;
			continue;
		}
		session->s_cap_iterator = cap;
		spin_unlock(&session->s_cap_lock);

		if (last_inode) {
			iput(last_inode);
			last_inode = NULL;
		}
		if (old_cap) {
			ceph_put_cap(session->s_mdsc, old_cap);
			old_cap = NULL;
		}

		ret = cb(inode, cap, arg);
		last_inode = inode;

		spin_lock(&session->s_cap_lock);
		p = p->next;
		if (cap->ci == NULL) {
			dout("iterate_session_caps finishing cap %p removal\n",
			     cap);
			BUG_ON(cap->session != session);
			cap->session = NULL;
			list_del_init(&cap->session_caps);
			session->s_nr_caps--;
			if (cap->queue_release) {
				list_add_tail(&cap->session_caps,
					      &session->s_cap_releases);
				session->s_num_cap_releases++;
			} else {
				old_cap = cap;  /* put_cap it w/o locks held */
			}
		}
		if (ret < 0)
			goto out;
	}
	ret = 0;
out:
	session->s_cap_iterator = NULL;
	spin_unlock(&session->s_cap_lock);

	iput(last_inode);
	if (old_cap)
		ceph_put_cap(session->s_mdsc, old_cap);

	return ret;
}

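/*
 * iterate_session_caps() callback: forcibly remove the cap.  If this was
 * the inode's last cap, discard any pending cap flushes and drop the
 * dirty/flushing state, since the MDS session they were bound for is gone.
 */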
static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
				  void *arg)
{
	struct ceph_fs_client *fsc = (struct ceph_fs_client *)arg;
	struct ceph_inode_info *ci = ceph_inode(inode);
	LIST_HEAD(to_remove);
	bool drop = false;
	bool invalidate = false;

	dout("removing cap %p, ci is %p, inode is %p\n",
	     cap, ci, &ci->vfs_inode);
	spin_lock(&ci->i_ceph_lock);
	__ceph_remove_cap(cap, false);
	if (!ci->i_auth_cap) {
		struct ceph_cap_flush *cf;
		struct ceph_mds_client *mdsc = fsc->mdsc;

		ci->i_ceph_flags |= CEPH_I_CAP_DROPPED;

		if (ci->i_wrbuffer_ref > 0 &&
		    READ_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
			invalidate = true;

		while (!list_empty(&ci->i_cap_flush_list)) {
			cf = list_first_entry(&ci->i_cap_flush_list,
					      struct ceph_cap_flush, i_list);
			list_move(&cf->i_list, &to_remove);
		}

		spin_lock(&mdsc->cap_dirty_lock);

		list_for_each_entry(cf, &to_remove, i_list)
			list_del(&cf->g_list);

		if (!list_empty(&ci->i_dirty_item)) {
			pr_warn_ratelimited(
				" dropping dirty %s state for %p %lld\n",
				ceph_cap_string(ci->i_dirty_caps),
				inode, ceph_ino(inode));
			ci->i_dirty_caps = 0;
			list_del_init(&ci->i_dirty_item);
			drop = true;
		}
		if (!list_empty(&ci->i_flushing_item)) {
			pr_warn_ratelimited(
				" dropping dirty+flushing %s state for %p %lld\n",
				ceph_cap_string(ci->i_flushing_caps),
				inode, ceph_ino(inode));
			ci->i_flushing_caps = 0;
			list_del_init(&ci->i_flushing_item);
			mdsc->num_cap_flushing--;
			drop = true;
		}
		spin_unlock(&mdsc->cap_dirty_lock);

		if (!ci->i_dirty_caps && ci->i_prealloc_cap_flush) {
			list_add(&ci->i_prealloc_cap_flush->i_list, &to_remove);
			ci->i_prealloc_cap_flush = NULL;
		}
	}
	spin_unlock(&ci->i_ceph_lock);
	while (!list_empty(&to_remove)) {
		struct ceph_cap_flush *cf;
		cf = list_first_entry(&to_remove,
				      struct ceph_cap_flush, i_list);
		list_del(&cf->i_list);
		ceph_free_cap_flush(cf);
	}

	wake_up_all(&ci->i_cap_wq);
	if (invalidate)
		ceph_queue_invalidate(inode);
	if (drop)
		iput(inode);
	return 0;
}

/*
 * caller must hold session s_mutex
 */
static void remove_session_caps(struct ceph_mds_session *session)
{
	struct ceph_fs_client *fsc = session->s_mdsc->fsc;
	struct super_block *sb = fsc->sb;
	dout("remove_session_caps on %p\n", session);
	iterate_session_caps(session, remove_session_caps_cb, fsc);

	wake_up_all(&fsc->mdsc->cap_flushing_wq);

	spin_lock(&session->s_cap_lock);
	if (session->s_nr_caps > 0) {
		struct inode *inode;
		struct ceph_cap *cap, *prev = NULL;
		struct ceph_vino vino;
		/*
		 * iterate_session_caps() skips inodes that are being
		 * deleted, we need to wait until deletions are complete.
		 * __wait_on_freeing_inode() is designed for the job,
		 * but it is not exported, so use lookup inode function
		 * to access it.
		 */
		while (!list_empty(&session->s_caps)) {
			cap = list_entry(session->s_caps.next,
					 struct ceph_cap, session_caps);
			if (cap == prev)
				break;
			prev = cap;
			vino = cap->ci->i_vino;
			spin_unlock(&session->s_cap_lock);

			inode = ceph_find_inode(sb, vino);
			iput(inode);

			spin_lock(&session->s_cap_lock);
		}
	}

	// drop cap expires and unlock s_cap_lock
	cleanup_cap_releases(session->s_mdsc, session);

	BUG_ON(session->s_nr_caps > 0);
	BUG_ON(!list_empty(&session->s_cap_flushing));
}

/*
 * wake up any threads waiting on this session's caps.  if the cap is
 * old (didn't get renewed on the client reconnect), remove it now.
 *
 * caller must hold s_mutex.
 */
static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
			      void *arg)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (arg) {
		spin_lock(&ci->i_ceph_lock);
		ci->i_wanted_max_size = 0;
		ci->i_requested_max_size = 0;
		spin_unlock(&ci->i_ceph_lock);
	}
	wake_up_all(&ci->i_cap_wq);
	return 0;
}

static void wake_up_session_caps(struct ceph_mds_session *session,
				 int reconnect)
{
	dout("wake_up_session_caps %p mds%d\n", session, session->s_mds);
	iterate_session_caps(session, wake_up_session_cb,
			     (void *)(unsigned long)reconnect);
}

/*
 * Send periodic message to MDS renewing all currently held caps.  The
 * ack will reset the expiration for all caps from this session.
 *
 * caller holds s_mutex
 */
static int send_renew_caps(struct ceph_mds_client *mdsc,
			   struct ceph_mds_session *session)
{
	struct ceph_msg *msg;
	int state;

	if (time_after_eq(jiffies, session->s_cap_ttl) &&
	    time_after_eq(session->s_cap_ttl, session->s_renew_requested))
		pr_info("mds%d caps stale\n", session->s_mds);
	session->s_renew_requested = jiffies;

	/* do not try to renew caps until a recovering mds has reconnected
	 * with its clients. */
	state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds);
	if (state < CEPH_MDS_STATE_RECONNECT) {
		dout("send_renew_caps ignoring mds%d (%s)\n",
		     session->s_mds, ceph_mds_state_name(state));
		return 0;
	}

	dout("send_renew_caps to mds%d (%s)\n", session->s_mds,
	     ceph_mds_state_name(state));
	msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
				 ++session->s_renew_seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 0;
}

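/*
 * Ack a CEPH_SESSION_FLUSHMSG message, telling the MDS we have processed
 * everything it sent up to the given sequence number.
 */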
static int send_flushmsg_ack(struct ceph_mds_client *mdsc,
			     struct ceph_mds_session *session, u64 seq)
{
	struct ceph_msg *msg;

	dout("send_flushmsg_ack to mds%d (%s) seq %lld\n",
	     session->s_mds, ceph_session_state_name(session->s_state), seq);
	msg = create_session_msg(CEPH_SESSION_FLUSHMSG_ACK, seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 0;
}

/*
 * Note new cap ttl, and any transition from stale -> not stale (fresh?).
 *
 * Called under session->s_mutex
 */
static void renewed_caps(struct ceph_mds_client *mdsc,
			 struct ceph_mds_session *session, int is_renew)
{
	int was_stale;
	int wake = 0;

	spin_lock(&session->s_cap_lock);
	was_stale = is_renew && time_after_eq(jiffies, session->s_cap_ttl);

	session->s_cap_ttl = session->s_renew_requested +
		mdsc->mdsmap->m_session_timeout*HZ;

	if (was_stale) {
		if (time_before(jiffies, session->s_cap_ttl)) {
			pr_info("mds%d caps renewed\n", session->s_mds);
			wake = 1;
		} else {
			pr_info("mds%d caps still stale\n", session->s_mds);
		}
	}
	dout("renewed_caps mds%d ttl now %lu, was %s, now %s\n",
	     session->s_mds, session->s_cap_ttl, was_stale ? "stale" : "fresh",
	     time_before(jiffies, session->s_cap_ttl) ? "fresh" : "stale");
	spin_unlock(&session->s_cap_lock);

	if (wake)
		wake_up_session_caps(session, 0);
}

/*
 * send a session close request
 */
static int request_close_session(struct ceph_mds_client *mdsc,
				 struct ceph_mds_session *session)
{
	struct ceph_msg *msg;

	dout("request_close_session mds%d state %s seq %lld\n",
	     session->s_mds, ceph_session_state_name(session->s_state),
	     session->s_seq);
	msg = create_session_msg(CEPH_SESSION_REQUEST_CLOSE, session->s_seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 1;
}

/*
 * Called with s_mutex held.
 */
static int __close_session(struct ceph_mds_client *mdsc,
			   struct ceph_mds_session *session)
{
	if (session->s_state >= CEPH_MDS_SESSION_CLOSING)
		return 0;
	session->s_state = CEPH_MDS_SESSION_CLOSING;
	return request_close_session(mdsc, session);
}

/*
 * Trim old(er) caps.
 *
 * Because we can't cache an inode without one or more caps, we do
 * this indirectly: if a cap is unused, we prune its aliases, at which
 * point the inode will hopefully get dropped too.
 *
 * Yes, this is a bit sloppy.  Our only real goal here is to respond to
 * memory pressure from the MDS, though, so it needn't be perfect.
 */
static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
{
	struct ceph_mds_session *session = arg;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int used, wanted, oissued, mine;

	if (session->s_trim_caps <= 0)
		return -1;

	spin_lock(&ci->i_ceph_lock);
	mine = cap->issued | cap->implemented;
	used = __ceph_caps_used(ci);
	wanted = __ceph_caps_file_wanted(ci);
	oissued = __ceph_caps_issued_other(ci, cap);

	dout("trim_caps_cb %p cap %p mine %s oissued %s used %s wanted %s\n",
	     inode, cap, ceph_cap_string(mine), ceph_cap_string(oissued),
	     ceph_cap_string(used), ceph_cap_string(wanted));
	if (cap == ci->i_auth_cap) {
		if (ci->i_dirty_caps || ci->i_flushing_caps ||
		    !list_empty(&ci->i_cap_snaps))
			goto out;
		if ((used | wanted) & CEPH_CAP_ANY_WR)
			goto out;
	}
	/* The inode has cached pages, but it's no longer used.
	 * we can safely drop it */
	if (wanted == 0 && used == CEPH_CAP_FILE_CACHE &&
	    !(oissued & CEPH_CAP_FILE_CACHE)) {
		used = 0;
		oissued = 0;
	}
	if ((used | wanted) & ~oissued & mine)
		goto out;   /* we need these caps */

	session->s_trim_caps--;
	if (oissued) {
		/* we aren't the only cap.. just remove us */
		__ceph_remove_cap(cap, true);
	} else {
		/* try dropping referring dentries */
		spin_unlock(&ci->i_ceph_lock);
		d_prune_aliases(inode);
		dout("trim_caps_cb %p cap %p pruned, count now %d\n",
		     inode, cap, atomic_read(&inode->i_count));
		return 0;
	}

out:
	spin_unlock(&ci->i_ceph_lock);
	return 0;
}

/*
 * Trim session cap count down to some max number.
 */
static int trim_caps(struct ceph_mds_client *mdsc,
		     struct ceph_mds_session *session,
		     int max_caps)
{
	int trim_caps = session->s_nr_caps - max_caps;

	dout("trim_caps mds%d start: %d / %d, trim %d\n",
	     session->s_mds, session->s_nr_caps, max_caps, trim_caps);
	if (trim_caps > 0) {
		session->s_trim_caps = trim_caps;
		iterate_session_caps(session, trim_caps_cb, session);
		dout("trim_caps mds%d done: %d / %d, trimmed %d\n",
		     session->s_mds, session->s_nr_caps, max_caps,
		     trim_caps - session->s_trim_caps);
		session->s_trim_caps = 0;
	}

	ceph_send_cap_releases(mdsc, session);
	return 0;
}

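/*
 * Return 1 once no cap flush with a tid at or below want_flush_tid is
 * still pending; used as the wait_event() condition in wait_caps_flush().
 */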
static int check_caps_flush(struct ceph_mds_client *mdsc,
			    u64 want_flush_tid)
{
	int ret = 1;

	spin_lock(&mdsc->cap_dirty_lock);
	if (!list_empty(&mdsc->cap_flush_list)) {
		struct ceph_cap_flush *cf =
			list_first_entry(&mdsc->cap_flush_list,
					 struct ceph_cap_flush, g_list);
		if (cf->tid <= want_flush_tid) {
			dout("check_caps_flush still flushing tid "
			     "%llu <= %llu\n", cf->tid, want_flush_tid);
			ret = 0;
		}
	}
	spin_unlock(&mdsc->cap_dirty_lock);
	return ret;
}

/*
 * flush all dirty inode data to disk.
 *
 * wait until we've flushed through want_flush_tid
 */
static void wait_caps_flush(struct ceph_mds_client *mdsc,
			    u64 want_flush_tid)
{
	dout("check_caps_flush want %llu\n", want_flush_tid);

	wait_event(mdsc->cap_flushing_wq,
		   check_caps_flush(mdsc, want_flush_tid));

	dout("check_caps_flush ok, flushed thru %llu\n", want_flush_tid);
}

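/*
 * Flush this session's queued cap releases out to the MDS, batching them
 * into CEPH_MSG_CLIENT_CAPRELEASE messages of up to CEPH_CAPS_PER_RELEASE
 * entries each.
 */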
/*
 * called under s_mutex
 */
void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
			    struct ceph_mds_session *session)
{
	struct ceph_msg *msg = NULL;
	struct ceph_mds_cap_release *head;
	struct ceph_mds_cap_item *item;
	struct ceph_cap *cap;
	LIST_HEAD(tmp_list);
	int num_cap_releases;

	spin_lock(&session->s_cap_lock);
again:
	list_splice_init(&session->s_cap_releases, &tmp_list);
	num_cap_releases = session->s_num_cap_releases;
	session->s_num_cap_releases = 0;
	spin_unlock(&session->s_cap_lock);

	while (!list_empty(&tmp_list)) {
		if (!msg) {
			msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE,
					   PAGE_SIZE, GFP_NOFS, false);
			if (!msg)
				goto out_err;
			head = msg->front.iov_base;
			head->num = cpu_to_le32(0);
			msg->front.iov_len = sizeof(*head);
		}
		cap = list_first_entry(&tmp_list, struct ceph_cap,
				       session_caps);
		list_del(&cap->session_caps);
		num_cap_releases--;

		head = msg->front.iov_base;
		le32_add_cpu(&head->num, 1);
		item = msg->front.iov_base + msg->front.iov_len;
		item->ino = cpu_to_le64(cap->cap_ino);
		item->cap_id = cpu_to_le64(cap->cap_id);
		item->migrate_seq = cpu_to_le32(cap->mseq);
		item->seq = cpu_to_le32(cap->issue_seq);
		msg->front.iov_len += sizeof(*item);

		ceph_put_cap(mdsc, cap);

		if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) {
			msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
			dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
			ceph_con_send(&session->s_con, msg);
			msg = NULL;
		}
	}

	BUG_ON(num_cap_releases != 0);

	spin_lock(&session->s_cap_lock);
	if (!list_empty(&session->s_cap_releases))
		goto again;
	spin_unlock(&session->s_cap_lock);

	if (msg) {
		msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
		dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
		ceph_con_send(&session->s_con, msg);
	}
	return;
out_err:
	pr_err("send_cap_releases mds%d, failed to allocate message\n",
	       session->s_mds);
	spin_lock(&session->s_cap_lock);
	list_splice(&tmp_list, &session->s_cap_releases);
	session->s_num_cap_releases += num_cap_releases;
	spin_unlock(&session->s_cap_lock);
}

/*
 * requests
 */

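/*
 * Size the readdir reply buffer from the directory's i_files/i_subdirs
 * hint, capped by the max_readdir mount option; fall back to a smaller
 * page order if a large allocation fails.
 */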
int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
				    struct inode *dir)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct ceph_mount_options *opt = req->r_mdsc->fsc->mount_options;
	size_t size = sizeof(struct ceph_mds_reply_dir_entry);
	int order, num_entries;

	spin_lock(&ci->i_ceph_lock);
	num_entries = ci->i_files + ci->i_subdirs;
	spin_unlock(&ci->i_ceph_lock);
	num_entries = max(num_entries, 1);
	num_entries = min(num_entries, opt->max_readdir);

	order = get_order(size * num_entries);
	while (order >= 0) {
		rinfo->dir_entries = (void*)__get_free_pages(GFP_KERNEL |
							     __GFP_NOWARN,
							     order);
		if (rinfo->dir_entries)
			break;
		order--;
	}
	if (!rinfo->dir_entries)
		return -ENOMEM;

	num_entries = (PAGE_SIZE << order) / size;
	num_entries = min(num_entries, opt->max_readdir);

	rinfo->dir_buf_size = PAGE_SIZE << order;
	req->r_num_caps = num_entries + 1;
	req->r_args.readdir.max_entries = cpu_to_le32(num_entries);
	req->r_args.readdir.max_bytes = cpu_to_le32(opt->max_readdir_bytes);
	return 0;
}

/*
 * Create an mds request.
 */
struct ceph_mds_request *
ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
{
	struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS);

	if (!req)
		return ERR_PTR(-ENOMEM);

	mutex_init(&req->r_fill_mutex);
	req->r_mdsc = mdsc;
	req->r_started = jiffies;
	req->r_resend_mds = -1;
	INIT_LIST_HEAD(&req->r_unsafe_dir_item);
	INIT_LIST_HEAD(&req->r_unsafe_target_item);
	req->r_fmode = -1;
	kref_init(&req->r_kref);
	RB_CLEAR_NODE(&req->r_node);
	INIT_LIST_HEAD(&req->r_wait);
	init_completion(&req->r_completion);
	init_completion(&req->r_safe_completion);
	INIT_LIST_HEAD(&req->r_unsafe_item);

	req->r_stamp = current_fs_time(mdsc->fsc->sb);

	req->r_op = op;
	req->r_direct_mode = mode;
	return req;
}

/*
 * return oldest (lowest) request, tid in request tree, 0 if none.
 *
 * called under mdsc->mutex.
 */
static struct ceph_mds_request *__get_oldest_req(struct ceph_mds_client *mdsc)
{
	if (RB_EMPTY_ROOT(&mdsc->request_tree))
		return NULL;
	return rb_entry(rb_first(&mdsc->request_tree),
			struct ceph_mds_request, r_node);
}

static inline u64 __get_oldest_tid(struct ceph_mds_client *mdsc)
{
	return mdsc->oldest_tid;
}

/*
 * Build a dentry's path.  Allocate on heap; caller must kfree.  Based
 * on build_path_from_dentry in fs/cifs/dir.c.
 *
 * If @stop_on_nosnap, generate path relative to the first non-snapped
 * inode.
 *
 * Encode hidden .snap dirs as a double /, i.e.
 *   foo/.snap/bar -> foo//bar
 */
char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
			   int stop_on_nosnap)
{
	struct dentry *temp;
	char *path;
	int len, pos;
	unsigned seq;

	if (dentry == NULL)
		return ERR_PTR(-EINVAL);

retry:
	len = 0;
	seq = read_seqbegin(&rename_lock);
	rcu_read_lock();
	for (temp = dentry; !IS_ROOT(temp);) {
		struct inode *inode = d_inode(temp);
		if (inode && ceph_snap(inode) == CEPH_SNAPDIR)
			len++;  /* slash only */
		else if (stop_on_nosnap && inode &&
			 ceph_snap(inode) == CEPH_NOSNAP)
			break;
		else
			len += 1 + temp->d_name.len;
		temp = temp->d_parent;
	}
	rcu_read_unlock();
	if (len)
		len--;  /* no leading '/' */

	path = kmalloc(len+1, GFP_NOFS);
	if (path == NULL)
		return ERR_PTR(-ENOMEM);
	pos = len;
	path[pos] = 0;	/* trailing null */
	rcu_read_lock();
	for (temp = dentry; !IS_ROOT(temp) && pos != 0; ) {
		struct inode *inode;

		spin_lock(&temp->d_lock);
		inode = d_inode(temp);
		if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
			dout("build_path path+%d: %p SNAPDIR\n",
			     pos, temp);
		} else if (stop_on_nosnap && inode &&
			   ceph_snap(inode) == CEPH_NOSNAP) {
			spin_unlock(&temp->d_lock);
			break;
		} else {
			pos -= temp->d_name.len;
			if (pos < 0) {
				spin_unlock(&temp->d_lock);
				break;
			}
			strncpy(path + pos, temp->d_name.name,
				temp->d_name.len);
		}
		spin_unlock(&temp->d_lock);
		if (pos)
			path[--pos] = '/';
		temp = temp->d_parent;
	}
	rcu_read_unlock();
	if (pos != 0 || read_seqretry(&rename_lock, seq)) {
		pr_err("build_path did not end path lookup where "
		       "expected, namelen is %d, pos is %d\n", len, pos);
		/* presumably this is only possible if racing with a
		   rename of one of the parent directories (we can not
		   lock the dentries above us to prevent this, but
		   retrying should be harmless) */
		kfree(path);
		goto retry;
	}

	*base = ceph_ino(d_inode(temp));
	*plen = len;
	dout("build_path on %p %d built %llx '%.*s'\n",
	     dentry, d_count(dentry), *base, len, path);
	return path;
}

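/*
 * Encode a dentry as a (parent ino, name) pair when the parent is a
 * plain (non-snapped) directory; otherwise fall back to building the
 * full path with ceph_mdsc_build_path().
 */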
static int build_dentry_path(struct dentry *dentry, struct inode *dir,
|
2009-10-06 18:31:09 +00:00
|
|
|
const char **ppath, int *ppathlen, u64 *pino,
|
|
|
|
int *pfreepath)
|
|
|
|
{
|
|
|
|
char *path;
|
|
|
|
|
2016-12-15 13:37:57 +00:00
|
|
|
rcu_read_lock();
|
2016-12-15 13:37:58 +00:00
|
|
|
if (!dir)
|
|
|
|
dir = d_inode_rcu(dentry->d_parent);
|
2016-12-15 13:37:57 +00:00
|
|
|
if (dir && ceph_snap(dir) == CEPH_NOSNAP) {
|
|
|
|
*pino = ceph_ino(dir);
|
|
|
|
rcu_read_unlock();
|
2009-10-06 18:31:09 +00:00
|
|
|
*ppath = dentry->d_name.name;
|
|
|
|
*ppathlen = dentry->d_name.len;
|
|
|
|
return 0;
|
|
|
|
}
|
2016-12-15 13:37:57 +00:00
|
|
|
rcu_read_unlock();
|
2009-10-06 18:31:09 +00:00
|
|
|
path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
|
|
|
|
if (IS_ERR(path))
|
|
|
|
return PTR_ERR(path);
|
|
|
|
*ppath = path;
|
|
|
|
*pfreepath = 1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
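/*
 * Build the path argument for an inode-based request.  A non-snapped
 * inode can be addressed by ino alone (empty path); a snapped inode
 * needs a full path built from one of its dentry aliases.
 */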
static int build_inode_path(struct inode *inode,
			    const char **ppath, int *ppathlen, u64 *pino,
			    int *pfreepath)
{
	struct dentry *dentry;
	char *path;

	if (ceph_snap(inode) == CEPH_NOSNAP) {
		*pino = ceph_ino(inode);
		*ppathlen = 0;
		return 0;
	}
	dentry = d_find_alias(inode);
	path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
	dput(dentry);
	if (IS_ERR(path))
		return PTR_ERR(path);
	*ppath = path;
	*pfreepath = 1;
	return 0;
}

/*
 * request arguments may be specified via an inode *, a dentry *, or
 * an explicit ino+path.
 */
static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
				 struct inode *rdiri, const char *rpath,
				 u64 rino, const char **ppath, int *pathlen,
				 u64 *ino, int *freepath)
{
	int r = 0;

	if (rinode) {
		r = build_inode_path(rinode, ppath, pathlen, ino, freepath);
		dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode),
		     ceph_snap(rinode));
	} else if (rdentry) {
		r = build_dentry_path(rdentry, rdiri, ppath, pathlen, ino,
				      freepath);
		dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
		     *ppath);
	} else if (rpath || rino) {
		*ino = rino;
		*ppath = rpath;
		*pathlen = rpath ? strlen(rpath) : 0;
		dout(" path %.*s\n", *pathlen, rpath);
	}

	return r;
}

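/*
 * Build the CEPH_MSG_CLIENT_REQUEST message.  The front of the message
 * is laid out as: request head, two encoded filepaths (ino + path
 * string each), cap/dentry releases, and a timestamp; the release
 * offset is recorded so that a later replay can overwrite that region.
 */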
/*
 * called under mdsc->mutex
 */
static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
					       struct ceph_mds_request *req,
					       int mds, bool drop_cap_releases)
{
	struct ceph_msg *msg;
	struct ceph_mds_request_head *head;
	const char *path1 = NULL;
	const char *path2 = NULL;
	u64 ino1 = 0, ino2 = 0;
	int pathlen1 = 0, pathlen2 = 0;
	int freepath1 = 0, freepath2 = 0;
	int len;
	u16 releases;
	void *p, *end;
	int ret;

	ret = set_request_path_attr(req->r_inode, req->r_dentry,
			      req->r_parent, req->r_path1, req->r_ino1.ino,
			      &path1, &pathlen1, &ino1, &freepath1);
	if (ret < 0) {
		msg = ERR_PTR(ret);
		goto out;
	}

	ret = set_request_path_attr(NULL, req->r_old_dentry,
			      req->r_old_dentry_dir,
			      req->r_path2, req->r_ino2.ino,
			      &path2, &pathlen2, &ino2, &freepath2);
	if (ret < 0) {
		msg = ERR_PTR(ret);
		goto out_free1;
	}

	len = sizeof(*head) +
		pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64)) +
		sizeof(struct ceph_timespec);

	/* calculate (max) length for cap releases */
	len += sizeof(struct ceph_mds_request_release) *
		(!!req->r_inode_drop + !!req->r_dentry_drop +
		 !!req->r_old_inode_drop + !!req->r_old_dentry_drop);
	if (req->r_dentry_drop)
		len += req->r_dentry->d_name.len;
	if (req->r_old_dentry_drop)
		len += req->r_old_dentry->d_name.len;

	msg = ceph_msg_new(CEPH_MSG_CLIENT_REQUEST, len, GFP_NOFS, false);
	if (!msg) {
		msg = ERR_PTR(-ENOMEM);
		goto out_free2;
	}

	msg->hdr.version = cpu_to_le16(2);
	msg->hdr.tid = cpu_to_le64(req->r_tid);

	head = msg->front.iov_base;
	p = msg->front.iov_base + sizeof(*head);
	end = msg->front.iov_base + msg->front.iov_len;

	head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch);
	head->op = cpu_to_le32(req->r_op);
	head->caller_uid = cpu_to_le32(from_kuid(&init_user_ns, req->r_uid));
	head->caller_gid = cpu_to_le32(from_kgid(&init_user_ns, req->r_gid));
	head->args = req->r_args;

	ceph_encode_filepath(&p, end, ino1, path1);
	ceph_encode_filepath(&p, end, ino2, path2);

	/* make note of release offset, in case we need to replay */
	req->r_request_release_offset = p - msg->front.iov_base;

	/* cap releases */
	releases = 0;
	if (req->r_inode_drop)
		releases += ceph_encode_inode_release(&p,
		      req->r_inode ? req->r_inode : d_inode(req->r_dentry),
		      mds, req->r_inode_drop, req->r_inode_unless, 0);
	if (req->r_dentry_drop)
		releases += ceph_encode_dentry_release(&p, req->r_dentry,
				req->r_parent, mds, req->r_dentry_drop,
				req->r_dentry_unless);
	if (req->r_old_dentry_drop)
		releases += ceph_encode_dentry_release(&p, req->r_old_dentry,
				req->r_old_dentry_dir, mds,
				req->r_old_dentry_drop,
				req->r_old_dentry_unless);
	if (req->r_old_inode_drop)
		releases += ceph_encode_inode_release(&p,
		      d_inode(req->r_old_dentry),
		      mds, req->r_old_inode_drop, req->r_old_inode_unless, 0);

	if (drop_cap_releases) {
		releases = 0;
		p = msg->front.iov_base + req->r_request_release_offset;
	}

	head->num_releases = cpu_to_le16(releases);

	/* time stamp */
	{
		struct ceph_timespec ts;
		ceph_encode_timespec(&ts, &req->r_stamp);
		ceph_encode_copy(&p, &ts, sizeof(ts));
	}

	BUG_ON(p > end);
	msg->front.iov_len = p - msg->front.iov_base;
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);

	if (req->r_pagelist) {
		struct ceph_pagelist *pagelist = req->r_pagelist;
		refcount_inc(&pagelist->refcnt);
		ceph_msg_data_add_pagelist(msg, pagelist);
		msg->hdr.data_len = cpu_to_le32(pagelist->length);
	} else {
		msg->hdr.data_len = 0;
	}

	msg->hdr.data_off = cpu_to_le16(0);

out_free2:
	if (freepath2)
		kfree((char *)path2);
out_free1:
	if (freepath1)
		kfree((char *)path1);
out:
	return msg;
}

/*
 * called under mdsc->mutex if error, under no mutex if
 * success.
 */
static void complete_request(struct ceph_mds_client *mdsc,
			     struct ceph_mds_request *req)
{
	if (req->r_callback)
		req->r_callback(mdsc, req);
	else
		complete_all(&req->r_completion);
}

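/*
 * Prepare (or, for a replay, patch up) the request message and account
 * the send attempt.  A request that already received an unsafe reply
 * must be resent verbatim with CEPH_MDS_FLAG_REPLAY set; see the
 * comment in the function body.
 */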
/*
 * called under mdsc->mutex
 */
static int __prepare_send_request(struct ceph_mds_client *mdsc,
				  struct ceph_mds_request *req,
				  int mds, bool drop_cap_releases)
{
	struct ceph_mds_request_head *rhead;
	struct ceph_msg *msg;
	int flags = 0;

	req->r_attempts++;
	if (req->r_inode) {
		struct ceph_cap *cap =
			ceph_get_cap_for_mds(ceph_inode(req->r_inode), mds);

		if (cap)
			req->r_sent_on_mseq = cap->mseq;
		else
			req->r_sent_on_mseq = -1;
	}
	dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req,
	     req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts);

	if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
		void *p;
		/*
		 * Replay.  Do not regenerate message (and rebuild
		 * paths, etc.); just use the original message.
		 * Rebuilding paths will break for renames because
		 * d_move mangles the src name.
		 */
		msg = req->r_request;
		rhead = msg->front.iov_base;

		flags = le32_to_cpu(rhead->flags);
		flags |= CEPH_MDS_FLAG_REPLAY;
		rhead->flags = cpu_to_le32(flags);

		if (req->r_target_inode)
			rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode));

		rhead->num_retry = req->r_attempts - 1;

		/* remove cap/dentry releases from message */
		rhead->num_releases = 0;

		/* time stamp */
		p = msg->front.iov_base + req->r_request_release_offset;
		{
			struct ceph_timespec ts;
			ceph_encode_timespec(&ts, &req->r_stamp);
			ceph_encode_copy(&p, &ts, sizeof(ts));
		}

		msg->front.iov_len = p - msg->front.iov_base;
		msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
		return 0;
	}

	if (req->r_request) {
		ceph_msg_put(req->r_request);
		req->r_request = NULL;
	}
	msg = create_request_message(mdsc, req, mds, drop_cap_releases);
	if (IS_ERR(msg)) {
		req->r_err = PTR_ERR(msg);
		return PTR_ERR(msg);
	}
	req->r_request = msg;

	rhead = msg->front.iov_base;
	rhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc));
	if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
		flags |= CEPH_MDS_FLAG_REPLAY;
	if (req->r_parent)
		flags |= CEPH_MDS_FLAG_WANT_DENTRY;
	rhead->flags = cpu_to_le32(flags);
	rhead->num_fwd = req->r_num_fwd;
	rhead->num_retry = req->r_attempts - 1;
	rhead->ino = 0;

	dout(" r_parent = %p\n", req->r_parent);
	return 0;
}

/*
 * send request, or put it on the appropriate wait list.
 */
static int __do_request(struct ceph_mds_client *mdsc,
			struct ceph_mds_request *req)
{
	struct ceph_mds_session *session = NULL;
	int mds = -1;
	int err = 0;

	if (req->r_err || test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
		if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags))
			__unregister_request(mdsc, req);
		goto out;
	}

	if (req->r_timeout &&
	    time_after_eq(jiffies, req->r_started + req->r_timeout)) {
		dout("do_request timed out\n");
		err = -EIO;
		goto finish;
	}
	if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
		dout("do_request forced umount\n");
		err = -EIO;
		goto finish;
	}
	if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_MOUNTING) {
		if (mdsc->mdsmap_err) {
			err = mdsc->mdsmap_err;
			dout("do_request mdsmap err %d\n", err);
			goto finish;
		}
		if (mdsc->mdsmap->m_epoch == 0) {
			dout("do_request no mdsmap, waiting for map\n");
			list_add(&req->r_wait, &mdsc->waiting_for_map);
			goto finish;
		}
		if (!(mdsc->fsc->mount_options->flags &
		      CEPH_MOUNT_OPT_MOUNTWAIT) &&
		    !ceph_mdsmap_is_cluster_available(mdsc->mdsmap)) {
			err = -ENOENT;
			pr_info("probably no mds server is up\n");
			goto finish;
		}
	}

	put_request_session(req);

	mds = __choose_mds(mdsc, req);
	if (mds < 0 ||
	    ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) {
		dout("do_request no mds or not active, waiting for map\n");
		list_add(&req->r_wait, &mdsc->waiting_for_map);
		goto out;
	}

	/* get, open session */
	session = __ceph_lookup_mds_session(mdsc, mds);
	if (!session) {
		session = register_session(mdsc, mds);
		if (IS_ERR(session)) {
			err = PTR_ERR(session);
			goto finish;
		}
	}
	req->r_session = get_session(session);

	dout("do_request mds%d session %p state %s\n", mds, session,
	     ceph_session_state_name(session->s_state));
	if (session->s_state != CEPH_MDS_SESSION_OPEN &&
	    session->s_state != CEPH_MDS_SESSION_HUNG) {
		if (session->s_state == CEPH_MDS_SESSION_REJECTED) {
			err = -EACCES;
			goto out_session;
		}
		if (session->s_state == CEPH_MDS_SESSION_NEW ||
		    session->s_state == CEPH_MDS_SESSION_CLOSING)
			__open_session(mdsc, session);
		list_add(&req->r_wait, &session->s_waiting);
		goto out_session;
	}

	/* send request */
	req->r_resend_mds = -1;   /* forget any previous mds hint */

	if (req->r_request_started == 0)   /* note request start time */
		req->r_request_started = jiffies;

	err = __prepare_send_request(mdsc, req, mds, false);
	if (!err) {
		ceph_msg_get(req->r_request);
		ceph_con_send(&session->s_con, req->r_request);
	}

out_session:
	ceph_put_mds_session(session);
finish:
	if (err) {
		dout("__do_request early error %d\n", err);
		req->r_err = err;
		complete_request(mdsc, req);
		__unregister_request(mdsc, req);
	}
out:
	return err;
}

/*
 * called under mdsc->mutex
 */
static void __wake_requests(struct ceph_mds_client *mdsc,
			    struct list_head *head)
{
	struct ceph_mds_request *req;
	LIST_HEAD(tmp_list);

	list_splice_init(head, &tmp_list);

	while (!list_empty(&tmp_list)) {
		req = list_entry(tmp_list.next,
				 struct ceph_mds_request, r_wait);
		list_del_init(&req->r_wait);
		dout(" wake request %p tid %llu\n", req, req->r_tid);
		__do_request(mdsc, req);
	}
}

/*
 * Wake up threads with requests pending for @mds, so that they can
 * resubmit their requests to a possibly different mds.
 */
static void kick_requests(struct ceph_mds_client *mdsc, int mds)
{
	struct ceph_mds_request *req;
	struct rb_node *p = rb_first(&mdsc->request_tree);

	dout("kick_requests mds%d\n", mds);
	while (p) {
		req = rb_entry(p, struct ceph_mds_request, r_node);
		p = rb_next(p);
		if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
			continue;
		if (req->r_attempts > 0)
			continue; /* only new requests */
		if (req->r_session &&
		    req->r_session->s_mds == mds) {
			dout(" kicking tid %llu\n", req->r_tid);
			list_del_init(&req->r_wait);
			__do_request(mdsc, req);
		}
	}
}

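/*
 * Register a request and start it without waiting for the reply;
 * callers that need the result use req->r_callback or wait on
 * req->r_completion themselves.
 */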
void ceph_mdsc_submit_request(struct ceph_mds_client *mdsc,
			      struct ceph_mds_request *req)
{
	dout("submit_request on %p\n", req);
	mutex_lock(&mdsc->mutex);
	__register_request(mdsc, req, NULL);
	__do_request(mdsc, req);
	mutex_unlock(&mdsc->mutex);
}

/*
 * Synchronously perform an mds request.  Take care of all of the
 * session setup, forwarding, retry details.
 */
int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
			 struct inode *dir,
			 struct ceph_mds_request *req)
{
	int err;

	dout("do_request on %p\n", req);

	/* take CAP_PIN refs for r_inode, r_parent, r_old_dentry */
	if (req->r_inode)
		ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
	if (req->r_parent)
		ceph_get_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
	if (req->r_old_dentry_dir)
		ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir),
				  CEPH_CAP_PIN);

	/* issue */
	mutex_lock(&mdsc->mutex);
	__register_request(mdsc, req, dir);
	__do_request(mdsc, req);

	if (req->r_err) {
		err = req->r_err;
		goto out;
	}

	/* wait */
	mutex_unlock(&mdsc->mutex);
	dout("do_request waiting\n");
	if (!req->r_timeout && req->r_wait_for_completion) {
		err = req->r_wait_for_completion(mdsc, req);
	} else {
		long timeleft = wait_for_completion_killable_timeout(
					&req->r_completion,
					ceph_timeout_jiffies(req->r_timeout));
		if (timeleft > 0)
			err = 0;
		else if (!timeleft)
			err = -EIO;  /* timed out */
		else
			err = timeleft;  /* killed */
	}
	dout("do_request waited, got %d\n", err);
	mutex_lock(&mdsc->mutex);

	/* only abort if we didn't race with a real reply */
	if (test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
		err = le32_to_cpu(req->r_reply_info.head->result);
	} else if (err < 0) {
		dout("aborted request %lld with %d\n", req->r_tid, err);

		/*
		 * ensure we aren't running concurrently with
		 * ceph_fill_trace or ceph_readdir_prepopulate, which
		 * rely on locks (dir mutex) held by our caller.
		 */
		mutex_lock(&req->r_fill_mutex);
		req->r_err = err;
		set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags);
		mutex_unlock(&req->r_fill_mutex);

		if (req->r_parent &&
		    (req->r_op & CEPH_MDS_OP_WRITE))
			ceph_invalidate_dir_request(req);
	} else {
		err = req->r_err;
	}

out:
	mutex_unlock(&mdsc->mutex);
	dout("do_request %p done, result %d\n", req, err);
	return err;
}

/*
 * Invalidate dir's completeness, dentry lease state on an aborted MDS
 * namespace request.
 */
void ceph_invalidate_dir_request(struct ceph_mds_request *req)
{
	struct inode *inode = req->r_parent;

	dout("invalidate_dir_request %p (complete, lease(s))\n", inode);

	ceph_dir_clear_complete(inode);
	if (req->r_dentry)
		ceph_invalidate_dentry_lease(req->r_dentry);
	if (req->r_old_dentry)
		ceph_invalidate_dentry_lease(req->r_old_dentry);
}

/*
 * Handle mds reply.
 *
 * We take the session mutex and parse and process the reply immediately.
 * This preserves the logical ordering of replies, capabilities, etc., sent
 * by the MDS as they are applied to our local cache.
 */
static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
{
	struct ceph_mds_client *mdsc = session->s_mdsc;
	struct ceph_mds_request *req;
	struct ceph_mds_reply_head *head = msg->front.iov_base;
	struct ceph_mds_reply_info_parsed *rinfo;  /* parsed reply info */
	struct ceph_snap_realm *realm;
	u64 tid;
	int err, result;
	int mds = session->s_mds;

	if (msg->front.iov_len < sizeof(*head)) {
		pr_err("mdsc_handle_reply got corrupt (short) reply\n");
		ceph_msg_dump(msg);
		return;
	}

	/* get request, session */
	tid = le64_to_cpu(msg->hdr.tid);
	mutex_lock(&mdsc->mutex);
	req = lookup_get_request(mdsc, tid);
	if (!req) {
		dout("handle_reply on unknown tid %llu\n", tid);
		mutex_unlock(&mdsc->mutex);
		return;
	}
	dout("handle_reply %p\n", req);

	/* correct session? */
	if (req->r_session != session) {
		pr_err("mdsc_handle_reply got %llu on session mds%d"
		       " not mds%d\n", tid, session->s_mds,
		       req->r_session ? req->r_session->s_mds : -1);
		mutex_unlock(&mdsc->mutex);
		goto out;
	}

	/* dup? */
	if ((test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags) && !head->safe) ||
	    (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags) && head->safe)) {
		pr_warn("got a dup %s reply on %llu from mds%d\n",
			head->safe ? "safe" : "unsafe", tid, mds);
		mutex_unlock(&mdsc->mutex);
		goto out;
	}
	if (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags)) {
		pr_warn("got unsafe after safe on %llu from mds%d\n",
			tid, mds);
		mutex_unlock(&mdsc->mutex);
		goto out;
	}

	result = le32_to_cpu(head->result);

	/*
	 * Handle an ESTALE
	 * if we're not talking to the authority, send to them
	 * if the authority has changed while we weren't looking,
	 * send to new authority
	 * Otherwise we just have to return an ESTALE
	 */
	if (result == -ESTALE) {
		dout("got ESTALE on request %llu", req->r_tid);
		req->r_resend_mds = -1;
		if (req->r_direct_mode != USE_AUTH_MDS) {
			dout("not using auth, setting for that now");
			req->r_direct_mode = USE_AUTH_MDS;
			__do_request(mdsc, req);
			mutex_unlock(&mdsc->mutex);
			goto out;
		} else {
			int mds = __choose_mds(mdsc, req);
			if (mds >= 0 && mds != req->r_session->s_mds) {
				dout("but auth changed, so resending");
				__do_request(mdsc, req);
				mutex_unlock(&mdsc->mutex);
				goto out;
			}
		}
		dout("have to return ESTALE on request %llu", req->r_tid);
	}


	if (head->safe) {
		set_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags);
		__unregister_request(mdsc, req);

		if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
			/*
			 * We already handled the unsafe response, now do the
			 * cleanup.  No need to examine the response; the MDS
			 * doesn't include any result info in the safe
			 * response.  And even if it did, there is nothing
			 * useful we could do with a revised return value.
			 */
			dout("got safe reply %llu, mds%d\n", tid, mds);

			/* last unsafe request during umount? */
			if (mdsc->stopping && !__get_oldest_req(mdsc))
				complete_all(&mdsc->safe_umount_waiters);
			mutex_unlock(&mdsc->mutex);
			goto out;
		}
	} else {
		set_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags);
		list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe);
		if (req->r_unsafe_dir) {
			struct ceph_inode_info *ci =
					ceph_inode(req->r_unsafe_dir);
			spin_lock(&ci->i_unsafe_lock);
			list_add_tail(&req->r_unsafe_dir_item,
				      &ci->i_unsafe_dirops);
			spin_unlock(&ci->i_unsafe_lock);
		}
	}

	dout("handle_reply tid %lld result %d\n", tid, result);
	rinfo = &req->r_reply_info;
	err = parse_reply_info(msg, rinfo, session->s_con.peer_features);
	mutex_unlock(&mdsc->mutex);

	mutex_lock(&session->s_mutex);
	if (err < 0) {
		pr_err("mdsc_handle_reply got corrupt reply mds%d(tid:%lld)\n", mds, tid);
		ceph_msg_dump(msg);
		goto out_err;
	}

	/* snap trace */
	realm = NULL;
	if (rinfo->snapblob_len) {
		down_write(&mdsc->snap_rwsem);
		ceph_update_snap_trace(mdsc, rinfo->snapblob,
				rinfo->snapblob + rinfo->snapblob_len,
				le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP,
				&realm);
		downgrade_write(&mdsc->snap_rwsem);
	} else {
		down_read(&mdsc->snap_rwsem);
	}

	/* insert trace into our cache */
	mutex_lock(&req->r_fill_mutex);
	current->journal_info = req;
	err = ceph_fill_trace(mdsc->fsc->sb, req);
	if (err == 0) {
		if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR ||
				    req->r_op == CEPH_MDS_OP_LSSNAP))
			ceph_readdir_prepopulate(req, req->r_session);
		ceph_unreserve_caps(mdsc, &req->r_caps_reservation);
	}
	current->journal_info = NULL;
	mutex_unlock(&req->r_fill_mutex);

	up_read(&mdsc->snap_rwsem);
	if (realm)
		ceph_put_snap_realm(mdsc, realm);

	if (err == 0 && req->r_target_inode &&
	    test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
		struct ceph_inode_info *ci = ceph_inode(req->r_target_inode);
		spin_lock(&ci->i_unsafe_lock);
		list_add_tail(&req->r_unsafe_target_item, &ci->i_unsafe_iops);
		spin_unlock(&ci->i_unsafe_lock);
	}
out_err:
	mutex_lock(&mdsc->mutex);
	if (!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
		if (err) {
			req->r_err = err;
		} else {
			req->r_reply = ceph_msg_get(msg);
			set_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags);
		}
	} else {
		dout("reply arrived after request %lld was aborted\n", tid);
	}
	mutex_unlock(&mdsc->mutex);

	mutex_unlock(&session->s_mutex);

	/* kick calling process */
	complete_request(mdsc, req);
out:
	ceph_mdsc_put_request(req);
	return;
}

/*
 * handle mds notification that our request has been forwarded.
 */
static void handle_forward(struct ceph_mds_client *mdsc,
			   struct ceph_mds_session *session,
			   struct ceph_msg *msg)
{
	struct ceph_mds_request *req;
	u64 tid = le64_to_cpu(msg->hdr.tid);
	u32 next_mds;
	u32 fwd_seq;
	int err = -EINVAL;
	void *p = msg->front.iov_base;
	void *end = p + msg->front.iov_len;

	ceph_decode_need(&p, end, 2*sizeof(u32), bad);
	next_mds = ceph_decode_32(&p);
	fwd_seq = ceph_decode_32(&p);

	mutex_lock(&mdsc->mutex);
	req = lookup_get_request(mdsc, tid);
	if (!req) {
		dout("forward tid %llu to mds%d - req dne\n", tid, next_mds);
		goto out;  /* dup reply? */
	}

	if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
		dout("forward tid %llu aborted, unregistering\n", tid);
		__unregister_request(mdsc, req);
	} else if (fwd_seq <= req->r_num_fwd) {
		dout("forward tid %llu to mds%d - old seq %d <= %d\n",
		     tid, next_mds, req->r_num_fwd, fwd_seq);
	} else {
		/* resend. forward race not possible; mds would drop */
		dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds);
		BUG_ON(req->r_err);
		BUG_ON(test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags));
		req->r_attempts = 0;
		req->r_num_fwd = fwd_seq;
		req->r_resend_mds = next_mds;
		put_request_session(req);
		__do_request(mdsc, req);
	}
	ceph_mdsc_put_request(req);
out:
	mutex_unlock(&mdsc->mutex);
	return;

bad:
	pr_err("mdsc_handle_forward decode error err=%d\n", err);
}

/*
 * handle a mds session control message
 */
static void handle_session(struct ceph_mds_session *session,
			   struct ceph_msg *msg)
{
	struct ceph_mds_client *mdsc = session->s_mdsc;
	u32 op;
	u64 seq;
	int mds = session->s_mds;
	struct ceph_mds_session_head *h = msg->front.iov_base;
	int wake = 0;

	/* decode */
	if (msg->front.iov_len != sizeof(*h))
		goto bad;
	op = le32_to_cpu(h->op);
	seq = le64_to_cpu(h->seq);

	mutex_lock(&mdsc->mutex);
	if (op == CEPH_SESSION_CLOSE) {
		get_session(session);
		__unregister_session(mdsc, session);
	}
	/* FIXME: this ttl calculation is generous */
	session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose;
	mutex_unlock(&mdsc->mutex);

	mutex_lock(&session->s_mutex);

	dout("handle_session mds%d %s %p state %s seq %llu\n",
	     mds, ceph_session_op_name(op), session,
	     ceph_session_state_name(session->s_state), seq);

	if (session->s_state == CEPH_MDS_SESSION_HUNG) {
		session->s_state = CEPH_MDS_SESSION_OPEN;
		pr_info("mds%d came back\n", session->s_mds);
	}

	switch (op) {
	case CEPH_SESSION_OPEN:
		if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
			pr_info("mds%d reconnect success\n", session->s_mds);
		session->s_state = CEPH_MDS_SESSION_OPEN;
		renewed_caps(mdsc, session, 0);
		wake = 1;
		if (mdsc->stopping)
			__close_session(mdsc, session);
		break;

	case CEPH_SESSION_RENEWCAPS:
		if (session->s_renew_seq == seq)
			renewed_caps(mdsc, session, 1);
		break;

	case CEPH_SESSION_CLOSE:
		if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
			pr_info("mds%d reconnect denied\n", session->s_mds);
		cleanup_session_requests(mdsc, session);
		remove_session_caps(session);
		wake = 2; /* for good measure */
		wake_up_all(&mdsc->session_close_wq);
		break;

	case CEPH_SESSION_STALE:
		pr_info("mds%d caps went stale, renewing\n",
			session->s_mds);
		spin_lock(&session->s_gen_ttl_lock);
		session->s_cap_gen++;
		session->s_cap_ttl = jiffies - 1;
		spin_unlock(&session->s_gen_ttl_lock);
		send_renew_caps(mdsc, session);
		break;

	case CEPH_SESSION_RECALL_STATE:
		trim_caps(mdsc, session, le32_to_cpu(h->max_caps));
		break;

	case CEPH_SESSION_FLUSHMSG:
		send_flushmsg_ack(mdsc, session, seq);
		break;

	case CEPH_SESSION_FORCE_RO:
		dout("force_session_readonly %p\n", session);
		spin_lock(&session->s_cap_lock);
		session->s_readonly = true;
		spin_unlock(&session->s_cap_lock);
		wake_up_session_caps(session, 0);
		break;

	case CEPH_SESSION_REJECT:
		WARN_ON(session->s_state != CEPH_MDS_SESSION_OPENING);
		pr_info("mds%d rejected session\n", session->s_mds);
		session->s_state = CEPH_MDS_SESSION_REJECTED;
		cleanup_session_requests(mdsc, session);
		remove_session_caps(session);
		wake = 2; /* for good measure */
		break;

	default:
		pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds);
		WARN_ON(1);
	}

	mutex_unlock(&session->s_mutex);
	if (wake) {
		mutex_lock(&mdsc->mutex);
		__wake_requests(mdsc, &session->s_waiting);
		if (wake == 2)
			kick_requests(mdsc, mds);
		mutex_unlock(&mdsc->mutex);
	}
	if (op == CEPH_SESSION_CLOSE)
		ceph_put_mds_session(session);
	return;

bad:
	pr_err("mdsc_handle_session corrupt message mds%d len %d\n", mds,
	       (int)msg->front.iov_len);
	ceph_msg_dump(msg);
	return;
}

/*
 * called under session->mutex.
 */
static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
				   struct ceph_mds_session *session)
{
	struct ceph_mds_request *req, *nreq;
	struct rb_node *p;
	int err;

	dout("replay_unsafe_requests mds%d\n", session->s_mds);

	mutex_lock(&mdsc->mutex);
	list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item) {
		err = __prepare_send_request(mdsc, req, session->s_mds, true);
		if (!err) {
			ceph_msg_get(req->r_request);
			ceph_con_send(&session->s_con, req->r_request);
		}
	}

	/*
	 * Also re-send old requests when the MDS enters the reconnect
	 * stage, so that the MDS can process completed requests in the
	 * clientreplay stage.
	 */
	p = rb_first(&mdsc->request_tree);
	while (p) {
		req = rb_entry(p, struct ceph_mds_request, r_node);
		p = rb_next(p);
		if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
			continue;
		if (req->r_attempts == 0)
			continue; /* only old requests */
		if (req->r_session &&
		    req->r_session->s_mds == session->s_mds) {
			err = __prepare_send_request(mdsc, req,
						     session->s_mds, true);
			if (!err) {
				ceph_msg_get(req->r_request);
				ceph_con_send(&session->s_con, req->r_request);
			}
		}
	}
	mutex_unlock(&mdsc->mutex);
}

/*
 * Encode information about a cap for a reconnect with the MDS.
 */
static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
			  void *arg)
{
	union {
		struct ceph_mds_cap_reconnect v2;
		struct ceph_mds_cap_reconnect_v1 v1;
	} rec;
	struct ceph_inode_info *ci;
	struct ceph_reconnect_state *recon_state = arg;
	struct ceph_pagelist *pagelist = recon_state->pagelist;
	char *path;
	int pathlen, err;
	u64 pathbase;
	u64 snap_follows;
	struct dentry *dentry;

	ci = cap->ci;

	dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
	     inode, ceph_vinop(inode), cap, cap->cap_id,
	     ceph_cap_string(cap->issued));
	err = ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
	if (err)
		return err;

	dentry = d_find_alias(inode);
	if (dentry) {
		path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase, 0);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out_dput;
		}
	} else {
		path = NULL;
		pathlen = 0;
		pathbase = 0;
	}

	spin_lock(&ci->i_ceph_lock);
	cap->seq = 0;        /* reset cap seq */
	cap->issue_seq = 0;  /* and issue_seq */
	cap->mseq = 0;       /* and migrate_seq */
	cap->cap_gen = cap->session->s_cap_gen;

	if (recon_state->msg_version >= 2) {
		rec.v2.cap_id = cpu_to_le64(cap->cap_id);
		rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
		rec.v2.issued = cpu_to_le32(cap->issued);
		rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
		rec.v2.pathbase = cpu_to_le64(pathbase);
		rec.v2.flock_len = 0;
	} else {
		rec.v1.cap_id = cpu_to_le64(cap->cap_id);
		rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
		rec.v1.issued = cpu_to_le32(cap->issued);
		rec.v1.size = cpu_to_le64(inode->i_size);
		ceph_encode_timespec(&rec.v1.mtime, &inode->i_mtime);
		ceph_encode_timespec(&rec.v1.atime, &inode->i_atime);
		rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
		rec.v1.pathbase = cpu_to_le64(pathbase);
	}

	if (list_empty(&ci->i_cap_snaps)) {
		snap_follows = 0;
	} else {
		struct ceph_cap_snap *capsnap =
			list_first_entry(&ci->i_cap_snaps,
					 struct ceph_cap_snap, ci_item);
		snap_follows = capsnap->follows;
	}
	spin_unlock(&ci->i_ceph_lock);

	if (recon_state->msg_version >= 2) {
		int num_fcntl_locks, num_flock_locks;
		struct ceph_filelock *flocks;
		size_t struct_len, total_len = 0;
		u8 struct_v = 0;

encode_again:
		ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
		flocks = kmalloc((num_fcntl_locks+num_flock_locks) *
				 sizeof(struct ceph_filelock), GFP_NOFS);
		if (!flocks) {
			err = -ENOMEM;
			goto out_free;
		}
		err = ceph_encode_locks_to_buffer(inode, flocks,
						  num_fcntl_locks,
						  num_flock_locks);
		if (err) {
			kfree(flocks);
			if (err == -ENOSPC)
				goto encode_again;
			goto out_free;
		}

		if (recon_state->msg_version >= 3) {
			/* version, compat_version and struct_len */
			total_len = 2 * sizeof(u8) + sizeof(u32);
			struct_v = 2;
		}
		/*
		 * number of encoded locks is stable, so copy to pagelist
		 */
		struct_len = 2 * sizeof(u32) +
			     (num_fcntl_locks + num_flock_locks) *
			     sizeof(struct ceph_filelock);
		rec.v2.flock_len = cpu_to_le32(struct_len);

		struct_len += sizeof(rec.v2);
		struct_len += sizeof(u32) + pathlen;

		if (struct_v >= 2)
			struct_len += sizeof(u64); /* snap_follows */

		total_len += struct_len;
		err = ceph_pagelist_reserve(pagelist, total_len);

		if (!err) {
			if (recon_state->msg_version >= 3) {
				ceph_pagelist_encode_8(pagelist, struct_v);
				ceph_pagelist_encode_8(pagelist, 1);
				ceph_pagelist_encode_32(pagelist, struct_len);
			}
			ceph_pagelist_encode_string(pagelist, path, pathlen);
			ceph_pagelist_append(pagelist, &rec, sizeof(rec.v2));
			ceph_locks_to_pagelist(flocks, pagelist,
					       num_fcntl_locks,
					       num_flock_locks);
			if (struct_v >= 2)
				ceph_pagelist_encode_64(pagelist, snap_follows);
		}
		kfree(flocks);
	} else {
		size_t size = sizeof(u32) + pathlen + sizeof(rec.v1);
		err = ceph_pagelist_reserve(pagelist, size);
		if (!err) {
			ceph_pagelist_encode_string(pagelist, path, pathlen);
			ceph_pagelist_append(pagelist, &rec, sizeof(rec.v1));
		}
	}

	recon_state->nr_caps++;
out_free:
	kfree(path);
out_dput:
	dput(dentry);
	return err;
}

/*
 * If an MDS fails and recovers, clients need to reconnect in order to
 * reestablish shared state.  This includes all caps issued through
 * this session _and_ the snap_realm hierarchy.  Because it's not
 * clear which snap realms the mds cares about, we send everything we
 * know about.. that ensures we'll then get any new info the
 * recovering MDS might have.
 *
 * This is a relatively heavyweight operation, but it's rare.
 *
 * called with mdsc->mutex held.
 */
static void send_mds_reconnect(struct ceph_mds_client *mdsc,
			       struct ceph_mds_session *session)
{
	struct ceph_msg *reply;
	struct rb_node *p;
	int mds = session->s_mds;
	int err = -ENOMEM;
	int s_nr_caps;
	struct ceph_pagelist *pagelist;
	struct ceph_reconnect_state recon_state;

	pr_info("mds%d reconnect start\n", mds);

	pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
	if (!pagelist)
		goto fail_nopagelist;
	ceph_pagelist_init(pagelist);

	reply = ceph_msg_new(CEPH_MSG_CLIENT_RECONNECT, 0, GFP_NOFS, false);
	if (!reply)
		goto fail_nomsg;

	mutex_lock(&session->s_mutex);
	session->s_state = CEPH_MDS_SESSION_RECONNECTING;
	session->s_seq = 0;

	dout("session %p state %s\n", session,
	     ceph_session_state_name(session->s_state));

	spin_lock(&session->s_gen_ttl_lock);
	session->s_cap_gen++;
	spin_unlock(&session->s_gen_ttl_lock);

	spin_lock(&session->s_cap_lock);
	/* don't know if session is readonly */
	session->s_readonly = 0;
	/*
	 * notify __ceph_remove_cap() that we are composing cap reconnect.
	 * If a cap get released before being added to the cap reconnect,
	 * __ceph_remove_cap() should skip queuing cap release.
	 */
	session->s_cap_reconnect = 1;
	/* drop old cap expires; we're about to reestablish that state */
	cleanup_cap_releases(mdsc, session);

	/* trim unused caps to reduce MDS's cache rejoin time */
	if (mdsc->fsc->sb->s_root)
		shrink_dcache_parent(mdsc->fsc->sb->s_root);

	ceph_con_close(&session->s_con);
	ceph_con_open(&session->s_con,
		      CEPH_ENTITY_TYPE_MDS, mds,
		      ceph_mdsmap_get_addr(mdsc->mdsmap, mds));

	/* replay unsafe requests */
	replay_unsafe_requests(mdsc, session);

	down_read(&mdsc->snap_rwsem);

	/* traverse this session's caps */
	s_nr_caps = session->s_nr_caps;
	err = ceph_pagelist_encode_32(pagelist, s_nr_caps);
	if (err)
		goto fail;

	recon_state.nr_caps = 0;
	recon_state.pagelist = pagelist;
	if (session->s_con.peer_features & CEPH_FEATURE_MDSENC)
		recon_state.msg_version = 3;
	else if (session->s_con.peer_features & CEPH_FEATURE_FLOCK)
		recon_state.msg_version = 2;
	else
		recon_state.msg_version = 1;
	err = iterate_session_caps(session, encode_caps_cb, &recon_state);
	if (err < 0)
		goto fail;

	spin_lock(&session->s_cap_lock);
	session->s_cap_reconnect = 0;
	spin_unlock(&session->s_cap_lock);

	/*
	 * snaprealms.  we provide mds with the ino, seq (version), and
	 * parent for all of our realms.  If the mds has any newer info,
	 * it will tell us.
	 */
	for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) {
		struct ceph_snap_realm *realm =
			rb_entry(p, struct ceph_snap_realm, node);
		struct ceph_mds_snaprealm_reconnect sr_rec;

		dout(" adding snap realm %llx seq %lld parent %llx\n",
		     realm->ino, realm->seq, realm->parent_ino);
		sr_rec.ino = cpu_to_le64(realm->ino);
		sr_rec.seq = cpu_to_le64(realm->seq);
		sr_rec.parent = cpu_to_le64(realm->parent_ino);
		err = ceph_pagelist_append(pagelist, &sr_rec, sizeof(sr_rec));
		if (err)
			goto fail;
	}

	reply->hdr.version = cpu_to_le16(recon_state.msg_version);

	/* raced with cap release? */
	if (s_nr_caps != recon_state.nr_caps) {
		struct page *page = list_first_entry(&pagelist->head,
						     struct page, lru);
		__le32 *addr = kmap_atomic(page);
		*addr = cpu_to_le32(recon_state.nr_caps);
		kunmap_atomic(addr);
	}

	reply->hdr.data_len = cpu_to_le32(pagelist->length);
	ceph_msg_data_add_pagelist(reply, pagelist);

	ceph_early_kick_flushing_caps(mdsc, session);

	ceph_con_send(&session->s_con, reply);

	mutex_unlock(&session->s_mutex);

	mutex_lock(&mdsc->mutex);
	__wake_requests(mdsc, &session->s_waiting);
	mutex_unlock(&mdsc->mutex);

	up_read(&mdsc->snap_rwsem);
	return;

fail:
	ceph_msg_put(reply);
	up_read(&mdsc->snap_rwsem);
	mutex_unlock(&session->s_mutex);
fail_nomsg:
	ceph_pagelist_release(pagelist);
fail_nopagelist:
	pr_err("error %d preparing reconnect for mds%d\n", err, mds);
	return;
}

/*
|
|
|
|
* compare old and new mdsmaps, kicking requests
|
|
|
|
* and closing out old connections as necessary
|
|
|
|
*
|
|
|
|
* called under mdsc->mutex.
|
|
|
|
*/
|
|
|
|
static void check_new_map(struct ceph_mds_client *mdsc,
|
|
|
|
struct ceph_mdsmap *newmap,
|
|
|
|
struct ceph_mdsmap *oldmap)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
int oldstate, newstate;
|
|
|
|
struct ceph_mds_session *s;
|
|
|
|
|
|
|
|
dout("check_new_map new %u old %u\n",
|
|
|
|
newmap->m_epoch, oldmap->m_epoch);
|
|
|
|
|
2017-03-28 09:04:13 +00:00
|
|
|
for (i = 0; i < oldmap->m_num_mds && i < mdsc->max_sessions; i++) {
|
2009-10-06 18:31:09 +00:00
|
|
|
if (mdsc->sessions[i] == NULL)
|
|
|
|
continue;
|
|
|
|
s = mdsc->sessions[i];
|
|
|
|
oldstate = ceph_mdsmap_get_state(oldmap, i);
|
|
|
|
newstate = ceph_mdsmap_get_state(newmap, i);
|
|
|
|
|
2010-06-17 21:19:01 +00:00
|
|
|
dout("check_new_map mds%d state %s%s -> %s%s (session %s)\n",
|
2009-10-06 18:31:09 +00:00
|
|
|
i, ceph_mds_state_name(oldstate),
|
2010-06-17 21:19:01 +00:00
|
|
|
ceph_mdsmap_is_laggy(oldmap, i) ? " (laggy)" : "",
|
2009-10-06 18:31:09 +00:00
|
|
|
ceph_mds_state_name(newstate),
|
2010-06-17 21:19:01 +00:00
|
|
|
ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "",
|
2014-09-19 12:51:08 +00:00
|
|
|
ceph_session_state_name(s->s_state));
|
2009-10-06 18:31:09 +00:00
|
|
|
|
2017-03-28 09:04:13 +00:00
|
|
|
if (i >= newmap->m_num_mds ||
|
2012-09-20 09:42:25 +00:00
|
|
|
memcmp(ceph_mdsmap_get_addr(oldmap, i),
|
2009-10-06 18:31:09 +00:00
|
|
|
ceph_mdsmap_get_addr(newmap, i),
|
|
|
|
sizeof(struct ceph_entity_addr))) {
|
|
|
|
if (s->s_state == CEPH_MDS_SESSION_OPENING) {
|
|
|
|
/* the session never opened, just close it
|
|
|
|
* out now */
|
2017-03-29 07:30:24 +00:00
|
|
|
get_session(s);
|
2010-02-22 23:12:16 +00:00
|
|
|
__unregister_session(mdsc, s);
|
2017-03-29 07:30:24 +00:00
|
|
|
__wake_requests(mdsc, &s->s_waiting);
|
|
|
|
ceph_put_mds_session(s);
|
2017-03-28 09:56:29 +00:00
|
|
|
} else if (i >= newmap->m_num_mds) {
|
|
|
|
/* force close session for stopped mds */
|
|
|
|
get_session(s);
|
|
|
|
__unregister_session(mdsc, s);
|
|
|
|
__wake_requests(mdsc, &s->s_waiting);
|
|
|
|
kick_requests(mdsc, i);
|
|
|
|
mutex_unlock(&mdsc->mutex);
|
|
|
|
|
|
|
|
mutex_lock(&s->s_mutex);
|
|
|
|
cleanup_session_requests(mdsc, s);
|
|
|
|
remove_session_caps(s);
|
|
|
|
mutex_unlock(&s->s_mutex);
|
|
|
|
|
|
|
|
ceph_put_mds_session(s);
|
|
|
|
|
|
|
|
mutex_lock(&mdsc->mutex);
|
2009-10-06 18:31:09 +00:00
|
|
|
} else {
|
|
|
|
/* just close it */
|
|
|
|
mutex_unlock(&mdsc->mutex);
|
|
|
|
mutex_lock(&s->s_mutex);
|
|
|
|
mutex_lock(&mdsc->mutex);
|
|
|
|
ceph_con_close(&s->s_con);
|
|
|
|
mutex_unlock(&s->s_mutex);
|
|
|
|
s->s_state = CEPH_MDS_SESSION_RESTARTING;
|
|
|
|
}
|
|
|
|
} else if (oldstate == newstate) {
|
|
|
|
continue; /* nothing new with this mds */
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* send reconnect?
|
|
|
|
*/
|
|
|
|
if (s->s_state == CEPH_MDS_SESSION_RESTARTING &&
|
2010-05-10 23:31:25 +00:00
|
|
|
newstate >= CEPH_MDS_STATE_RECONNECT) {
|
|
|
|
mutex_unlock(&mdsc->mutex);
|
|
|
|
send_mds_reconnect(mdsc, s);
|
|
|
|
mutex_lock(&mdsc->mutex);
|
|
|
|
}
|
2009-10-06 18:31:09 +00:00
|
|
|
|
|
|
|
/*
|
2010-03-18 21:45:05 +00:00
|
|
|
* kick request on any mds that has gone active.
|
2009-10-06 18:31:09 +00:00
|
|
|
*/
|
|
|
|
if (oldstate < CEPH_MDS_STATE_ACTIVE &&
|
|
|
|
newstate >= CEPH_MDS_STATE_ACTIVE) {
|
2010-03-18 21:45:05 +00:00
|
|
|
if (oldstate != CEPH_MDS_STATE_CREATING &&
|
|
|
|
oldstate != CEPH_MDS_STATE_STARTING)
|
|
|
|
pr_info("mds%d recovery completed\n", s->s_mds);
|
|
|
|
kick_requests(mdsc, i);
|
2009-10-06 18:31:09 +00:00
|
|
|
ceph_kick_flushing_caps(mdsc, s);
|
2009-11-20 21:43:45 +00:00
|
|
|
wake_up_session_caps(s, 1);
|
2009-10-06 18:31:09 +00:00
|
|
|
}
|
|
|
|
}
|
2010-06-21 20:38:35 +00:00
|
|
|
|
2017-03-28 09:04:13 +00:00
|
|
|
for (i = 0; i < newmap->m_num_mds && i < mdsc->max_sessions; i++) {
|
2010-06-21 20:38:35 +00:00
|
|
|
s = mdsc->sessions[i];
|
|
|
|
if (!s)
|
|
|
|
continue;
|
|
|
|
if (!ceph_mdsmap_is_laggy(newmap, i))
|
|
|
|
continue;
|
|
|
|
if (s->s_state == CEPH_MDS_SESSION_OPEN ||
|
|
|
|
s->s_state == CEPH_MDS_SESSION_HUNG ||
|
|
|
|
s->s_state == CEPH_MDS_SESSION_CLOSING) {
|
|
|
|
dout(" connecting to export targets of laggy mds%d\n",
|
|
|
|
i);
|
|
|
|
__open_export_target_sessions(mdsc, s);
|
|
|
|
}
|
|
|
|
}
|
2009-10-06 18:31:09 +00:00
|
|
|
}

/*
 * leases
 */

/*
 * caller must hold session s_mutex, dentry->d_lock
 */
void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);

	ceph_put_mds_session(di->lease_session);
	di->lease_session = NULL;
}
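
/*
 * Handle a CEPH_MSG_CLIENT_LEASE message from an MDS: decode the lease
 * header and dentry name, look up the dentry, and apply the requested
 * REVOKE or RENEW action, acking with REVOKE_ACK where appropriate.
 */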
static void handle_lease(struct ceph_mds_client *mdsc,
			 struct ceph_mds_session *session,
			 struct ceph_msg *msg)
{
	struct super_block *sb = mdsc->fsc->sb;
	struct inode *inode;
	struct dentry *parent, *dentry;
	struct ceph_dentry_info *di;
	int mds = session->s_mds;
	struct ceph_mds_lease *h = msg->front.iov_base;
	u32 seq;
	struct ceph_vino vino;
	struct qstr dname;
	int release = 0;

	dout("handle_lease from mds%d\n", mds);

	/* decode */
	if (msg->front.iov_len < sizeof(*h) + sizeof(u32))
		goto bad;
	vino.ino = le64_to_cpu(h->ino);
	vino.snap = CEPH_NOSNAP;
	seq = le32_to_cpu(h->seq);
	dname.name = (void *)h + sizeof(*h) + sizeof(u32);
	dname.len = msg->front.iov_len - sizeof(*h) - sizeof(u32);
	if (dname.len != get_unaligned_le32(h+1))
		goto bad;

	/* lookup inode */
	inode = ceph_find_inode(sb, vino);
	dout("handle_lease %s, ino %llx %p %.*s\n",
	     ceph_lease_op_name(h->action), vino.ino, inode,
	     dname.len, dname.name);

	mutex_lock(&session->s_mutex);
	session->s_seq++;

	if (inode == NULL) {
		dout("handle_lease no inode %llx\n", vino.ino);
		goto release;
	}

	/* dentry */
	parent = d_find_alias(inode);
	if (!parent) {
		dout("no parent dentry on inode %p\n", inode);
		WARN_ON(1);
		goto release;  /* hrm... */
	}
	dname.hash = full_name_hash(parent, dname.name, dname.len);
	dentry = d_lookup(parent, &dname);
	dput(parent);
	if (!dentry)
		goto release;

	spin_lock(&dentry->d_lock);
	di = ceph_dentry(dentry);
	switch (h->action) {
	case CEPH_MDS_LEASE_REVOKE:
		if (di->lease_session == session) {
			if (ceph_seq_cmp(di->lease_seq, seq) > 0)
				h->seq = cpu_to_le32(di->lease_seq);
			__ceph_mdsc_drop_dentry_lease(dentry);
		}
		release = 1;
		break;

	case CEPH_MDS_LEASE_RENEW:
		if (di->lease_session == session &&
		    di->lease_gen == session->s_cap_gen &&
		    di->lease_renew_from &&
		    di->lease_renew_after == 0) {
			unsigned long duration =
				msecs_to_jiffies(le32_to_cpu(h->duration_ms));

			di->lease_seq = seq;
			di->time = di->lease_renew_from + duration;
			di->lease_renew_after = di->lease_renew_from +
				(duration >> 1);
			di->lease_renew_from = 0;
		}
		break;
	}
	spin_unlock(&dentry->d_lock);
	dput(dentry);

	if (!release)
		goto out;

release:
	/* let's just reuse the same message */
	h->action = CEPH_MDS_LEASE_REVOKE_ACK;
	ceph_msg_get(msg);
	ceph_con_send(&session->s_con, msg);

out:
	iput(inode);
	mutex_unlock(&session->s_mutex);
	return;

bad:
	pr_err("corrupt lease message\n");
	ceph_msg_dump(msg);
}
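
/*
 * Build and send a lease message for the given dentry to the session's
 * MDS, e.g. a preemptive RELEASE ahead of a request that would
 * invalidate the lease anyway.
 */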
void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
			      struct inode *inode,
			      struct dentry *dentry, char action,
			      u32 seq)
{
	struct ceph_msg *msg;
	struct ceph_mds_lease *lease;
	int len = sizeof(*lease) + sizeof(u32);
	int dnamelen = 0;

	dout("lease_send_msg inode %p dentry %p %s to mds%d\n",
	     inode, dentry, ceph_lease_op_name(action), session->s_mds);
	dnamelen = dentry->d_name.len;
	len += dnamelen;

	msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, GFP_NOFS, false);
	if (!msg)
		return;
	lease = msg->front.iov_base;
	lease->action = action;
	lease->ino = cpu_to_le64(ceph_vino(inode).ino);
	lease->first = lease->last = cpu_to_le64(ceph_vino(inode).snap);
	lease->seq = cpu_to_le32(seq);
	put_unaligned_le32(dnamelen, lease + 1);
	memcpy((void *)(lease + 1) + 4, dentry->d_name.name, dnamelen);

	/*
	 * if this is a preemptive lease RELEASE, no need to
	 * flush request stream, since the actual request will
	 * soon follow.
	 */
	msg->more_to_follow = (action == CEPH_MDS_LEASE_RELEASE);

	ceph_con_send(&session->s_con, msg);
}

/*
 * drop all leases (and dentry refs) in preparation for umount
 */
static void drop_leases(struct ceph_mds_client *mdsc)
{
	int i;

	dout("drop_leases\n");
	mutex_lock(&mdsc->mutex);
	for (i = 0; i < mdsc->max_sessions; i++) {
		struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
		if (!s)
			continue;
		mutex_unlock(&mdsc->mutex);
		mutex_lock(&s->s_mutex);
		mutex_unlock(&s->s_mutex);
		ceph_put_mds_session(s);
		mutex_lock(&mdsc->mutex);
	}
	mutex_unlock(&mdsc->mutex);
}

/*
 * delayed work -- periodically trim expired leases, renew caps with mds
 */
static void schedule_delayed(struct ceph_mds_client *mdsc)
{
	int delay = 5;
	unsigned hz = round_jiffies_relative(HZ * delay);

	schedule_delayed_work(&mdsc->delayed_work, hz);
}
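
/*
 * Workqueue callback: flush delayed caps, renew caps or send a
 * keepalive on each session, mark unresponsive sessions HUNG, and
 * re-arm the timer via schedule_delayed().
 */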
static void delayed_work(struct work_struct *work)
{
	int i;
	struct ceph_mds_client *mdsc =
		container_of(work, struct ceph_mds_client, delayed_work.work);
	int renew_interval;
	int renew_caps;

	dout("mdsc delayed_work\n");
	ceph_check_delayed_caps(mdsc);

	mutex_lock(&mdsc->mutex);
	renew_interval = mdsc->mdsmap->m_session_timeout >> 2;
	renew_caps = time_after_eq(jiffies, HZ*renew_interval +
				   mdsc->last_renew_caps);
	if (renew_caps)
		mdsc->last_renew_caps = jiffies;

	for (i = 0; i < mdsc->max_sessions; i++) {
		struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
		if (s == NULL)
			continue;
		if (s->s_state == CEPH_MDS_SESSION_CLOSING) {
			dout("resending session close request for mds%d\n",
			     s->s_mds);
			request_close_session(mdsc, s);
			ceph_put_mds_session(s);
			continue;
		}
		if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
			if (s->s_state == CEPH_MDS_SESSION_OPEN) {
				s->s_state = CEPH_MDS_SESSION_HUNG;
				pr_info("mds%d hung\n", s->s_mds);
			}
		}
		if (s->s_state < CEPH_MDS_SESSION_OPEN) {
			/* this mds is failed or recovering, just wait */
			ceph_put_mds_session(s);
			continue;
		}
		mutex_unlock(&mdsc->mutex);

		mutex_lock(&s->s_mutex);
		if (renew_caps)
			send_renew_caps(mdsc, s);
		else
			ceph_con_keepalive(&s->s_con);
		if (s->s_state == CEPH_MDS_SESSION_OPEN ||
		    s->s_state == CEPH_MDS_SESSION_HUNG)
			ceph_send_cap_releases(mdsc, s);
		mutex_unlock(&s->s_mutex);
		ceph_put_mds_session(s);

		mutex_lock(&mdsc->mutex);
	}
	mutex_unlock(&mdsc->mutex);

	schedule_delayed(mdsc);
}
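
/*
 * Allocate and initialize the mds client state for a new fs client.
 */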
int ceph_mdsc_init(struct ceph_fs_client *fsc)
{
	struct ceph_mds_client *mdsc;

	mdsc = kzalloc(sizeof(struct ceph_mds_client), GFP_NOFS);
	if (!mdsc)
		return -ENOMEM;
	mdsc->fsc = fsc;
	fsc->mdsc = mdsc;
	mutex_init(&mdsc->mutex);
	mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS);
	if (mdsc->mdsmap == NULL) {
		kfree(mdsc);
		return -ENOMEM;
	}

	init_completion(&mdsc->safe_umount_waiters);
	init_waitqueue_head(&mdsc->session_close_wq);
	INIT_LIST_HEAD(&mdsc->waiting_for_map);
	mdsc->sessions = NULL;
	atomic_set(&mdsc->num_sessions, 0);
	mdsc->max_sessions = 0;
	mdsc->stopping = 0;
	mdsc->last_snap_seq = 0;
	init_rwsem(&mdsc->snap_rwsem);
	mdsc->snap_realms = RB_ROOT;
	INIT_LIST_HEAD(&mdsc->snap_empty);
	spin_lock_init(&mdsc->snap_empty_lock);
	mdsc->last_tid = 0;
	mdsc->oldest_tid = 0;
	mdsc->request_tree = RB_ROOT;
	INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work);
	mdsc->last_renew_caps = jiffies;
	INIT_LIST_HEAD(&mdsc->cap_delay_list);
	spin_lock_init(&mdsc->cap_delay_lock);
	INIT_LIST_HEAD(&mdsc->snap_flush_list);
	spin_lock_init(&mdsc->snap_flush_lock);
	mdsc->last_cap_flush_tid = 1;
	INIT_LIST_HEAD(&mdsc->cap_flush_list);
	INIT_LIST_HEAD(&mdsc->cap_dirty);
	INIT_LIST_HEAD(&mdsc->cap_dirty_migrating);
	mdsc->num_cap_flushing = 0;
	spin_lock_init(&mdsc->cap_dirty_lock);
	init_waitqueue_head(&mdsc->cap_flushing_wq);
	spin_lock_init(&mdsc->dentry_lru_lock);
	INIT_LIST_HEAD(&mdsc->dentry_lru);

	ceph_caps_init(mdsc);
	ceph_adjust_min_caps(mdsc, fsc->min_caps);

	init_rwsem(&mdsc->pool_perm_rwsem);
	mdsc->pool_perm_tree = RB_ROOT;

	return 0;
}

/*
 * Wait for safe replies on open mds requests.  If we time out, drop
 * all requests from the tree to avoid dangling dentry refs.
 */
static void wait_requests(struct ceph_mds_client *mdsc)
{
	struct ceph_options *opts = mdsc->fsc->client->options;
	struct ceph_mds_request *req;

	mutex_lock(&mdsc->mutex);
	if (__get_oldest_req(mdsc)) {
		mutex_unlock(&mdsc->mutex);

		dout("wait_requests waiting for requests\n");
		wait_for_completion_timeout(&mdsc->safe_umount_waiters,
				ceph_timeout_jiffies(opts->mount_timeout));

		/* tear down remaining requests */
		mutex_lock(&mdsc->mutex);
		while ((req = __get_oldest_req(mdsc))) {
			dout("wait_requests timed out on tid %llu\n",
			     req->r_tid);
			__unregister_request(mdsc, req);
		}
	}
	mutex_unlock(&mdsc->mutex);
	dout("wait_requests done\n");
}

/*
 * called before mount is ro, and before dentries are torn down.
 * (hmm, does this still race with new lookups?)
 */
void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
{
	dout("pre_umount\n");
	mdsc->stopping = 1;

	drop_leases(mdsc);
	ceph_flush_dirty_caps(mdsc);
	wait_requests(mdsc);

	/*
	 * wait for reply handlers to drop their request refs and
	 * their inode/dcache refs
	 */
	ceph_msgr_flush();
}

/*
 * wait for all write mds requests to flush.
 */
static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid)
{
	struct ceph_mds_request *req = NULL, *nextreq;
	struct rb_node *n;

	mutex_lock(&mdsc->mutex);
	dout("wait_unsafe_requests want %lld\n", want_tid);
restart:
	req = __get_oldest_req(mdsc);
	while (req && req->r_tid <= want_tid) {
		/* find next request */
		n = rb_next(&req->r_node);
		if (n)
			nextreq = rb_entry(n, struct ceph_mds_request, r_node);
		else
			nextreq = NULL;
		if (req->r_op != CEPH_MDS_OP_SETFILELOCK &&
		    (req->r_op & CEPH_MDS_OP_WRITE)) {
			/* write op */
			ceph_mdsc_get_request(req);
			if (nextreq)
				ceph_mdsc_get_request(nextreq);
			mutex_unlock(&mdsc->mutex);
			dout("wait_unsafe_requests wait on %llu (want %llu)\n",
			     req->r_tid, want_tid);
			wait_for_completion(&req->r_safe_completion);
			mutex_lock(&mdsc->mutex);
			ceph_mdsc_put_request(req);
			if (!nextreq)
				break;  /* next dne before, so we're done! */
			if (RB_EMPTY_NODE(&nextreq->r_node)) {
				/* next request was removed from tree */
				ceph_mdsc_put_request(nextreq);
				goto restart;
			}
			ceph_mdsc_put_request(nextreq);  /* won't go away */
		}
		req = nextreq;
	}
	mutex_unlock(&mdsc->mutex);
	dout("wait_unsafe_requests done\n");
}
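
/*
 * Flush dirty caps and wait for all outstanding write requests and
 * cap flushes to be acked by the MDS.
 */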
void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
{
	u64 want_tid, want_flush;

	if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
		return;

	dout("sync\n");
	mutex_lock(&mdsc->mutex);
	want_tid = mdsc->last_tid;
	mutex_unlock(&mdsc->mutex);

	ceph_flush_dirty_caps(mdsc);
	spin_lock(&mdsc->cap_dirty_lock);
	want_flush = mdsc->last_cap_flush_tid;
	if (!list_empty(&mdsc->cap_flush_list)) {
		struct ceph_cap_flush *cf =
			list_last_entry(&mdsc->cap_flush_list,
					struct ceph_cap_flush, g_list);
		cf->wake = true;
	}
	spin_unlock(&mdsc->cap_dirty_lock);

	dout("sync want tid %lld flush_seq %lld\n",
	     want_tid, want_flush);

	wait_unsafe_requests(mdsc, want_tid);
	wait_caps_flush(mdsc, want_flush);
}

/*
 * true if all sessions are closed, or we force unmount
 */
static bool done_closing_sessions(struct ceph_mds_client *mdsc, int skipped)
{
	if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
		return true;
	return atomic_read(&mdsc->num_sessions) <= skipped;
}

/*
 * called after sb is ro.
 */
void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
{
	struct ceph_options *opts = mdsc->fsc->client->options;
	struct ceph_mds_session *session;
	int i;
	int skipped = 0;

	dout("close_sessions\n");

	/* close sessions */
	mutex_lock(&mdsc->mutex);
	for (i = 0; i < mdsc->max_sessions; i++) {
		session = __ceph_lookup_mds_session(mdsc, i);
		if (!session)
			continue;
		mutex_unlock(&mdsc->mutex);
		mutex_lock(&session->s_mutex);
		if (__close_session(mdsc, session) <= 0)
			skipped++;
		mutex_unlock(&session->s_mutex);
		ceph_put_mds_session(session);
		mutex_lock(&mdsc->mutex);
	}
	mutex_unlock(&mdsc->mutex);

	dout("waiting for sessions to close\n");
	wait_event_timeout(mdsc->session_close_wq,
			   done_closing_sessions(mdsc, skipped),
			   ceph_timeout_jiffies(opts->mount_timeout));

	/* tear down remaining sessions */
	mutex_lock(&mdsc->mutex);
	for (i = 0; i < mdsc->max_sessions; i++) {
		if (mdsc->sessions[i]) {
			session = get_session(mdsc->sessions[i]);
			__unregister_session(mdsc, session);
			mutex_unlock(&mdsc->mutex);
			mutex_lock(&session->s_mutex);
			remove_session_caps(session);
			mutex_unlock(&session->s_mutex);
			ceph_put_mds_session(session);
			mutex_lock(&mdsc->mutex);
		}
	}
	WARN_ON(!list_empty(&mdsc->cap_delay_list));
	mutex_unlock(&mdsc->mutex);

	ceph_cleanup_empty_realms(mdsc);

	cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */

	dout("stopped\n");
}
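
/*
 * Forcibly close all sessions and kick any waiting requests, used when
 * the mount is being forcibly aborted.
 */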
void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc)
{
	struct ceph_mds_session *session;
	int mds;

	dout("force umount\n");

	mutex_lock(&mdsc->mutex);
	for (mds = 0; mds < mdsc->max_sessions; mds++) {
		session = __ceph_lookup_mds_session(mdsc, mds);
		if (!session)
			continue;
		mutex_unlock(&mdsc->mutex);
		mutex_lock(&session->s_mutex);
		__close_session(mdsc, session);
		if (session->s_state == CEPH_MDS_SESSION_CLOSING) {
			cleanup_session_requests(mdsc, session);
			remove_session_caps(session);
		}
		mutex_unlock(&session->s_mutex);
		ceph_put_mds_session(session);
		mutex_lock(&mdsc->mutex);
		kick_requests(mdsc, mds);
	}
	__wake_requests(mdsc, &mdsc->waiting_for_map);
	mutex_unlock(&mdsc->mutex);
}
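
/*
 * Release mds client resources: stop the delayed work timer and free
 * the mdsmap, session array, and cap/pool permission state.
 */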
static void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
{
	dout("stop\n");
	cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
	if (mdsc->mdsmap)
		ceph_mdsmap_destroy(mdsc->mdsmap);
	kfree(mdsc->sessions);
	ceph_caps_finalize(mdsc);
	ceph_pool_perm_destroy(mdsc);
}
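
/*
 * Final teardown, called when the fs client goes away.
 */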
void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
{
	struct ceph_mds_client *mdsc = fsc->mdsc;

	dout("mdsc_destroy %p\n", mdsc);
	ceph_mdsc_stop(mdsc);

	/* flush out any connection work with references to us */
	ceph_msgr_flush();

	fsc->mdsc = NULL;
	kfree(mdsc);
	dout("mdsc_destroy %p done\n", mdsc);
}
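
/*
 * Decode an fsmap message, pick out the fscid matching the
 * mds_namespace mount option, and subscribe to the mdsmap for that
 * filesystem.
 */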
void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
{
	struct ceph_fs_client *fsc = mdsc->fsc;
	const char *mds_namespace = fsc->mount_options->mds_namespace;
	void *p = msg->front.iov_base;
	void *end = p + msg->front.iov_len;
	u32 epoch;
	u32 map_len;
	u32 num_fs;
	u32 mount_fscid = (u32)-1;
	u8 struct_v, struct_cv;
	int err = -EINVAL;

	ceph_decode_need(&p, end, sizeof(u32), bad);
	epoch = ceph_decode_32(&p);

	dout("handle_fsmap epoch %u\n", epoch);

	ceph_decode_need(&p, end, 2 + sizeof(u32), bad);
	struct_v = ceph_decode_8(&p);
	struct_cv = ceph_decode_8(&p);
	map_len = ceph_decode_32(&p);

	ceph_decode_need(&p, end, sizeof(u32) * 3, bad);
	p += sizeof(u32) * 2; /* skip epoch and legacy_client_fscid */

	num_fs = ceph_decode_32(&p);
	while (num_fs-- > 0) {
		void *info_p, *info_end;
		u32 info_len;
		u8 info_v, info_cv;
		u32 fscid, namelen;

		ceph_decode_need(&p, end, 2 + sizeof(u32), bad);
		info_v = ceph_decode_8(&p);
		info_cv = ceph_decode_8(&p);
		info_len = ceph_decode_32(&p);
		ceph_decode_need(&p, end, info_len, bad);
		info_p = p;
		info_end = p + info_len;
		p = info_end;

		ceph_decode_need(&info_p, info_end, sizeof(u32) * 2, bad);
		fscid = ceph_decode_32(&info_p);
		namelen = ceph_decode_32(&info_p);
		ceph_decode_need(&info_p, info_end, namelen, bad);

		if (mds_namespace &&
		    strlen(mds_namespace) == namelen &&
		    !strncmp(mds_namespace, (char *)info_p, namelen)) {
			mount_fscid = fscid;
			break;
		}
	}

	ceph_monc_got_map(&fsc->client->monc, CEPH_SUB_FSMAP, epoch);
	if (mount_fscid != (u32)-1) {
		fsc->client->monc.fs_cluster_id = mount_fscid;
		ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP,
				   0, true);
		ceph_monc_renew_subs(&fsc->client->monc);
	} else {
		err = -ENOENT;
		goto err_out;
	}
	return;
bad:
	pr_err("error decoding fsmap\n");
err_out:
	mutex_lock(&mdsc->mutex);
	mdsc->mdsmap_err = -ENOENT;
	__wake_requests(mdsc, &mdsc->waiting_for_map);
	mutex_unlock(&mdsc->mutex);
	return;
}

/*
 * handle mds map update.
 */
void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
{
	u32 epoch;
	u32 maplen;
	void *p = msg->front.iov_base;
	void *end = p + msg->front.iov_len;
	struct ceph_mdsmap *newmap, *oldmap;
	struct ceph_fsid fsid;
	int err = -EINVAL;

	ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad);
	ceph_decode_copy(&p, &fsid, sizeof(fsid));
	if (ceph_check_fsid(mdsc->fsc->client, &fsid) < 0)
		return;
	epoch = ceph_decode_32(&p);
	maplen = ceph_decode_32(&p);
	dout("handle_map epoch %u len %d\n", epoch, (int)maplen);

	/* do we need it? */
	mutex_lock(&mdsc->mutex);
	if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) {
		dout("handle_map epoch %u <= our %u\n",
		     epoch, mdsc->mdsmap->m_epoch);
		mutex_unlock(&mdsc->mutex);
		return;
	}

	newmap = ceph_mdsmap_decode(&p, end);
	if (IS_ERR(newmap)) {
		err = PTR_ERR(newmap);
		goto bad_unlock;
	}

	/* swap into place */
	if (mdsc->mdsmap) {
		oldmap = mdsc->mdsmap;
		mdsc->mdsmap = newmap;
		check_new_map(mdsc, newmap, oldmap);
		ceph_mdsmap_destroy(oldmap);
	} else {
		mdsc->mdsmap = newmap;  /* first mds map */
	}
	mdsc->fsc->sb->s_maxbytes = mdsc->mdsmap->m_max_file_size;

	__wake_requests(mdsc, &mdsc->waiting_for_map);
	ceph_monc_got_map(&mdsc->fsc->client->monc, CEPH_SUB_MDSMAP,
			  mdsc->mdsmap->m_epoch);

	mutex_unlock(&mdsc->mutex);
	schedule_delayed(mdsc);
	return;

bad_unlock:
	mutex_unlock(&mdsc->mutex);
bad:
	pr_err("error decoding mdsmap %d\n", err);
	return;
}
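
/*
 * Connection reference callbacks: each ceph_connection pins its mds
 * session via con->private.
 */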
static struct ceph_connection *con_get(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;

	if (get_session(s)) {
		dout("mdsc con_get %p ok (%d)\n", s, refcount_read(&s->s_ref));
		return con;
	}
	dout("mdsc con_get %p FAIL\n", s);
	return NULL;
}

static void con_put(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;

	dout("mdsc con_put %p (%d)\n", s, refcount_read(&s->s_ref) - 1);
	ceph_put_mds_session(s);
}

/*
 * if the client is unresponsive for long enough, the mds will kill
 * the session entirely.
 */
static void peer_reset(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;

	pr_warn("mds%d closed our session\n", s->s_mds);
	send_mds_reconnect(mdsc, s);
}
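
/*
 * Dispatch an incoming message to the handler for its type, after
 * verifying that the session is still registered.
 */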
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	int type = le16_to_cpu(msg->hdr.type);

	mutex_lock(&mdsc->mutex);
	if (__verify_registered_session(mdsc, s) < 0) {
		mutex_unlock(&mdsc->mutex);
		goto out;
	}
	mutex_unlock(&mdsc->mutex);

	switch (type) {
	case CEPH_MSG_MDS_MAP:
		ceph_mdsc_handle_mdsmap(mdsc, msg);
		break;
	case CEPH_MSG_FS_MAP_USER:
		ceph_mdsc_handle_fsmap(mdsc, msg);
		break;
	case CEPH_MSG_CLIENT_SESSION:
		handle_session(s, msg);
		break;
	case CEPH_MSG_CLIENT_REPLY:
		handle_reply(s, msg);
		break;
	case CEPH_MSG_CLIENT_REQUEST_FORWARD:
		handle_forward(mdsc, s, msg);
		break;
	case CEPH_MSG_CLIENT_CAPS:
		ceph_handle_caps(s, msg);
		break;
	case CEPH_MSG_CLIENT_SNAP:
		ceph_handle_snap(mdsc, s, msg);
		break;
	case CEPH_MSG_CLIENT_LEASE:
		handle_lease(mdsc, s, msg);
		break;

	default:
		pr_err("received unknown message type %d %s\n", type,
		       ceph_msg_type_name(type));
	}
out:
	ceph_msg_put(msg);
}

/*
 * authentication
 */

/*
 * Note: returned pointer is the address of a structure that's
 * managed separately.  Caller must *not* attempt to free it.
 */
static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
					int *proto, int force_new)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
	struct ceph_auth_handshake *auth = &s->s_auth;

	if (force_new && auth->authorizer) {
		ceph_auth_destroy_authorizer(auth->authorizer);
		auth->authorizer = NULL;
	}
	if (!auth->authorizer) {
		int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_MDS,
						      auth);
		if (ret)
			return ERR_PTR(ret);
	} else {
		int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_MDS,
						      auth);
		if (ret)
			return ERR_PTR(ret);
	}
	*proto = ac->protocol;

	return auth;
}

static int verify_authorizer_reply(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;

	return ceph_auth_verify_authorizer_reply(ac, s->s_auth.authorizer);
}

static int invalidate_authorizer(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;

	ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS);

	return ceph_monc_validate_auth(&mdsc->fsc->client->monc);
}
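
/*
 * Allocate a message for an incoming frame on this connection, sized
 * from the header's front_len.
 */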
static struct ceph_msg *mds_alloc_msg(struct ceph_connection *con,
				struct ceph_msg_header *hdr, int *skip)
{
	struct ceph_msg *msg;
	int type = (int) le16_to_cpu(hdr->type);
	int front_len = (int) le32_to_cpu(hdr->front_len);

	if (con->in_msg)
		return con->in_msg;

	*skip = 0;
	msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
	if (!msg) {
		pr_err("unable to allocate msg type %d len %d\n",
		       type, front_len);
		return NULL;
	}

	return msg;
}
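
/*
 * Sign outgoing messages and verify signatures on incoming ones using
 * the session's auth handshake state.
 */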
static int mds_sign_message(struct ceph_msg *msg)
{
	struct ceph_mds_session *s = msg->con->private;
	struct ceph_auth_handshake *auth = &s->s_auth;

	return ceph_auth_sign_message(auth, msg);
}

static int mds_check_message_signature(struct ceph_msg *msg)
{
	struct ceph_mds_session *s = msg->con->private;
	struct ceph_auth_handshake *auth = &s->s_auth;

	return ceph_auth_check_message_signature(auth, msg);
}

static const struct ceph_connection_operations mds_con_ops = {
	.get = con_get,
	.put = con_put,
	.dispatch = dispatch,
	.get_authorizer = get_authorizer,
	.verify_authorizer_reply = verify_authorizer_reply,
	.invalidate_authorizer = invalidate_authorizer,
	.peer_reset = peer_reset,
	.alloc_msg = mds_alloc_msg,
	.sign_message = mds_sign_message,
	.check_message_signature = mds_check_message_signature,
};

/* eof */