ceph: save cap_auths in MDS client when session is opened

Save the cap_auths, which have been parsed by the MDS, in the opened
session.

[ idryomov: use s64 and u32 instead of int64_t and uint32_t, switch to
            bool for root_squash, readable and writeable ]

Link: https://tracker.ceph.com/issues/61333
Signed-off-by: Xiubo Li <xiubli@redhat.com>
Reviewed-by: Milind Changire <mchangir@redhat.com>
Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
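
For context: each of these cap_auths corresponds to one MDS auth cap granted to the client on the cluster side. As an illustration (standard CephFS cap syntax; the example is not taken from this commit), a cap such as "allow rw fsname=cephfs path=/dir uid=1000 gids=1000 root_squash" is parsed by the MDS into a match part (uid, gids, path, fs_name, root_squash) and an access part (readable, writeable), which is exactly the split mirrored by the structs this patch adds to mds_client.h.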
Authored by Xiubo Li on 2023-09-25 16:25:20 +08:00; committed by Ilya Dryomov
parent a38297e3fb
commit 1d17de9534
2 changed files with 126 additions and 1 deletion

fs/ceph/mds_client.c

@@ -4112,10 +4112,13 @@ static void handle_session(struct ceph_mds_session *session,
void *p = msg->front.iov_base;
void *end = p + msg->front.iov_len;
struct ceph_mds_session_head *h;
u32 op;
struct ceph_mds_cap_auth *cap_auths = NULL;
u32 op, cap_auths_num = 0;
u64 seq, features = 0;
int wake = 0;
bool blocklisted = false;
u32 i;
/* decode */
ceph_decode_need(&p, end, sizeof(*h), bad);
@@ -4160,7 +4163,101 @@ static void handle_session(struct ceph_mds_session *session,
}
}
if (msg_version >= 6) {
ceph_decode_32_safe(&p, end, cap_auths_num, bad);
doutc(cl, "cap_auths_num %d\n", cap_auths_num);
if (cap_auths_num && op != CEPH_SESSION_OPEN) {
WARN_ON_ONCE(op != CEPH_SESSION_OPEN);
goto skip_cap_auths;
}
cap_auths = kcalloc(cap_auths_num,
sizeof(struct ceph_mds_cap_auth),
GFP_KERNEL);
if (!cap_auths) {
pr_err_client(cl, "No memory for cap_auths\n");
return;
}
for (i = 0; i < cap_auths_num; i++) {
u32 _len, j;
/* struct_v, struct_compat, and struct_len in MDSCapAuth */
ceph_decode_skip_n(&p, end, 2 + sizeof(u32), bad);
/* struct_v, struct_compat, and struct_len in MDSCapMatch */
ceph_decode_skip_n(&p, end, 2 + sizeof(u32), bad);
ceph_decode_64_safe(&p, end, cap_auths[i].match.uid, bad);
ceph_decode_32_safe(&p, end, _len, bad);
if (_len) {
cap_auths[i].match.gids = kcalloc(_len, sizeof(u32),
GFP_KERNEL);
if (!cap_auths[i].match.gids) {
pr_err_client(cl, "No memory for gids\n");
goto fail;
}
cap_auths[i].match.num_gids = _len;
for (j = 0; j < _len; j++)
ceph_decode_32_safe(&p, end,
cap_auths[i].match.gids[j],
bad);
}
ceph_decode_32_safe(&p, end, _len, bad);
if (_len) {
cap_auths[i].match.path = kcalloc(_len + 1, sizeof(char),
GFP_KERNEL);
if (!cap_auths[i].match.path) {
pr_err_client(cl, "No memory for path\n");
goto fail;
}
ceph_decode_copy(&p, cap_auths[i].match.path, _len);
/* Remove the trailing '/' */
while (_len && cap_auths[i].match.path[_len - 1] == '/') {
cap_auths[i].match.path[_len - 1] = '\0';
_len -= 1;
}
}
ceph_decode_32_safe(&p, end, _len, bad);
if (_len) {
cap_auths[i].match.fs_name = kcalloc(_len + 1, sizeof(char),
GFP_KERNEL);
if (!cap_auths[i].match.fs_name) {
pr_err_client(cl, "No memory for fs_name\n");
goto fail;
}
ceph_decode_copy(&p, cap_auths[i].match.fs_name, _len);
}
ceph_decode_8_safe(&p, end, cap_auths[i].match.root_squash, bad);
ceph_decode_8_safe(&p, end, cap_auths[i].readable, bad);
ceph_decode_8_safe(&p, end, cap_auths[i].writeable, bad);
doutc(cl, "uid %lld, num_gids %u, path %s, fs_name %s, root_squash %d, readable %d, writeable %d\n",
cap_auths[i].match.uid, cap_auths[i].match.num_gids,
cap_auths[i].match.path, cap_auths[i].match.fs_name,
cap_auths[i].match.root_squash,
cap_auths[i].readable, cap_auths[i].writeable);
}
}
skip_cap_auths:
mutex_lock(&mdsc->mutex);
if (op == CEPH_SESSION_OPEN) {
if (mdsc->s_cap_auths) {
for (i = 0; i < mdsc->s_cap_auths_num; i++) {
kfree(mdsc->s_cap_auths[i].match.gids);
kfree(mdsc->s_cap_auths[i].match.path);
kfree(mdsc->s_cap_auths[i].match.fs_name);
}
kfree(mdsc->s_cap_auths);
}
mdsc->s_cap_auths_num = cap_auths_num;
mdsc->s_cap_auths = cap_auths;
}
if (op == CEPH_SESSION_CLOSE) {
ceph_get_mds_session(session);
__unregister_session(mdsc, session);
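
Read in order, the *_safe decode calls above imply the following per-entry wire layout. This is reconstructed from this hunk alone, as a sketch rather than the canonical Ceph encoding definition:

	/*
	 * One MDSCapAuth entry as consumed by the decode loop (sketch):
	 *
	 *	u8  struct_v; u8 struct_compat; u32 struct_len;    MDSCapAuth header, skipped
	 *	u8  struct_v; u8 struct_compat; u32 struct_len;    MDSCapMatch header, skipped
	 *	s64 uid;                  MDS_AUTH_UID_ANY (-1) matches any caller
	 *	u32 num_gids; u32 gids[num_gids];
	 *	u32 path_len; char path[path_len];        not NUL-terminated on the wire
	 *	u32 fs_name_len; char fs_name[fs_name_len];
	 *	u8  root_squash; u8 readable; u8 writeable;
	 */

Note that each *_safe helper bails out to the bad label on a short buffer, while an allocation failure for a partially decoded array jumps to the new fail label so that already-allocated gids/path/fs_name buffers are freed.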
@@ -4290,6 +4387,13 @@ bad:
pr_err_client(cl, "corrupt message mds%d len %d\n", mds,
(int)msg->front.iov_len);
ceph_msg_dump(msg);
fail:
for (i = 0; i < cap_auths_num; i++) {
kfree(cap_auths[i].match.gids);
kfree(cap_auths[i].match.path);
kfree(cap_auths[i].match.fs_name);
}
kfree(cap_auths);
return;
}
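
To illustrate how the saved entries could later be consulted on the client, here is a hypothetical helper. It is not part of this commit; only the field names and MDS_AUTH_UID_ANY come from the definitions added in mds_client.h below:

	/* Hypothetical sketch: does the caller's uid/gid fall under @auth? */
	static bool cap_auth_match_ids(const struct ceph_mds_cap_auth *auth,
				       s64 uid, u32 gid)
	{
		u32 i;

		/* MDS_AUTH_UID_ANY (-1) makes the entry apply to every caller */
		if (auth->match.uid == MDS_AUTH_UID_ANY)
			return true;
		if (auth->match.uid != uid)
			return false;

		/* an empty GID list constrains nothing beyond the UID */
		if (!auth->match.num_gids)
			return true;

		for (i = 0; i < auth->match.num_gids; i++) {
			if (auth->match.gids[i] == gid)
				return true;
		}
		return false;
	}

A full access check would also compare match.path and match.fs_name against the mount and apply root_squash together with readable/writeable; this commit only parses and stores the entries.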

fs/ceph/mds_client.h

@@ -71,6 +71,24 @@ enum ceph_feature_type {
struct ceph_fs_client;
struct ceph_cap;
#define MDS_AUTH_UID_ANY -1
struct ceph_mds_cap_match {
s64 uid; /* default to MDS_AUTH_UID_ANY */
u32 num_gids;
u32 *gids; /* use these GIDs */
char *path; /* require path to be child of this
(may be "" or "/" for any) */
char *fs_name;
bool root_squash; /* default to false */
};
struct ceph_mds_cap_auth {
struct ceph_mds_cap_match match;
bool readable;
bool writeable;
};
/*
* parsed info about a single inode. pointers are into the encoded
* on-wire structures within the mds reply message payload.
@@ -513,6 +531,9 @@ struct ceph_mds_client {
struct rw_semaphore pool_perm_rwsem;
struct rb_root pool_perm_tree;
u32 s_cap_auths_num;
struct ceph_mds_cap_auth *s_cap_auths;
char nodename[__NEW_UTS_LEN + 1];
};
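
Tying the two files together: the illustrative cap from the note under the commit message would decode into roughly the following entry (hypothetical values; note that handle_session() strips a trailing '/' from the path):

	/*
	 * Hypothetical decoded form of
	 * "allow rw fsname=cephfs path=/dir uid=1000 gids=1000 root_squash"
	 */
	static u32 example_gids[] = { 1000 };

	static struct ceph_mds_cap_auth example_auth = {
		.match = {
			.uid		= 1000,
			.num_gids	= 1,
			.gids		= example_gids,
			.path		= "/dir",
			.fs_name	= "cephfs",
			.root_squash	= true,
		},
		.readable	= true,		/* "r" in the cap */
		.writeable	= true,		/* "w" in the cap */
	};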