8 smb3 client fixes

-----BEGIN PGP SIGNATURE-----
 
 iQGzBAABCgAdFiEE6fsu8pdIjtWE/DpLiiy9cAdyT1EFAmPK6WoACgkQiiy9cAdy
 T1G0kwv8CrtMwfk/DXhDoWKM5xCkw8at+LSI7KaL9A/xt+w2whU/bi87cC0usuiH
 ofdIoQnUiaTxsdcg3PZby9cX7PNPiF+B7pD+BYfIcsE4yV7xkB2B6bNpz5Yf/7d6
 gx7HchkZBmGSbbYn5dBZobWiLiWMYsPn5B/0W1bpya5HvXZkhBUwLUMncHcfhgcU
 B3g+qxnEDuuxJlI9+t+FCRvrLmz6Wfme9FDMzEtgoH4/ym5Vx8RzUjFLSbNfcP1m
 zJSADjUQ8CIntvE5egGefmojO6w9Urmg1x8ZJFb37CvlC00X/a2af1i3YhpBYIpU
 ae0+4os+6RluJnrV9rWHQ0AZKm0ZzgLakCjyas2dyXHUC42ytBRPdCPjUKVA6fAM
 FhhITe7Xcu+VWN1s7mAqmbHTC2H8dzqqxOom/497msU9jKBUzETsf7Agzof+VP0m
 3c7aRdKpLEBgvsst8a8sWkJZb5LuGG4EgyQXMPJ9+dfqwFkCmVXHUzGMnNnbUDLU
 c7k81xnp
 =k4Xk
 -----END PGP SIGNATURE-----

Merge tag '6.2-rc4-smb3-client-fixes' of git://git.samba.org/sfrench/cifs-2.6

Pull cifs fixes from Steve French:

 - important fix for packet signature calculation error

 - three fixes to correct a DFS deadlock and a DFS refresh problem (the reworked cache-refresh locking is sketched after this list)

 - remove an unused DFS function and duplicate tcon refresh code

 - DFS cache lookup fix

 - uninitialized rc fix
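
The deadlock and locking fixes in this pull boil down to a double-checked read/write lock pattern in cache_refresh_path(): look the entry up under a shared lock, drop all locks across the slow referral request, then retake the lock exclusively and re-check before updating. The following is a minimal userspace sketch of that shape only, not the kernel code: refresh_path(), fetch_referral() and the single-slot cache are made-up names for illustration, and a POSIX rwlock stands in for the kernel rwsem (the real function additionally ends with downgrade_write() so the caller keeps a read lock on the refreshed entry).

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical cache entry; stands in for the kernel's struct cache_entry. */
struct entry {
        char target[64];
        bool valid;
        bool expired;
};

static pthread_rwlock_t cache_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct entry cache;              /* single-slot "cache" for illustration */

/* Placeholder for the slow, network-bound DFS referral request. */
static int fetch_referral(const char *path, struct entry *out)
{
        snprintf(out->target, sizeof(out->target), "target-for-%s", path);
        out->valid = true;
        out->expired = false;
        return 0;
}

/*
 * Double-checked refresh: look up under the shared lock, drop every lock
 * across the slow request, then re-check under the exclusive lock before
 * updating, since another thread may have refreshed the entry meanwhile.
 */
static int refresh_path(const char *path, bool force)
{
        struct entry fresh;
        int rc;

        pthread_rwlock_rdlock(&cache_lock);
        if (cache.valid && !cache.expired && !force) {
                pthread_rwlock_unlock(&cache_lock);
                return 0;               /* still fresh, nothing to do */
        }
        pthread_rwlock_unlock(&cache_lock);     /* hold no locks during I/O */

        rc = fetch_referral(path, &fresh);
        if (rc)
                return rc;

        pthread_rwlock_wrlock(&cache_lock);
        if (force || !cache.valid || cache.expired)
                cache = fresh;          /* re-checked, safe to update */
        pthread_rwlock_unlock(&cache_lock);
        return 0;
}

int main(void)
{
        refresh_path("\\\\srv\\share", false);
        printf("cached target: %s\n", cache.target);
        return 0;
}

Dropping the lock around the network round trip is the crux of the deadlock fix: a session that is reconnecting can take the cache lock for failover without waiting on the thread that is still fetching the referral.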

* tag '6.2-rc4-smb3-client-fixes' of git://git.samba.org/sfrench/cifs-2.6:
  cifs: remove unused function
  cifs: do not include page data when checking signature
  cifs: fix return of uninitialized rc in dfs_cache_update_tgthint()
  cifs: handle cache lookup errors different than -ENOENT
  cifs: remove duplicate code in __refresh_tcon()
  cifs: don't take exclusive lock for updating target hints
  cifs: avoid re-lookups in dfs_cache_find()
  cifs: fix potential deadlock in cache_refresh_path()
Linus Torvalds 2023-01-20 14:28:49 -08:00
commit 4e31badaa1
3 changed files with 104 additions and 155 deletions

fs/cifs/dfs_cache.c

@@ -269,7 +269,7 @@ static int dfscache_proc_show(struct seq_file *m, void *v)
                 list_for_each_entry(t, &ce->tlist, list) {
                         seq_printf(m, "  %s%s\n",
                                    t->name,
-                                   ce->tgthint == t ? " (target hint)" : "");
+                                   READ_ONCE(ce->tgthint) == t ? " (target hint)" : "");
                 }
         }
 }
@@ -321,7 +321,7 @@ static inline void dump_tgts(const struct cache_entry *ce)
         cifs_dbg(FYI, "target list:\n");
         list_for_each_entry(t, &ce->tlist, list) {
                 cifs_dbg(FYI, "  %s%s\n", t->name,
-                         ce->tgthint == t ? " (target hint)" : "");
+                         READ_ONCE(ce->tgthint) == t ? " (target hint)" : "");
         }
 }
@@ -427,7 +427,7 @@ static int cache_entry_hash(const void *data, int size, unsigned int *hash)
 /* Return target hint of a DFS cache entry */
 static inline char *get_tgt_name(const struct cache_entry *ce)
 {
-        struct cache_dfs_tgt *t = ce->tgthint;
+        struct cache_dfs_tgt *t = READ_ONCE(ce->tgthint);
 
         return t ? t->name : ERR_PTR(-ENOENT);
 }
@@ -470,6 +470,7 @@ static struct cache_dfs_tgt *alloc_target(const char *name, int path_consumed)
 static int copy_ref_data(const struct dfs_info3_param *refs, int numrefs,
                          struct cache_entry *ce, const char *tgthint)
 {
+        struct cache_dfs_tgt *target;
         int i;
 
         ce->ttl = max_t(int, refs[0].ttl, CACHE_MIN_TTL);
@@ -496,8 +497,9 @@ static int copy_ref_data(const struct dfs_info3_param *refs, int numrefs,
                 ce->numtgts++;
         }
 
-        ce->tgthint = list_first_entry_or_null(&ce->tlist,
-                                               struct cache_dfs_tgt, list);
+        target = list_first_entry_or_null(&ce->tlist, struct cache_dfs_tgt,
+                                          list);
+        WRITE_ONCE(ce->tgthint, target);
 
         return 0;
 }
@@ -558,7 +560,8 @@ static void remove_oldest_entry_locked(void)
 }
 
 /* Add a new DFS cache entry */
-static int add_cache_entry_locked(struct dfs_info3_param *refs, int numrefs)
+static struct cache_entry *add_cache_entry_locked(struct dfs_info3_param *refs,
+                                                  int numrefs)
 {
         int rc;
         struct cache_entry *ce;
@@ -573,11 +576,11 @@ static int add_cache_entry_locked(struct dfs_info3_param *refs, int numrefs)
 
         rc = cache_entry_hash(refs[0].path_name, strlen(refs[0].path_name), &hash);
         if (rc)
-                return rc;
+                return ERR_PTR(rc);
 
         ce = alloc_cache_entry(refs, numrefs);
         if (IS_ERR(ce))
-                return PTR_ERR(ce);
+                return ce;
 
         spin_lock(&cache_ttl_lock);
         if (!cache_ttl) {
@@ -594,7 +597,7 @@ static int add_cache_entry_locked(struct dfs_info3_param *refs, int numrefs)
         atomic_inc(&cache_count);
 
-        return 0;
+        return ce;
 }
 
 /* Check if two DFS paths are equal. @s1 and @s2 are expected to be in @cache_cp's charset */
@@ -641,7 +644,9 @@ static struct cache_entry *__lookup_cache_entry(const char *path, unsigned int h
  *
  * Use whole path components in the match. Must be called with htable_rw_lock held.
  *
+ * Return cached entry if successful.
  * Return ERR_PTR(-ENOENT) if the entry is not found.
+ * Return error ptr otherwise.
  */
 static struct cache_entry *lookup_cache_entry(const char *path)
 {
@@ -711,14 +716,15 @@ void dfs_cache_destroy(void)
 static int update_cache_entry_locked(struct cache_entry *ce, const struct dfs_info3_param *refs,
                                      int numrefs)
 {
+        struct cache_dfs_tgt *target;
+        char *th = NULL;
         int rc;
-        char *s, *th = NULL;
 
         WARN_ON(!rwsem_is_locked(&htable_rw_lock));
 
-        if (ce->tgthint) {
-                s = ce->tgthint->name;
-                th = kstrdup(s, GFP_ATOMIC);
+        target = READ_ONCE(ce->tgthint);
+        if (target) {
+                th = kstrdup(target->name, GFP_ATOMIC);
                 if (!th)
                         return -ENOMEM;
         }
@@ -767,51 +773,75 @@ static int get_dfs_referral(const unsigned int xid, struct cifs_ses *ses, const
  *
  * For interlinks, cifs_mount() and expand_dfs_referral() are supposed to
  * handle them properly.
+ *
+ * On success, return entry with acquired lock for reading, otherwise error ptr.
  */
-static int cache_refresh_path(const unsigned int xid, struct cifs_ses *ses, const char *path)
+static struct cache_entry *cache_refresh_path(const unsigned int xid,
+                                              struct cifs_ses *ses,
+                                              const char *path,
+                                              bool force_refresh)
 {
-        int rc;
-        struct cache_entry *ce;
         struct dfs_info3_param *refs = NULL;
+        struct cache_entry *ce;
         int numrefs = 0;
-        bool newent = false;
+        int rc;
 
         cifs_dbg(FYI, "%s: search path: %s\n", __func__, path);
 
-        down_write(&htable_rw_lock);
+        down_read(&htable_rw_lock);
 
         ce = lookup_cache_entry(path);
         if (!IS_ERR(ce)) {
-                if (!cache_entry_expired(ce)) {
-                        dump_ce(ce);
-                        up_write(&htable_rw_lock);
-                        return 0;
-                }
-        } else {
-                newent = true;
+                if (!force_refresh && !cache_entry_expired(ce))
+                        return ce;
+        } else if (PTR_ERR(ce) != -ENOENT) {
+                up_read(&htable_rw_lock);
+                return ce;
         }
 
         /*
-         * Either the entry was not found, or it is expired.
+         * Unlock shared access as we don't want to hold any locks while getting
+         * a new referral. The @ses used for performing the I/O could be
+         * reconnecting and it acquires @htable_rw_lock to look up the dfs cache
+         * in order to failover -- if necessary.
+         */
+        up_read(&htable_rw_lock);
+
+        /*
+         * Either the entry was not found, or it is expired, or it is a forced
+         * refresh.
          * Request a new DFS referral in order to create or update a cache entry.
          */
         rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
-        if (rc)
-                goto out_unlock;
+        if (rc) {
+                ce = ERR_PTR(rc);
+                goto out;
+        }
 
         dump_refs(refs, numrefs);
 
-        if (!newent) {
-                rc = update_cache_entry_locked(ce, refs, numrefs);
-                goto out_unlock;
+        down_write(&htable_rw_lock);
+        /* Re-check as another task might have it added or refreshed already */
+        ce = lookup_cache_entry(path);
+        if (!IS_ERR(ce)) {
+                if (force_refresh || cache_entry_expired(ce)) {
+                        rc = update_cache_entry_locked(ce, refs, numrefs);
+                        if (rc)
+                                ce = ERR_PTR(rc);
+                }
+        } else if (PTR_ERR(ce) == -ENOENT) {
+                ce = add_cache_entry_locked(refs, numrefs);
         }
 
-        rc = add_cache_entry_locked(refs, numrefs);
+        if (IS_ERR(ce)) {
+                up_write(&htable_rw_lock);
+                goto out;
+        }
 
-out_unlock:
-        up_write(&htable_rw_lock);
+        downgrade_write(&htable_rw_lock);
+out:
         free_dfs_info_array(refs, numrefs);
-        return rc;
+        return ce;
 }
 
 /*
@@ -878,7 +908,7 @@ static int get_targets(struct cache_entry *ce, struct dfs_cache_tgt_list *tl)
                 }
                 it->it_path_consumed = t->path_consumed;
 
-                if (ce->tgthint == t)
+                if (READ_ONCE(ce->tgthint) == t)
                         list_add(&it->it_list, head);
                 else
                         list_add_tail(&it->it_list, head);
@@ -931,15 +961,8 @@ int dfs_cache_find(const unsigned int xid, struct cifs_ses *ses, const struct nl
         if (IS_ERR(npath))
                 return PTR_ERR(npath);
 
-        rc = cache_refresh_path(xid, ses, npath);
-        if (rc)
-                goto out_free_path;
-
-        down_read(&htable_rw_lock);
-
-        ce = lookup_cache_entry(npath);
+        ce = cache_refresh_path(xid, ses, npath, false);
         if (IS_ERR(ce)) {
-                up_read(&htable_rw_lock);
                 rc = PTR_ERR(ce);
                 goto out_free_path;
         }
@@ -1002,72 +1025,6 @@ out_unlock:
         return rc;
 }
 
-/**
- * dfs_cache_update_tgthint - update target hint of a DFS cache entry
- *
- * If it doesn't find the cache entry, then it will get a DFS referral for @path
- * and create a new entry.
- *
- * In case the cache entry exists but expired, it will get a DFS referral
- * for @path and then update the respective cache entry.
- *
- * @xid: syscall id
- * @ses: smb session
- * @cp: codepage
- * @remap: type of character remapping for paths
- * @path: path to lookup in DFS referral cache
- * @it: DFS target iterator
- *
- * Return zero if the target hint was updated successfully, otherwise non-zero.
- */
-int dfs_cache_update_tgthint(const unsigned int xid, struct cifs_ses *ses,
-                             const struct nls_table *cp, int remap, const char *path,
-                             const struct dfs_cache_tgt_iterator *it)
-{
-        int rc;
-        const char *npath;
-        struct cache_entry *ce;
-        struct cache_dfs_tgt *t;
-
-        npath = dfs_cache_canonical_path(path, cp, remap);
-        if (IS_ERR(npath))
-                return PTR_ERR(npath);
-
-        cifs_dbg(FYI, "%s: update target hint - path: %s\n", __func__, npath);
-
-        rc = cache_refresh_path(xid, ses, npath);
-        if (rc)
-                goto out_free_path;
-
-        down_write(&htable_rw_lock);
-
-        ce = lookup_cache_entry(npath);
-        if (IS_ERR(ce)) {
-                rc = PTR_ERR(ce);
-                goto out_unlock;
-        }
-
-        t = ce->tgthint;
-
-        if (likely(!strcasecmp(it->it_name, t->name)))
-                goto out_unlock;
-
-        list_for_each_entry(t, &ce->tlist, list) {
-                if (!strcasecmp(t->name, it->it_name)) {
-                        ce->tgthint = t;
-                        cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
-                                 it->it_name);
-                        break;
-                }
-        }
-
-out_unlock:
-        up_write(&htable_rw_lock);
-out_free_path:
-        kfree(npath);
-
-        return rc;
-}
-
 /**
  * dfs_cache_noreq_update_tgthint - update target hint of a DFS cache entry
  * without sending any requests to the currently connected server.
@@ -1092,21 +1049,20 @@ void dfs_cache_noreq_update_tgthint(const char *path, const struct dfs_cache_tgt
 
         cifs_dbg(FYI, "%s: path: %s\n", __func__, path);
 
-        if (!down_write_trylock(&htable_rw_lock))
-                return;
+        down_read(&htable_rw_lock);
 
         ce = lookup_cache_entry(path);
         if (IS_ERR(ce))
                 goto out_unlock;
 
-        t = ce->tgthint;
+        t = READ_ONCE(ce->tgthint);
 
         if (unlikely(!strcasecmp(it->it_name, t->name)))
                 goto out_unlock;
 
         list_for_each_entry(t, &ce->tlist, list) {
                 if (!strcasecmp(t->name, it->it_name)) {
-                        ce->tgthint = t;
+                        WRITE_ONCE(ce->tgthint, t);
                         cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
                                  it->it_name);
                         break;
@@ -1114,7 +1070,7 @@ void dfs_cache_noreq_update_tgthint(const char *path, const struct dfs_cache_tgt
         }
 
 out_unlock:
-        up_write(&htable_rw_lock);
+        up_read(&htable_rw_lock);
 }
 
 /**
@@ -1320,35 +1276,37 @@ static bool target_share_equal(struct TCP_Server_Info *server, const char *s1, c
  * Mark dfs tcon for reconnecting when the currently connected tcon does not match any of the new
  * target shares in @refs.
  */
-static void mark_for_reconnect_if_needed(struct cifs_tcon *tcon, struct dfs_cache_tgt_list *tl,
-                                         const struct dfs_info3_param *refs, int numrefs)
+static void mark_for_reconnect_if_needed(struct TCP_Server_Info *server,
+                                         struct dfs_cache_tgt_list *old_tl,
+                                         struct dfs_cache_tgt_list *new_tl)
 {
-        struct dfs_cache_tgt_iterator *it;
-        int i;
+        struct dfs_cache_tgt_iterator *oit, *nit;
 
-        for (it = dfs_cache_get_tgt_iterator(tl); it; it = dfs_cache_get_next_tgt(tl, it)) {
-                for (i = 0; i < numrefs; i++) {
-                        if (target_share_equal(tcon->ses->server, dfs_cache_get_tgt_name(it),
-                                               refs[i].node_name))
+        for (oit = dfs_cache_get_tgt_iterator(old_tl); oit;
+             oit = dfs_cache_get_next_tgt(old_tl, oit)) {
+                for (nit = dfs_cache_get_tgt_iterator(new_tl); nit;
+                     nit = dfs_cache_get_next_tgt(new_tl, nit)) {
+                        if (target_share_equal(server,
+                                               dfs_cache_get_tgt_name(oit),
+                                               dfs_cache_get_tgt_name(nit)))
                                 return;
                 }
         }
 
         cifs_dbg(FYI, "%s: no cached or matched targets. mark dfs share for reconnect.\n", __func__);
-        cifs_signal_cifsd_for_reconnect(tcon->ses->server, true);
+        cifs_signal_cifsd_for_reconnect(server, true);
 }
 
 /* Refresh dfs referral of tcon and mark it for reconnect if needed */
 static int __refresh_tcon(const char *path, struct cifs_tcon *tcon, bool force_refresh)
 {
-        struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl);
+        struct dfs_cache_tgt_list old_tl = DFS_CACHE_TGT_LIST_INIT(old_tl);
+        struct dfs_cache_tgt_list new_tl = DFS_CACHE_TGT_LIST_INIT(new_tl);
         struct cifs_ses *ses = CIFS_DFS_ROOT_SES(tcon->ses);
         struct cifs_tcon *ipc = ses->tcon_ipc;
-        struct dfs_info3_param *refs = NULL;
         bool needs_refresh = false;
         struct cache_entry *ce;
         unsigned int xid;
-        int numrefs = 0;
         int rc = 0;
 
         xid = get_xid();
@@ -1357,9 +1315,8 @@ static int __refresh_tcon(const char *path, struct cifs_tcon *tcon, bool force_r
         ce = lookup_cache_entry(path);
         needs_refresh = force_refresh || IS_ERR(ce) || cache_entry_expired(ce);
         if (!IS_ERR(ce)) {
-                rc = get_targets(ce, &tl);
-                if (rc)
-                        cifs_dbg(FYI, "%s: could not get dfs targets: %d\n", __func__, rc);
+                rc = get_targets(ce, &old_tl);
+                cifs_dbg(FYI, "%s: get_targets: %d\n", __func__, rc);
         }
         up_read(&htable_rw_lock);
@@ -1376,26 +1333,18 @@ static int __refresh_tcon(const char *path, struct cifs_tcon *tcon, bool force_r
         }
         spin_unlock(&ipc->tc_lock);
 
-        rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
-        if (!rc) {
-                /* Create or update a cache entry with the new referral */
-                dump_refs(refs, numrefs);
-
-                down_write(&htable_rw_lock);
-                ce = lookup_cache_entry(path);
-                if (IS_ERR(ce))
-                        add_cache_entry_locked(refs, numrefs);
-                else if (force_refresh || cache_entry_expired(ce))
-                        update_cache_entry_locked(ce, refs, numrefs);
-                up_write(&htable_rw_lock);
-
-                mark_for_reconnect_if_needed(tcon, &tl, refs, numrefs);
+        ce = cache_refresh_path(xid, ses, path, true);
+        if (!IS_ERR(ce)) {
+                rc = get_targets(ce, &new_tl);
+                up_read(&htable_rw_lock);
+                cifs_dbg(FYI, "%s: get_targets: %d\n", __func__, rc);
+                mark_for_reconnect_if_needed(tcon->ses->server, &old_tl, &new_tl);
         }
 
 out:
         free_xid(xid);
-        dfs_cache_free_tgts(&tl);
-        free_dfs_info_array(refs, numrefs);
+        dfs_cache_free_tgts(&old_tl);
+        dfs_cache_free_tgts(&new_tl);
         return rc;
 }

fs/cifs/dfs_cache.h

@@ -35,9 +35,6 @@ int dfs_cache_find(const unsigned int xid, struct cifs_ses *ses, const struct nl
                    struct dfs_cache_tgt_list *tgt_list);
 int dfs_cache_noreq_find(const char *path, struct dfs_info3_param *ref,
                          struct dfs_cache_tgt_list *tgt_list);
-int dfs_cache_update_tgthint(const unsigned int xid, struct cifs_ses *ses,
-                             const struct nls_table *cp, int remap, const char *path,
-                             const struct dfs_cache_tgt_iterator *it);
 void dfs_cache_noreq_update_tgthint(const char *path, const struct dfs_cache_tgt_iterator *it);
 int dfs_cache_get_tgt_referral(const char *path, const struct dfs_cache_tgt_iterator *it,
                                struct dfs_info3_param *ref);

fs/cifs/smb2pdu.c

@@ -4163,12 +4163,15 @@ smb2_readv_callback(struct mid_q_entry *mid)
                                 (struct smb2_hdr *)rdata->iov[0].iov_base;
         struct cifs_credits credits = { .value = 0, .instance = 0 };
         struct smb_rqst rqst = { .rq_iov = &rdata->iov[1],
-                                 .rq_nvec = 1,
-                                 .rq_pages = rdata->pages,
-                                 .rq_offset = rdata->page_offset,
-                                 .rq_npages = rdata->nr_pages,
-                                 .rq_pagesz = rdata->pagesz,
-                                 .rq_tailsz = rdata->tailsz };
+                                 .rq_nvec = 1, };
+
+        if (rdata->got_bytes) {
+                rqst.rq_pages = rdata->pages;
+                rqst.rq_offset = rdata->page_offset;
+                rqst.rq_npages = rdata->nr_pages;
+                rqst.rq_pagesz = rdata->pagesz;
+                rqst.rq_tailsz = rdata->tailsz;
+        }
 
         WARN_ONCE(rdata->server != mid->server,
                   "rdata server %p != mid server %p",