Diffstat (limited to 'fs/smb/client/dfs_cache.c')
-rw-r--r-- | fs/smb/client/dfs_cache.c | 227
1 file changed, 122 insertions, 105 deletions
diff --git a/fs/smb/client/dfs_cache.c b/fs/smb/client/dfs_cache.c
index 11c8efecf7aa..4dada26d56b5 100644
--- a/fs/smb/client/dfs_cache.c
+++ b/fs/smb/client/dfs_cache.c
@@ -24,8 +24,8 @@
 #include "dfs_cache.h"
 
-#define CACHE_HTABLE_SIZE 32
-#define CACHE_MAX_ENTRIES 64
+#define CACHE_HTABLE_SIZE 512
+#define CACHE_MAX_ENTRIES 1024
 #define CACHE_MIN_TTL 120 /* 2 minutes */
 #define CACHE_DEFAULT_TTL 300 /* 5 minutes */
@@ -126,6 +126,7 @@ static inline void free_tgts(struct cache_entry *ce)
 
 static inline void flush_cache_ent(struct cache_entry *ce)
 {
+	cifs_dbg(FYI, "%s: %s\n", __func__, ce->path);
 	hlist_del_init(&ce->hlist);
 	kfree(ce->path);
 	free_tgts(ce);
@@ -172,8 +173,8 @@ static int dfscache_proc_show(struct seq_file *m, void *v)
 			   "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s\n",
 			   ce->path, ce->srvtype == DFS_TYPE_ROOT ? "root" : "link", ce->ttl,
 			   ce->etime.tv_nsec, ce->hdr_flags, ce->ref_flags,
-			   DFS_INTERLINK(ce->hdr_flags) ? "yes" : "no",
-			   ce->path_consumed, cache_entry_expired(ce) ? "yes" : "no");
+			   str_yes_no(DFS_INTERLINK(ce->hdr_flags)),
+			   ce->path_consumed, str_yes_no(cache_entry_expired(ce)));
 
 		list_for_each_entry(t, &ce->tlist, list) {
 			seq_printf(m, " %s%s\n",
@@ -241,9 +242,9 @@ static inline void dump_ce(const struct cache_entry *ce)
 		 ce->srvtype == DFS_TYPE_ROOT ? "root" : "link", ce->ttl,
 		 ce->etime.tv_nsec,
 		 ce->hdr_flags, ce->ref_flags,
-		 DFS_INTERLINK(ce->hdr_flags) ? "yes" : "no",
+		 str_yes_no(DFS_INTERLINK(ce->hdr_flags)),
 		 ce->path_consumed,
-		 cache_entry_expired(ce) ? "yes" : "no");
+		 str_yes_no(cache_entry_expired(ce)));
 	dump_tgts(ce);
 }
 
@@ -441,34 +442,31 @@ static struct cache_entry *alloc_cache_entry(struct dfs_info3_param *refs, int n
 	return ce;
 }
 
-static void remove_oldest_entry_locked(void)
+/* Remove all referrals that have a single target or oldest entry */
+static void purge_cache(void)
 {
 	int i;
 	struct cache_entry *ce;
-	struct cache_entry *to_del = NULL;
-
-	WARN_ON(!rwsem_is_locked(&htable_rw_lock));
+	struct cache_entry *oldest = NULL;
 
 	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
 		struct hlist_head *l = &cache_htable[i];
+		struct hlist_node *n;
 
-		hlist_for_each_entry(ce, l, hlist) {
+		hlist_for_each_entry_safe(ce, n, l, hlist) {
 			if (hlist_unhashed(&ce->hlist))
 				continue;
-			if (!to_del || timespec64_compare(&ce->etime,
-							  &to_del->etime) < 0)
-				to_del = ce;
+			if (ce->numtgts == 1)
+				flush_cache_ent(ce);
+			else if (!oldest ||
+				 timespec64_compare(&ce->etime,
+						    &oldest->etime) < 0)
+				oldest = ce;
 		}
 	}
 
-	if (!to_del) {
-		cifs_dbg(FYI, "%s: no entry to remove\n", __func__);
-		return;
-	}
-
-	cifs_dbg(FYI, "%s: removing entry\n", __func__);
-	dump_ce(to_del);
-	flush_cache_ent(to_del);
+	if (atomic_read(&cache_count) >= CACHE_MAX_ENTRIES && oldest)
+		flush_cache_ent(oldest);
 }
 
 /* Add a new DFS cache entry */
@@ -484,7 +482,7 @@ static struct cache_entry *add_cache_entry_locked(struct dfs_info3_param *refs,
 
 	if (atomic_read(&cache_count) >= CACHE_MAX_ENTRIES) {
 		cifs_dbg(FYI, "%s: reached max cache size (%d)\n", __func__, CACHE_MAX_ENTRIES);
-		remove_oldest_entry_locked();
+		purge_cache();
 	}
 
 	rc = cache_entry_hash(refs[0].path_name, strlen(refs[0].path_name), &hash);
@@ -1095,67 +1093,33 @@ int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it,
 	return 0;
 }
 
-static bool target_share_equal(struct TCP_Server_Info *server, const char *s1, const char *s2)
+static bool target_share_equal(struct cifs_tcon *tcon, const char *s1)
 {
-	char unc[sizeof("\\\\") + SERVER_NAME_LENGTH] = {0};
-	const char *host;
-	size_t hostlen;
+	struct TCP_Server_Info *server = tcon->ses->server;
+	const char *s2 = &tcon->tree_name[1];
 	struct sockaddr_storage ss;
 	bool match;
 	int rc;
 
-	if (strcasecmp(s1, s2))
+	if (strcasecmp(s2, s1))
 		return false;
 
 	/*
 	 * Resolve share's hostname and check if server address matches. Otherwise just ignore it
 	 * as we could not have upcall to resolve hostname or failed to convert ip address.
 	 */
-	extract_unc_hostname(s1, &host, &hostlen);
-	scnprintf(unc, sizeof(unc), "\\\\%.*s", (int)hostlen, host);
-
-	rc = dns_resolve_server_name_to_ip(unc, (struct sockaddr *)&ss, NULL);
-	if (rc < 0) {
-		cifs_dbg(FYI, "%s: could not resolve %.*s. assuming server address matches.\n",
-			 __func__, (int)hostlen, host);
+	rc = dns_resolve_unc(server->dns_dom, s1, (struct sockaddr *)&ss);
+	if (rc < 0)
 		return true;
-	}
 
 	cifs_server_lock(server);
 	match = cifs_match_ipaddr((struct sockaddr *)&server->dstaddr, (struct sockaddr *)&ss);
+	cifs_dbg(FYI, "%s: [share=%s] ipaddr matched: %s\n", __func__, s1, str_yes_no(match));
 	cifs_server_unlock(server);
 
 	return match;
 }
 
-/*
- * Mark dfs tcon for reconnecting when the currently connected tcon does not match any of the new
- * target shares in @refs.
- */
-static void mark_for_reconnect_if_needed(struct TCP_Server_Info *server,
-					 const char *path,
-					 struct dfs_cache_tgt_list *old_tl,
-					 struct dfs_cache_tgt_list *new_tl)
-{
-	struct dfs_cache_tgt_iterator *oit, *nit;
-
-	for (oit = dfs_cache_get_tgt_iterator(old_tl); oit;
-	     oit = dfs_cache_get_next_tgt(old_tl, oit)) {
-		for (nit = dfs_cache_get_tgt_iterator(new_tl); nit;
-		     nit = dfs_cache_get_next_tgt(new_tl, nit)) {
-			if (target_share_equal(server,
-					       dfs_cache_get_tgt_name(oit),
-					       dfs_cache_get_tgt_name(nit))) {
-				dfs_cache_noreq_update_tgthint(path, nit);
-				return;
-			}
-		}
-	}
-
-	cifs_dbg(FYI, "%s: no cached or matched targets. mark dfs share for reconnect.\n", __func__);
-	cifs_signal_cifsd_for_reconnect(server, true);
-}
-
 static bool is_ses_good(struct cifs_ses *ses)
 {
 	struct TCP_Server_Info *server = ses->server;
@@ -1172,43 +1136,109 @@ static bool is_ses_good(struct cifs_ses *ses)
 	return ret;
 }
 
-/* Refresh dfs referral of @ses and mark it for reconnect if needed */
-static void __refresh_ses_referral(struct cifs_ses *ses, bool force_refresh)
+/* Refresh dfs referral of @ses */
+static void refresh_ses_referral(struct cifs_ses *ses)
 {
-	struct TCP_Server_Info *server = ses->server;
-	DFS_CACHE_TGT_LIST(old_tl);
-	DFS_CACHE_TGT_LIST(new_tl);
-	bool needs_refresh = false;
 	struct cache_entry *ce;
 	unsigned int xid;
-	char *path = NULL;
+	const char *path;
 	int rc = 0;
 
 	xid = get_xid();
 
-	mutex_lock(&server->refpath_lock);
-	if (server->leaf_fullpath) {
-		path = kstrdup(server->leaf_fullpath + 1, GFP_ATOMIC);
-		if (!path)
-			rc = -ENOMEM;
+	path = dfs_ses_refpath(ses);
+	if (IS_ERR(path)) {
+		rc = PTR_ERR(path);
+		goto out;
 	}
-	mutex_unlock(&server->refpath_lock);
-	if (!path)
+
+	ses = CIFS_DFS_ROOT_SES(ses);
+	if (!is_ses_good(ses)) {
+		cifs_dbg(FYI, "%s: skip cache refresh due to disconnected ipc\n",
+			 __func__);
 		goto out;
+	}
 
-	down_read(&htable_rw_lock);
+	ce = cache_refresh_path(xid, ses, path, false);
+	if (!IS_ERR(ce))
+		up_read(&htable_rw_lock);
+	else
+		rc = PTR_ERR(ce);
+
+out:
+	free_xid(xid);
+}
+
+static int __refresh_tcon_referral(struct cifs_tcon *tcon,
+				   const char *path,
+				   struct dfs_info3_param *refs,
+				   int numrefs, bool force_refresh)
+{
+	struct cache_entry *ce;
+	bool reconnect = force_refresh;
+	int rc = 0;
+	int i;
+
+	if (unlikely(!numrefs))
+		return 0;
+
+	if (force_refresh) {
+		for (i = 0; i < numrefs; i++) {
+			/* TODO: include prefix paths in the matching */
+			if (target_share_equal(tcon, refs[i].node_name)) {
+				reconnect = false;
+				break;
+			}
+		}
+	}
+
+	down_write(&htable_rw_lock);
 	ce = lookup_cache_entry(path);
-	needs_refresh = force_refresh || IS_ERR(ce) || cache_entry_expired(ce);
 	if (!IS_ERR(ce)) {
-		rc = get_targets(ce, &old_tl);
-		cifs_dbg(FYI, "%s: get_targets: %d\n", __func__, rc);
+		if (force_refresh || cache_entry_expired(ce))
+			rc = update_cache_entry_locked(ce, refs, numrefs);
+	} else if (PTR_ERR(ce) == -ENOENT) {
+		ce = add_cache_entry_locked(refs, numrefs);
 	}
-	up_read(&htable_rw_lock);
+	up_write(&htable_rw_lock);
+
+	if (IS_ERR(ce))
+		rc = PTR_ERR(ce);
+	if (reconnect) {
+		cifs_tcon_dbg(FYI, "%s: mark for reconnect\n", __func__);
+		cifs_signal_cifsd_for_reconnect(tcon->ses->server, true);
+	}
+	return rc;
+}
+
+static void refresh_tcon_referral(struct cifs_tcon *tcon, bool force_refresh)
+{
+	struct dfs_info3_param *refs = NULL;
+	struct cache_entry *ce;
+	struct cifs_ses *ses;
+	bool needs_refresh;
+	const char *path;
+	unsigned int xid;
+	int numrefs = 0;
+	int rc = 0;
+
+	xid = get_xid();
+	ses = tcon->ses;
+
+	path = dfs_ses_refpath(ses);
+	if (IS_ERR(path)) {
+		rc = PTR_ERR(path);
+		goto out;
+	}
+
+	down_read(&htable_rw_lock);
+	ce = lookup_cache_entry(path);
+	needs_refresh = force_refresh || IS_ERR(ce) || cache_entry_expired(ce);
 	if (!needs_refresh) {
-		rc = 0;
+		up_read(&htable_rw_lock);
 		goto out;
 	}
+	up_read(&htable_rw_lock);
 
 	ses = CIFS_DFS_ROOT_SES(ses);
 	if (!is_ses_good(ses)) {
@@ -1217,29 +1247,15 @@ static void __refresh_ses_referral(struct cifs_ses *ses, bool force_refresh)
 		goto out;
 	}
 
-	ce = cache_refresh_path(xid, ses, path, true);
-	if (!IS_ERR(ce)) {
-		rc = get_targets(ce, &new_tl);
-		up_read(&htable_rw_lock);
-		cifs_dbg(FYI, "%s: get_targets: %d\n", __func__, rc);
-		mark_for_reconnect_if_needed(server, path, &old_tl, &new_tl);
+	rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
+	if (!rc) {
+		rc = __refresh_tcon_referral(tcon, path, refs,
+					     numrefs, force_refresh);
 	}
 
 out:
	free_xid(xid);
-	dfs_cache_free_tgts(&old_tl);
-	dfs_cache_free_tgts(&new_tl);
-	kfree(path);
-}
-
-static inline void refresh_ses_referral(struct cifs_ses *ses)
-{
-	__refresh_ses_referral(ses, false);
-}
-
-static inline void force_refresh_ses_referral(struct cifs_ses *ses)
-{
-	__refresh_ses_referral(ses, true);
+	free_dfs_info_array(refs, numrefs);
 }
 
 /**
@@ -1280,7 +1296,7 @@ int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb)
 	 */
 	cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
 
-	force_refresh_ses_referral(tcon->ses);
+	refresh_tcon_referral(tcon, true);
 	return 0;
 }
 
@@ -1292,8 +1308,9 @@ void dfs_cache_refresh(struct work_struct *work)
 
 	tcon = container_of(work, struct cifs_tcon, dfs_cache_work.work);
 
-	for (ses = tcon->ses; ses; ses = ses->dfs_root_ses)
+	list_for_each_entry(ses, &tcon->dfs_ses_list, dlist)
 		refresh_ses_referral(ses);
+	refresh_tcon_referral(tcon, false);
 
 	queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
 			   atomic_read(&dfs_cache_ttl) * HZ);
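For context on the purge_cache() hunk above: the old remove_oldest_entry_locked() evicted exactly one entry (the oldest) when the cache hit CACHE_MAX_ENTRIES, whereas purge_cache() first drops every referral that has only a single target and then evicts the oldest surviving entry only if the cache is still at the cap. The user-space sketch below mirrors that eviction order on a plain array instead of the kernel's hash table; all names (toy_entry, toy_purge, TOY_MAX_ENTRIES) are hypothetical and only the policy follows the diff.

/* Illustrative sketch only: mimics the purge_cache() eviction order on a
 * plain array. Not kernel code; names are made up for illustration. */
#include <stdio.h>
#include <time.h>

#define TOY_MAX_ENTRIES 4

struct toy_entry {
	int numtgts;	/* number of referral targets */
	time_t etime;	/* expiry time; smaller means older */
	int valid;	/* still present in the cache */
};

static void toy_purge(struct toy_entry *cache, int n, int *count)
{
	struct toy_entry *oldest = NULL;
	int i;

	/* Pass 1: drop every single-target referral, track the oldest of the rest. */
	for (i = 0; i < n; i++) {
		if (!cache[i].valid)
			continue;
		if (cache[i].numtgts == 1) {
			cache[i].valid = 0;
			(*count)--;
		} else if (!oldest || cache[i].etime < oldest->etime) {
			oldest = &cache[i];
		}
	}

	/* Pass 2: still at the cap? Evict the oldest surviving entry. */
	if (*count >= TOY_MAX_ENTRIES && oldest) {
		oldest->valid = 0;
		(*count)--;
	}
}

int main(void)
{
	struct toy_entry cache[TOY_MAX_ENTRIES] = {
		{ .numtgts = 1, .etime = 100, .valid = 1 },
		{ .numtgts = 3, .etime = 50,  .valid = 1 },
		{ .numtgts = 2, .etime = 200, .valid = 1 },
		{ .numtgts = 1, .etime = 300, .valid = 1 },
	};
	int count = TOY_MAX_ENTRIES;

	toy_purge(cache, TOY_MAX_ENTRIES, &count);
	printf("entries left: %d\n", count); /* prints 2: both single-target entries dropped */
	return 0;
}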
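Similarly, the reworked target_share_equal() keeps the old fallback semantics even though the debug message explaining it was dropped: share names are compared case-insensitively first, and if the target hostname cannot be resolved, the address check is skipped and the shares are treated as matching. A minimal user-space sketch of that decision flow follows, with the resolver stubbed out; resolve_host_stub and toy_target_share_equal are invented for illustration and are not part of the kernel code.

/* Illustrative sketch of the name-compare / resolve / fallback decision only. */
#include <stdbool.h>
#include <stdio.h>
#include <strings.h>

/* Pretend DNS lookup: returns 0 and fills *addr on success, -1 on failure. */
static int resolve_host_stub(const char *share, unsigned int *addr)
{
	(void)share;
	(void)addr;
	return -1; /* simulate "no upcall available to resolve the hostname" */
}

static bool toy_target_share_equal(const char *connected, const char *target,
				   unsigned int server_addr)
{
	unsigned int target_addr;

	if (strcasecmp(connected, target))
		return false;		/* names differ: not the same share */

	if (resolve_host_stub(target, &target_addr) < 0)
		return true;		/* cannot resolve: assume the address matches */

	return target_addr == server_addr;
}

int main(void)
{
	/* Resolution fails in the stub, so equal names are treated as a match. */
	printf("%s\n", toy_target_share_equal("\\\\SRV\\share", "\\\\srv\\SHARE", 0)
		       ? "match" : "no match");
	return 0;
}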