Diffstat (limited to 'fs/smb/client/cached_dir.c')
 fs/smb/client/cached_dir.c | 290 ++++++++++++++++++++++++++++------------
 1 file changed, 202 insertions(+), 88 deletions(-)
diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c
index 0ff2491c311d..368e870624da 100644
--- a/fs/smb/client/cached_dir.c
+++ b/fs/smb/client/cached_dir.c
@@ -17,6 +17,11 @@ static void free_cached_dir(struct cached_fid *cfid);
 static void smb2_close_cached_fid(struct kref *ref);
 static void cfids_laundromat_worker(struct work_struct *work);
 
+struct cached_dir_dentry {
+	struct list_head entry;
+	struct dentry *dentry;
+};
+
 static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
 						    const char *path,
 						    bool lookup_only,
@@ -24,7 +29,6 @@ static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
 {
 	struct cached_fid *cfid;
 
-	spin_lock(&cfids->cfid_list_lock);
 	list_for_each_entry(cfid, &cfids->entries, entry) {
 		if (!strcmp(cfid->path, path)) {
 			/*
@@ -33,25 +37,20 @@ static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
 			 * being deleted due to a lease break.
 			 */
 			if (!cfid->time || !cfid->has_lease) {
-				spin_unlock(&cfids->cfid_list_lock);
 				return NULL;
 			}
 			kref_get(&cfid->refcount);
-			spin_unlock(&cfids->cfid_list_lock);
 			return cfid;
 		}
 	}
 	if (lookup_only) {
-		spin_unlock(&cfids->cfid_list_lock);
 		return NULL;
 	}
 	if (cfids->num_entries >= max_cached_dirs) {
-		spin_unlock(&cfids->cfid_list_lock);
 		return NULL;
 	}
 	cfid = init_cached_dir(path);
 	if (cfid == NULL) {
-		spin_unlock(&cfids->cfid_list_lock);
 		return NULL;
 	}
 	cfid->cfids = cfids;
@@ -59,7 +58,16 @@ static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
 	list_add(&cfid->entry, &cfids->entries);
 	cfid->on_list = true;
 	kref_get(&cfid->refcount);
-	spin_unlock(&cfids->cfid_list_lock);
+	/*
+	 * Set @cfid->has_lease to true during construction so that the lease
+	 * reference can be put in cached_dir_lease_break() due to a potential
+	 * lease break right after the request is sent or while @cfid is still
+	 * being cached, or if a reconnection is triggered during construction.
+	 * Concurrent processes won't be to use it yet due to @cfid->time being
+	 * zero.
+	 */
+	cfid->has_lease = true;
+
 	return cfid;
 }
 
@@ -94,7 +102,8 @@ path_to_dentry(struct cifs_sb_info *cifs_sb, const char *path)
 		while (*s && *s != sep)
 			s++;
 
-		child = lookup_positive_unlocked(p, dentry, s - p);
+		child = lookup_noperm_positive_unlocked(&QSTR_LEN(p, s - p),
+							dentry);
 		dput(dentry);
 		dentry = child;
 	} while (!IS_ERR(dentry));
@@ -146,16 +155,19 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
 	struct cached_fids *cfids;
 	const char *npath;
 	int retries = 0, cur_sleep = 1;
+	__le32 lease_flags = 0;
+
+	if (cifs_sb->root == NULL)
+		return -ENOENT;
 
-	if (tcon == NULL || tcon->cfids == NULL || tcon->nohandlecache ||
-	    is_smb1_server(tcon->ses->server) || (dir_cache_timeout == 0))
+	if (tcon == NULL)
 		return -EOPNOTSUPP;
 
 	ses = tcon->ses;
 	cfids = tcon->cfids;
 
-	if (cifs_sb->root == NULL)
-		return -ENOENT;
+	if (cfids == NULL)
+		return -EOPNOTSUPP;
 
 replay_again:
 	/* reinitialize for possible replay */
@@ -170,18 +182,19 @@ replay_again:
 	if (!utf16_path)
 		return -ENOMEM;
 
+	spin_lock(&cfids->cfid_list_lock);
 	cfid = find_or_create_cached_dir(cfids, path, lookup_only,
 					 tcon->max_cached_dirs);
 	if (cfid == NULL) {
+		spin_unlock(&cfids->cfid_list_lock);
 		kfree(utf16_path);
 		return -ENOENT;
 	}
 	/*
-	 * Return cached fid if it has a lease.  Otherwise, it is either a new
-	 * entry or laundromat worker removed it from @cfids->entries.  Caller
-	 * will put last reference if the latter.
+	 * Return cached fid if it is valid (has a lease and has a time).
+	 * Otherwise, it is either a new entry or laundromat worker removed it
+	 * from @cfids->entries.  Caller will put last reference if the latter.
 	 */
-	spin_lock(&cfids->cfid_list_lock);
-	if (cfid->has_lease) {
+	if (cfid->has_lease && cfid->time) {
 		spin_unlock(&cfids->cfid_list_lock);
 		*ret_cfid = cfid;
 		kfree(utf16_path);
@@ -189,8 +202,10 @@ replay_again:
 	}
 	spin_unlock(&cfids->cfid_list_lock);
 
+	pfid = &cfid->fid;
+
 	/*
-	 * Skip any prefix paths in @path as lookup_positive_unlocked() ends up
+	 * Skip any prefix paths in @path as lookup_noperm_positive_unlocked() ends up
 	 * calling ->lookup() which already adds those through
 	 * build_path_from_dentry().  Also, do it earlier as we might reconnect
 	 * below when trying to send compounded request and then potentially
@@ -210,8 +225,28 @@ replay_again:
 			rc = -ENOENT;
 			goto out;
 		}
+		if (dentry->d_parent && server->dialect >= SMB30_PROT_ID) {
+			struct cached_fid *parent_cfid;
+
+			spin_lock(&cfids->cfid_list_lock);
+			list_for_each_entry(parent_cfid, &cfids->entries, entry) {
+				if (parent_cfid->dentry == dentry->d_parent) {
+					cifs_dbg(FYI, "found a parent cached file handle\n");
+					if (parent_cfid->has_lease && parent_cfid->time) {
+						lease_flags
+							|= SMB2_LEASE_FLAG_PARENT_LEASE_KEY_SET_LE;
+						memcpy(pfid->parent_lease_key,
+						       parent_cfid->fid.lease_key,
+						       SMB2_LEASE_KEY_SIZE);
+					}
+					break;
+				}
+			}
+			spin_unlock(&cfids->cfid_list_lock);
+		}
 	}
 	cfid->dentry = dentry;
+	cfid->tcon = tcon;
 
 	/*
 	 * We do not hold the lock for the open because in case
@@ -223,7 +258,6 @@ replay_again:
 	if (smb3_encryption_required(tcon))
 		flags |= CIFS_TRANSFORM_REQ;
 
-	pfid = &cfid->fid;
 	server->ops->new_lease_key(pfid);
 
 	memset(rqst, 0, sizeof(rqst));
@@ -243,6 +277,7 @@ replay_again:
 			   FILE_READ_EA,
 		.disposition = FILE_OPEN,
 		.fid = pfid,
+		.lease_flags = lease_flags,
 		.replay = !!(retries),
 	};
 
@@ -267,15 +302,6 @@ replay_again:
 
 	smb2_set_related(&rqst[1]);
 
-	/*
-	 * Set @cfid->has_lease to true before sending out compounded request so
-	 * its lease reference can be put in cached_dir_lease_break() due to a
-	 * potential lease break right after the request is sent or while @cfid
-	 * is still being cached.  Concurrent processes won't be to use it yet
-	 * due to @cfid->time being zero.
-	 */
-	cfid->has_lease = true;
-
 	if (retries) {
 		smb2_set_replay(server, &rqst[0]);
 		smb2_set_replay(server, &rqst[1]);
@@ -292,7 +318,6 @@ replay_again:
 		}
 		goto oshr_free;
 	}
-	cfid->tcon = tcon;
 	cfid->is_open = true;
 
 	spin_lock(&cfids->cfid_list_lock);
@@ -347,6 +372,7 @@ oshr_free:
 	SMB2_query_info_free(&rqst[1]);
 	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
 	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
+out:
 	if (rc) {
 		spin_lock(&cfids->cfid_list_lock);
 		if (cfid->on_list) {
@@ -358,23 +384,14 @@ oshr_free:
 			/*
 			 * We are guaranteed to have two references at this
 			 * point. One for the caller and one for a potential
-			 * lease. Release the Lease-ref so that the directory
-			 * will be closed when the caller closes the cached
-			 * handle.
+			 * lease. Release one here, and the second below.
 			 */
 			cfid->has_lease = false;
-			spin_unlock(&cfids->cfid_list_lock);
 			kref_put(&cfid->refcount, smb2_close_cached_fid);
-			goto out;
 		}
 		spin_unlock(&cfids->cfid_list_lock);
-	}
-out:
-	if (rc) {
-		if (cfid->is_open)
-			SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
-				   cfid->fid.volatile_fid);
-		free_cached_dir(cfid);
+
+		kref_put(&cfid->refcount, smb2_close_cached_fid);
 	} else {
 		*ret_cfid = cfid;
 		atomic_inc(&tcon->num_remote_opens);
@@ -396,12 +413,12 @@ int open_cached_dir_by_dentry(struct cifs_tcon *tcon,
 	struct cached_fids *cfids = tcon->cfids;
 
 	if (cfids == NULL)
-		return -ENOENT;
+		return -EOPNOTSUPP;
 
 	spin_lock(&cfids->cfid_list_lock);
 	list_for_each_entry(cfid, &cfids->entries, entry) {
 		if (dentry && cfid->dentry == dentry) {
-			cifs_dbg(FYI, "found a cached root file handle by dentry\n");
+			cifs_dbg(FYI, "found a cached file handle by dentry\n");
 			kref_get(&cfid->refcount);
 			*ret_cfid = cfid;
 			spin_unlock(&cfids->cfid_list_lock);
@@ -477,7 +494,10 @@ void close_all_cached_dirs(struct cifs_sb_info *cifs_sb)
 	struct cifs_tcon *tcon;
 	struct tcon_link *tlink;
 	struct cached_fids *cfids;
+	struct cached_dir_dentry *tmp_list, *q;
+	LIST_HEAD(entry);
 
+	spin_lock(&cifs_sb->tlink_tree_lock);
 	for (node = rb_first(root); node; node = rb_next(node)) {
 		tlink = rb_entry(node, struct tcon_link, tl_rbnode);
 		tcon = tlink_tcon(tlink);
@@ -486,11 +506,40 @@ void close_all_cached_dirs(struct cifs_sb_info *cifs_sb)
 		cfids = tcon->cfids;
 		if (cfids == NULL)
 			continue;
+		spin_lock(&cfids->cfid_list_lock);
 		list_for_each_entry(cfid, &cfids->entries, entry) {
-			dput(cfid->dentry);
+			tmp_list = kmalloc(sizeof(*tmp_list), GFP_ATOMIC);
+			if (tmp_list == NULL) {
+				/*
+				 * If the malloc() fails, we won't drop all
+				 * dentries, and unmounting is likely to trigger
+				 * a 'Dentry still in use' error.
+				 */
+				cifs_tcon_dbg(VFS, "Out of memory while dropping dentries\n");
+				spin_unlock(&cfids->cfid_list_lock);
+				spin_unlock(&cifs_sb->tlink_tree_lock);
+				goto done;
+			}
+			spin_lock(&cfid->fid_lock);
+			tmp_list->dentry = cfid->dentry;
 			cfid->dentry = NULL;
+			spin_unlock(&cfid->fid_lock);
+
+			list_add_tail(&tmp_list->entry, &entry);
 		}
+		spin_unlock(&cfids->cfid_list_lock);
+	}
+	spin_unlock(&cifs_sb->tlink_tree_lock);
+
+done:
+	list_for_each_entry_safe(tmp_list, q, &entry, entry) {
+		list_del(&tmp_list->entry);
+		dput(tmp_list->dentry);
+		kfree(tmp_list);
 	}
+
+	/* Flush any pending work that will drop dentries */
+	flush_workqueue(cfid_put_wq);
 }
 
 /*
@@ -501,50 +550,71 @@ void invalidate_all_cached_dirs(struct cifs_tcon *tcon)
 {
 	struct cached_fids *cfids = tcon->cfids;
 	struct cached_fid *cfid, *q;
-	LIST_HEAD(entry);
 
 	if (cfids == NULL)
 		return;
 
+	/*
+	 * Mark all the cfids as closed, and move them to the cfids->dying list.
+	 * They'll be cleaned up later by cfids_invalidation_worker. Take
+	 * a reference to each cfid during this process.
+	 */
 	spin_lock(&cfids->cfid_list_lock);
 	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
-		list_move(&cfid->entry, &entry);
+		list_move(&cfid->entry, &cfids->dying);
 		cfids->num_entries--;
 		cfid->is_open = false;
 		cfid->on_list = false;
-		/* To prevent race with smb2_cached_lease_break() */
-		kref_get(&cfid->refcount);
-	}
-	spin_unlock(&cfids->cfid_list_lock);
-
-	list_for_each_entry_safe(cfid, q, &entry, entry) {
-		list_del(&cfid->entry);
-		cancel_work_sync(&cfid->lease_break);
 		if (cfid->has_lease) {
 			/*
-			 * We lease was never cancelled from the server so we
-			 * need to drop the reference.
+			 * The lease was never cancelled from the server,
+			 * so steal that reference.
 			 */
-			spin_lock(&cfids->cfid_list_lock);
 			cfid->has_lease = false;
-			spin_unlock(&cfids->cfid_list_lock);
-			kref_put(&cfid->refcount, smb2_close_cached_fid);
-		}
-		/* Drop the extra reference opened above*/
-		kref_put(&cfid->refcount, smb2_close_cached_fid);
+		} else
+			kref_get(&cfid->refcount);
 	}
+	/*
+	 * Queue dropping of the dentries once locks have been dropped
+	 */
+	if (!list_empty(&cfids->dying))
+		queue_work(cfid_put_wq, &cfids->invalidation_work);
+	spin_unlock(&cfids->cfid_list_lock);
 }
 
 static void
-smb2_cached_lease_break(struct work_struct *work)
+cached_dir_offload_close(struct work_struct *work)
 {
 	struct cached_fid *cfid = container_of(work,
-			struct cached_fid, lease_break);
+			struct cached_fid, close_work);
+	struct cifs_tcon *tcon = cfid->tcon;
+
+	WARN_ON(cfid->on_list);
 
-	spin_lock(&cfid->cfids->cfid_list_lock);
-	cfid->has_lease = false;
-	spin_unlock(&cfid->cfids->cfid_list_lock);
 	kref_put(&cfid->refcount, smb2_close_cached_fid);
+	cifs_put_tcon(tcon, netfs_trace_tcon_ref_put_cached_close);
+}
+
+/*
+ * Release the cached directory's dentry, and then queue work to drop cached
+ * directory itself (closing on server if needed).
+ *
+ * Must be called with a reference to the cached_fid and a reference to the
+ * tcon.
+ */
+static void cached_dir_put_work(struct work_struct *work)
+{
+	struct cached_fid *cfid = container_of(work, struct cached_fid,
+					       put_work);
+	struct dentry *dentry;
+
+	spin_lock(&cfid->fid_lock);
+	dentry = cfid->dentry;
+	cfid->dentry = NULL;
+	spin_unlock(&cfid->fid_lock);
+
+	dput(dentry);
+	queue_work(serverclose_wq, &cfid->close_work);
 }
 
 int cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16])
@@ -561,6 +631,7 @@ int cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16])
 		    !memcmp(lease_key,
 			    cfid->fid.lease_key,
 			    SMB2_LEASE_KEY_SIZE)) {
+			cfid->has_lease = false;
 			cfid->time = 0;
 			/*
 			 * We found a lease remove it from the list
@@ -570,8 +641,10 @@ int cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16])
 			cfid->on_list = false;
 			cfids->num_entries--;
 
-			queue_work(cifsiod_wq,
-				   &cfid->lease_break);
+			++tcon->tc_count;
+			trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
+					    netfs_trace_tcon_ref_get_cached_lease_break);
+			queue_work(cfid_put_wq, &cfid->put_work);
 			spin_unlock(&cfids->cfid_list_lock);
 			return true;
 		}
@@ -593,7 +666,8 @@ static struct cached_fid *init_cached_dir(const char *path)
 		return NULL;
 	}
 
-	INIT_WORK(&cfid->lease_break, smb2_cached_lease_break);
+	INIT_WORK(&cfid->close_work, cached_dir_offload_close);
+	INIT_WORK(&cfid->put_work, cached_dir_put_work);
 	INIT_LIST_HEAD(&cfid->entry);
 	INIT_LIST_HEAD(&cfid->dirents.entries);
 	mutex_init(&cfid->dirents.de_mutex);
@@ -606,6 +680,9 @@ static void free_cached_dir(struct cached_fid *cfid)
 {
 	struct cached_dirent *dirent, *q;
 
+	WARN_ON(work_pending(&cfid->close_work));
+	WARN_ON(work_pending(&cfid->put_work));
+
 	dput(cfid->dentry);
 	cfid->dentry = NULL;
 
@@ -623,10 +700,30 @@ static void free_cached_dir(struct cached_fid *cfid)
 	kfree(cfid);
 }
 
+static void cfids_invalidation_worker(struct work_struct *work)
+{
+	struct cached_fids *cfids = container_of(work, struct cached_fids,
+						 invalidation_work);
+	struct cached_fid *cfid, *q;
+	LIST_HEAD(entry);
+
+	spin_lock(&cfids->cfid_list_lock);
+	/* move cfids->dying to the local list */
+	list_cut_before(&entry, &cfids->dying, &cfids->dying);
+	spin_unlock(&cfids->cfid_list_lock);
+
+	list_for_each_entry_safe(cfid, q, &entry, entry) {
+		list_del(&cfid->entry);
+		/* Drop the ref-count acquired in invalidate_all_cached_dirs */
+		kref_put(&cfid->refcount, smb2_close_cached_fid);
+	}
+}
+
 static void cfids_laundromat_worker(struct work_struct *work)
 {
 	struct cached_fids *cfids;
 	struct cached_fid *cfid, *q;
+	struct dentry *dentry;
 	LIST_HEAD(entry);
 
 	cfids = container_of(work, struct cached_fids, laundromat_work.work);
@@ -638,33 +735,42 @@ static void cfids_laundromat_worker(struct work_struct *work)
 			cfid->on_list = false;
 			list_move(&cfid->entry, &entry);
 			cfids->num_entries--;
-			/* To prevent race with smb2_cached_lease_break() */
-			kref_get(&cfid->refcount);
+			if (cfid->has_lease) {
+				/*
+				 * Our lease has not yet been cancelled from the
+				 * server. Steal that reference.
+				 */
+				cfid->has_lease = false;
+			} else
+				kref_get(&cfid->refcount);
 		}
 	}
 	spin_unlock(&cfids->cfid_list_lock);
 
 	list_for_each_entry_safe(cfid, q, &entry, entry) {
 		list_del(&cfid->entry);
-		/*
-		 * Cancel and wait for the work to finish in case we are racing
-		 * with it.
-		 */
-		cancel_work_sync(&cfid->lease_break);
-		if (cfid->has_lease) {
+
+		spin_lock(&cfid->fid_lock);
+		dentry = cfid->dentry;
+		cfid->dentry = NULL;
+		spin_unlock(&cfid->fid_lock);
+
+		dput(dentry);
+		if (cfid->is_open) {
+			spin_lock(&cifs_tcp_ses_lock);
+			++cfid->tcon->tc_count;
+			trace_smb3_tcon_ref(cfid->tcon->debug_id, cfid->tcon->tc_count,
+					    netfs_trace_tcon_ref_get_cached_laundromat);
+			spin_unlock(&cifs_tcp_ses_lock);
+			queue_work(serverclose_wq, &cfid->close_work);
+		} else
 			/*
-			 * Our lease has not yet been cancelled from the server
-			 * so we need to drop the reference.
+			 * Drop the ref-count from above, either the lease-ref (if there
+			 * was one) or the extra one acquired.
 			 */
-			spin_lock(&cfids->cfid_list_lock);
-			cfid->has_lease = false;
-			spin_unlock(&cfids->cfid_list_lock);
 			kref_put(&cfid->refcount, smb2_close_cached_fid);
-		}
-		/* Drop the extra reference opened above */
-		kref_put(&cfid->refcount, smb2_close_cached_fid);
 	}
-	queue_delayed_work(cifsiod_wq, &cfids->laundromat_work,
+	queue_delayed_work(cfid_put_wq, &cfids->laundromat_work,
 			   dir_cache_timeout * HZ);
 }
 
@@ -677,9 +783,11 @@ struct cached_fids *init_cached_dirs(void)
 		return NULL;
 	spin_lock_init(&cfids->cfid_list_lock);
 	INIT_LIST_HEAD(&cfids->entries);
+	INIT_LIST_HEAD(&cfids->dying);
 
+	INIT_WORK(&cfids->invalidation_work, cfids_invalidation_worker);
 	INIT_DELAYED_WORK(&cfids->laundromat_work, cfids_laundromat_worker);
-	queue_delayed_work(cifsiod_wq, &cfids->laundromat_work,
+	queue_delayed_work(cfid_put_wq, &cfids->laundromat_work,
 			   dir_cache_timeout * HZ);
 
 	return cfids;
@@ -698,6 +806,7 @@ void free_cached_dirs(struct cached_fids *cfids)
 		return;
 
 	cancel_delayed_work_sync(&cfids->laundromat_work);
+	cancel_work_sync(&cfids->invalidation_work);
 
 	spin_lock(&cfids->cfid_list_lock);
 	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
@@ -705,6 +814,11 @@ void free_cached_dirs(struct cached_fids *cfids)
 		cfid->is_open = false;
 		list_move(&cfid->entry, &entry);
 	}
+	list_for_each_entry_safe(cfid, q, &cfids->dying, entry) {
+		cfid->on_list = false;
+		cfid->is_open = false;
+		list_move(&cfid->entry, &entry);
+	}
 	spin_unlock(&cfids->cfid_list_lock);
 
 	list_for_each_entry_safe(cfid, q, &entry, entry) {
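The central refcounting change above: @cfid->has_lease is now set while the cached fid is still being constructed, and every teardown path (lease break, invalidation, laundromat) transfers ownership of that lease reference instead of taking an extra one and later cancelling the lease-break work; that is what lets the cancel_work_sync() calls go away. Below is a minimal userspace model of the "steal or get" pattern, under the assumption that the list lock is held; all demo_* names are invented for illustration, not kernel code.

	/* Sketch of the "steal the lease reference" pattern used in
	 * invalidate_all_cached_dirs() and cfids_laundromat_worker(). */
	#include <assert.h>
	#include <stdbool.h>

	struct demo_cfid {
		int refcount;		/* stands in for struct kref */
		bool has_lease;		/* owns exactly one reference while true */
	};

	/* Would run under cfid_list_lock: rather than kref_get() now and have a
	 * worker drop the lease's reference later (racy once the lock is
	 * released), ownership of the lease's reference moves to the caller. */
	static void demo_steal_or_get(struct demo_cfid *cfid)
	{
		if (cfid->has_lease)
			cfid->has_lease = false;	/* reuse the lease's reference */
		else
			cfid->refcount++;		/* kref_get() in the real code */
	}

	int main(void)
	{
		struct demo_cfid cfid = { .refcount = 2, .has_lease = true };

		demo_steal_or_get(&cfid);
		/* The caller now owns one reference either way; dropping it is
		 * the kref_put(..., smb2_close_cached_fid) in the patch. */
		assert(cfid.refcount == 2 && !cfid.has_lease);
		return 0;
	}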
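close_all_cached_dirs() now walks the tlink tree entirely under cifs_sb->tlink_tree_lock and each cfids->cfid_list_lock, so it cannot call dput() (which may sleep) inside the loop. Instead, each dentry is detached onto a private list via a GFP_ATOMIC allocation and released only after all locks are dropped, with cfid_put_wq flushed afterwards to catch dputs queued by cached_dir_put_work(). A userspace sketch of that two-phase shape, with stand-in names throughout:

	#include <stdlib.h>

	struct demo_dentry { int refs; };

	static void demo_dput(struct demo_dentry *d)
	{
		if (d)
			d->refs--;	/* the real dput() may sleep, so no locks held */
	}

	struct demo_node {
		struct demo_node *next;
		struct demo_dentry *dentry;
	};

	/* Phase 1: would run with the spinlocks held, so the allocation must
	 * not sleep (kmalloc(GFP_ATOMIC) in the patch) and nothing is put. */
	static struct demo_node *demo_collect(struct demo_dentry **cached, int n)
	{
		struct demo_node *head = NULL;

		for (int i = 0; i < n; i++) {
			struct demo_node *t = malloc(sizeof(*t));
			if (!t)
				break;	/* the patch logs and aborts the walk here */
			t->dentry = cached[i];
			cached[i] = NULL;	/* cfid->dentry = NULL under fid_lock */
			t->next = head;
			head = t;
		}
		return head;
	}

	/* Phase 2: locks dropped, now safe to release the references. */
	static void demo_drain(struct demo_node *head)
	{
		while (head) {
			struct demo_node *t = head;

			head = head->next;
			demo_dput(t->dentry);
			free(t);
		}
	}

	int main(void)
	{
		struct demo_dentry d[2] = { { .refs = 1 }, { .refs = 1 } };
		struct demo_dentry *cached[2] = { &d[0], &d[1] };

		demo_drain(demo_collect(cached, 2));
		return 0;
	}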
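The parent-lease hunk in open_cached_dir(): on SMB3.0+ dialects, if the parent directory is already cached with a valid lease, the child open advertises SMB2_LEASE_FLAG_PARENT_LEASE_KEY_SET and copies the parent's lease key into the open parameters, letting the server relate the two leases. A reduced model of that lookup follows; the flag value is taken from [MS-SMB2], and every demo_* type is a stand-in rather than the kernel's:

	#include <stdbool.h>
	#include <stdint.h>
	#include <string.h>

	#define SMB2_LEASE_KEY_SIZE 16
	#define SMB2_LEASE_FLAG_PARENT_LEASE_KEY_SET 0x00000004	/* per [MS-SMB2] */

	struct demo_cfid {
		struct demo_cfid *next;
		const void *dentry;	/* identity of the cached directory */
		bool has_lease;
		unsigned long time;	/* nonzero once fully cached */
		uint8_t lease_key[SMB2_LEASE_KEY_SIZE];
	};

	/* Scan the cached-dir list for the parent; only a fully valid entry
	 * (has_lease && time, the same check the patch adds) may donate its
	 * lease key to the child open. */
	static uint32_t demo_set_parent_lease(struct demo_cfid *list, const void *parent,
					      uint8_t key_out[SMB2_LEASE_KEY_SIZE])
	{
		for (struct demo_cfid *c = list; c; c = c->next) {
			if (c->dentry != parent)
				continue;
			if (c->has_lease && c->time) {
				memcpy(key_out, c->lease_key, SMB2_LEASE_KEY_SIZE);
				return SMB2_LEASE_FLAG_PARENT_LEASE_KEY_SET;
			}
			break;	/* parent found but not valid: no flag */
		}
		return 0;
	}

	int main(void)
	{
		static int parent_ident;
		uint8_t key[SMB2_LEASE_KEY_SIZE] = { 0 };
		struct demo_cfid parent = {
			.dentry = &parent_ident, .has_lease = true, .time = 1,
			.lease_key = { 0xaa, 0xbb },
		};

		/* Child open under the cached parent: flag set, key copied. */
		return demo_set_parent_lease(&parent, &parent_ident, key) ? 0 : 1;
	}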