Diffstat (limited to 'fs/nfsd/nfs4proc.c')
-rw-r--r-- | fs/nfsd/nfs4proc.c | 333
1 file changed, 259 insertions, 74 deletions
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 2927b1263f08..f13abbb13b38 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -57,6 +57,8 @@ module_param(inter_copy_offload_enable, bool, 0644);
 MODULE_PARM_DESC(inter_copy_offload_enable,
		 "Enable inter server to server copy offload. Default: false");
 
+static void cleanup_async_copy(struct nfsd4_copy *copy);
+
 #ifdef CONFIG_NFSD_V4_2_INTER_SSC
 static int nfsd4_ssc_umount_timeout = 900000;	/* default to 15 mins */
 module_param(nfsd4_ssc_umount_timeout, int, 0644);
@@ -158,7 +160,7 @@ do_open_permission(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfs
 	return fh_verify(rqstp, current_fh, S_IFREG, accmode);
 }
 
-static __be32 nfsd_check_obj_isreg(struct svc_fh *fh)
+static __be32 nfsd_check_obj_isreg(struct svc_fh *fh, u32 minor_version)
 {
 	umode_t mode = d_inode(fh->fh_dentry)->i_mode;
 
@@ -166,14 +168,15 @@ static __be32 nfsd_check_obj_isreg(struct svc_fh *fh)
 		return nfs_ok;
 	if (S_ISDIR(mode))
 		return nfserr_isdir;
-	/*
-	 * Using err_symlink as our catch-all case may look odd; but
-	 * there's no other obvious error for this case in 4.0, and we
-	 * happen to know that it will cause the linux v4 client to do
-	 * the right thing on attempts to open something other than a
-	 * regular file.
-	 */
-	return nfserr_symlink;
+	if (S_ISLNK(mode))
+		return nfserr_symlink;
+
+	/* RFC 7530 - 16.16.6 */
+	if (minor_version == 0)
+		return nfserr_symlink;
+	else
+		return nfserr_wrong_type;
+
 }
 
 static void nfsd4_set_open_owner_reply_cache(struct nfsd4_compound_state *cstate,
		struct nfsd4_open *open, struct svc_fh *resfh)
@@ -263,7 +266,9 @@ nfsd4_create_file(struct svc_rqst *rqstp, struct svc_fh *fhp,
 	inode_lock_nested(inode, I_MUTEX_PARENT);
 
-	child = lookup_one_len(open->op_fname, parent, open->op_fnamelen);
+	child = lookup_one(&nop_mnt_idmap,
+			   &QSTR_LEN(open->op_fname, open->op_fnamelen),
+			   parent);
 	if (IS_ERR(child)) {
 		status = nfserrno(PTR_ERR(child));
 		goto out;
 	}
@@ -466,7 +471,7 @@ do_open_lookup(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, stru
 	}
 	if (status)
 		goto out;
-	status = nfsd_check_obj_isreg(*resfh);
+	status = nfsd_check_obj_isreg(*resfh, cstate->minorversion);
 	if (status)
 		goto out;
 
@@ -751,15 +756,6 @@ nfsd4_access(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
			   &access->ac_supported);
 }
 
-static void gen_boot_verifier(nfs4_verifier *verifier, struct net *net)
-{
-	__be32 *verf = (__be32 *)verifier->data;
-
-	BUILD_BUG_ON(2*sizeof(*verf) != sizeof(verifier->data));
-
-	nfsd_copy_write_verifier(verf, net_generic(net, nfsd_net_id));
-}
-
 static __be32
 nfsd4_commit(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	     union nfsd4_op_u *u)
@@ -882,6 +878,8 @@ nfsd4_getattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 	struct nfsd4_getattr *getattr = &u->getattr;
 	__be32 status;
 
+	trace_nfsd_vfs_getattr(rqstp, &cstate->current_fh);
+
 	status = fh_verify(rqstp, &cstate->current_fh, 0, NFSD_MAY_NOP);
 	if (status)
 		return status;
@@ -1004,6 +1002,9 @@ nfsd4_readdir(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 	u64 cookie = readdir->rd_cookie;
 	static const nfs4_verifier zeroverf;
 
+	trace_nfsd_vfs_readdir(rqstp, &cstate->current_fh,
+			       readdir->rd_maxcount, readdir->rd_cookie);
+
 	/* no need to check permission - this will be done in nfsd_readdir() */
 
 	if (readdir->rd_bmval[1] & NFSD_WRITEONLY_ATTRS_WORD1)
@@ -1141,18 +1142,43 @@ nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		.na_iattr	= &setattr->sa_iattr,
		.na_seclabel	= &setattr->sa_label,
	};
+	bool save_no_wcc, deleg_attrs;
+	struct nfs4_stid *st = NULL;
 	struct inode *inode;
 	__be32 status = nfs_ok;
-	bool save_no_wcc;
 	int err;
 
-	if (setattr->sa_iattr.ia_valid & ATTR_SIZE) {
+	deleg_attrs = setattr->sa_bmval[2] & (FATTR4_WORD2_TIME_DELEG_ACCESS |
+					      FATTR4_WORD2_TIME_DELEG_MODIFY);
+
+	if (deleg_attrs || (setattr->sa_iattr.ia_valid & ATTR_SIZE)) {
+		int flags = WR_STATE;
+
+		if (setattr->sa_bmval[2] & FATTR4_WORD2_TIME_DELEG_ACCESS)
+			flags |= RD_STATE;
+
 		status = nfs4_preprocess_stateid_op(rqstp, cstate,
				&cstate->current_fh, &setattr->sa_stateid,
-				WR_STATE, NULL, NULL);
+				flags, NULL, &st);
 		if (status)
			return status;
 	}
+
+	if (deleg_attrs) {
+		status = nfserr_bad_stateid;
+		if (st->sc_type & SC_TYPE_DELEG) {
+			struct nfs4_delegation *dp = delegstateid(st);
+
+			/* Only for *_ATTRS_DELEG flavors */
+			if (deleg_attrs_deleg(dp->dl_type))
+				status = nfs_ok;
+		}
+	}
+	if (st)
+		nfs4_put_stid(st);
+	if (status)
+		return status;
+
 	err = fh_want_write(&cstate->current_fh);
 	if (err)
		return nfserrno(err);
@@ -1192,7 +1218,6 @@ nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 	struct nfsd_file *nf = NULL;
 	__be32 status = nfs_ok;
 	unsigned long cnt;
-	int nvecs;
 
 	if (write->wr_offset > (u64)OFFSET_MAX ||
	    write->wr_offset + write->wr_buflen > (u64)OFFSET_MAX)
@@ -1207,13 +1232,9 @@ nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		return status;
 
 	write->wr_how_written = write->wr_stable_how;
-
-	nvecs = svc_fill_write_vector(rqstp, &write->wr_payload);
-	WARN_ON_ONCE(nvecs > ARRAY_SIZE(rqstp->rq_vec));
-
 	status = nfsd_vfs_write(rqstp, &cstate->current_fh, nf,
-				write->wr_offset, rqstp->rq_vec, nvecs, &cnt,
-				write->wr_how_written,
+				write->wr_offset, &write->wr_payload,
+				&cnt, write->wr_how_written,
				(__be32 *)write->wr_verifier.data);
 	nfsd_file_put(nf);
 
@@ -1284,6 +1305,71 @@ out:
 	return status;
 }
 
+/**
+ * nfsd4_has_active_async_copies - Check for ongoing copy operations
+ * @clp: Client to be checked
+ *
+ * NFSD maintains state for async COPY operations after they complete,
+ * and this state remains in the nfs4_client's async_copies list.
+ * Ongoing copies should block the destruction of the nfs4_client, but
+ * completed copies should not.
+ *
+ * Return values:
+ *   %true: At least one active async COPY is ongoing
+ *   %false: No active async COPY operations were found
+ */
+bool nfsd4_has_active_async_copies(struct nfs4_client *clp)
+{
+	struct nfsd4_copy *copy;
+	bool result = false;
+
+	spin_lock(&clp->async_lock);
+	list_for_each_entry(copy, &clp->async_copies, copies) {
+		if (!test_bit(NFSD4_COPY_F_COMPLETED, &copy->cp_flags) &&
+		    !test_bit(NFSD4_COPY_F_STOPPED, &copy->cp_flags)) {
+			result = true;
+			break;
+		}
+	}
+	spin_unlock(&clp->async_lock);
+	return result;
+}
+
+/**
+ * nfsd4_async_copy_reaper - Purge completed copies
+ * @nn: Network namespace with possible active copy information
+ */
+void nfsd4_async_copy_reaper(struct nfsd_net *nn)
+{
+	struct nfs4_client *clp;
+	struct nfsd4_copy *copy;
+	LIST_HEAD(reaplist);
+
+	spin_lock(&nn->client_lock);
+	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
+		struct list_head *pos, *next;
+
+		spin_lock(&clp->async_lock);
+		list_for_each_safe(pos, next, &clp->async_copies) {
+			copy = list_entry(pos, struct nfsd4_copy, copies);
+			if (test_bit(NFSD4_COPY_F_OFFLOAD_DONE, &copy->cp_flags)) {
+				if (--copy->cp_ttl) {
+					list_del_init(&copy->copies);
+					list_add(&copy->copies, &reaplist);
+				}
+			}
+		}
+		spin_unlock(&clp->async_lock);
+	}
+	spin_unlock(&nn->client_lock);
+
+	while (!list_empty(&reaplist)) {
+		copy = list_first_entry(&reaplist, struct nfsd4_copy, copies);
+		list_del_init(&copy->copies);
+		cleanup_async_copy(copy);
+	}
+}
+
 static void nfs4_put_copy(struct nfsd4_copy *copy)
 {
 	if (!refcount_dec_and_test(&copy->refcount))
@@ -1294,12 +1380,16 @@ static void nfs4_put_copy(struct nfsd4_copy *copy)
 
 static void nfsd4_stop_copy(struct nfsd4_copy *copy)
 {
-	if (!test_and_set_bit(NFSD4_COPY_F_STOPPED, &copy->cp_flags))
+	trace_nfsd_copy_async_cancel(copy);
+	if (!test_and_set_bit(NFSD4_COPY_F_STOPPED, &copy->cp_flags)) {
 		kthread_stop(copy->copy_task);
+		copy->nfserr = nfs_ok;
+		set_bit(NFSD4_COPY_F_COMPLETED, &copy->cp_flags);
+	}
 	nfs4_put_copy(copy);
 }
 
-static struct nfsd4_copy *nfsd4_get_copy(struct nfs4_client *clp)
+static struct nfsd4_copy *nfsd4_unhash_copy(struct nfs4_client *clp)
 {
 	struct nfsd4_copy *copy = NULL;
 
@@ -1308,6 +1398,9 @@ static struct nfsd4_copy *nfsd4_get_copy(struct nfs4_client *clp)
		copy = list_first_entry(&clp->async_copies, struct nfsd4_copy,
					copies);
		refcount_inc(&copy->refcount);
+		copy->cp_clp = NULL;
+		if (!list_empty(&copy->copies))
+			list_del_init(&copy->copies);
 	}
 	spin_unlock(&clp->async_lock);
 	return copy;
@@ -1317,7 +1410,7 @@ void nfsd4_shutdown_copy(struct nfs4_client *clp)
 {
 	struct nfsd4_copy *copy;
 
-	while ((copy = nfsd4_get_copy(clp)) != NULL)
+	while ((copy = nfsd4_unhash_copy(clp)) != NULL)
		nfsd4_stop_copy(copy);
 }
 #ifdef CONFIG_NFSD_V4_2_INTER_SSC
@@ -1605,8 +1698,10 @@ static void nfsd4_cb_offload_release(struct nfsd4_callback *cb)
 {
 	struct nfsd4_cb_offload *cbo =
		container_of(cb, struct nfsd4_cb_offload, co_cb);
+	struct nfsd4_copy *copy =
+		container_of(cbo, struct nfsd4_copy, cp_cb_offload);
 
-	kfree(cbo);
+	set_bit(NFSD4_COPY_F_OFFLOAD_DONE, &copy->cp_flags);
 }
 
 static int nfsd4_cb_offload_done(struct nfsd4_callback *cb,
@@ -1616,12 +1711,21 @@ static int nfsd4_cb_offload_done(struct nfsd4_callback *cb,
		container_of(cb, struct nfsd4_cb_offload, co_cb);
 
 	trace_nfsd_cb_offload_done(&cbo->co_res.cb_stateid, task);
+	switch (task->tk_status) {
+	case -NFS4ERR_DELAY:
+		if (cbo->co_retries--) {
+			rpc_delay(task, HZ / 5);
+			return 0;
+		}
+	}
+	nfsd41_cb_destroy_referring_call_list(cb);
 	return 1;
 }
 
 static const struct nfsd4_callback_ops nfsd4_cb_offload_ops = {
	.release = nfsd4_cb_offload_release,
-	.done = nfsd4_cb_offload_done
+	.done = nfsd4_cb_offload_done,
+	.opcode = OP_CB_OFFLOAD,
 };
 
 static void nfsd4_init_copy_res(struct nfsd4_copy *copy, bool sync)
@@ -1630,7 +1734,6 @@ static void nfsd4_init_copy_res(struct nfsd4_copy *copy, bool sync)
		test_bit(NFSD4_COPY_F_COMMITTED, &copy->cp_flags) ?
			NFS_FILE_SYNC : NFS_UNSTABLE;
 	nfsd4_copy_set_sync(copy, sync);
-	gen_boot_verifier(&copy->cp_res.wr_verifier, copy->cp_clp->net);
 }
 
 static ssize_t _nfsd_copy_file_range(struct nfsd4_copy *copy,
@@ -1737,23 +1840,23 @@ static void cleanup_async_copy(struct nfsd4_copy *copy)
 
 	nfs4_put_copy(copy);
 }
 
-static void nfsd4_send_cb_offload(struct nfsd4_copy *copy, __be32 nfserr)
+static void nfsd4_send_cb_offload(struct nfsd4_copy *copy)
 {
-	struct nfsd4_cb_offload *cbo;
-
-	cbo = kzalloc(sizeof(*cbo), GFP_KERNEL);
-	if (!cbo)
-		return;
+	struct nfsd4_cb_offload *cbo = &copy->cp_cb_offload;
 
 	memcpy(&cbo->co_res, &copy->cp_res, sizeof(copy->cp_res));
 	memcpy(&cbo->co_fh, &copy->fh, sizeof(copy->fh));
-	cbo->co_nfserr = nfserr;
+	cbo->co_nfserr = copy->nfserr;
+	cbo->co_retries = 5;
 
 	nfsd4_init_cb(&cbo->co_cb, copy->cp_clp, &nfsd4_cb_offload_ops,
		      NFSPROC4_CLNT_CB_OFFLOAD);
+	nfsd41_cb_referring_call(&cbo->co_cb, &cbo->co_referring_sessionid,
+				 cbo->co_referring_slotid,
+				 cbo->co_referring_seqno);
 	trace_nfsd_cb_offload(copy->cp_clp, &cbo->co_res.cb_stateid,
-			      &cbo->co_fh, copy->cp_count, nfserr);
-	nfsd4_run_cb(&cbo->co_cb);
+			      &cbo->co_fh, copy->cp_count, copy->nfserr);
+	nfsd4_try_run_cb(&cbo->co_cb);
 }
 
 /**
@@ -1766,9 +1869,8 @@ static void nfsd4_send_cb_offload(struct nfsd4_copy *copy, __be32 nfserr)
 static int nfsd4_do_async_copy(void *data)
 {
 	struct nfsd4_copy *copy = (struct nfsd4_copy *)data;
-	__be32 nfserr;
 
-	trace_nfsd_copy_do_async(copy);
+	trace_nfsd_copy_async(copy);
 	if (nfsd4_ssc_is_inter(copy)) {
		struct file *filp;
 
@@ -1777,25 +1879,31 @@ static int nfsd4_do_async_copy(void *data)
		if (IS_ERR(filp)) {
			switch (PTR_ERR(filp)) {
			case -EBADF:
-				nfserr = nfserr_wrong_type;
+				copy->nfserr = nfserr_wrong_type;
				break;
			default:
-				nfserr = nfserr_offload_denied;
+				copy->nfserr = nfserr_offload_denied;
			}
			/* ss_mnt will be unmounted by the laundromat */
			goto do_callback;
		}
-		nfserr = nfsd4_do_copy(copy, filp, copy->nf_dst->nf_file,
-				       false);
+		copy->nfserr = nfsd4_do_copy(copy, filp, copy->nf_dst->nf_file,
+					     false);
		nfsd4_cleanup_inter_ssc(copy->ss_nsui, filp, copy->nf_dst);
 	} else {
-		nfserr = nfsd4_do_copy(copy, copy->nf_src->nf_file,
-				       copy->nf_dst->nf_file, false);
+		copy->nfserr = nfsd4_do_copy(copy, copy->nf_src->nf_file,
+					     copy->nf_dst->nf_file, false);
 	}
 
 do_callback:
-	nfsd4_send_cb_offload(copy, nfserr);
-	cleanup_async_copy(copy);
+	/* The kthread exits forthwith. Ensure that a subsequent
+	 * OFFLOAD_CANCEL won't try to kill it again. */
+	set_bit(NFSD4_COPY_F_STOPPED, &copy->cp_flags);
+
+	set_bit(NFSD4_COPY_F_COMPLETED, &copy->cp_flags);
+	trace_nfsd_copy_async_done(copy);
+	nfsd4_send_cb_offload(copy);
+	atomic_dec(&copy->cp_nn->pending_async_copies);
 	return 0;
 }
 
@@ -1803,9 +1911,21 @@ static __be32
 nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
 {
+	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+	struct nfsd4_copy *async_copy = NULL;
 	struct nfsd4_copy *copy = &u->copy;
+	struct nfsd42_write_res *result;
 	__be32 status;
-	struct nfsd4_copy *async_copy = NULL;
+
+	/*
+	 * Currently, async COPY is not reliable. Force all COPY
+	 * requests to be synchronous to avoid client application
+	 * hangs waiting for COPY completion.
+	 */
+	nfsd4_copy_set_sync(copy, true);
+
+	result = &copy->cp_res;
+	nfsd_copy_write_verifier((__be32 *)&result->wr_verifier.data, nn);
 
 	copy->cp_clp = cstate->clp;
 	if (nfsd4_ssc_is_inter(copy)) {
@@ -1831,26 +1951,34 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 	memcpy(&copy->fh, &cstate->current_fh.fh_handle,
		sizeof(struct knfsd_fh));
 	if (nfsd4_copy_is_async(copy)) {
-		struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
-
-		status = nfserrno(-ENOMEM);
		async_copy = kzalloc(sizeof(struct nfsd4_copy), GFP_KERNEL);
		if (!async_copy)
			goto out_err;
+		async_copy->cp_nn = nn;
		INIT_LIST_HEAD(&async_copy->copies);
		refcount_set(&async_copy->refcount, 1);
+		async_copy->cp_ttl = NFSD_COPY_INITIAL_TTL;
+		/* Arbitrary cap on number of pending async copy operations */
+		if (atomic_inc_return(&nn->pending_async_copies) >
+				(int)rqstp->rq_pool->sp_nrthreads)
+			goto out_dec_async_copy_err;
		async_copy->cp_src = kmalloc(sizeof(*async_copy->cp_src), GFP_KERNEL);
		if (!async_copy->cp_src)
-			goto out_err;
+			goto out_dec_async_copy_err;
		if (!nfs4_init_copy_state(nn, copy))
-			goto out_err;
-		memcpy(&copy->cp_res.cb_stateid, &copy->cp_stateid.cs_stid,
-			sizeof(copy->cp_res.cb_stateid));
+			goto out_dec_async_copy_err;
+		memcpy(&result->cb_stateid, &copy->cp_stateid.cs_stid,
+			sizeof(result->cb_stateid));
		dup_copy_fields(copy, async_copy);
+		memcpy(async_copy->cp_cb_offload.co_referring_sessionid.data,
+		       cstate->session->se_sessionid.data,
+		       NFS4_MAX_SESSIONID_LEN);
+		async_copy->cp_cb_offload.co_referring_slotid = cstate->slot->sl_index;
+		async_copy->cp_cb_offload.co_referring_seqno = cstate->slot->sl_seqid;
		async_copy->copy_task = kthread_create(nfsd4_do_async_copy,
				async_copy, "%s", "copy thread");
		if (IS_ERR(async_copy->copy_task))
-			goto out_err;
+			goto out_dec_async_copy_err;
		spin_lock(&async_copy->cp_clp->async_lock);
		list_add(&async_copy->copies,
			 &async_copy->cp_clp->async_copies);
@@ -1865,6 +1993,9 @@ out:
 	trace_nfsd_copy_done(copy, status);
 	release_copy_files(copy);
 	return status;
+out_dec_async_copy_err:
+	if (async_copy)
+		atomic_dec(&nn->pending_async_copies);
 out_err:
 	if (nfsd4_ssc_is_inter(copy)) {
		/*
@@ -1876,7 +2007,7 @@ out_err:
 	}
 	if (async_copy)
		cleanup_async_copy(async_copy);
-	status = nfserrno(-ENOMEM);
+	status = nfserr_jukebox;
 	goto out;
 }
 
@@ -1935,7 +2066,7 @@ nfsd4_copy_notify(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 	struct nfsd4_copy_notify *cn = &u->copy_notify;
 	__be32 status;
 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
-	struct nfs4_stid *stid;
+	struct nfs4_stid *stid = NULL;
 	struct nfs4_cpntf_state *cps;
 	struct nfs4_client *clp = cstate->clp;
 
@@ -1944,6 +2075,8 @@ nfsd4_copy_notify(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
			&stid);
 	if (status)
		return status;
+	if (!stid)
+		return nfserr_bad_stateid;
 
 	cn->cpn_lease_time.tv_sec = nn->nfsd4_lease;
 	cn->cpn_lease_time.tv_nsec = 0;
@@ -2003,11 +2136,16 @@ nfsd4_offload_status(struct svc_rqst *rqstp,
 	struct nfsd4_copy *copy;
 	struct nfs4_client *clp = cstate->clp;
 
+	os->completed = false;
 	spin_lock(&clp->async_lock);
 	copy = find_async_copy_locked(clp, &os->stateid);
-	if (copy)
+	if (copy) {
		os->count = copy->cp_res.wr_bytes_written;
-	else
+		if (test_bit(NFSD4_COPY_F_COMPLETED, &copy->cp_flags)) {
+			os->completed = true;
+			os->status = copy->nfserr;
+		}
+	} else
		status = nfserr_bad_stateid;
 	spin_unlock(&clp->async_lock);
 
@@ -2154,6 +2292,29 @@ nfsd4_verify(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 	return status == nfserr_same ? nfs_ok : status;
 }
 
+static __be32
+nfsd4_get_dir_delegation(struct svc_rqst *rqstp,
+			 struct nfsd4_compound_state *cstate,
+			 union nfsd4_op_u *u)
+{
+	struct nfsd4_get_dir_delegation *gdd = &u->get_dir_delegation;
+
+	/*
+	 * RFC 8881, section 18.39.3 says:
+	 *
+	 * "The server may refuse to grant the delegation. In that case, the
+	 *  server will return NFS4ERR_DIRDELEG_UNAVAIL."
+	 *
+	 * This is sub-optimal, since it means that the server would need to
+	 * abort compound processing just because the delegation wasn't
+	 * available. RFC8881bis should change this to allow the server to
+	 * return NFS4_OK with a non-fatal status of GDD4_UNAVAIL in this
+	 * situation.
+	 */
+	gdd->gddrnf_status = GDD4_UNAVAIL;
+	return nfs_ok;
+}
+
 #ifdef CONFIG_NFSD_PNFS
 static const struct nfsd4_layout_ops *
 nfsd4_layout_verify(struct svc_export *exp, unsigned int layout_type)
@@ -2196,7 +2357,9 @@ nfsd4_getdeviceinfo(struct svc_rqst *rqstp,
		return nfserr_noent;
 	}
-	exp = rqst_exp_find(rqstp, map->fsid_type, map->fsid);
+	exp = rqst_exp_find(&rqstp->rq_chandle, SVC_NET(rqstp),
+			    rqstp->rq_client, rqstp->rq_gssclient,
+			    map->fsid_type, map->fsid);
 	if (IS_ERR(exp)) {
		dprintk("%s: could not find device id\n", __func__);
		return nfserr_noent;
 	}
@@ -2234,7 +2397,7 @@ nfsd4_layoutget(struct svc_rqst *rqstp,
 	const struct nfsd4_layout_ops *ops;
 	struct nfs4_layout_stateid *ls;
 	__be32 nfserr;
-	int accmode = NFSD_MAY_READ_IF_EXEC;
+	int accmode = NFSD_MAY_READ_IF_EXEC | NFSD_MAY_OWNER_OVERRIDE;
 
 	switch (lgp->lg_seg.iomode) {
 	case IOMODE_READ:
@@ -2324,7 +2487,8 @@ nfsd4_layoutcommit(struct svc_rqst *rqstp,
 	struct nfs4_layout_stateid *ls;
 	__be32 nfserr;
 
-	nfserr = fh_verify(rqstp, current_fh, 0, NFSD_MAY_WRITE);
+	nfserr = fh_verify(rqstp, current_fh, 0,
+			   NFSD_MAY_WRITE | NFSD_MAY_OWNER_OVERRIDE);
 	if (nfserr)
		goto out;
 
@@ -2739,6 +2903,7 @@ nfsd4_proc_compound(struct svc_rqst *rqstp)
		if (op->opdesc->op_get_currentstateid)
			op->opdesc->op_get_currentstateid(cstate, &op->u);
		op->status = op->opdesc->op_func(rqstp, cstate, &op->u);
+		trace_nfsd_compound_op_err(rqstp, op->opnum, op->status);
 
		/* Only from SEQUENCE */
		if (cstate->status == nfserr_replay_cache) {
@@ -2755,7 +2920,7 @@ nfsd4_proc_compound(struct svc_rqst *rqstp)
 
		if (current_fh->fh_export &&
				need_wrongsec_check(rqstp))
-			op->status = check_nfsd_access(current_fh->fh_export, rqstp);
+			op->status = check_nfsd_access(current_fh->fh_export, rqstp, false);
 	}
 encode_op:
 	if (op->status == nfserr_replay_me) {
@@ -3082,6 +3247,18 @@ static u32 nfsd4_copy_notify_rsize(const struct svc_rqst *rqstp,
		* sizeof(__be32);
 }
 
+static u32 nfsd4_get_dir_delegation_rsize(const struct svc_rqst *rqstp,
+					  const struct nfsd4_op *op)
+{
+	return (op_encode_hdr_size +
+		1 /* gddr_status */ +
+		op_encode_verifier_maxsz +
+		op_encode_stateid_maxsz +
+		2 /* gddr_notification */ +
+		2 /* gddr_child_attributes */ +
+		2 /* gddr_dir_attributes */);
+}
+
 #ifdef CONFIG_NFSD_PNFS
 static u32 nfsd4_getdeviceinfo_rsize(const struct svc_rqst *rqstp,
				     const struct nfsd4_op *op)
@@ -3399,6 +3576,7 @@ static const struct nfsd4_operation nfsd4_ops[] = {
 	/* NFSv4.1 operations */
 	[OP_EXCHANGE_ID] = {
		.op_func = nfsd4_exchange_id,
+		.op_release = nfsd4_exchange_id_release,
		.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP
			| OP_MODIFIES_SOMETHING,
		.op_name = "OP_EXCHANGE_ID",
@@ -3470,6 +3648,12 @@ static const struct nfsd4_operation nfsd4_ops[] = {
		.op_get_currentstateid = nfsd4_get_freestateid,
		.op_rsize_bop = nfsd4_only_status_rsize,
 	},
+	[OP_GET_DIR_DELEGATION] = {
+		.op_func = nfsd4_get_dir_delegation,
+		.op_flags = OP_MODIFIES_SOMETHING,
+		.op_name = "OP_GET_DIR_DELEGATION",
+		.op_rsize_bop = nfsd4_get_dir_delegation_rsize,
+	},
 #ifdef CONFIG_NFSD_PNFS
 	[OP_GETDEVICEINFO] = {
		.op_func = nfsd4_getdeviceinfo,
@@ -3596,7 +3780,8 @@ bool nfsd4_spo_must_allow(struct svc_rqst *rqstp)
 	struct nfs4_op_map *allow = &cstate->clp->cl_spo_must_allow;
 	u32 opiter;
 
-	if (!cstate->minorversion)
+	if (rqstp->rq_procinfo != &nfsd_version4.vs_proc[NFSPROC4_COMPOUND] ||
+	    cstate->minorversion == 0)
		return false;
 
 	if (cstate->spo_must_allowed)
@@ -3662,7 +3847,7 @@ static const struct svc_procedure nfsd_procedures4[2] = {
		.pc_ressize = sizeof(struct nfsd4_compoundres),
		.pc_release = nfsd4_release_compoundargs,
		.pc_cachetype = RC_NOCACHE,
-		.pc_xdrressize = NFSD_BUFSIZE/4,
+		.pc_xdrressize = 3+NFSSVC_MAXBLKSIZE/4,
		.pc_name = "COMPOUND",
	},
 };