Diffstat (limited to 'drivers/target/target_core_transport.c')
| -rw-r--r-- | drivers/target/target_core_transport.c | 3364 |
1 file changed, 2093 insertions, 1271 deletions
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 7172d005d063..e8b7955d40f2 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -1,26 +1,13 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /******************************************************************************* * Filename: target_core_transport.c * * This file contains the Generic Target Engine Core. * - * (c) Copyright 2002-2012 RisingTide Systems LLC. + * (c) Copyright 2002-2013 Datera, Inc. * * Nicholas A. Bellinger <nab@kernel.org> * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * ******************************************************************************/ #include <linux/net.h> @@ -28,24 +15,22 @@ #include <linux/string.h> #include <linux/timer.h> #include <linux/slab.h> -#include <linux/blkdev.h> #include <linux/spinlock.h> #include <linux/kthread.h> #include <linux/in.h> #include <linux/cdrom.h> #include <linux/module.h> #include <linux/ratelimit.h> -#include <asm/unaligned.h> +#include <linux/vmalloc.h> +#include <linux/unaligned.h> #include <net/sock.h> #include <net/tcp.h> -#include <scsi/scsi.h> -#include <scsi/scsi_cmnd.h> -#include <scsi/scsi_tcq.h> +#include <scsi/scsi_proto.h> +#include <scsi/scsi_common.h> #include <target/target_core_base.h> #include <target/target_core_backend.h> #include <target/target_core_fabric.h> -#include <target/target_core_configfs.h> #include "target_core_internal.h" #include "target_core_alua.h" @@ -56,19 +41,20 @@ #include <trace/events/target.h> static struct workqueue_struct *target_completion_wq; +static struct workqueue_struct *target_submission_wq; static struct kmem_cache *se_sess_cache; struct kmem_cache *se_ua_cache; struct kmem_cache *t10_pr_reg_cache; struct kmem_cache *t10_alua_lu_gp_cache; struct kmem_cache *t10_alua_lu_gp_mem_cache; struct kmem_cache *t10_alua_tg_pt_gp_cache; -struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; +struct kmem_cache *t10_alua_lba_map_cache; +struct kmem_cache *t10_alua_lba_map_mem_cache; static void transport_complete_task_attr(struct se_cmd *cmd); +static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason); static void transport_handle_queue_full(struct se_cmd *cmd, - struct se_device *dev); -static int transport_generic_get_mem(struct se_cmd *cmd); -static int transport_put_cmd(struct se_cmd *cmd); + struct se_device *dev, int err, bool write_pending); static void target_complete_ok_work(struct work_struct *work); int init_se_kmem_caches(void) @@ -120,26 +106,43 @@ int init_se_kmem_caches(void) "cache failed\n"); goto out_free_lu_gp_mem_cache; } - t10_alua_tg_pt_gp_mem_cache = kmem_cache_create( - "t10_alua_tg_pt_gp_mem_cache", - sizeof(struct t10_alua_tg_pt_gp_member), - __alignof__(struct t10_alua_tg_pt_gp_member), - 0, NULL); - if 
(!t10_alua_tg_pt_gp_mem_cache) { - pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_" - "mem_t failed\n"); + t10_alua_lba_map_cache = kmem_cache_create( + "t10_alua_lba_map_cache", + sizeof(struct t10_alua_lba_map), + __alignof__(struct t10_alua_lba_map), 0, NULL); + if (!t10_alua_lba_map_cache) { + pr_err("kmem_cache_create() for t10_alua_lba_map_" + "cache failed\n"); goto out_free_tg_pt_gp_cache; } + t10_alua_lba_map_mem_cache = kmem_cache_create( + "t10_alua_lba_map_mem_cache", + sizeof(struct t10_alua_lba_map_member), + __alignof__(struct t10_alua_lba_map_member), 0, NULL); + if (!t10_alua_lba_map_mem_cache) { + pr_err("kmem_cache_create() for t10_alua_lba_map_mem_" + "cache failed\n"); + goto out_free_lba_map_cache; + } target_completion_wq = alloc_workqueue("target_completion", - WQ_MEM_RECLAIM, 0); + WQ_MEM_RECLAIM | WQ_PERCPU, 0); if (!target_completion_wq) - goto out_free_tg_pt_gp_mem_cache; + goto out_free_lba_map_mem_cache; + + target_submission_wq = alloc_workqueue("target_submission", + WQ_MEM_RECLAIM | WQ_PERCPU, 0); + if (!target_submission_wq) + goto out_free_completion_wq; return 0; -out_free_tg_pt_gp_mem_cache: - kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); +out_free_completion_wq: + destroy_workqueue(target_completion_wq); +out_free_lba_map_mem_cache: + kmem_cache_destroy(t10_alua_lba_map_mem_cache); +out_free_lba_map_cache: + kmem_cache_destroy(t10_alua_lba_map_cache); out_free_tg_pt_gp_cache: kmem_cache_destroy(t10_alua_tg_pt_gp_cache); out_free_lu_gp_mem_cache: @@ -158,6 +161,7 @@ out: void release_se_kmem_caches(void) { + destroy_workqueue(target_submission_wq); destroy_workqueue(target_completion_wq); kmem_cache_destroy(se_sess_cache); kmem_cache_destroy(se_ua_cache); @@ -165,7 +169,8 @@ void release_se_kmem_caches(void) kmem_cache_destroy(t10_alua_lu_gp_cache); kmem_cache_destroy(t10_alua_lu_gp_mem_cache); kmem_cache_destroy(t10_alua_tg_pt_gp_cache); - kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); + kmem_cache_destroy(t10_alua_lba_map_cache); + kmem_cache_destroy(t10_alua_lba_map_mem_cache); } /* This code ensures unique mib indexes are handed out. 
*/ @@ -196,22 +201,92 @@ void transport_subsystem_check_init(void) if (sub_api_initialized) return; - ret = request_module("target_core_iblock"); + ret = IS_ENABLED(CONFIG_TCM_IBLOCK) && request_module("target_core_iblock"); if (ret != 0) pr_err("Unable to load target_core_iblock\n"); - ret = request_module("target_core_file"); + ret = IS_ENABLED(CONFIG_TCM_FILEIO) && request_module("target_core_file"); if (ret != 0) pr_err("Unable to load target_core_file\n"); - ret = request_module("target_core_pscsi"); + ret = IS_ENABLED(CONFIG_TCM_PSCSI) && request_module("target_core_pscsi"); if (ret != 0) pr_err("Unable to load target_core_pscsi\n"); + ret = IS_ENABLED(CONFIG_TCM_USER2) && request_module("target_core_user"); + if (ret != 0) + pr_err("Unable to load target_core_user\n"); + sub_api_initialized = 1; } -struct se_session *transport_init_session(void) +static void target_release_cmd_refcnt(struct percpu_ref *ref) +{ + struct target_cmd_counter *cmd_cnt = container_of(ref, + typeof(*cmd_cnt), + refcnt); + wake_up(&cmd_cnt->refcnt_wq); +} + +struct target_cmd_counter *target_alloc_cmd_counter(void) +{ + struct target_cmd_counter *cmd_cnt; + int rc; + + cmd_cnt = kzalloc(sizeof(*cmd_cnt), GFP_KERNEL); + if (!cmd_cnt) + return NULL; + + init_completion(&cmd_cnt->stop_done); + init_waitqueue_head(&cmd_cnt->refcnt_wq); + atomic_set(&cmd_cnt->stopped, 0); + + rc = percpu_ref_init(&cmd_cnt->refcnt, target_release_cmd_refcnt, 0, + GFP_KERNEL); + if (rc) + goto free_cmd_cnt; + + return cmd_cnt; + +free_cmd_cnt: + kfree(cmd_cnt); + return NULL; +} +EXPORT_SYMBOL_GPL(target_alloc_cmd_counter); + +void target_free_cmd_counter(struct target_cmd_counter *cmd_cnt) +{ + /* + * Drivers like loop do not call target_stop_session during session + * shutdown so we have to drop the ref taken at init time here. + */ + if (!atomic_read(&cmd_cnt->stopped)) + percpu_ref_put(&cmd_cnt->refcnt); + + percpu_ref_exit(&cmd_cnt->refcnt); + kfree(cmd_cnt); +} +EXPORT_SYMBOL_GPL(target_free_cmd_counter); + +/** + * transport_init_session - initialize a session object + * @se_sess: Session object pointer. + * + * The caller must have zero-initialized @se_sess before calling this function. + */ +void transport_init_session(struct se_session *se_sess) +{ + INIT_LIST_HEAD(&se_sess->sess_list); + INIT_LIST_HEAD(&se_sess->sess_acl_list); + spin_lock_init(&se_sess->sess_cmd_lock); +} +EXPORT_SYMBOL(transport_init_session); + +/** + * transport_alloc_session - allocate a session object and initialize it + * @sup_prot_ops: bitmask that defines which T10-PI modes are supported. + */ +struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops) { struct se_session *se_sess; @@ -221,16 +296,83 @@ struct se_session *transport_init_session(void) " se_sess_cache\n"); return ERR_PTR(-ENOMEM); } - INIT_LIST_HEAD(&se_sess->sess_list); - INIT_LIST_HEAD(&se_sess->sess_acl_list); - INIT_LIST_HEAD(&se_sess->sess_cmd_list); - INIT_LIST_HEAD(&se_sess->sess_wait_list); - spin_lock_init(&se_sess->sess_cmd_lock); - kref_init(&se_sess->sess_kref); + transport_init_session(se_sess); + se_sess->sup_prot_ops = sup_prot_ops; + + return se_sess; +} +EXPORT_SYMBOL(transport_alloc_session); + +/** + * transport_alloc_session_tags - allocate target driver private data + * @se_sess: Session pointer. + * @tag_num: Maximum number of in-flight commands between initiator and target. + * @tag_size: Size in bytes of the private data a target driver associates with + * each command. 
+ */ +int transport_alloc_session_tags(struct se_session *se_sess, + unsigned int tag_num, unsigned int tag_size) +{ + int rc; + + se_sess->sess_cmd_map = kvcalloc(tag_size, tag_num, + GFP_KERNEL | __GFP_RETRY_MAYFAIL); + if (!se_sess->sess_cmd_map) { + pr_err("Unable to allocate se_sess->sess_cmd_map\n"); + return -ENOMEM; + } + + rc = sbitmap_queue_init_node(&se_sess->sess_tag_pool, tag_num, -1, + false, GFP_KERNEL, NUMA_NO_NODE); + if (rc < 0) { + pr_err("Unable to init se_sess->sess_tag_pool," + " tag_num: %u\n", tag_num); + kvfree(se_sess->sess_cmd_map); + se_sess->sess_cmd_map = NULL; + return -ENOMEM; + } + + return 0; +} +EXPORT_SYMBOL(transport_alloc_session_tags); + +/** + * transport_init_session_tags - allocate a session and target driver private data + * @tag_num: Maximum number of in-flight commands between initiator and target. + * @tag_size: Size in bytes of the private data a target driver associates with + * each command. + * @sup_prot_ops: bitmask that defines which T10-PI modes are supported. + */ +static struct se_session * +transport_init_session_tags(unsigned int tag_num, unsigned int tag_size, + enum target_prot_op sup_prot_ops) +{ + struct se_session *se_sess; + int rc; + + if (tag_num != 0 && !tag_size) { + pr_err("init_session_tags called with percpu-ida tag_num:" + " %u, but zero tag_size\n", tag_num); + return ERR_PTR(-EINVAL); + } + if (!tag_num && tag_size) { + pr_err("init_session_tags called with percpu-ida tag_size:" + " %u, but zero tag_num\n", tag_size); + return ERR_PTR(-EINVAL); + } + + se_sess = transport_alloc_session(sup_prot_ops); + if (IS_ERR(se_sess)) + return se_sess; + + rc = transport_alloc_session_tags(se_sess, tag_num, tag_size); + if (rc < 0) { + transport_free_session(se_sess); + return ERR_PTR(-ENOMEM); + } return se_sess; } -EXPORT_SYMBOL(transport_init_session); /* * Called with spin_lock_irqsave(&struct se_portal_group->session_lock called. @@ -241,7 +383,9 @@ void __transport_register_session( struct se_session *se_sess, void *fabric_sess_ptr) { + const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo; unsigned char buf[PR_REG_ISID_LEN]; + unsigned long flags; se_sess->se_tpg = se_tpg; se_sess->fabric_sess_ptr = fabric_sess_ptr; @@ -253,6 +397,21 @@ void __transport_register_session( */ if (se_nacl) { /* + * + * Determine if fabric allows for T10-PI feature bits exposed to + * initiators for device backends with !dev->dev_attrib.pi_prot_type. + * + * If so, then always save prot_type on a per se_node_acl node + * basis and re-instate the previous sess_prot_type to avoid + * disabling PI from below any previously initiator side + * registered LUNs. + */ + if (se_nacl->saved_prot_type) + se_sess->sess_prot_type = se_nacl->saved_prot_type; + else if (tfo->tpg_check_prot_fabric_only) + se_sess->sess_prot_type = se_nacl->saved_prot_type = + tfo->tpg_check_prot_fabric_only(se_tpg); + /* * If the fabric module supports an ISID based TransportID, * save this value in binary from the fabric I_T Nexus now. */ @@ -262,9 +421,8 @@ void __transport_register_session( &buf[0], PR_REG_ISID_LEN); se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]); } - kref_get(&se_nacl->acl_kref); - spin_lock_irq(&se_nacl->nacl_sess_lock); + spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); /* * The se_nacl->nacl_sess pointer will be set to the * last active I_T Nexus for each struct se_node_acl. 
@@ -273,12 +431,12 @@ void __transport_register_session( list_add_tail(&se_sess->sess_acl_list, &se_nacl->acl_sess_list); - spin_unlock_irq(&se_nacl->nacl_sess_lock); + spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); } list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list); pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n", - se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr); + se_tpg->se_tpg_tfo->fabric_name, se_sess->fabric_sess_ptr); } EXPORT_SYMBOL(__transport_register_session); @@ -296,45 +454,114 @@ void transport_register_session( } EXPORT_SYMBOL(transport_register_session); -static void target_release_session(struct kref *kref) +struct se_session * +target_setup_session(struct se_portal_group *tpg, + unsigned int tag_num, unsigned int tag_size, + enum target_prot_op prot_op, + const char *initiatorname, void *private, + int (*callback)(struct se_portal_group *, + struct se_session *, void *)) { - struct se_session *se_sess = container_of(kref, - struct se_session, sess_kref); - struct se_portal_group *se_tpg = se_sess->se_tpg; + struct target_cmd_counter *cmd_cnt; + struct se_session *sess; + int rc; - se_tpg->se_tpg_tfo->close_session(se_sess); -} + cmd_cnt = target_alloc_cmd_counter(); + if (!cmd_cnt) + return ERR_PTR(-ENOMEM); + /* + * If the fabric driver is using percpu-ida based pre allocation + * of I/O descriptor tags, go ahead and perform that setup now.. + */ + if (tag_num != 0) + sess = transport_init_session_tags(tag_num, tag_size, prot_op); + else + sess = transport_alloc_session(prot_op); -void target_get_session(struct se_session *se_sess) -{ - kref_get(&se_sess->sess_kref); + if (IS_ERR(sess)) { + rc = PTR_ERR(sess); + goto free_cnt; + } + sess->cmd_cnt = cmd_cnt; + + sess->se_node_acl = core_tpg_check_initiator_node_acl(tpg, + (unsigned char *)initiatorname); + if (!sess->se_node_acl) { + rc = -EACCES; + goto free_sess; + } + /* + * Go ahead and perform any remaining fabric setup that is + * required before transport_register_session(). 
+ */ + if (callback != NULL) { + rc = callback(tpg, sess, private); + if (rc) + goto free_sess; + } + + transport_register_session(tpg, sess->se_node_acl, sess, private); + return sess; + +free_sess: + transport_free_session(sess); + return ERR_PTR(rc); + +free_cnt: + target_free_cmd_counter(cmd_cnt); + return ERR_PTR(rc); } -EXPORT_SYMBOL(target_get_session); +EXPORT_SYMBOL(target_setup_session); -void target_put_session(struct se_session *se_sess) +ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page) { - struct se_portal_group *tpg = se_sess->se_tpg; + struct se_session *se_sess; + ssize_t len = 0; - if (tpg->se_tpg_tfo->put_session != NULL) { - tpg->se_tpg_tfo->put_session(se_sess); - return; + spin_lock_bh(&se_tpg->session_lock); + list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) { + if (!se_sess->se_node_acl) + continue; + if (!se_sess->se_node_acl->dynamic_node_acl) + continue; + if (strlen(se_sess->se_node_acl->initiatorname) + 1 + len > PAGE_SIZE) + break; + + len += snprintf(page + len, PAGE_SIZE - len, "%s\n", + se_sess->se_node_acl->initiatorname); + len += 1; /* Include NULL terminator */ } - kref_put(&se_sess->sess_kref, target_release_session); + spin_unlock_bh(&se_tpg->session_lock); + + return len; } -EXPORT_SYMBOL(target_put_session); +EXPORT_SYMBOL(target_show_dynamic_sessions); static void target_complete_nacl(struct kref *kref) { struct se_node_acl *nacl = container_of(kref, struct se_node_acl, acl_kref); + struct se_portal_group *se_tpg = nacl->se_tpg; + + if (!nacl->dynamic_stop) { + complete(&nacl->acl_free_comp); + return; + } - complete(&nacl->acl_free_comp); + mutex_lock(&se_tpg->acl_node_mutex); + list_del_init(&nacl->acl_list); + mutex_unlock(&se_tpg->acl_node_mutex); + + core_tpg_wait_for_nacl_pr_ref(nacl); + core_free_device_list_for_node(nacl, se_tpg); + kfree(nacl); } void target_put_nacl(struct se_node_acl *nacl) { kref_put(&nacl->acl_kref, target_complete_nacl); } +EXPORT_SYMBOL(target_put_nacl); void transport_deregister_session_configfs(struct se_session *se_sess) { @@ -346,8 +573,8 @@ void transport_deregister_session_configfs(struct se_session *se_sess) se_nacl = se_sess->se_node_acl; if (se_nacl) { spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); - if (se_nacl->acl_stop == 0) - list_del(&se_sess->sess_acl_list); + if (!list_empty(&se_sess->sess_acl_list)) + list_del_init(&se_sess->sess_acl_list); /* * If the session list is empty, then clear the pointer. * Otherwise, set the struct se_session pointer from the tail @@ -367,23 +594,70 @@ EXPORT_SYMBOL(transport_deregister_session_configfs); void transport_free_session(struct se_session *se_sess) { + struct se_node_acl *se_nacl = se_sess->se_node_acl; + + /* + * Drop the se_node_acl->nacl_kref obtained from within + * core_tpg_get_initiator_node_acl(). + */ + if (se_nacl) { + struct se_portal_group *se_tpg = se_nacl->se_tpg; + const struct target_core_fabric_ops *se_tfo = se_tpg->se_tpg_tfo; + unsigned long flags; + + se_sess->se_node_acl = NULL; + + /* + * Also determine if we need to drop the extra ->cmd_kref if + * it had been previously dynamically generated, and + * the endpoint is not caching dynamic ACLs. 
+ */ + mutex_lock(&se_tpg->acl_node_mutex); + if (se_nacl->dynamic_node_acl && + !se_tfo->tpg_check_demo_mode_cache(se_tpg)) { + spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); + if (list_empty(&se_nacl->acl_sess_list)) + se_nacl->dynamic_stop = true; + spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); + + if (se_nacl->dynamic_stop) + list_del_init(&se_nacl->acl_list); + } + mutex_unlock(&se_tpg->acl_node_mutex); + + if (se_nacl->dynamic_stop) + target_put_nacl(se_nacl); + + target_put_nacl(se_nacl); + } + if (se_sess->sess_cmd_map) { + sbitmap_queue_free(&se_sess->sess_tag_pool); + kvfree(se_sess->sess_cmd_map); + } + if (se_sess->cmd_cnt) + target_free_cmd_counter(se_sess->cmd_cnt); kmem_cache_free(se_sess_cache, se_sess); } EXPORT_SYMBOL(transport_free_session); +static int target_release_res(struct se_device *dev, void *data) +{ + struct se_session *sess = data; + + if (dev->reservation_holder == sess) + target_release_reservation(dev); + return 0; +} + void transport_deregister_session(struct se_session *se_sess) { struct se_portal_group *se_tpg = se_sess->se_tpg; - struct target_core_fabric_ops *se_tfo; - struct se_node_acl *se_nacl; unsigned long flags; - bool comp_nacl = true; if (!se_tpg) { transport_free_session(se_sess); return; } - se_tfo = se_tpg->se_tpg_tfo; spin_lock_irqsave(&se_tpg->session_lock, flags); list_del(&se_sess->sess_list); @@ -392,44 +666,33 @@ void transport_deregister_session(struct se_session *se_sess) spin_unlock_irqrestore(&se_tpg->session_lock, flags); /* - * Determine if we need to do extra work for this initiator node's - * struct se_node_acl if it had been previously dynamically generated. + * Since the session is being removed, release SPC-2 + * reservations held by the session that is disappearing. */ - se_nacl = se_sess->se_node_acl; - - spin_lock_irqsave(&se_tpg->acl_node_lock, flags); - if (se_nacl && se_nacl->dynamic_node_acl) { - if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) { - list_del(&se_nacl->acl_list); - se_tpg->num_node_acls--; - spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags); - core_tpg_wait_for_nacl_pr_ref(se_nacl); - core_free_device_list_for_node(se_nacl, se_tpg); - se_tfo->tpg_release_fabric_acl(se_tpg, se_nacl); - - comp_nacl = false; - spin_lock_irqsave(&se_tpg->acl_node_lock, flags); - } - } - spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags); + target_for_each_device(target_release_res, se_sess); pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n", - se_tpg->se_tpg_tfo->get_fabric_name()); + se_tpg->se_tpg_tfo->fabric_name); /* - * If last kref is dropping now for an explict NodeACL, awake sleeping + * If last kref is dropping now for an explicit NodeACL, awake sleeping * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group - * removal context. + * removal context from within transport_free_session() code. + * + * For dynamic ACL, target_put_nacl() uses target_complete_nacl() + * to release all remaining generate_node_acl=1 created ACL resources. */ - if (se_nacl && comp_nacl == true) - target_put_nacl(se_nacl); transport_free_session(se_sess); } EXPORT_SYMBOL(transport_deregister_session); -/* - * Called with cmd->t_state_lock held. 
- */ +void target_remove_session(struct se_session *se_sess) +{ + transport_deregister_session_configfs(se_sess); + transport_deregister_session(se_sess); +} +EXPORT_SYMBOL(target_remove_session); + static void target_remove_from_state_list(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; @@ -438,121 +701,91 @@ static void target_remove_from_state_list(struct se_cmd *cmd) if (!dev) return; - if (cmd->transport_state & CMD_T_BUSY) - return; - - spin_lock_irqsave(&dev->execute_task_lock, flags); + spin_lock_irqsave(&dev->queues[cmd->cpuid].lock, flags); if (cmd->state_active) { list_del(&cmd->state_list); cmd->state_active = false; } - spin_unlock_irqrestore(&dev->execute_task_lock, flags); + spin_unlock_irqrestore(&dev->queues[cmd->cpuid].lock, flags); } -static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists, - bool write_pending) +static void target_remove_from_tmr_list(struct se_cmd *cmd) { + struct se_device *dev = NULL; unsigned long flags; - spin_lock_irqsave(&cmd->t_state_lock, flags); - if (write_pending) - cmd->t_state = TRANSPORT_WRITE_PENDING; - - /* - * Determine if IOCTL context caller in requesting the stopping of this - * command for LUN shutdown purposes. - */ - if (cmd->transport_state & CMD_T_LUN_STOP) { - pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n", - __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd)); - - cmd->transport_state &= ~CMD_T_ACTIVE; - if (remove_from_lists) - target_remove_from_state_list(cmd); - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - - complete(&cmd->transport_lun_stop_comp); - return 1; - } - - if (remove_from_lists) { - target_remove_from_state_list(cmd); + if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) + dev = cmd->se_tmr_req->tmr_dev; - /* - * Clear struct se_cmd->se_lun before the handoff to FE. - */ - cmd->se_lun = NULL; + if (dev) { + spin_lock_irqsave(&dev->se_tmr_lock, flags); + if (cmd->se_tmr_req->tmr_dev) + list_del_init(&cmd->se_tmr_req->tmr_list); + spin_unlock_irqrestore(&dev->se_tmr_lock, flags); } +} +/* + * This function is called by the target core after the target core has + * finished processing a SCSI command or SCSI TMF. Both the regular command + * processing code and the code for aborting commands can call this + * function. CMD_T_STOP is set if and only if another thread is waiting + * inside transport_wait_for_tasks() for t_transport_stop_comp. + */ +static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) +{ + unsigned long flags; + spin_lock_irqsave(&cmd->t_state_lock, flags); /* * Determine if frontend context caller is requesting the stopping of * this command for frontend exceptions. */ if (cmd->transport_state & CMD_T_STOP) { - pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n", - __func__, __LINE__, - cmd->se_tfo->get_task_tag(cmd)); + pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n", + __func__, __LINE__, cmd->tag); spin_unlock_irqrestore(&cmd->t_state_lock, flags); - complete(&cmd->t_transport_stop_comp); + complete_all(&cmd->t_transport_stop_comp); return 1; } - cmd->transport_state &= ~CMD_T_ACTIVE; - if (remove_from_lists) { - /* - * Some fabric modules like tcm_loop can release - * their internally allocated I/O reference now and - * struct se_cmd now. - * - * Fabric modules are expected to return '1' here if the - * se_cmd being passed is released at this point, - * or zero if not being released. 
- */ - if (cmd->se_tfo->check_stop_free != NULL) { - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - return cmd->se_tfo->check_stop_free(cmd); - } - } - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - return 0; -} -static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) -{ - return transport_cmd_check_stop(cmd, true, false); + /* + * Some fabric modules like tcm_loop can release their internally + * allocated I/O reference and struct se_cmd now. + * + * Fabric modules are expected to return '1' here if the se_cmd being + * passed is released at this point, or zero if not being released. + */ + return cmd->se_tfo->check_stop_free(cmd); } static void transport_lun_remove_cmd(struct se_cmd *cmd) { struct se_lun *lun = cmd->se_lun; - unsigned long flags; if (!lun) return; - spin_lock_irqsave(&lun->lun_cmd_lock, flags); - if (!list_empty(&cmd->se_lun_node)) - list_del_init(&cmd->se_lun_node); - spin_unlock_irqrestore(&lun->lun_cmd_lock, flags); -} + target_remove_from_state_list(cmd); + target_remove_from_tmr_list(cmd); -void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) -{ - if (transport_cmd_check_stop_to_fabric(cmd)) - return; - if (remove) - transport_put_cmd(cmd); + if (cmpxchg(&cmd->lun_ref_active, true, false)) + percpu_ref_put(&lun->lun_ref); + + /* + * Clear struct se_cmd->se_lun before the handoff to FE. + */ + cmd->se_lun = NULL; } static void target_complete_failure_work(struct work_struct *work) { struct se_cmd *cmd = container_of(work, struct se_cmd, work); - transport_generic_request_failure(cmd, - TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE); + transport_generic_request_failure(cmd, cmd->sense_reason); } /* @@ -578,72 +811,186 @@ static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd) return cmd->sense_buffer; } -void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) +void transport_copy_sense_to_cmd(struct se_cmd *cmd, unsigned char *sense) { - struct se_device *dev = cmd->se_dev; - int success = scsi_status == GOOD; + unsigned char *cmd_sense_buf; unsigned long flags; - cmd->scsi_status = scsi_status; + spin_lock_irqsave(&cmd->t_state_lock, flags); + cmd_sense_buf = transport_get_sense_buffer(cmd); + if (!cmd_sense_buf) { + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + return; + } + cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE; + memcpy(cmd_sense_buf, sense, cmd->scsi_sense_length); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); +} +EXPORT_SYMBOL(transport_copy_sense_to_cmd); - spin_lock_irqsave(&cmd->t_state_lock, flags); - cmd->transport_state &= ~CMD_T_BUSY; +static void target_handle_abort(struct se_cmd *cmd) +{ + bool tas = cmd->transport_state & CMD_T_TAS; + bool ack_kref = cmd->se_cmd_flags & SCF_ACK_KREF; + int ret; - if (dev && dev->transport->transport_complete) { - dev->transport->transport_complete(cmd, - cmd->t_data_sg, - transport_get_sense_buffer(cmd)); - if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) - success = 1; + pr_debug("tag %#llx: send_abort_response = %d\n", cmd->tag, tas); + + if (tas) { + if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { + cmd->scsi_status = SAM_STAT_TASK_ABORTED; + pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n", + cmd->t_task_cdb[0], cmd->tag); + trace_target_cmd_complete(cmd); + ret = cmd->se_tfo->queue_status(cmd); + if (ret) { + transport_handle_queue_full(cmd, cmd->se_dev, + ret, false); + return; + } + } else { + cmd->se_tmr_req->response = TMR_FUNCTION_REJECTED; + cmd->se_tfo->queue_tm_rsp(cmd); + } + } else { + /* + * Allow the 
fabric driver to unmap any resources before + * releasing the descriptor via TFO->release_cmd(). + */ + cmd->se_tfo->aborted_task(cmd); + if (ack_kref) + WARN_ON_ONCE(target_put_sess_cmd(cmd) != 0); + /* + * To do: establish a unit attention condition on the I_T + * nexus associated with cmd. See also the paragraph "Aborting + * commands" in SAM. + */ } - /* - * See if we are waiting to complete for an exception condition. - */ - if (cmd->transport_state & CMD_T_REQUEST_STOP) { - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - complete(&cmd->task_stop_comp); - return; + WARN_ON_ONCE(kref_read(&cmd->cmd_kref) == 0); + + transport_lun_remove_cmd(cmd); + + transport_cmd_check_stop_to_fabric(cmd); +} + +static void target_abort_work(struct work_struct *work) +{ + struct se_cmd *cmd = container_of(work, struct se_cmd, work); + + target_handle_abort(cmd); +} + +static bool target_cmd_interrupted(struct se_cmd *cmd) +{ + int post_ret; + + if (cmd->transport_state & CMD_T_ABORTED) { + if (cmd->transport_complete_callback) + cmd->transport_complete_callback(cmd, false, &post_ret); + INIT_WORK(&cmd->work, target_abort_work); + queue_work(target_completion_wq, &cmd->work); + return true; + } else if (cmd->transport_state & CMD_T_STOP) { + if (cmd->transport_complete_callback) + cmd->transport_complete_callback(cmd, false, &post_ret); + complete_all(&cmd->t_transport_stop_comp); + return true; } - if (!success) - cmd->transport_state |= CMD_T_FAILED; + return false; +} + +/* May be called from interrupt context so must not sleep. */ +void target_complete_cmd_with_sense(struct se_cmd *cmd, u8 scsi_status, + sense_reason_t sense_reason) +{ + struct se_wwn *wwn = cmd->se_sess->se_tpg->se_tpg_wwn; + int success, cpu; + unsigned long flags; - /* - * Check for case where an explict ABORT_TASK has been received - * and transport_wait_for_tasks() will be waiting for completion.. - */ - if (cmd->transport_state & CMD_T_ABORTED && - cmd->transport_state & CMD_T_STOP) { - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - complete(&cmd->t_transport_stop_comp); + if (target_cmd_interrupted(cmd)) return; - } else if (cmd->transport_state & CMD_T_FAILED) { - INIT_WORK(&cmd->work, target_complete_failure_work); - } else { - INIT_WORK(&cmd->work, target_complete_ok_work); + + cmd->scsi_status = scsi_status; + cmd->sense_reason = sense_reason; + + spin_lock_irqsave(&cmd->t_state_lock, flags); + switch (cmd->scsi_status) { + case SAM_STAT_CHECK_CONDITION: + if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) + success = 1; + else + success = 0; + break; + default: + success = 1; + break; } cmd->t_state = TRANSPORT_COMPLETE; cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE); spin_unlock_irqrestore(&cmd->t_state_lock, flags); - queue_work(target_completion_wq, &cmd->work); + INIT_WORK(&cmd->work, success ? target_complete_ok_work : + target_complete_failure_work); + + if (!wwn || wwn->cmd_compl_affinity == SE_COMPL_AFFINITY_CPUID) + cpu = cmd->cpuid; + else + cpu = wwn->cmd_compl_affinity; + + queue_work_on(cpu, target_completion_wq, &cmd->work); +} +EXPORT_SYMBOL(target_complete_cmd_with_sense); + +void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) +{ + target_complete_cmd_with_sense(cmd, scsi_status, scsi_status ? 
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE : + TCM_NO_SENSE); } EXPORT_SYMBOL(target_complete_cmd); +void target_set_cmd_data_length(struct se_cmd *cmd, int length) +{ + if (length < cmd->data_length) { + if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { + cmd->residual_count += cmd->data_length - length; + } else { + cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; + cmd->residual_count = cmd->data_length - length; + } + + cmd->data_length = length; + } +} +EXPORT_SYMBOL(target_set_cmd_data_length); + +void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length) +{ + if (scsi_status == SAM_STAT_GOOD || + cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) { + target_set_cmd_data_length(cmd, length); + } + + target_complete_cmd(cmd, scsi_status); +} +EXPORT_SYMBOL(target_complete_cmd_with_length); + static void target_add_to_state_list(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; unsigned long flags; - spin_lock_irqsave(&dev->execute_task_lock, flags); + spin_lock_irqsave(&dev->queues[cmd->cpuid].lock, flags); if (!cmd->state_active) { - list_add_tail(&cmd->state_list, &dev->state_list); + list_add_tail(&cmd->state_list, + &dev->queues[cmd->cpuid].state_list); cmd->state_active = true; } - spin_unlock_irqrestore(&dev->execute_task_lock, flags); + spin_unlock_irqrestore(&dev->queues[cmd->cpuid].lock, flags); } /* @@ -665,18 +1012,18 @@ void target_qf_do_work(struct work_struct *work) list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) { list_del(&cmd->se_qf_node); - atomic_dec(&dev->dev_qf_count); - smp_mb__after_atomic_dec(); + atomic_dec_mb(&dev->dev_qf_count); pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" - " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, + " context: %s\n", cmd->se_tfo->fabric_name, cmd, (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" : (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING" : "UNKNOWN"); if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) transport_write_pending_qf(cmd); - else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) + else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK || + cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) transport_complete_qf(cmd); } } @@ -973,6 +1320,68 @@ transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83) } EXPORT_SYMBOL(transport_set_vpd_ident); +static sense_reason_t +target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev, + unsigned int size) +{ + u32 mtl; + + if (!cmd->se_tfo->max_data_sg_nents) + return TCM_NO_SENSE; + /* + * Check if fabric enforced maximum SGL entries per I/O descriptor + * exceeds se_cmd->data_length. If true, set SCF_UNDERFLOW_BIT + + * residual_count and reduce original cmd->data_length to maximum + * length based on single PAGE_SIZE entry scatter-lists. + */ + mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE); + if (cmd->data_length > mtl) { + /* + * If an existing CDB overflow is present, calculate new residual + * based on CDB size minus fabric maximum transfer length. + * + * If an existing CDB underflow is present, calculate new residual + * based on original cmd->data_length minus fabric maximum transfer + * length. + * + * Otherwise, set the underflow residual based on cmd->data_length + * minus fabric maximum transfer length. 
+ */ + if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { + cmd->residual_count = (size - mtl); + } else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { + u32 orig_dl = size + cmd->residual_count; + cmd->residual_count = (orig_dl - mtl); + } else { + cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; + cmd->residual_count = (cmd->data_length - mtl); + } + cmd->data_length = mtl; + /* + * Reset sbc_check_prot() calculated protection payload + * length based upon the new smaller MTL. + */ + if (cmd->prot_length) { + u32 sectors = (mtl / dev->dev_attrib.block_size); + cmd->prot_length = dev->prot_length * sectors; + } + } + return TCM_NO_SENSE; +} + +/** + * target_cmd_size_check - Check whether there will be a residual. + * @cmd: SCSI command. + * @size: Data buffer size derived from CDB. The data buffer size provided by + * the SCSI transport driver is available in @cmd->data_length. + * + * Compare the data buffer size from the CDB with the data buffer limit from the transport + * header. Set @cmd->residual_count and SCF_OVERFLOW_BIT or SCF_UNDERFLOW_BIT if necessary. + * + * Note: target drivers set @cmd->data_length by calling __target_init_cmd(). + * + * Return: TCM_NO_SENSE + */ sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size) { @@ -981,32 +1390,15 @@ target_cmd_size_check(struct se_cmd *cmd, unsigned int size) if (cmd->unknown_data_length) { cmd->data_length = size; } else if (size != cmd->data_length) { - pr_warn("TARGET_CORE[%s]: Expected Transfer Length:" + pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length:" " %u does not match SCSI CDB Length: %u for SAM Opcode:" - " 0x%02x\n", cmd->se_tfo->get_fabric_name(), + " 0x%02x\n", cmd->se_tfo->fabric_name, cmd->data_length, size, cmd->t_task_cdb[0]); - - if (cmd->data_direction == DMA_TO_DEVICE) { - pr_err("Rejecting underflow/overflow" - " WRITE data\n"); - return TCM_INVALID_CDB_FIELD; - } - /* - * Reject READ_* or WRITE_* with overflow/underflow for - * type SCF_SCSI_DATA_CDB. - */ - if (dev->dev_attrib.block_size != 512) { - pr_err("Failing OVERFLOW/UNDERFLOW for LBA op" - " CDB on non 512-byte sector setup subsystem" - " plugin: %s\n", dev->transport->name); - /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */ - return TCM_INVALID_CDB_FIELD; - } /* - * For the overflow case keep the existing fabric provided - * ->data_length. Otherwise for the underflow case, reset - * ->data_length to the smaller SCSI expected data transfer - * length. + * For READ command for the overflow case keep the existing + * fabric provided ->data_length. Otherwise for the underflow + * case, reset ->data_length to the smaller SCSI expected data + * transfer length. */ if (size > cmd->data_length) { cmd->se_cmd_flags |= SCF_OVERFLOW_BIT; @@ -1014,50 +1406,78 @@ target_cmd_size_check(struct se_cmd *cmd, unsigned int size) } else { cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; cmd->residual_count = (cmd->data_length - size); - cmd->data_length = size; + /* + * Do not truncate ->data_length for WRITE command to + * dump all payload + */ + if (cmd->data_direction == DMA_FROM_DEVICE) { + cmd->data_length = size; + } + } + + if (cmd->data_direction == DMA_TO_DEVICE) { + if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { + pr_err_ratelimited("Rejecting underflow/overflow" + " for WRITE data CDB\n"); + return TCM_INVALID_FIELD_IN_COMMAND_IU; + } + /* + * Some fabric drivers like iscsi-target still expect to + * always reject overflow writes. Reject this case until + * full fabric driver level support for overflow writes + * is introduced tree-wide. 
+ */ + if (size > cmd->data_length) { + pr_err_ratelimited("Rejecting overflow for" + " WRITE control CDB\n"); + return TCM_INVALID_CDB_FIELD; + } } } - return 0; + return target_check_max_data_sg_nents(cmd, dev, size); } /* * Used by fabric modules containing a local struct se_cmd within their * fabric dependent per I/O descriptor. + * + * Preserves the value of @cmd->tag. */ -void transport_init_se_cmd( - struct se_cmd *cmd, - struct target_core_fabric_ops *tfo, - struct se_session *se_sess, - u32 data_length, - int data_direction, - int task_attr, - unsigned char *sense_buffer) +void __target_init_cmd(struct se_cmd *cmd, + const struct target_core_fabric_ops *tfo, + struct se_session *se_sess, u32 data_length, + int data_direction, int task_attr, + unsigned char *sense_buffer, u64 unpacked_lun, + struct target_cmd_counter *cmd_cnt) { - INIT_LIST_HEAD(&cmd->se_lun_node); INIT_LIST_HEAD(&cmd->se_delayed_node); INIT_LIST_HEAD(&cmd->se_qf_node); - INIT_LIST_HEAD(&cmd->se_cmd_list); INIT_LIST_HEAD(&cmd->state_list); - init_completion(&cmd->transport_lun_fe_stop_comp); - init_completion(&cmd->transport_lun_stop_comp); init_completion(&cmd->t_transport_stop_comp); - init_completion(&cmd->cmd_wait_comp); - init_completion(&cmd->task_stop_comp); + cmd->free_compl = NULL; + cmd->abrt_compl = NULL; spin_lock_init(&cmd->t_state_lock); - cmd->transport_state = CMD_T_DEV_ACTIVE; + INIT_WORK(&cmd->work, NULL); + kref_init(&cmd->cmd_kref); + cmd->t_task_cdb = &cmd->__t_task_cdb[0]; cmd->se_tfo = tfo; cmd->se_sess = se_sess; cmd->data_length = data_length; cmd->data_direction = data_direction; cmd->sam_task_attr = task_attr; cmd->sense_buffer = sense_buffer; + cmd->orig_fe_lun = unpacked_lun; + cmd->cmd_cnt = cmd_cnt; + + if (!(cmd->se_cmd_flags & SCF_USE_CPUID)) + cmd->cpuid = raw_smp_processor_id(); cmd->state_active = false; } -EXPORT_SYMBOL(transport_init_se_cmd); +EXPORT_SYMBOL(__target_init_cmd); static sense_reason_t transport_check_alloc_task_attr(struct se_cmd *cmd) @@ -1068,30 +1488,21 @@ transport_check_alloc_task_attr(struct se_cmd *cmd) * Check if SAM Task Attribute emulation is enabled for this * struct se_device storage object */ - if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) + if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) return 0; - if (cmd->sam_task_attr == MSG_ACA_TAG) { + if (cmd->sam_task_attr == TCM_ACA_TAG) { pr_debug("SAM Task Attribute ACA" " emulation is not supported\n"); return TCM_INVALID_CDB_FIELD; } - /* - * Used to determine when ORDERED commands should go from - * Dormant to Active status. 
- */ - cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id); - smp_mb__after_atomic_inc(); - pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n", - cmd->se_ordered_id, cmd->sam_task_attr, - dev->transport->name); + return 0; } sense_reason_t -target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb) +target_cmd_init_cdb(struct se_cmd *cmd, unsigned char *cdb, gfp_t gfp) { - struct se_device *dev = cmd->se_dev; sense_reason_t ret; /* @@ -1102,7 +1513,8 @@ target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb) pr_err("Received SCSI CDB with command_size: %d that" " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE); - return TCM_INVALID_CDB_FIELD; + ret = TCM_INVALID_CDB_FIELD; + goto err; } /* * If the received CDB is larger than TCM_MAX_COMMAND_SIZE, @@ -1110,42 +1522,47 @@ target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb) * setup the pointer from __t_task_cdb to t_task_cdb. */ if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) { - cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), - GFP_KERNEL); + cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), gfp); if (!cmd->t_task_cdb) { pr_err("Unable to allocate cmd->t_task_cdb" " %u > sizeof(cmd->__t_task_cdb): %lu ops\n", scsi_command_size(cdb), (unsigned long)sizeof(cmd->__t_task_cdb)); - return TCM_OUT_OF_RESOURCES; + ret = TCM_OUT_OF_RESOURCES; + goto err; } - } else - cmd->t_task_cdb = &cmd->__t_task_cdb[0]; + } /* * Copy the original CDB into cmd-> */ memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb)); trace_target_sequencer_start(cmd); + return 0; +err: /* - * Check for an existing UNIT ATTENTION condition + * Copy the CDB here to allow trace_target_cmd_complete() to + * print the cdb to the trace buffers. */ - ret = target_scsi3_ua_check(cmd); - if (ret) - return ret; - - ret = target_alua_state_check(cmd); - if (ret) - return ret; + memcpy(cmd->t_task_cdb, cdb, min(scsi_command_size(cdb), + (unsigned int)TCM_MAX_COMMAND_SIZE)); + return ret; +} +EXPORT_SYMBOL(target_cmd_init_cdb); - ret = target_check_reservation(cmd); - if (ret) { - cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; - return ret; - } +sense_reason_t +target_cmd_parse_cdb(struct se_cmd *cmd) +{ + struct se_device *dev = cmd->se_dev; + sense_reason_t ret; ret = dev->transport->parse_cdb(cmd); + if (ret == TCM_UNSUPPORTED_SCSI_OPCODE) + pr_debug_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n", + cmd->se_tfo->fabric_name, + cmd->se_sess->se_node_acl->initiatorname, + cmd->t_task_cdb[0]); if (ret) return ret; @@ -1154,35 +1571,55 @@ target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb) return ret; cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE; - - spin_lock(&cmd->se_lun->lun_sep_lock); - if (cmd->se_lun->lun_sep) - cmd->se_lun->lun_sep->sep_stats.cmd_pdus++; - spin_unlock(&cmd->se_lun->lun_sep_lock); + /* + * If this is the xcopy_lun then we won't have lun_stats since we + * can't export them. + */ + if (cmd->se_lun->lun_stats) + this_cpu_inc(cmd->se_lun->lun_stats->cmd_pdus); return 0; } -EXPORT_SYMBOL(target_setup_cmd_from_cdb); +EXPORT_SYMBOL(target_cmd_parse_cdb); -/* - * Used by fabric module frontends to queue tasks directly. 
- * Many only be used from process context only - */ -int transport_handle_cdb_direct( - struct se_cmd *cmd) +static int __target_submit(struct se_cmd *cmd) { sense_reason_t ret; + might_sleep(); + + /* + * Check if we need to delay processing because of ALUA + * Active/NonOptimized primary access state.. + */ + core_alua_check_nonop_delay(cmd); + + if (cmd->t_data_nents != 0) { + /* + * This is primarily a hack for udev and tcm loop which sends + * INQUIRYs with a single page and expects the data to be + * cleared. + */ + if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && + cmd->data_direction == DMA_FROM_DEVICE) { + struct scatterlist *sgl = cmd->t_data_sg; + unsigned char *buf = NULL; + + BUG_ON(!sgl); + + buf = kmap_local_page(sg_page(sgl)); + if (buf) { + memset(buf + sgl->offset, 0, sgl->length); + kunmap_local(buf); + } + } + } + if (!cmd->se_lun) { dump_stack(); pr_err("cmd->se_lun is NULL\n"); return -EINVAL; } - if (in_interrupt()) { - dump_stack(); - pr_err("transport_generic_handle_cdb cannot be called" - " from interrupt context\n"); - return -EINVAL; - } + /* * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that * outstanding descriptors are handled correctly during shutdown via @@ -1204,9 +1641,8 @@ int transport_handle_cdb_direct( transport_generic_request_failure(cmd, ret); return 0; } -EXPORT_SYMBOL(transport_handle_cdb_direct); -static sense_reason_t +sense_reason_t transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count) { @@ -1226,92 +1662,124 @@ transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl, cmd->t_data_sg = sgl; cmd->t_data_nents = sgl_count; + cmd->t_bidi_data_sg = sgl_bidi; + cmd->t_bidi_data_nents = sgl_bidi_count; - if (sgl_bidi && sgl_bidi_count) { - cmd->t_bidi_data_sg = sgl_bidi; - cmd->t_bidi_data_nents = sgl_bidi_count; - } cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; return 0; } -/* - * target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized - * se_cmd + use pre-allocated SGL memory. - * - * @se_cmd: command descriptor to submit +/** + * target_init_cmd - initialize se_cmd + * @se_cmd: command descriptor to init * @se_sess: associated se_sess for endpoint - * @cdb: pointer to SCSI CDB * @sense: pointer to SCSI sense buffer * @unpacked_lun: unpacked LUN to reference for struct se_lun * @data_length: fabric expected data transfer length - * @task_addr: SAM task attribute + * @task_attr: SAM task attribute * @data_dir: DMA data direction * @flags: flags for command submission from target_sc_flags_tables - * @sgl: struct scatterlist memory for unidirectional mapping - * @sgl_count: scatterlist count for unidirectional mapping - * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping - * @sgl_bidi_count: scatterlist count for bidirectional READ mapping * - * Returns non zero to signal active I/O shutdown failure. All other - * setup exceptions will be returned as a SCSI CHECK_CONDITION response, - * but still return zero here. + * Task tags are supported if the caller has set @se_cmd->tag. * - * This may only be called from process context, and also currently - * assumes internal allocation of fabric payload buffer by target-core. + * Returns: + * - less than zero to signal active I/O shutdown failure. + * - zero on success. + * + * If the fabric driver calls target_stop_session, then it must check the + * return code and handle failures. 
This will never fail for other drivers, + * and the return code can be ignored. */ -int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess, - unsigned char *cdb, unsigned char *sense, u32 unpacked_lun, - u32 data_length, int task_attr, int data_dir, int flags, - struct scatterlist *sgl, u32 sgl_count, - struct scatterlist *sgl_bidi, u32 sgl_bidi_count) +int target_init_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, + unsigned char *sense, u64 unpacked_lun, + u32 data_length, int task_attr, int data_dir, int flags) { struct se_portal_group *se_tpg; - sense_reason_t rc; - int ret; se_tpg = se_sess->se_tpg; BUG_ON(!se_tpg); BUG_ON(se_cmd->se_tfo || se_cmd->se_sess); - BUG_ON(in_interrupt()); + + if (flags & TARGET_SCF_USE_CPUID) + se_cmd->se_cmd_flags |= SCF_USE_CPUID; + /* + * Signal bidirectional data payloads to target-core + */ + if (flags & TARGET_SCF_BIDI_OP) + se_cmd->se_cmd_flags |= SCF_BIDI; + + if (flags & TARGET_SCF_UNKNOWN_SIZE) + se_cmd->unknown_data_length = 1; /* * Initialize se_cmd for target operation. From this point * exceptions are handled by sending exception status via * target_core_fabric_ops->queue_status() callback */ - transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, - data_length, data_dir, task_attr, sense); - if (flags & TARGET_SCF_UNKNOWN_SIZE) - se_cmd->unknown_data_length = 1; + __target_init_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, data_length, + data_dir, task_attr, sense, unpacked_lun, + se_sess->cmd_cnt); + /* - * Obtain struct se_cmd->cmd_kref reference and add new cmd to - * se_sess->sess_cmd_list. A second kref_get here is necessary - * for fabrics using TARGET_SCF_ACK_KREF that expect a second + * Obtain struct se_cmd->cmd_kref reference. A second kref_get here is + * necessary for fabrics using TARGET_SCF_ACK_KREF that expect a second * kref_put() to happen during fabric packet acknowledgement. */ - ret = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF)); - if (ret) - return ret; + return target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF); +} +EXPORT_SYMBOL_GPL(target_init_cmd); + +/** + * target_submit_prep - prepare cmd for submission + * @se_cmd: command descriptor to prep + * @cdb: pointer to SCSI CDB + * @sgl: struct scatterlist memory for unidirectional mapping + * @sgl_count: scatterlist count for unidirectional mapping + * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping + * @sgl_bidi_count: scatterlist count for bidirectional READ mapping + * @sgl_prot: struct scatterlist memory protection information + * @sgl_prot_count: scatterlist count for protection information + * @gfp: gfp allocation type + * + * Returns: + * - less than zero to signal failure. + * - zero on success. + * + * If failure is returned, lio will the callers queue_status to complete + * the cmd. 
+ */ +int target_submit_prep(struct se_cmd *se_cmd, unsigned char *cdb, + struct scatterlist *sgl, u32 sgl_count, + struct scatterlist *sgl_bidi, u32 sgl_bidi_count, + struct scatterlist *sgl_prot, u32 sgl_prot_count, + gfp_t gfp) +{ + sense_reason_t rc; + + rc = target_cmd_init_cdb(se_cmd, cdb, gfp); + if (rc) + goto send_cc_direct; + /* - * Signal bidirectional data payloads to target-core + * Locate se_lun pointer and attach it to struct se_cmd */ - if (flags & TARGET_SCF_BIDI_OP) - se_cmd->se_cmd_flags |= SCF_BIDI; + rc = transport_lookup_cmd_lun(se_cmd); + if (rc) + goto send_cc_direct; + + rc = target_cmd_parse_cdb(se_cmd); + if (rc != 0) + goto generic_fail; + /* - * Locate se_lun pointer and attach it to struct se_cmd + * Save pointers for SGLs containing protection information, + * if present. */ - rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun); - if (rc) { - transport_send_check_condition_and_sense(se_cmd, rc, 0); - target_put_sess_cmd(se_sess, se_cmd); - return 0; + if (sgl_prot_count) { + se_cmd->t_prot_sg = sgl_prot; + se_cmd->t_prot_nents = sgl_prot_count; + se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC; } - rc = target_setup_cmd_from_cdb(se_cmd, cdb); - if (rc != 0) { - transport_generic_request_failure(se_cmd, rc); - return 0; - } /* * When a non zero sgl_count has been passed perform SGL passthrough * mapping for pre-allocated fabric memory instead of having target @@ -1320,46 +1788,26 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess if (sgl_count != 0) { BUG_ON(!sgl); - /* - * A work-around for tcm_loop as some userspace code via - * scsi-generic do not memset their associated read buffers, - * so go ahead and do that here for type non-data CDBs. Also - * note that this is currently guaranteed to be a single SGL - * for this case by target core in target_setup_cmd_from_cdb() - * -> transport_generic_cmd_sequencer(). - */ - if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && - se_cmd->data_direction == DMA_FROM_DEVICE) { - unsigned char *buf = NULL; - - if (sgl) - buf = kmap(sg_page(sgl)) + sgl->offset; - - if (buf) { - memset(buf, 0, sgl->length); - kunmap(sg_page(sgl)); - } - } - rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count, sgl_bidi, sgl_bidi_count); - if (rc != 0) { - transport_generic_request_failure(se_cmd, rc); - return 0; - } + if (rc != 0) + goto generic_fail; } - /* - * Check if we need to delay processing because of ALUA - * Active/NonOptimized primary access state.. - */ - core_alua_check_nonop_delay(se_cmd); - transport_handle_cdb_direct(se_cmd); return 0; + +send_cc_direct: + transport_send_check_condition_and_sense(se_cmd, rc, 0); + target_put_sess_cmd(se_cmd); + return -EIO; + +generic_fail: + transport_generic_request_failure(se_cmd, rc); + return -EIO; } -EXPORT_SYMBOL(target_submit_cmd_map_sgls); +EXPORT_SYMBOL_GPL(target_submit_prep); -/* +/** * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd * * @se_cmd: command descriptor to submit @@ -1368,29 +1816,143 @@ EXPORT_SYMBOL(target_submit_cmd_map_sgls); * @sense: pointer to SCSI sense buffer * @unpacked_lun: unpacked LUN to reference for struct se_lun * @data_length: fabric expected data transfer length - * @task_addr: SAM task attribute + * @task_attr: SAM task attribute * @data_dir: DMA data direction * @flags: flags for command submission from target_sc_flags_tables * - * Returns non zero to signal active I/O shutdown failure. 
All other - * setup exceptions will be returned as a SCSI CHECK_CONDITION response, - * but still return zero here. + * Task tags are supported if the caller has set @se_cmd->tag. * * This may only be called from process context, and also currently * assumes internal allocation of fabric payload buffer by target-core. * * It also assumes interal target core SGL memory allocation. + * + * This function must only be used by drivers that do their own + * sync during shutdown and does not use target_stop_session. If there + * is a failure this function will call into the fabric driver's + * queue_status with a CHECK_CONDITION. */ -int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, - unsigned char *cdb, unsigned char *sense, u32 unpacked_lun, +void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, + unsigned char *cdb, unsigned char *sense, u64 unpacked_lun, u32 data_length, int task_attr, int data_dir, int flags) { - return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense, - unpacked_lun, data_length, task_attr, data_dir, - flags, NULL, 0, NULL, 0); + int rc; + + rc = target_init_cmd(se_cmd, se_sess, sense, unpacked_lun, data_length, + task_attr, data_dir, flags); + WARN(rc, "Invalid target_submit_cmd use. Driver must not use target_stop_session or call target_init_cmd directly.\n"); + if (rc) + return; + + if (target_submit_prep(se_cmd, cdb, NULL, 0, NULL, 0, NULL, 0, + GFP_KERNEL)) + return; + + target_submit(se_cmd); } EXPORT_SYMBOL(target_submit_cmd); + +static struct se_dev_plug *target_plug_device(struct se_device *se_dev) +{ + struct se_dev_plug *se_plug; + + if (!se_dev->transport->plug_device) + return NULL; + + se_plug = se_dev->transport->plug_device(se_dev); + if (!se_plug) + return NULL; + + se_plug->se_dev = se_dev; + /* + * We have a ref to the lun at this point, but the cmds could + * complete before we unplug, so grab a ref to the se_device so we + * can call back into the backend. 
+ */ + config_group_get(&se_dev->dev_group); + return se_plug; +} + +static void target_unplug_device(struct se_dev_plug *se_plug) +{ + struct se_device *se_dev = se_plug->se_dev; + + se_dev->transport->unplug_device(se_plug); + config_group_put(&se_dev->dev_group); +} + +void target_queued_submit_work(struct work_struct *work) +{ + struct se_cmd_queue *sq = container_of(work, struct se_cmd_queue, work); + struct se_cmd *se_cmd, *next_cmd; + struct se_dev_plug *se_plug = NULL; + struct se_device *se_dev = NULL; + struct llist_node *cmd_list; + + cmd_list = llist_del_all(&sq->cmd_list); + if (!cmd_list) + /* Previous call took what we were queued to submit */ + return; + + cmd_list = llist_reverse_order(cmd_list); + llist_for_each_entry_safe(se_cmd, next_cmd, cmd_list, se_cmd_list) { + if (!se_dev) { + se_dev = se_cmd->se_dev; + se_plug = target_plug_device(se_dev); + } + + __target_submit(se_cmd); + } + + if (se_plug) + target_unplug_device(se_plug); +} + +/** + * target_queue_submission - queue the cmd to run on the LIO workqueue + * @se_cmd: command descriptor to submit + */ +static void target_queue_submission(struct se_cmd *se_cmd) +{ + struct se_device *se_dev = se_cmd->se_dev; + int cpu = se_cmd->cpuid; + struct se_cmd_queue *sq; + + sq = &se_dev->queues[cpu].sq; + llist_add(&se_cmd->se_cmd_list, &sq->cmd_list); + queue_work_on(cpu, target_submission_wq, &sq->work); +} + +/** + * target_submit - perform final initialization and submit cmd to LIO core + * @se_cmd: command descriptor to submit + * + * target_submit_prep or something similar must have been called on the cmd, + * and this must be called from process context. + */ +int target_submit(struct se_cmd *se_cmd) +{ + const struct target_core_fabric_ops *tfo = se_cmd->se_sess->se_tpg->se_tpg_tfo; + struct se_dev_attrib *da = &se_cmd->se_dev->dev_attrib; + u8 submit_type; + + if (da->submit_type == TARGET_FABRIC_DEFAULT_SUBMIT) + submit_type = tfo->default_submit_type; + else if (da->submit_type == TARGET_DIRECT_SUBMIT && + tfo->direct_submit_supp) + submit_type = TARGET_DIRECT_SUBMIT; + else + submit_type = TARGET_QUEUE_SUBMIT; + + if (submit_type == TARGET_DIRECT_SUBMIT) + return __target_submit(se_cmd); + + target_queue_submission(se_cmd); + return 0; +} +EXPORT_SYMBOL_GPL(target_submit); + static void target_complete_tmr_failure(struct work_struct *work) { struct se_cmd *se_cmd = container_of(work, struct se_cmd, work); @@ -1398,6 +1960,7 @@ static void target_complete_tmr_failure(struct work_struct *work) se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST; se_cmd->se_tfo->queue_tm_rsp(se_cmd); + transport_lun_remove_cmd(se_cmd); transport_cmd_check_stop_to_fabric(se_cmd); } @@ -1409,7 +1972,7 @@ static void target_complete_tmr_failure(struct work_struct *work) * @se_sess: associated se_sess for endpoint * @sense: pointer to SCSI sense buffer * @unpacked_lun: unpacked LUN to reference for struct se_lun - * @fabric_context: fabric context for TMR req + * @fabric_tmr_ptr: fabric context for TMR req * @tm_type: Type of TM request * @gfp: gfp type for caller * @tag: referenced task tag for TMR_ABORT_TASK @@ -1419,9 +1982,9 @@ static void target_complete_tmr_failure(struct work_struct *work) **/ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, - unsigned char *sense, u32 unpacked_lun, + unsigned char *sense, u64 unpacked_lun, void *fabric_tmr_ptr, unsigned char tm_type, - gfp_t gfp, unsigned int tag, int flags) + gfp_t gfp, u64 tag, int flags) { struct se_portal_group *se_tpg; int ret; @@ -1429,8 +1992,9 
@@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, se_tpg = se_sess->se_tpg; BUG_ON(!se_tpg); - transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, - 0, DMA_NONE, MSG_SIMPLE_TAG, sense); + __target_init_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, + 0, DMA_NONE, TCM_SIMPLE_TAG, sense, unpacked_lun, + se_sess->cmd_cnt); /* * FIXME: Currently expect caller to handle se_cmd->se_tmr_req * allocation failure. @@ -1443,51 +2007,29 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, se_cmd->se_tmr_req->ref_task_tag = tag; /* See target_submit_cmd for commentary */ - ret = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF)); + ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF); if (ret) { core_tmr_release_req(se_cmd->se_tmr_req); return ret; } - ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun); - if (ret) { - /* - * For callback during failure handling, push this work off - * to process context with TMR_LUN_DOES_NOT_EXIST status. - */ - INIT_WORK(&se_cmd->work, target_complete_tmr_failure); - schedule_work(&se_cmd->work); - return 0; - } + ret = transport_lookup_tmr_lun(se_cmd); + if (ret) + goto failure; + transport_generic_handle_tmr(se_cmd); return 0; -} -EXPORT_SYMBOL(target_submit_tmr); - -/* - * If the cmd is active, request it to be stopped and sleep until it - * has completed. - */ -bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags) -{ - bool was_active = false; - - if (cmd->transport_state & CMD_T_BUSY) { - cmd->transport_state |= CMD_T_REQUEST_STOP; - spin_unlock_irqrestore(&cmd->t_state_lock, *flags); - - pr_debug("cmd %p waiting to complete\n", cmd); - wait_for_completion(&cmd->task_stop_comp); - pr_debug("cmd %p stopped successfully\n", cmd); - spin_lock_irqsave(&cmd->t_state_lock, *flags); - cmd->transport_state &= ~CMD_T_REQUEST_STOP; - cmd->transport_state &= ~CMD_T_BUSY; - was_active = true; - } - - return was_active; + /* + * For callback during failure handling, push this work off + * to process context with TMR_LUN_DOES_NOT_EXIST status. + */ +failure: + INIT_WORK(&se_cmd->work, target_complete_tmr_failure); + schedule_work(&se_cmd->work); + return 0; } +EXPORT_SYMBOL(target_submit_tmr); /* * Handle SAM-esque emulation for generic transport request failures. 
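The per-CPU submission queue introduced above leans on the kernel's lock-free llist primitives: fabric callers push commands with llist_add() in target_queue_submission(), and target_queued_submit_work() later drains the whole list with llist_del_all(), restores submission order with llist_reverse_order(), and only then plugs the device and submits each command. As a rough illustration of that queue-then-drain pattern, here is a minimal, single-file userspace C model; the cmd_node/sq_* names are made up for the sketch and are not part of the target core API.

	#include <stdatomic.h>
	#include <stdio.h>
	#include <stdlib.h>

	/*
	 * Minimal model of the llist pattern used by target_queue_submission()
	 * and target_queued_submit_work(): producers push onto a lock-free LIFO,
	 * the worker takes the whole list in one shot and reverses it so the
	 * entries are processed in submission (FIFO) order.
	 */
	struct cmd_node {
		int tag;			/* stands in for the queued se_cmd */
		struct cmd_node *next;
	};

	static _Atomic(struct cmd_node *) sq_head = NULL;

	static void sq_push(struct cmd_node *n)		/* like llist_add() */
	{
		struct cmd_node *old = atomic_load(&sq_head);

		do {
			n->next = old;
		} while (!atomic_compare_exchange_weak(&sq_head, &old, n));
	}

	static struct cmd_node *sq_take_all(void)	/* like llist_del_all() */
	{
		return atomic_exchange(&sq_head, NULL);
	}

	static struct cmd_node *sq_reverse(struct cmd_node *n)	/* like llist_reverse_order() */
	{
		struct cmd_node *rev = NULL;

		while (n) {
			struct cmd_node *next = n->next;

			n->next = rev;
			rev = n;
			n = next;
		}
		return rev;
	}

	int main(void)
	{
		for (int i = 0; i < 4; i++) {
			struct cmd_node *n = malloc(sizeof(*n));

			if (!n)
				return 1;
			n->tag = i;
			sq_push(n);
		}

		/* Worker side: drain once, restore FIFO order, "submit" each entry. */
		for (struct cmd_node *n = sq_reverse(sq_take_all()); n; ) {
			struct cmd_node *next = n->next;

			printf("submitting tag %d\n", n->tag);
			free(n);
			n = next;
		}
		return 0;
	}

Draining in one batch is also what makes the surrounding plug/unplug calls worthwhile: the backend sees one plugged burst per worker invocation rather than a separate submission per command.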
@@ -1495,24 +2037,26 @@ bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags) void transport_generic_request_failure(struct se_cmd *cmd, sense_reason_t sense_reason) { - int ret = 0; + int ret = 0, post_ret; - pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x" - " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd), - cmd->t_task_cdb[0]); - pr_debug("-----[ i_state: %d t_state: %d sense_reason: %d\n", - cmd->se_tfo->get_cmd_state(cmd), - cmd->t_state, sense_reason); - pr_debug("-----[ CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n", - (cmd->transport_state & CMD_T_ACTIVE) != 0, - (cmd->transport_state & CMD_T_STOP) != 0, - (cmd->transport_state & CMD_T_SENT) != 0); + pr_debug("-----[ Storage Engine Exception; sense_reason %d\n", + sense_reason); + target_show_cmd("-----[ ", cmd); /* * For SAM Task Attribute emulation for failed struct se_cmd */ transport_complete_task_attr(cmd); + if (cmd->transport_complete_callback) + cmd->transport_complete_callback(cmd, false, &post_ret); + + if (cmd->transport_state & CMD_T_ABORTED) { + INIT_WORK(&cmd->work, target_abort_work); + queue_work(target_completion_wq, &cmd->work); + return; + } + switch (sense_reason) { case TCM_NON_EXISTENT_LUN: case TCM_UNSUPPORTED_SCSI_OPCODE: @@ -1525,11 +2069,26 @@ void transport_generic_request_failure(struct se_cmd *cmd, case TCM_ADDRESS_OUT_OF_RANGE: case TCM_CHECK_CONDITION_ABORT_CMD: case TCM_CHECK_CONDITION_UNIT_ATTENTION: - case TCM_CHECK_CONDITION_NOT_READY: + case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED: + case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED: + case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED: + case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE: + case TCM_TOO_MANY_TARGET_DESCS: + case TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE: + case TCM_TOO_MANY_SEGMENT_DESCS: + case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE: + case TCM_INVALID_FIELD_IN_COMMAND_IU: + case TCM_ALUA_TG_PT_STANDBY: + case TCM_ALUA_TG_PT_UNAVAILABLE: + case TCM_ALUA_STATE_TRANSITION: + case TCM_ALUA_OFFLINE: break; case TCM_OUT_OF_RESOURCES: - sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - break; + cmd->scsi_status = SAM_STAT_TASK_SET_FULL; + goto queue_status; + case TCM_LUN_BUSY: + cmd->scsi_status = SAM_STAT_BUSY; + goto queue_status; case TCM_RESERVATION_CONFLICT: /* * No SENSE Data payload for this case, set SCSI Status @@ -1546,16 +2105,14 @@ void transport_generic_request_failure(struct se_cmd *cmd, * See spc4r17, section 7.4.6 Control Mode Page, Table 349 */ if (cmd->se_sess && - cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2) - core_scsi3_ua_allocate(cmd->se_sess->se_node_acl, - cmd->orig_fe_lun, 0x2C, - ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); + cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl + == TARGET_UA_INTLCK_CTRL_ESTABLISH_UA) { + target_ua_allocate_lun(cmd->se_sess->se_node_acl, + cmd->orig_fe_lun, 0x2C, + ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); + } - trace_target_cmd_complete(cmd); - ret = cmd->se_tfo-> queue_status(cmd); - if (ret == -EAGAIN || ret == -ENOMEM) - goto queue_full; - goto check_stop; + goto queue_status; default: pr_err("Unknown transport error for CDB 0x%02x: %d\n", cmd->t_task_cdb[0], sense_reason); @@ -1564,142 +2121,185 @@ void transport_generic_request_failure(struct se_cmd *cmd, } ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0); - if (ret == -EAGAIN || ret == -ENOMEM) + if (ret) goto queue_full; check_stop: transport_lun_remove_cmd(cmd); - if (!transport_cmd_check_stop_to_fabric(cmd)) - ; + transport_cmd_check_stop_to_fabric(cmd); return; 
+queue_status: + trace_target_cmd_complete(cmd); + ret = cmd->se_tfo->queue_status(cmd); + if (!ret) + goto check_stop; queue_full: - cmd->t_state = TRANSPORT_COMPLETE_QF_OK; - transport_handle_queue_full(cmd, cmd->se_dev); + transport_handle_queue_full(cmd, cmd->se_dev, ret, false); } EXPORT_SYMBOL(transport_generic_request_failure); -static void __target_execute_cmd(struct se_cmd *cmd) +void __target_execute_cmd(struct se_cmd *cmd, bool do_checks) { sense_reason_t ret; - if (cmd->execute_cmd) { - ret = cmd->execute_cmd(cmd); + if (!cmd->execute_cmd) { + ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + goto err; + } + if (do_checks) { + /* + * Check for an existing UNIT ATTENTION condition after + * target_handle_task_attr() has done SAM task attr + * checking, and possibly have already defered execution + * out to target_restart_delayed_cmds() context. + */ + ret = target_scsi3_ua_check(cmd); + if (ret) + goto err; + + ret = target_alua_state_check(cmd); + if (ret) + goto err; + + ret = target_check_reservation(cmd); if (ret) { + cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; + goto err; + } + } + + ret = cmd->execute_cmd(cmd); + if (!ret) + return; +err: + spin_lock_irq(&cmd->t_state_lock); + cmd->transport_state &= ~CMD_T_SENT; + spin_unlock_irq(&cmd->t_state_lock); + + transport_generic_request_failure(cmd, ret); +} + +static int target_write_prot_action(struct se_cmd *cmd) +{ + u32 sectors; + /* + * Perform WRITE_INSERT of PI using software emulation when backend + * device has PI enabled, if the transport has not already generated + * PI using hardware WRITE_INSERT offload. + */ + switch (cmd->prot_op) { + case TARGET_PROT_DOUT_INSERT: + if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT)) + sbc_dif_generate(cmd); + break; + case TARGET_PROT_DOUT_STRIP: + if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_STRIP) + break; + + sectors = cmd->data_length >> ilog2(cmd->se_dev->dev_attrib.block_size); + cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba, + sectors, 0, cmd->t_prot_sg, 0); + if (unlikely(cmd->pi_err)) { spin_lock_irq(&cmd->t_state_lock); - cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT); + cmd->transport_state &= ~CMD_T_SENT; spin_unlock_irq(&cmd->t_state_lock); - - transport_generic_request_failure(cmd, ret); + transport_generic_request_failure(cmd, cmd->pi_err); + return -1; } + break; + default: + break; } + + return 0; } static bool target_handle_task_attr(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; + unsigned long flags; - if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) + if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) return false; + cmd->se_cmd_flags |= SCF_TASK_ATTR_SET; + /* * Check for the existence of HEAD_OF_QUEUE, and if true return 1 * to allow the passed struct se_cmd list of tasks to the front of the list. */ switch (cmd->sam_task_attr) { - case MSG_HEAD_TAG: - pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x, " - "se_ordered_id: %u\n", - cmd->t_task_cdb[0], cmd->se_ordered_id); + case TCM_HEAD_TAG: + pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n", + cmd->t_task_cdb[0]); return false; - case MSG_ORDERED_TAG: - atomic_inc(&dev->dev_ordered_sync); - smp_mb__after_atomic_inc(); - - pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, " - " se_ordered_id: %u\n", - cmd->t_task_cdb[0], cmd->se_ordered_id); - - /* - * Execute an ORDERED command if no other older commands - * exist that need to be completed first. 
- */ - if (!atomic_read(&dev->simple_cmds)) - return false; + case TCM_ORDERED_TAG: + pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n", + cmd->t_task_cdb[0]); break; default: /* * For SIMPLE and UNTAGGED Task Attribute commands */ - atomic_inc(&dev->simple_cmds); - smp_mb__after_atomic_inc(); +retry: + if (percpu_ref_tryget_live(&dev->non_ordered)) + return false; + break; } - if (atomic_read(&dev->dev_ordered_sync) == 0) - return false; + spin_lock_irqsave(&dev->delayed_cmd_lock, flags); + if (cmd->sam_task_attr == TCM_SIMPLE_TAG && + !percpu_ref_is_dying(&dev->non_ordered)) { + spin_unlock_irqrestore(&dev->delayed_cmd_lock, flags); + /* We raced with the last ordered completion so retry. */ + goto retry; + } else if (!percpu_ref_is_dying(&dev->non_ordered)) { + percpu_ref_kill(&dev->non_ordered); + } + + spin_lock(&cmd->t_state_lock); + cmd->transport_state &= ~CMD_T_SENT; + spin_unlock(&cmd->t_state_lock); - spin_lock(&dev->delayed_cmd_lock); list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list); - spin_unlock(&dev->delayed_cmd_lock); + spin_unlock_irqrestore(&dev->delayed_cmd_lock, flags); - pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to" - " delayed CMD list, se_ordered_id: %u\n", - cmd->t_task_cdb[0], cmd->sam_task_attr, - cmd->se_ordered_id); + pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD listn", + cmd->t_task_cdb[0], cmd->sam_task_attr); + /* + * We may have no non ordered cmds when this function started or we + * could have raced with the last simple/head cmd completing, so kick + * the delayed handler here. + */ + schedule_work(&dev->delayed_cmd_work); return true; } void target_execute_cmd(struct se_cmd *cmd) { /* - * If the received CDB has aleady been aborted stop processing it here. - */ - if (transport_check_aborted_status(cmd, 1)) { - complete(&cmd->transport_lun_stop_comp); - return; - } - - /* - * Determine if IOCTL context caller in requesting the stopping of this - * command for LUN shutdown purposes. - */ - spin_lock_irq(&cmd->t_state_lock); - if (cmd->transport_state & CMD_T_LUN_STOP) { - pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n", - __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd)); - - cmd->transport_state &= ~CMD_T_ACTIVE; - spin_unlock_irq(&cmd->t_state_lock); - complete(&cmd->transport_lun_stop_comp); - return; - } - /* * Determine if frontend context caller is requesting the stopping of * this command for frontend exceptions. + * + * If the received CDB has already been aborted stop processing it here. 
*/ - if (cmd->transport_state & CMD_T_STOP) { - pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n", - __func__, __LINE__, - cmd->se_tfo->get_task_tag(cmd)); - - spin_unlock_irq(&cmd->t_state_lock); - complete(&cmd->t_transport_stop_comp); + if (target_cmd_interrupted(cmd)) return; - } + spin_lock_irq(&cmd->t_state_lock); cmd->t_state = TRANSPORT_PROCESSING; - cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT; + cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT; spin_unlock_irq(&cmd->t_state_lock); - if (target_handle_task_attr(cmd)) { - spin_lock_irq(&cmd->t_state_lock); - cmd->transport_state &= ~CMD_T_BUSY|CMD_T_SENT; - spin_unlock_irq(&cmd->t_state_lock); + if (target_write_prot_action(cmd)) + return; + + if (target_handle_task_attr(cmd)) return; - } - __target_execute_cmd(cmd); + __target_execute_cmd(cmd, true); } EXPORT_SYMBOL(target_execute_cmd); @@ -1707,27 +2307,59 @@ EXPORT_SYMBOL(target_execute_cmd); * Process all commands up to the last received ORDERED task attribute which * requires another blocking boundary */ -static void target_restart_delayed_cmds(struct se_device *dev) +void target_do_delayed_work(struct work_struct *work) { - for (;;) { + struct se_device *dev = container_of(work, struct se_device, + delayed_cmd_work); + + spin_lock(&dev->delayed_cmd_lock); + while (!dev->ordered_sync_in_progress) { struct se_cmd *cmd; - spin_lock(&dev->delayed_cmd_lock); - if (list_empty(&dev->delayed_cmd_list)) { - spin_unlock(&dev->delayed_cmd_lock); + /* + * We can be woken up early/late due to races or the + * extra wake up we do when adding commands to the list. + * We check for both cases here. + */ + if (list_empty(&dev->delayed_cmd_list) || + !percpu_ref_is_zero(&dev->non_ordered)) break; - } cmd = list_entry(dev->delayed_cmd_list.next, struct se_cmd, se_delayed_node); + cmd->se_cmd_flags |= SCF_TASK_ORDERED_SYNC; + cmd->transport_state |= CMD_T_SENT; + + dev->ordered_sync_in_progress = true; + list_del(&cmd->se_delayed_node); spin_unlock(&dev->delayed_cmd_lock); - __target_execute_cmd(cmd); - - if (cmd->sam_task_attr == MSG_ORDERED_TAG) - break; + __target_execute_cmd(cmd, true); + spin_lock(&dev->delayed_cmd_lock); } + spin_unlock(&dev->delayed_cmd_lock); +} + +static void transport_complete_ordered_sync(struct se_cmd *cmd) +{ + struct se_device *dev = cmd->se_dev; + unsigned long flags; + + spin_lock_irqsave(&dev->delayed_cmd_lock, flags); + dev->dev_cur_ordered_id++; + + pr_debug("Incremented dev_cur_ordered_id: %u for type %d\n", + dev->dev_cur_ordered_id, cmd->sam_task_attr); + + dev->ordered_sync_in_progress = false; + + if (list_empty(&dev->delayed_cmd_list)) + percpu_ref_resurrect(&dev->non_ordered); + else + schedule_work(&dev->delayed_cmd_work); + + spin_unlock_irqrestore(&dev->delayed_cmd_lock, flags); } /* @@ -1738,31 +2370,28 @@ static void transport_complete_task_attr(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; - if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) + if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) return; - if (cmd->sam_task_attr == MSG_SIMPLE_TAG) { - atomic_dec(&dev->simple_cmds); - smp_mb__after_atomic_dec(); - dev->dev_cur_ordered_id++; - pr_debug("Incremented dev->dev_cur_ordered_id: %u for" - " SIMPLE: %u\n", dev->dev_cur_ordered_id, - cmd->se_ordered_id); - } else if (cmd->sam_task_attr == MSG_HEAD_TAG) { - dev->dev_cur_ordered_id++; - pr_debug("Incremented dev_cur_ordered_id: %u for" - " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id, - cmd->se_ordered_id); - } else if (cmd->sam_task_attr == 
MSG_ORDERED_TAG) { - atomic_dec(&dev->dev_ordered_sync); - smp_mb__after_atomic_dec(); + if (!(cmd->se_cmd_flags & SCF_TASK_ATTR_SET)) + return; + + cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET; - dev->dev_cur_ordered_id++; - pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:" - " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id); + if (cmd->se_cmd_flags & SCF_TASK_ORDERED_SYNC) { + transport_complete_ordered_sync(cmd); + return; } - target_restart_delayed_cmds(dev); + switch (cmd->sam_task_attr) { + case TCM_SIMPLE_TAG: + percpu_ref_put(&dev->non_ordered); + break; + case TCM_ORDERED_TAG: + /* All ordered should have been executed as sync */ + WARN_ON(1); + break; + } } static void transport_complete_qf(struct se_cmd *cmd) @@ -1770,27 +2399,54 @@ static void transport_complete_qf(struct se_cmd *cmd) int ret = 0; transport_complete_task_attr(cmd); + /* + * If a fabric driver ->write_pending() or ->queue_data_in() callback + * has returned neither -ENOMEM or -EAGAIN, assume it's fatal and + * the same callbacks should not be retried. Return CHECK_CONDITION + * if a scsi_status is not already set. + * + * If a fabric driver ->queue_status() has returned non zero, always + * keep retrying no matter what.. + */ + if (cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) { + if (cmd->scsi_status) + goto queue_status; - if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { - trace_target_cmd_complete(cmd); - ret = cmd->se_tfo->queue_status(cmd); - if (ret) - goto out; + translate_sense_reason(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE); + goto queue_status; } + /* + * Check if we need to send a sense buffer from + * the struct se_cmd in question. We do NOT want + * to take this path of the IO has been marked as + * needing to be treated like a "normal read". This + * is the case if it's a tape read, and either the + * FM, EOM, or ILI bits are set, but there is no + * sense data. + */ + if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) && + cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) + goto queue_status; + switch (cmd->data_direction) { case DMA_FROM_DEVICE: + /* queue status if not treating this as a normal read */ + if (cmd->scsi_status && + !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL)) + goto queue_status; + trace_target_cmd_complete(cmd); ret = cmd->se_tfo->queue_data_in(cmd); break; case DMA_TO_DEVICE: - if (cmd->t_bidi_data_sg) { + if (cmd->se_cmd_flags & SCF_BIDI) { ret = cmd->se_tfo->queue_data_in(cmd); - if (ret < 0) - break; + break; } - /* Fall through for DMA_TO_DEVICE */ + fallthrough; case DMA_NONE: +queue_status: trace_target_cmd_complete(cmd); ret = cmd->se_tfo->queue_status(cmd); break; @@ -1798,28 +2454,69 @@ static void transport_complete_qf(struct se_cmd *cmd) break; } -out: if (ret < 0) { - transport_handle_queue_full(cmd, cmd->se_dev); + transport_handle_queue_full(cmd, cmd->se_dev, ret, false); return; } transport_lun_remove_cmd(cmd); transport_cmd_check_stop_to_fabric(cmd); } -static void transport_handle_queue_full( - struct se_cmd *cmd, - struct se_device *dev) +static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev, + int err, bool write_pending) { + /* + * -EAGAIN or -ENOMEM signals retry of ->write_pending() and/or + * ->queue_data_in() callbacks from new process context. + * + * Otherwise for other errors, transport_complete_qf() will send + * CHECK_CONDITION via ->queue_status() instead of attempting to + * retry associated fabric driver data-transfer callbacks. 
+ */ + if (err == -EAGAIN || err == -ENOMEM) { + cmd->t_state = (write_pending) ? TRANSPORT_COMPLETE_QF_WP : + TRANSPORT_COMPLETE_QF_OK; + } else { + pr_warn_ratelimited("Got unknown fabric queue status: %d\n", err); + cmd->t_state = TRANSPORT_COMPLETE_QF_ERR; + } + spin_lock_irq(&dev->qf_cmd_lock); list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); - atomic_inc(&dev->dev_qf_count); - smp_mb__after_atomic_inc(); + atomic_inc_mb(&dev->dev_qf_count); spin_unlock_irq(&cmd->se_dev->qf_cmd_lock); schedule_work(&cmd->se_dev->qf_work_queue); } +static bool target_read_prot_action(struct se_cmd *cmd) +{ + switch (cmd->prot_op) { + case TARGET_PROT_DIN_STRIP: + if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) { + u32 sectors = cmd->data_length >> + ilog2(cmd->se_dev->dev_attrib.block_size); + + cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba, + sectors, 0, cmd->t_prot_sg, + 0); + if (cmd->pi_err) + return true; + } + break; + case TARGET_PROT_DIN_INSERT: + if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_INSERT) + break; + + sbc_dif_generate(cmd); + break; + default: + break; + } + + return false; +} + static void target_complete_ok_work(struct work_struct *work) { struct se_cmd *cmd = container_of(work, struct se_cmd, work); @@ -1841,13 +2538,19 @@ static void target_complete_ok_work(struct work_struct *work) /* * Check if we need to send a sense buffer from - * the struct se_cmd in question. + * the struct se_cmd in question. We do NOT want + * to take this path of the IO has been marked as + * needing to be treated like a "normal read". This + * is the case if it's a tape read, and either the + * FM, EOM, or ILI bits are set, but there is no + * sense data. */ - if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { + if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) && + cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { WARN_ON(!cmd->scsi_status); ret = transport_send_check_condition_and_sense( cmd, 0, 1); - if (ret == -EAGAIN || ret == -ENOMEM) + if (ret) goto queue_full; transport_lun_remove_cmd(cmd); @@ -1856,52 +2559,95 @@ static void target_complete_ok_work(struct work_struct *work) } /* * Check for a callback, used by amongst other things - * XDWRITE_READ_10 emulation. + * XDWRITE_READ_10 and COMPARE_AND_WRITE emulation. */ - if (cmd->transport_complete_callback) - cmd->transport_complete_callback(cmd); + if (cmd->transport_complete_callback) { + sense_reason_t rc; + bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE); + bool zero_dl = !(cmd->data_length); + int post_ret = 0; + + rc = cmd->transport_complete_callback(cmd, true, &post_ret); + if (!rc && !post_ret) { + if (caw && zero_dl) + goto queue_rsp; + + return; + } else if (rc) { + ret = transport_send_check_condition_and_sense(cmd, + rc, 0); + if (ret) + goto queue_full; + + transport_lun_remove_cmd(cmd); + transport_cmd_check_stop_to_fabric(cmd); + return; + } + } +queue_rsp: switch (cmd->data_direction) { case DMA_FROM_DEVICE: - spin_lock(&cmd->se_lun->lun_sep_lock); - if (cmd->se_lun->lun_sep) { - cmd->se_lun->lun_sep->sep_stats.tx_data_octets += - cmd->data_length; + /* + * if this is a READ-type IO, but SCSI status + * is set, then skip returning data and just + * return the status -- unless this IO is marked + * as needing to be treated as a normal read, + * in which case we want to go ahead and return + * the data. This happens, for example, for tape + * reads with the FM, EOM, or ILI bits set, with + * no sense data. 
+ */ + if (cmd->scsi_status && + !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL)) + goto queue_status; + + if (cmd->se_lun->lun_stats) + this_cpu_add(cmd->se_lun->lun_stats->tx_data_octets, + cmd->data_length); + /* + * Perform READ_STRIP of PI using software emulation when + * backend had PI enabled, if the transport will not be + * performing hardware READ_STRIP offload. + */ + if (target_read_prot_action(cmd)) { + ret = transport_send_check_condition_and_sense(cmd, + cmd->pi_err, 0); + if (ret) + goto queue_full; + + transport_lun_remove_cmd(cmd); + transport_cmd_check_stop_to_fabric(cmd); + return; } - spin_unlock(&cmd->se_lun->lun_sep_lock); trace_target_cmd_complete(cmd); ret = cmd->se_tfo->queue_data_in(cmd); - if (ret == -EAGAIN || ret == -ENOMEM) + if (ret) goto queue_full; break; case DMA_TO_DEVICE: - spin_lock(&cmd->se_lun->lun_sep_lock); - if (cmd->se_lun->lun_sep) { - cmd->se_lun->lun_sep->sep_stats.rx_data_octets += - cmd->data_length; - } - spin_unlock(&cmd->se_lun->lun_sep_lock); + if (cmd->se_lun->lun_stats) + this_cpu_add(cmd->se_lun->lun_stats->rx_data_octets, + cmd->data_length); /* * Check if we need to send READ payload for BIDI-COMMAND */ - if (cmd->t_bidi_data_sg) { - spin_lock(&cmd->se_lun->lun_sep_lock); - if (cmd->se_lun->lun_sep) { - cmd->se_lun->lun_sep->sep_stats.tx_data_octets += - cmd->data_length; - } - spin_unlock(&cmd->se_lun->lun_sep_lock); + if (cmd->se_cmd_flags & SCF_BIDI) { + if (cmd->se_lun->lun_stats) + this_cpu_add(cmd->se_lun->lun_stats->tx_data_octets, + cmd->data_length); ret = cmd->se_tfo->queue_data_in(cmd); - if (ret == -EAGAIN || ret == -ENOMEM) + if (ret) goto queue_full; break; } - /* Fall through for DMA_TO_DEVICE */ + fallthrough; case DMA_NONE: +queue_status: trace_target_cmd_complete(cmd); ret = cmd->se_tfo->queue_status(cmd); - if (ret == -EAGAIN || ret == -ENOMEM) + if (ret) goto queue_full; break; default: @@ -1915,69 +2661,65 @@ static void target_complete_ok_work(struct work_struct *work) queue_full: pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p," " data_direction: %d\n", cmd, cmd->data_direction); - cmd->t_state = TRANSPORT_COMPLETE_QF_OK; - transport_handle_queue_full(cmd, cmd->se_dev); + + transport_handle_queue_full(cmd, cmd->se_dev, ret, false); } -static inline void transport_free_sgl(struct scatterlist *sgl, int nents) +void target_free_sgl(struct scatterlist *sgl, int nents) { - struct scatterlist *sg; - int count; + sgl_free_n_order(sgl, nents, 0); +} +EXPORT_SYMBOL(target_free_sgl); - for_each_sg(sgl, sg, nents, count) - __free_page(sg_page(sg)); +static inline void transport_reset_sgl_orig(struct se_cmd *cmd) +{ + /* + * Check for saved t_data_sg that may be used for COMPARE_AND_WRITE + * emulation, and free + reset pointers if necessary.. 
+ */ + if (!cmd->t_data_sg_orig) + return; - kfree(sgl); + kfree(cmd->t_data_sg); + cmd->t_data_sg = cmd->t_data_sg_orig; + cmd->t_data_sg_orig = NULL; + cmd->t_data_nents = cmd->t_data_nents_orig; + cmd->t_data_nents_orig = 0; } static inline void transport_free_pages(struct se_cmd *cmd) { - if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) + if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) { + target_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents); + cmd->t_prot_sg = NULL; + cmd->t_prot_nents = 0; + } + + if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) { + /* + * Release special case READ buffer payload required for + * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE + */ + if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) { + target_free_sgl(cmd->t_bidi_data_sg, + cmd->t_bidi_data_nents); + cmd->t_bidi_data_sg = NULL; + cmd->t_bidi_data_nents = 0; + } + transport_reset_sgl_orig(cmd); return; + } + transport_reset_sgl_orig(cmd); - transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents); + target_free_sgl(cmd->t_data_sg, cmd->t_data_nents); cmd->t_data_sg = NULL; cmd->t_data_nents = 0; - transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents); + target_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents); cmd->t_bidi_data_sg = NULL; cmd->t_bidi_data_nents = 0; } -/** - * transport_release_cmd - free a command - * @cmd: command to free - * - * This routine unconditionally frees a command, and reference counting - * or list removal must be done in the caller. - */ -static int transport_release_cmd(struct se_cmd *cmd) -{ - BUG_ON(!cmd->se_tfo); - - if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) - core_tmr_release_req(cmd->se_tmr_req); - if (cmd->t_task_cdb != cmd->__t_task_cdb) - kfree(cmd->t_task_cdb); - /* - * If this cmd has been setup with target_get_sess_cmd(), drop - * the kref and call ->release_cmd() in kref callback. - */ - return target_put_sess_cmd(cmd->se_sess, cmd); -} - -/** - * transport_put_cmd - release a reference to a command - * @cmd: command to release - * - * This routine releases our reference to the command and frees it if possible. - */ -static int transport_put_cmd(struct se_cmd *cmd) -{ - transport_free_pages(cmd); - return transport_release_cmd(cmd); -} - void *transport_kmap_data_sg(struct se_cmd *cmd) { struct scatterlist *sg = cmd->t_data_sg; @@ -1997,7 +2739,7 @@ void *transport_kmap_data_sg(struct se_cmd *cmd) return kmap(sg_page(sg)) + sg->offset; /* >1 page. use vmap */ - pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL); + pages = kmalloc_array(cmd->t_data_nents, sizeof(*pages), GFP_KERNEL); if (!pages) return NULL; @@ -2029,46 +2771,16 @@ void transport_kunmap_data_sg(struct se_cmd *cmd) } EXPORT_SYMBOL(transport_kunmap_data_sg); -static int -transport_generic_get_mem(struct se_cmd *cmd) +int +target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length, + bool zero_page, bool chainable) { - u32 length = cmd->data_length; - unsigned int nents; - struct page *page; - gfp_t zero_flag; - int i = 0; - - nents = DIV_ROUND_UP(length, PAGE_SIZE); - cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL); - if (!cmd->t_data_sg) - return -ENOMEM; - - cmd->t_data_nents = nents; - sg_init_table(cmd->t_data_sg, nents); - - zero_flag = cmd->se_cmd_flags & SCF_SCSI_DATA_CDB ? 
0 : __GFP_ZERO; - - while (length) { - u32 page_len = min_t(u32, length, PAGE_SIZE); - page = alloc_page(GFP_KERNEL | zero_flag); - if (!page) - goto out; - - sg_set_page(&cmd->t_data_sg[i], page, page_len, 0); - length -= page_len; - i++; - } - return 0; + gfp_t gfp = GFP_KERNEL | (zero_page ? __GFP_ZERO : 0); -out: - while (i > 0) { - i--; - __free_page(sg_page(&cmd->t_data_sg[i])); - } - kfree(cmd->t_data_sg); - cmd->t_data_sg = NULL; - return -ENOMEM; + *sgl = sgl_alloc_order(length, 0, chainable, gfp, nents); + return *sgl ? 0 : -ENOMEM; } +EXPORT_SYMBOL(target_alloc_sgl); /* * Allocate any required resources to execute the command. For writes we @@ -2078,16 +2790,59 @@ out: sense_reason_t transport_generic_new_cmd(struct se_cmd *cmd) { + unsigned long flags; int ret = 0; + bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB); + + if (cmd->prot_op != TARGET_PROT_NORMAL && + !(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) { + ret = target_alloc_sgl(&cmd->t_prot_sg, &cmd->t_prot_nents, + cmd->prot_length, true, false); + if (ret < 0) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } /* - * Determine is the TCM fabric module has already allocated physical + * Determine if the TCM fabric module has already allocated physical * memory, and is directly calling transport_generic_map_mem_to_cmd() * beforehand. */ if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) && cmd->data_length) { - ret = transport_generic_get_mem(cmd); + + if ((cmd->se_cmd_flags & SCF_BIDI) || + (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) { + u32 bidi_length; + + if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) + bidi_length = cmd->t_task_nolb * + cmd->se_dev->dev_attrib.block_size; + else + bidi_length = cmd->data_length; + + ret = target_alloc_sgl(&cmd->t_bidi_data_sg, + &cmd->t_bidi_data_nents, + bidi_length, zero_flag, false); + if (ret < 0) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } + + ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents, + cmd->data_length, zero_flag, false); + if (ret < 0) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && + cmd->data_length) { + /* + * Special case for COMPARE_AND_WRITE with fabrics + * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC. + */ + u32 caw_length = cmd->t_task_nolb * + cmd->se_dev->dev_attrib.block_size; + + ret = target_alloc_sgl(&cmd->t_bidi_data_sg, + &cmd->t_bidi_data_nents, + caw_length, zero_flag, false); if (ret < 0) return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; } @@ -2097,723 +2852,733 @@ transport_generic_new_cmd(struct se_cmd *cmd) * and let it call back once the write buffers are ready. */ target_add_to_state_list(cmd); - if (cmd->data_direction != DMA_TO_DEVICE) { + if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) { target_execute_cmd(cmd); return 0; } - transport_cmd_check_stop(cmd, false, true); + + spin_lock_irqsave(&cmd->t_state_lock, flags); + cmd->t_state = TRANSPORT_WRITE_PENDING; + /* + * Determine if frontend context caller is requesting the stopping of + * this command for frontend exceptions. 
+ */ + if (cmd->transport_state & CMD_T_STOP && + !cmd->se_tfo->write_pending_must_be_called) { + pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n", + __func__, __LINE__, cmd->tag); + + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + + complete_all(&cmd->t_transport_stop_comp); + return 0; + } + cmd->transport_state &= ~CMD_T_ACTIVE; + spin_unlock_irqrestore(&cmd->t_state_lock, flags); ret = cmd->se_tfo->write_pending(cmd); - if (ret == -EAGAIN || ret == -ENOMEM) + if (ret) goto queue_full; - /* fabric drivers should only return -EAGAIN or -ENOMEM as error */ - WARN_ON(ret); - - return (!ret) ? 0 : TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return 0; queue_full: pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd); - cmd->t_state = TRANSPORT_COMPLETE_QF_WP; - transport_handle_queue_full(cmd, cmd->se_dev); + transport_handle_queue_full(cmd, cmd->se_dev, ret, true); return 0; } EXPORT_SYMBOL(transport_generic_new_cmd); static void transport_write_pending_qf(struct se_cmd *cmd) { + unsigned long flags; int ret; + bool stop; + + spin_lock_irqsave(&cmd->t_state_lock, flags); + stop = (cmd->transport_state & (CMD_T_STOP | CMD_T_ABORTED)); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + + if (stop) { + pr_debug("%s:%d CMD_T_STOP|CMD_T_ABORTED for ITT: 0x%08llx\n", + __func__, __LINE__, cmd->tag); + complete_all(&cmd->t_transport_stop_comp); + return; + } ret = cmd->se_tfo->write_pending(cmd); - if (ret == -EAGAIN || ret == -ENOMEM) { + if (ret) { pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd); - transport_handle_queue_full(cmd, cmd->se_dev); + transport_handle_queue_full(cmd, cmd->se_dev, ret, true); } } +static bool +__transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *, + unsigned long *flags); + +static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas) +{ + unsigned long flags; + + spin_lock_irqsave(&cmd->t_state_lock, flags); + __transport_wait_for_tasks(cmd, true, aborted, tas, &flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); +} + +/* + * Call target_put_sess_cmd() and wait until target_release_cmd_kref(@cmd) has + * finished. + */ +void target_put_cmd_and_wait(struct se_cmd *cmd) +{ + DECLARE_COMPLETION_ONSTACK(compl); + + WARN_ON_ONCE(cmd->abrt_compl); + cmd->abrt_compl = &compl; + target_put_sess_cmd(cmd); + wait_for_completion(&compl); +} + +/* + * This function is called by frontend drivers after processing of a command + * has finished. + * + * The protocol for ensuring that either the regular frontend command + * processing flow or target_handle_abort() code drops one reference is as + * follows: + * - Calling .queue_data_in(), .queue_status() or queue_tm_rsp() will cause + * the frontend driver to call this function synchronously or asynchronously. + * That will cause one reference to be dropped. + * - During regular command processing the target core sets CMD_T_COMPLETE + * before invoking one of the .queue_*() functions. + * - The code that aborts commands skips commands and TMFs for which + * CMD_T_COMPLETE has been set. + * - CMD_T_ABORTED is set atomically after the CMD_T_COMPLETE check for + * commands that will be aborted. + * - If the CMD_T_ABORTED flag is set but CMD_T_TAS has not been set + * transport_generic_free_cmd() skips its call to target_put_sess_cmd(). + * - For aborted commands for which CMD_T_TAS has been set .queue_status() will + * be called and will drop a reference. + * - For aborted commands for which CMD_T_TAS has not been set .aborted_task() + * will be called. 
target_handle_abort() will drop the final reference. + */ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) { + DECLARE_COMPLETION_ONSTACK(compl); int ret = 0; + bool aborted = false, tas = false; - if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) { - if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) - transport_wait_for_tasks(cmd); + if (wait_for_tasks) + target_wait_free_cmd(cmd, &aborted, &tas); - ret = transport_release_cmd(cmd); - } else { - if (wait_for_tasks) - transport_wait_for_tasks(cmd); + if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) { + /* + * Handle WRITE failure case where transport_generic_new_cmd() + * has already added se_cmd to state_list, but fabric has + * failed command before I/O submission. + */ + if (cmd->state_active) + target_remove_from_state_list(cmd); if (cmd->se_lun) transport_lun_remove_cmd(cmd); - - ret = transport_put_cmd(cmd); + } + if (aborted) + cmd->free_compl = &compl; + ret = target_put_sess_cmd(cmd); + if (aborted) { + pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag); + wait_for_completion(&compl); + ret = 1; } return ret; } EXPORT_SYMBOL(transport_generic_free_cmd); -/* target_get_sess_cmd - Add command to active ->sess_cmd_list - * @se_sess: session to reference +/** + * target_get_sess_cmd - Verify the session is accepting cmds and take ref * @se_cmd: command descriptor to add * @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd() */ -int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd, - bool ack_kref) +int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref) { - unsigned long flags; int ret = 0; - kref_init(&se_cmd->cmd_kref); /* * Add a second kref if the fabric caller is expecting to handle * fabric acknowledgement that requires two target_put_sess_cmd() * invocations before se_cmd descriptor release. */ - if (ack_kref == true) { + if (ack_kref) { kref_get(&se_cmd->cmd_kref); se_cmd->se_cmd_flags |= SCF_ACK_KREF; } - spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); - if (se_sess->sess_tearing_down) { - ret = -ESHUTDOWN; - goto out; + /* + * Users like xcopy do not use counters since they never do a stop + * and wait. 
+ */ + if (se_cmd->cmd_cnt) { + if (!percpu_ref_tryget_live(&se_cmd->cmd_cnt->refcnt)) + ret = -ESHUTDOWN; } - list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list); -out: - spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); + if (ret && ack_kref) + target_put_sess_cmd(se_cmd); + return ret; } EXPORT_SYMBOL(target_get_sess_cmd); +static void target_free_cmd_mem(struct se_cmd *cmd) +{ + transport_free_pages(cmd); + + if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) + core_tmr_release_req(cmd->se_tmr_req); + if (cmd->t_task_cdb != cmd->__t_task_cdb) + kfree(cmd->t_task_cdb); +} + static void target_release_cmd_kref(struct kref *kref) { struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); - struct se_session *se_sess = se_cmd->se_sess; - - if (list_empty(&se_cmd->se_cmd_list)) { - spin_unlock(&se_sess->sess_cmd_lock); - se_cmd->se_tfo->release_cmd(se_cmd); - return; - } - if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) { - spin_unlock(&se_sess->sess_cmd_lock); - complete(&se_cmd->cmd_wait_comp); - return; - } - list_del(&se_cmd->se_cmd_list); - spin_unlock(&se_sess->sess_cmd_lock); + struct target_cmd_counter *cmd_cnt = se_cmd->cmd_cnt; + struct completion *free_compl = se_cmd->free_compl; + struct completion *abrt_compl = se_cmd->abrt_compl; + target_free_cmd_mem(se_cmd); se_cmd->se_tfo->release_cmd(se_cmd); + if (free_compl) + complete(free_compl); + if (abrt_compl) + complete(abrt_compl); + + if (cmd_cnt) + percpu_ref_put(&cmd_cnt->refcnt); } -/* target_put_sess_cmd - Check for active I/O shutdown via kref_put - * @se_sess: session to reference - * @se_cmd: command descriptor to drop +/** + * target_put_sess_cmd - decrease the command reference count + * @se_cmd: command to drop a reference from + * + * Returns 1 if and only if this target_put_sess_cmd() call caused the + * refcount to drop to zero. Returns zero otherwise. */ -int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd) +int target_put_sess_cmd(struct se_cmd *se_cmd) { - return kref_put_spinlock_irqsave(&se_cmd->cmd_kref, target_release_cmd_kref, - &se_sess->sess_cmd_lock); + return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref); } EXPORT_SYMBOL(target_put_sess_cmd); -/* target_sess_cmd_list_set_waiting - Flag all commands in - * sess_cmd_list to complete cmd_wait_comp. Set - * sess_tearing_down so no more commands are queued. 
- * @se_sess: session to flag - */ -void target_sess_cmd_list_set_waiting(struct se_session *se_sess) +static const char *data_dir_name(enum dma_data_direction d) { - struct se_cmd *se_cmd; - unsigned long flags; - - spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); - if (se_sess->sess_tearing_down) { - spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); - return; + switch (d) { + case DMA_BIDIRECTIONAL: return "BIDI"; + case DMA_TO_DEVICE: return "WRITE"; + case DMA_FROM_DEVICE: return "READ"; + case DMA_NONE: return "NONE"; } - se_sess->sess_tearing_down = 1; - list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list); - - list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) - se_cmd->cmd_wait_set = 1; - spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); + return "(?)"; } -EXPORT_SYMBOL(target_sess_cmd_list_set_waiting); -/* target_wait_for_sess_cmds - Wait for outstanding descriptors - * @se_sess: session to wait for active I/O - */ -void target_wait_for_sess_cmds(struct se_session *se_sess) +static const char *cmd_state_name(enum transport_state_table t) { - struct se_cmd *se_cmd, *tmp_cmd; - unsigned long flags; - - list_for_each_entry_safe(se_cmd, tmp_cmd, - &se_sess->sess_wait_list, se_cmd_list) { - list_del(&se_cmd->se_cmd_list); - - pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:" - " %d\n", se_cmd, se_cmd->t_state, - se_cmd->se_tfo->get_cmd_state(se_cmd)); - - wait_for_completion(&se_cmd->cmd_wait_comp); - pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d" - " fabric state: %d\n", se_cmd, se_cmd->t_state, - se_cmd->se_tfo->get_cmd_state(se_cmd)); - - se_cmd->se_tfo->release_cmd(se_cmd); + switch (t) { + case TRANSPORT_NO_STATE: return "NO_STATE"; + case TRANSPORT_NEW_CMD: return "NEW_CMD"; + case TRANSPORT_WRITE_PENDING: return "WRITE_PENDING"; + case TRANSPORT_PROCESSING: return "PROCESSING"; + case TRANSPORT_COMPLETE: return "COMPLETE"; + case TRANSPORT_ISTATE_PROCESSING: + return "ISTATE_PROCESSING"; + case TRANSPORT_COMPLETE_QF_WP: return "COMPLETE_QF_WP"; + case TRANSPORT_COMPLETE_QF_OK: return "COMPLETE_QF_OK"; + case TRANSPORT_COMPLETE_QF_ERR: return "COMPLETE_QF_ERR"; } - spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); - WARN_ON(!list_empty(&se_sess->sess_cmd_list)); - spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); - + return "(?)"; } -EXPORT_SYMBOL(target_wait_for_sess_cmds); -/* transport_lun_wait_for_tasks(): - * - * Called from ConfigFS context to stop the passed struct se_cmd to allow - * an struct se_lun to be successfully shutdown. - */ -static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun) +static void target_append_str(char **str, const char *txt) { - unsigned long flags; - int ret = 0; - - /* - * If the frontend has already requested this struct se_cmd to - * be stopped, we can safely ignore this struct se_cmd. - */ - spin_lock_irqsave(&cmd->t_state_lock, flags); - if (cmd->transport_state & CMD_T_STOP) { - cmd->transport_state &= ~CMD_T_LUN_STOP; - - pr_debug("ConfigFS ITT[0x%08x] - CMD_T_STOP, skipping\n", - cmd->se_tfo->get_task_tag(cmd)); - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - transport_cmd_check_stop(cmd, false, false); - return -EPERM; - } - cmd->transport_state |= CMD_T_LUN_FE_STOP; - spin_unlock_irqrestore(&cmd->t_state_lock, flags); + char *prev = *str; - // XXX: audit task_flags checks. 
- spin_lock_irqsave(&cmd->t_state_lock, flags); - if ((cmd->transport_state & CMD_T_BUSY) && - (cmd->transport_state & CMD_T_SENT)) { - if (!target_stop_cmd(cmd, &flags)) - ret++; - } - spin_unlock_irqrestore(&cmd->t_state_lock, flags); + *str = *str ? kasprintf(GFP_ATOMIC, "%s,%s", *str, txt) : + kstrdup(txt, GFP_ATOMIC); + kfree(prev); +} - pr_debug("ConfigFS: cmd: %p stop tasks ret:" - " %d\n", cmd, ret); - if (!ret) { - pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n", - cmd->se_tfo->get_task_tag(cmd)); - wait_for_completion(&cmd->transport_lun_stop_comp); - pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n", - cmd->se_tfo->get_task_tag(cmd)); +/* + * Convert a transport state bitmask into a string. The caller is + * responsible for freeing the returned pointer. + */ +static char *target_ts_to_str(u32 ts) +{ + char *str = NULL; + + if (ts & CMD_T_ABORTED) + target_append_str(&str, "aborted"); + if (ts & CMD_T_ACTIVE) + target_append_str(&str, "active"); + if (ts & CMD_T_COMPLETE) + target_append_str(&str, "complete"); + if (ts & CMD_T_SENT) + target_append_str(&str, "sent"); + if (ts & CMD_T_STOP) + target_append_str(&str, "stop"); + if (ts & CMD_T_FABRIC_STOP) + target_append_str(&str, "fabric_stop"); + + return str; +} + +static const char *target_tmf_name(enum tcm_tmreq_table tmf) +{ + switch (tmf) { + case TMR_ABORT_TASK: return "ABORT_TASK"; + case TMR_ABORT_TASK_SET: return "ABORT_TASK_SET"; + case TMR_CLEAR_ACA: return "CLEAR_ACA"; + case TMR_CLEAR_TASK_SET: return "CLEAR_TASK_SET"; + case TMR_LUN_RESET: return "LUN_RESET"; + case TMR_TARGET_WARM_RESET: return "TARGET_WARM_RESET"; + case TMR_TARGET_COLD_RESET: return "TARGET_COLD_RESET"; + case TMR_LUN_RESET_PRO: return "LUN_RESET_PRO"; + case TMR_UNKNOWN: break; + } + return "(?)"; +} + +void target_show_cmd(const char *pfx, struct se_cmd *cmd) +{ + char *ts_str = target_ts_to_str(cmd->transport_state); + const u8 *cdb = cmd->t_task_cdb; + struct se_tmr_req *tmf = cmd->se_tmr_req; + + if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { + pr_debug("%scmd %#02x:%#02x with tag %#llx dir %s i_state %d t_state %s len %d refcnt %d transport_state %s\n", + pfx, cdb[0], cdb[1], cmd->tag, + data_dir_name(cmd->data_direction), + cmd->se_tfo->get_cmd_state(cmd), + cmd_state_name(cmd->t_state), cmd->data_length, + kref_read(&cmd->cmd_kref), ts_str); + } else { + pr_debug("%stmf %s with tag %#llx ref_task_tag %#llx i_state %d t_state %s refcnt %d transport_state %s\n", + pfx, target_tmf_name(tmf->function), cmd->tag, + tmf->ref_task_tag, cmd->se_tfo->get_cmd_state(cmd), + cmd_state_name(cmd->t_state), + kref_read(&cmd->cmd_kref), ts_str); } + kfree(ts_str); +} +EXPORT_SYMBOL(target_show_cmd); - return 0; +static void target_stop_cmd_counter_confirm(struct percpu_ref *ref) +{ + struct target_cmd_counter *cmd_cnt = container_of(ref, + struct target_cmd_counter, + refcnt); + complete_all(&cmd_cnt->stop_done); } -static void __transport_clear_lun_from_sessions(struct se_lun *lun) +/** + * target_stop_cmd_counter - Stop new IO from being added to the counter. + * @cmd_cnt: counter to stop + */ +void target_stop_cmd_counter(struct target_cmd_counter *cmd_cnt) { - struct se_cmd *cmd = NULL; - unsigned long lun_flags, cmd_flags; - /* - * Do exception processing and return CHECK_CONDITION status to the - * Initiator Port. 
- */ - spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); - while (!list_empty(&lun->lun_cmd_list)) { - cmd = list_first_entry(&lun->lun_cmd_list, - struct se_cmd, se_lun_node); - list_del_init(&cmd->se_lun_node); - - spin_lock(&cmd->t_state_lock); - pr_debug("SE_LUN[%d] - Setting cmd->transport" - "_lun_stop for ITT: 0x%08x\n", - cmd->se_lun->unpacked_lun, - cmd->se_tfo->get_task_tag(cmd)); - cmd->transport_state |= CMD_T_LUN_STOP; - spin_unlock(&cmd->t_state_lock); - - spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); - - if (!cmd->se_lun) { - pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n", - cmd->se_tfo->get_task_tag(cmd), - cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); - BUG(); - } - /* - * If the Storage engine still owns the iscsi_cmd_t, determine - * and/or stop its context. - */ - pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport" - "_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun, - cmd->se_tfo->get_task_tag(cmd)); + pr_debug("Stopping command counter.\n"); + if (!atomic_cmpxchg(&cmd_cnt->stopped, 0, 1)) + percpu_ref_kill_and_confirm(&cmd_cnt->refcnt, + target_stop_cmd_counter_confirm); +} +EXPORT_SYMBOL_GPL(target_stop_cmd_counter); - if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) { - spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); - continue; - } +/** + * target_stop_session - Stop new IO from being queued on the session. + * @se_sess: session to stop + */ +void target_stop_session(struct se_session *se_sess) +{ + target_stop_cmd_counter(se_sess->cmd_cnt); +} +EXPORT_SYMBOL(target_stop_session); - pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun" - "_wait_for_tasks(): SUCCESS\n", - cmd->se_lun->unpacked_lun, - cmd->se_tfo->get_task_tag(cmd)); +/** + * target_wait_for_cmds - Wait for outstanding cmds. + * @cmd_cnt: counter to wait for active I/O for. + */ +void target_wait_for_cmds(struct target_cmd_counter *cmd_cnt) +{ + int ret; - spin_lock_irqsave(&cmd->t_state_lock, cmd_flags); - if (!(cmd->transport_state & CMD_T_DEV_ACTIVE)) { - spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); - goto check_cond; - } - cmd->transport_state &= ~CMD_T_DEV_ACTIVE; - target_remove_from_state_list(cmd); - spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); + WARN_ON_ONCE(!atomic_read(&cmd_cnt->stopped)); - /* - * The Storage engine stopped this struct se_cmd before it was - * send to the fabric frontend for delivery back to the - * Initiator Node. Return this SCSI CDB back with an - * CHECK_CONDITION status. - */ -check_cond: - transport_send_check_condition_and_sense(cmd, - TCM_NON_EXISTENT_LUN, 0); - /* - * If the fabric frontend is waiting for this iscsi_cmd_t to - * be released, notify the waiting thread now that LU has - * finished accessing it. 
- */ - spin_lock_irqsave(&cmd->t_state_lock, cmd_flags); - if (cmd->transport_state & CMD_T_LUN_FE_STOP) { - pr_debug("SE_LUN[%d] - Detected FE stop for" - " struct se_cmd: %p ITT: 0x%08x\n", - lun->unpacked_lun, - cmd, cmd->se_tfo->get_task_tag(cmd)); - - spin_unlock_irqrestore(&cmd->t_state_lock, - cmd_flags); - transport_cmd_check_stop(cmd, false, false); - complete(&cmd->transport_lun_fe_stop_comp); - spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); - continue; - } - pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n", - lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd)); + do { + pr_debug("Waiting for running cmds to complete.\n"); + ret = wait_event_timeout(cmd_cnt->refcnt_wq, + percpu_ref_is_zero(&cmd_cnt->refcnt), + 180 * HZ); + } while (ret <= 0); - spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); - spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); - } - spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); + wait_for_completion(&cmd_cnt->stop_done); + pr_debug("Waiting for cmds done.\n"); } +EXPORT_SYMBOL_GPL(target_wait_for_cmds); -static int transport_clear_lun_thread(void *p) +/** + * target_wait_for_sess_cmds - Wait for outstanding commands + * @se_sess: session to wait for active I/O + */ +void target_wait_for_sess_cmds(struct se_session *se_sess) { - struct se_lun *lun = p; - - __transport_clear_lun_from_sessions(lun); - complete(&lun->lun_shutdown_comp); - - return 0; + target_wait_for_cmds(se_sess->cmd_cnt); } +EXPORT_SYMBOL(target_wait_for_sess_cmds); -int transport_clear_lun_from_sessions(struct se_lun *lun) +/* + * Prevent that new percpu_ref_tryget_live() calls succeed and wait until + * all references to the LUN have been released. Called during LUN shutdown. + */ +void transport_clear_lun_ref(struct se_lun *lun) { - struct task_struct *kt; - - kt = kthread_run(transport_clear_lun_thread, lun, - "tcm_cl_%u", lun->unpacked_lun); - if (IS_ERR(kt)) { - pr_err("Unable to start clear_lun thread\n"); - return PTR_ERR(kt); - } + percpu_ref_kill(&lun->lun_ref); wait_for_completion(&lun->lun_shutdown_comp); - - return 0; } -/** - * transport_wait_for_tasks - wait for completion to occur - * @cmd: command to wait - * - * Called from frontend fabric context to wait for storage engine - * to pause and/or release frontend generated struct se_cmd. - */ -bool transport_wait_for_tasks(struct se_cmd *cmd) +static bool +__transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop, + bool *aborted, bool *tas, unsigned long *flags) + __releases(&cmd->t_state_lock) + __acquires(&cmd->t_state_lock) { - unsigned long flags; + lockdep_assert_held(&cmd->t_state_lock); + + if (fabric_stop) + cmd->transport_state |= CMD_T_FABRIC_STOP; + + if (cmd->transport_state & CMD_T_ABORTED) + *aborted = true; + + if (cmd->transport_state & CMD_T_TAS) + *tas = true; - spin_lock_irqsave(&cmd->t_state_lock, flags); if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && - !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { - spin_unlock_irqrestore(&cmd->t_state_lock, flags); + !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) return false; - } if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && - !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { - spin_unlock_irqrestore(&cmd->t_state_lock, flags); + !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) return false; - } - /* - * If we are already stopped due to an external event (ie: LUN shutdown) - * sleep until the connection can have the passed struct se_cmd back. 
- * The cmd->transport_lun_stopped_sem will be upped by - * transport_clear_lun_from_sessions() once the ConfigFS context caller - * has completed its operation on the struct se_cmd. - */ - if (cmd->transport_state & CMD_T_LUN_STOP) { - pr_debug("wait_for_tasks: Stopping" - " wait_for_completion(&cmd->t_tasktransport_lun_fe" - "_stop_comp); for ITT: 0x%08x\n", - cmd->se_tfo->get_task_tag(cmd)); - /* - * There is a special case for WRITES where a FE exception + - * LUN shutdown means ConfigFS context is still sleeping on - * transport_lun_stop_comp in transport_lun_wait_for_tasks(). - * We go ahead and up transport_lun_stop_comp just to be sure - * here. - */ - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - complete(&cmd->transport_lun_stop_comp); - wait_for_completion(&cmd->transport_lun_fe_stop_comp); - spin_lock_irqsave(&cmd->t_state_lock, flags); - - target_remove_from_state_list(cmd); - /* - * At this point, the frontend who was the originator of this - * struct se_cmd, now owns the structure and can be released through - * normal means below. - */ - pr_debug("wait_for_tasks: Stopped" - " wait_for_completion(&cmd->t_tasktransport_lun_fe_" - "stop_comp); for ITT: 0x%08x\n", - cmd->se_tfo->get_task_tag(cmd)); - cmd->transport_state &= ~CMD_T_LUN_STOP; - } + if (!(cmd->transport_state & CMD_T_ACTIVE)) + return false; - if (!(cmd->transport_state & CMD_T_ACTIVE)) { - spin_unlock_irqrestore(&cmd->t_state_lock, flags); + if (fabric_stop && *aborted) return false; - } cmd->transport_state |= CMD_T_STOP; - pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x" - " i_state: %d, t_state: %d, CMD_T_STOP\n", - cmd, cmd->se_tfo->get_task_tag(cmd), - cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); + target_show_cmd("wait_for_tasks: Stopping ", cmd); - spin_unlock_irqrestore(&cmd->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, *flags); - wait_for_completion(&cmd->t_transport_stop_comp); + while (!wait_for_completion_timeout(&cmd->t_transport_stop_comp, + 180 * HZ)) + target_show_cmd("wait for tasks: ", cmd); - spin_lock_irqsave(&cmd->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, *flags); cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP); - pr_debug("wait_for_tasks: Stopped wait_for_completion(" - "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n", - cmd->se_tfo->get_task_tag(cmd)); - - spin_unlock_irqrestore(&cmd->t_state_lock, flags); + pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->" + "t_transport_stop_comp) for ITT: 0x%08llx\n", cmd->tag); return true; } -EXPORT_SYMBOL(transport_wait_for_tasks); -static int transport_get_sense_codes( - struct se_cmd *cmd, - u8 *asc, - u8 *ascq) -{ - *asc = cmd->scsi_asc; - *ascq = cmd->scsi_ascq; - - return 0; -} - -int -transport_send_check_condition_and_sense(struct se_cmd *cmd, - sense_reason_t reason, int from_transport) +/** + * transport_wait_for_tasks - set CMD_T_STOP and wait for t_transport_stop_comp + * @cmd: command to wait on + */ +bool transport_wait_for_tasks(struct se_cmd *cmd) { - unsigned char *buffer = cmd->sense_buffer; unsigned long flags; - u8 asc = 0, ascq = 0; + bool ret, aborted = false, tas = false; spin_lock_irqsave(&cmd->t_state_lock, flags); - if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - return 0; - } - cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION; + ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags); spin_unlock_irqrestore(&cmd->t_state_lock, flags); - if (!reason && from_transport) - goto after_reason; 
- - if (!from_transport) - cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE; + return ret; +} +EXPORT_SYMBOL(transport_wait_for_tasks); - /* - * Actual SENSE DATA, see SPC-3 7.23.2 SPC_SENSE_KEY_OFFSET uses - * SENSE KEY values from include/scsi/scsi.h - */ - switch (reason) { - case TCM_NO_SENSE: - /* CURRENT ERROR */ - buffer[0] = 0x70; - buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; - /* Not Ready */ - buffer[SPC_SENSE_KEY_OFFSET] = NOT_READY; - /* NO ADDITIONAL SENSE INFORMATION */ - buffer[SPC_ASC_KEY_OFFSET] = 0; - buffer[SPC_ASCQ_KEY_OFFSET] = 0; - break; - case TCM_NON_EXISTENT_LUN: - /* CURRENT ERROR */ - buffer[0] = 0x70; - buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; - /* ILLEGAL REQUEST */ - buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; - /* LOGICAL UNIT NOT SUPPORTED */ - buffer[SPC_ASC_KEY_OFFSET] = 0x25; - break; - case TCM_UNSUPPORTED_SCSI_OPCODE: - case TCM_SECTOR_COUNT_TOO_MANY: - /* CURRENT ERROR */ - buffer[0] = 0x70; - buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; - /* ILLEGAL REQUEST */ - buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; - /* INVALID COMMAND OPERATION CODE */ - buffer[SPC_ASC_KEY_OFFSET] = 0x20; - break; - case TCM_UNKNOWN_MODE_PAGE: - /* CURRENT ERROR */ - buffer[0] = 0x70; - buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; - /* ILLEGAL REQUEST */ - buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; - /* INVALID FIELD IN CDB */ - buffer[SPC_ASC_KEY_OFFSET] = 0x24; - break; - case TCM_CHECK_CONDITION_ABORT_CMD: - /* CURRENT ERROR */ - buffer[0] = 0x70; - buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; - /* ABORTED COMMAND */ - buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; - /* BUS DEVICE RESET FUNCTION OCCURRED */ - buffer[SPC_ASC_KEY_OFFSET] = 0x29; - buffer[SPC_ASCQ_KEY_OFFSET] = 0x03; - break; - case TCM_INCORRECT_AMOUNT_OF_DATA: - /* CURRENT ERROR */ - buffer[0] = 0x70; - buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; - /* ABORTED COMMAND */ - buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; - /* WRITE ERROR */ - buffer[SPC_ASC_KEY_OFFSET] = 0x0c; - /* NOT ENOUGH UNSOLICITED DATA */ - buffer[SPC_ASCQ_KEY_OFFSET] = 0x0d; - break; - case TCM_INVALID_CDB_FIELD: - /* CURRENT ERROR */ - buffer[0] = 0x70; - buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; - /* ILLEGAL REQUEST */ - buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; - /* INVALID FIELD IN CDB */ - buffer[SPC_ASC_KEY_OFFSET] = 0x24; - break; - case TCM_INVALID_PARAMETER_LIST: - /* CURRENT ERROR */ - buffer[0] = 0x70; - buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; - /* ILLEGAL REQUEST */ - buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; - /* INVALID FIELD IN PARAMETER LIST */ - buffer[SPC_ASC_KEY_OFFSET] = 0x26; - break; - case TCM_PARAMETER_LIST_LENGTH_ERROR: - /* CURRENT ERROR */ - buffer[0] = 0x70; - buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; - /* ILLEGAL REQUEST */ - buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; - /* PARAMETER LIST LENGTH ERROR */ - buffer[SPC_ASC_KEY_OFFSET] = 0x1a; - break; - case TCM_UNEXPECTED_UNSOLICITED_DATA: - /* CURRENT ERROR */ - buffer[0] = 0x70; - buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; - /* ABORTED COMMAND */ - buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; - /* WRITE ERROR */ - buffer[SPC_ASC_KEY_OFFSET] = 0x0c; - /* UNEXPECTED_UNSOLICITED_DATA */ - buffer[SPC_ASCQ_KEY_OFFSET] = 0x0c; - break; - case TCM_SERVICE_CRC_ERROR: - /* CURRENT ERROR */ - buffer[0] = 0x70; - buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; - /* ABORTED COMMAND */ - buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; - /* PROTOCOL SERVICE CRC ERROR */ - buffer[SPC_ASC_KEY_OFFSET] = 0x47; - /* N/A */ - buffer[SPC_ASCQ_KEY_OFFSET] = 0x05; - break; - case 
TCM_SNACK_REJECTED: - /* CURRENT ERROR */ - buffer[0] = 0x70; - buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; - /* ABORTED COMMAND */ - buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND; - /* READ ERROR */ - buffer[SPC_ASC_KEY_OFFSET] = 0x11; - /* FAILED RETRANSMISSION REQUEST */ - buffer[SPC_ASCQ_KEY_OFFSET] = 0x13; - break; - case TCM_WRITE_PROTECTED: - /* CURRENT ERROR */ - buffer[0] = 0x70; - buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; - /* DATA PROTECT */ - buffer[SPC_SENSE_KEY_OFFSET] = DATA_PROTECT; - /* WRITE PROTECTED */ - buffer[SPC_ASC_KEY_OFFSET] = 0x27; - break; - case TCM_ADDRESS_OUT_OF_RANGE: - /* CURRENT ERROR */ - buffer[0] = 0x70; - buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; - /* ILLEGAL REQUEST */ - buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; - /* LOGICAL BLOCK ADDRESS OUT OF RANGE */ - buffer[SPC_ASC_KEY_OFFSET] = 0x21; - break; - case TCM_CHECK_CONDITION_UNIT_ATTENTION: - /* CURRENT ERROR */ - buffer[0] = 0x70; - buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; - /* UNIT ATTENTION */ - buffer[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION; - core_scsi3_ua_for_check_condition(cmd, &asc, &ascq); - buffer[SPC_ASC_KEY_OFFSET] = asc; - buffer[SPC_ASCQ_KEY_OFFSET] = ascq; - break; - case TCM_CHECK_CONDITION_NOT_READY: - /* CURRENT ERROR */ - buffer[0] = 0x70; - buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; - /* Not Ready */ - buffer[SPC_SENSE_KEY_OFFSET] = NOT_READY; - transport_get_sense_codes(cmd, &asc, &ascq); - buffer[SPC_ASC_KEY_OFFSET] = asc; - buffer[SPC_ASCQ_KEY_OFFSET] = ascq; - break; - case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: - default: - /* CURRENT ERROR */ - buffer[0] = 0x70; - buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10; +struct sense_detail { + u8 key; + u8 asc; + u8 ascq; + bool add_sense_info; +}; + +static const struct sense_detail sense_detail_table[] = { + [TCM_NO_SENSE] = { + .key = NOT_READY + }, + [TCM_NON_EXISTENT_LUN] = { + .key = ILLEGAL_REQUEST, + .asc = 0x25 /* LOGICAL UNIT NOT SUPPORTED */ + }, + [TCM_UNSUPPORTED_SCSI_OPCODE] = { + .key = ILLEGAL_REQUEST, + .asc = 0x20, /* INVALID COMMAND OPERATION CODE */ + }, + [TCM_SECTOR_COUNT_TOO_MANY] = { + .key = ILLEGAL_REQUEST, + .asc = 0x20, /* INVALID COMMAND OPERATION CODE */ + }, + [TCM_UNKNOWN_MODE_PAGE] = { + .key = ILLEGAL_REQUEST, + .asc = 0x24, /* INVALID FIELD IN CDB */ + }, + [TCM_CHECK_CONDITION_ABORT_CMD] = { + .key = ABORTED_COMMAND, + .asc = 0x29, /* BUS DEVICE RESET FUNCTION OCCURRED */ + .ascq = 0x03, + }, + [TCM_INCORRECT_AMOUNT_OF_DATA] = { + .key = ABORTED_COMMAND, + .asc = 0x0c, /* WRITE ERROR */ + .ascq = 0x0d, /* NOT ENOUGH UNSOLICITED DATA */ + }, + [TCM_INVALID_CDB_FIELD] = { + .key = ILLEGAL_REQUEST, + .asc = 0x24, /* INVALID FIELD IN CDB */ + }, + [TCM_INVALID_PARAMETER_LIST] = { + .key = ILLEGAL_REQUEST, + .asc = 0x26, /* INVALID FIELD IN PARAMETER LIST */ + }, + [TCM_TOO_MANY_TARGET_DESCS] = { + .key = ILLEGAL_REQUEST, + .asc = 0x26, + .ascq = 0x06, /* TOO MANY TARGET DESCRIPTORS */ + }, + [TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE] = { + .key = ILLEGAL_REQUEST, + .asc = 0x26, + .ascq = 0x07, /* UNSUPPORTED TARGET DESCRIPTOR TYPE CODE */ + }, + [TCM_TOO_MANY_SEGMENT_DESCS] = { + .key = ILLEGAL_REQUEST, + .asc = 0x26, + .ascq = 0x08, /* TOO MANY SEGMENT DESCRIPTORS */ + }, + [TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE] = { + .key = ILLEGAL_REQUEST, + .asc = 0x26, + .ascq = 0x09, /* UNSUPPORTED SEGMENT DESCRIPTOR TYPE CODE */ + }, + [TCM_PARAMETER_LIST_LENGTH_ERROR] = { + .key = ILLEGAL_REQUEST, + .asc = 0x1a, /* PARAMETER LIST LENGTH ERROR */ + }, + [TCM_UNEXPECTED_UNSOLICITED_DATA] = { + .key = ILLEGAL_REQUEST, + .asc = 
0x0c, /* WRITE ERROR */
+ .ascq = 0x0c, /* UNEXPECTED_UNSOLICITED_DATA */
+ },
+ [TCM_SERVICE_CRC_ERROR] = {
+ .key = ABORTED_COMMAND,
+ .asc = 0x47, /* PROTOCOL SERVICE CRC ERROR */
+ .ascq = 0x05, /* N/A */
+ },
+ [TCM_SNACK_REJECTED] = {
+ .key = ABORTED_COMMAND,
+ .asc = 0x11, /* READ ERROR */
+ .ascq = 0x13, /* FAILED RETRANSMISSION REQUEST */
+ },
+ [TCM_WRITE_PROTECTED] = {
+ .key = DATA_PROTECT,
+ .asc = 0x27, /* WRITE PROTECTED */
+ },
+ [TCM_ADDRESS_OUT_OF_RANGE] = {
+ .key = ILLEGAL_REQUEST,
+ .asc = 0x21, /* LOGICAL BLOCK ADDRESS OUT OF RANGE */
+ },
+ [TCM_CHECK_CONDITION_UNIT_ATTENTION] = {
+ .key = UNIT_ATTENTION,
+ },
+ [TCM_MISCOMPARE_VERIFY] = {
+ .key = MISCOMPARE,
+ .asc = 0x1d, /* MISCOMPARE DURING VERIFY OPERATION */
+ .ascq = 0x00,
+ .add_sense_info = true,
+ },
+ [TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED] = {
+ .key = ABORTED_COMMAND,
+ .asc = 0x10,
+ .ascq = 0x01, /* LOGICAL BLOCK GUARD CHECK FAILED */
+ .add_sense_info = true,
+ },
+ [TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED] = {
+ .key = ABORTED_COMMAND,
+ .asc = 0x10,
+ .ascq = 0x02, /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */
+ .add_sense_info = true,
+ },
+ [TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED] = {
+ .key = ABORTED_COMMAND,
+ .asc = 0x10,
+ .ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
+ .add_sense_info = true,
+ },
+ [TCM_COPY_TARGET_DEVICE_NOT_REACHABLE] = {
+ .key = COPY_ABORTED,
+ .asc = 0x0d,
+ .ascq = 0x02, /* COPY TARGET DEVICE NOT REACHABLE */
+
+ },
+ [TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = {
 /*
 * Returning ILLEGAL REQUEST would cause immediate IO errors on
 * Solaris initiators. Returning NOT READY instead means the
 * operations will be retried a finite number of times and we
 * can survive intermittent errors.
 */
- buffer[SPC_SENSE_KEY_OFFSET] = NOT_READY;
- /* LOGICAL UNIT COMMUNICATION FAILURE */
- buffer[SPC_ASC_KEY_OFFSET] = 0x08;
- break;
- }
- /*
- * This code uses linux/include/scsi/scsi.h SAM status codes!
- */
- cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
- /*
- * Automatically padded, this value is encoded in the fabric's
- * data_length response PDU containing the SCSI defined sense data.
- */
- cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
-
-after_reason:
- trace_target_cmd_complete(cmd);
- return cmd->se_tfo->queue_status(cmd);
-}
-EXPORT_SYMBOL(transport_send_check_condition_and_sense);
+ .key = NOT_READY,
+ .asc = 0x08, /* LOGICAL UNIT COMMUNICATION FAILURE */
+ },
+ [TCM_INSUFFICIENT_REGISTRATION_RESOURCES] = {
+ /*
+ * From spc4r22 sections 5.7.7 and 5.7.8:
+ * If a PERSISTENT RESERVE OUT command with a REGISTER service action
+ * or a REGISTER AND IGNORE EXISTING KEY service action or
+ * REGISTER AND MOVE service action is attempted,
+ * but there are insufficient device server resources to complete the
+ * operation, then the command shall be terminated with CHECK CONDITION
+ * status, with the sense key set to ILLEGAL REQUEST, and the additional
+ * sense code set to INSUFFICIENT REGISTRATION RESOURCES.
+ */
+ .key = ILLEGAL_REQUEST,
+ .asc = 0x55,
+ .ascq = 0x04, /* INSUFFICIENT REGISTRATION RESOURCES */
+ },
+ [TCM_INVALID_FIELD_IN_COMMAND_IU] = {
+ .key = ILLEGAL_REQUEST,
+ .asc = 0x0e,
+ .ascq = 0x03, /* INVALID FIELD IN COMMAND INFORMATION UNIT */
+ },
+ [TCM_ALUA_TG_PT_STANDBY] = {
+ .key = NOT_READY,
+ .asc = 0x04,
+ .ascq = ASCQ_04H_ALUA_TG_PT_STANDBY,
+ },
+ [TCM_ALUA_TG_PT_UNAVAILABLE] = {
+ .key = NOT_READY,
+ .asc = 0x04,
+ .ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE,
+ },
+ [TCM_ALUA_STATE_TRANSITION] = {
+ .key = NOT_READY,
+ .asc = 0x04,
+ .ascq = ASCQ_04H_ALUA_STATE_TRANSITION,
+ },
+ [TCM_ALUA_OFFLINE] = {
+ .key = NOT_READY,
+ .asc = 0x04,
+ .ascq = ASCQ_04H_ALUA_OFFLINE,
+ },
+};

-int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
+/**
+ * translate_sense_reason - translate a sense reason into T10 key, asc and ascq
+ * @cmd: SCSI command in which the resulting sense buffer or SCSI status will
+ * be stored.
+ * @reason: LIO sense reason code. If this argument has the value
+ * TCM_CHECK_CONDITION_UNIT_ATTENTION, try to dequeue a unit attention. If
+ * dequeuing a unit attention fails due to multiple commands being processed
+ * concurrently, set the command status to BUSY.
+ */
+static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
 {
- if (!(cmd->transport_state & CMD_T_ABORTED))
- return 0;
-
- if (!send_status || (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
- return 1;
-
- pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08x\n",
- cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd));
+ const struct sense_detail *sd;
+ u8 *buffer = cmd->sense_buffer;
+ int r = (__force int)reason;
+ u8 key, asc, ascq;
+ bool desc_format = target_sense_desc_format(cmd->se_dev);

- cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
- trace_target_cmd_complete(cmd);
- cmd->se_tfo->queue_status(cmd);
+ if (r < ARRAY_SIZE(sense_detail_table) && sense_detail_table[r].key)
+ sd = &sense_detail_table[r];
+ else
+ sd = &sense_detail_table[(__force int)
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE];
+
+ key = sd->key;
+ if (reason == TCM_CHECK_CONDITION_UNIT_ATTENTION) {
+ if (!core_scsi3_ua_for_check_condition(cmd, &key, &asc,
+ &ascq)) {
+ cmd->scsi_status = SAM_STAT_BUSY;
+ return;
+ }
+ } else {
+ WARN_ON_ONCE(sd->asc == 0);
+ asc = sd->asc;
+ ascq = sd->ascq;
+ }

- return 1;
+ cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
+ cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
+ cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
+ scsi_build_sense_buffer(desc_format, buffer, key, asc, ascq);
+ if (sd->add_sense_info)
+ WARN_ON_ONCE(scsi_set_sense_information(buffer,
+ cmd->scsi_sense_length,
+ cmd->sense_info) < 0);
 }
-EXPORT_SYMBOL(transport_check_aborted_status);

-void transport_send_task_abort(struct se_cmd *cmd)
+int
+transport_send_check_condition_and_sense(struct se_cmd *cmd,
+ sense_reason_t reason, int from_transport)
 {
 unsigned long flags;

+ WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB);
+
 spin_lock_irqsave(&cmd->t_state_lock, flags);
- if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION | SCF_SENT_DELAYED_TAS)) {
+ if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- return;
+ return 0;
 }
+ cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
 spin_unlock_irqrestore(&cmd->t_state_lock, flags);

- /*
- * If there are still expected incoming fabric WRITEs, we wait
- * until until they have completed before sending a 
TASK_ABORTED - * response. This response with TASK_ABORTED status will be - * queued back to fabric module by transport_check_aborted_status(). - */ - if (cmd->data_direction == DMA_TO_DEVICE) { - if (cmd->se_tfo->write_pending_status(cmd) != 0) { - cmd->transport_state |= CMD_T_ABORTED; - smp_mb__after_atomic_inc(); - } - } - cmd->scsi_status = SAM_STAT_TASK_ABORTED; + if (!from_transport) + translate_sense_reason(cmd, reason); - transport_lun_remove_cmd(cmd); + trace_target_cmd_complete(cmd); + return cmd->se_tfo->queue_status(cmd); +} +EXPORT_SYMBOL(transport_send_check_condition_and_sense); - pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x," - " ITT: 0x%08x\n", cmd->t_task_cdb[0], - cmd->se_tfo->get_task_tag(cmd)); +/** + * target_send_busy - Send SCSI BUSY status back to the initiator + * @cmd: SCSI command for which to send a BUSY reply. + * + * Note: Only call this function if target_submit_cmd*() failed. + */ +int target_send_busy(struct se_cmd *cmd) +{ + WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB); + cmd->scsi_status = SAM_STAT_BUSY; trace_target_cmd_complete(cmd); - cmd->se_tfo->queue_status(cmd); + return cmd->se_tfo->queue_status(cmd); } +EXPORT_SYMBOL(target_send_busy); static void target_tmr_work(struct work_struct *work) { @@ -2822,6 +3587,9 @@ static void target_tmr_work(struct work_struct *work) struct se_tmr_req *tmr = cmd->se_tmr_req; int ret; + if (cmd->transport_state & CMD_T_ABORTED) + goto aborted; + switch (tmr->function) { case TMR_ABORT_TASK: core_tmr_abort_task(dev, tmr, cmd->se_sess); @@ -2835,6 +3603,10 @@ static void target_tmr_work(struct work_struct *work) ret = core_tmr_lun_reset(dev, tmr, NULL, NULL); tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE : TMR_FUNCTION_REJECTED; + if (tmr->response == TMR_FUNCTION_COMPLETE) { + target_dev_ua_allocate(dev, 0x29, + ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED); + } break; case TMR_TARGET_WARM_RESET: tmr->response = TMR_FUNCTION_REJECTED; @@ -2843,23 +3615,73 @@ static void target_tmr_work(struct work_struct *work) tmr->response = TMR_FUNCTION_REJECTED; break; default: - pr_err("Uknown TMR function: 0x%02x.\n", + pr_err("Unknown TMR function: 0x%02x.\n", tmr->function); tmr->response = TMR_FUNCTION_REJECTED; break; } - cmd->t_state = TRANSPORT_ISTATE_PROCESSING; + if (cmd->transport_state & CMD_T_ABORTED) + goto aborted; + cmd->se_tfo->queue_tm_rsp(cmd); + transport_lun_remove_cmd(cmd); transport_cmd_check_stop_to_fabric(cmd); + return; + +aborted: + target_handle_abort(cmd); } int transport_generic_handle_tmr( struct se_cmd *cmd) { + unsigned long flags; + bool aborted = false; + + spin_lock_irqsave(&cmd->se_dev->se_tmr_lock, flags); + list_add_tail(&cmd->se_tmr_req->tmr_list, &cmd->se_dev->dev_tmr_list); + spin_unlock_irqrestore(&cmd->se_dev->se_tmr_lock, flags); + + spin_lock_irqsave(&cmd->t_state_lock, flags); + if (cmd->transport_state & CMD_T_ABORTED) { + aborted = true; + } else { + cmd->t_state = TRANSPORT_ISTATE_PROCESSING; + cmd->transport_state |= CMD_T_ACTIVE; + } + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + + if (aborted) { + pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d ref_tag: %llu tag: %llu\n", + cmd->se_tmr_req->function, + cmd->se_tmr_req->ref_task_tag, cmd->tag); + target_handle_abort(cmd); + return 0; + } + INIT_WORK(&cmd->work, target_tmr_work); - queue_work(cmd->se_dev->tmr_wq, &cmd->work); + schedule_work(&cmd->work); return 0; } EXPORT_SYMBOL(transport_generic_handle_tmr); + +bool +target_check_wce(struct se_device *dev) +{ + bool wce = false; + + 
if (dev->transport->get_write_cache)
+ wce = dev->transport->get_write_cache(dev);
+ else if (dev->dev_attrib.emulate_write_cache > 0)
+ wce = true;
+
+ return wce;
+}
+
+bool
+target_check_fua(struct se_device *dev)
+{
+ return target_check_wce(dev) && dev->dev_attrib.emulate_fua_write > 0;
+}
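
The two write-cache helpers added at the end of the patch are intended for backend drivers. As a rough usage sketch only (not part of the patch; my_backend_write_through() is an invented name, and the real IBLOCK/FILEIO backends make this decision in their own execute paths), a backend write routine might choose between cached and write-through behaviour like this:

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

/* Hypothetical helper: should this WRITE bypass the emulated write cache? */
static bool my_backend_write_through(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	/* No emulated write cache at all: every write must reach stable storage. */
	if (!target_check_wce(dev))
		return true;

	/* Cache is enabled, but the initiator set FUA and FUA emulation is active. */
	return (cmd->se_cmd_flags & SCF_FUA) && target_check_fua(dev);
}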
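
Similarly, the reworked status helpers are what a fabric driver's error path ends up calling. The following is a hedged illustration, not part of the patch: my_fabric_fail_cmd() is a hypothetical function, and using TCM_OUT_OF_RESOURCES as the trigger for a BUSY reply is only one example of the rule that target_send_busy() may only be used after a failed target_submit_cmd*():

#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

/* Hypothetical fabric error path for a command that could not be submitted. */
static void my_fabric_fail_cmd(struct se_cmd *se_cmd, sense_reason_t reason)
{
	if (reason == TCM_OUT_OF_RESOURCES) {
		/* No sense data; SAM_STAT_BUSY asks the initiator to retry. */
		target_send_busy(se_cmd);
		return;
	}

	/*
	 * translate_sense_reason() fills in key/ASC/ASCQ from
	 * sense_detail_table and the result is queued back through the
	 * fabric's ->queue_status() callback.
	 */
	transport_send_check_condition_and_sense(se_cmd, reason, 0);
}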
