Diffstat (limited to 'drivers/target/target_core_transport.c')
| -rw-r--r-- | drivers/target/target_core_transport.c | 472 |

1 file changed, 274 insertions(+), 198 deletions(-)
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 7838dc20f713..e8b7955d40f2 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -22,7 +22,7 @@
 #include <linux/module.h>
 #include <linux/ratelimit.h>
 #include <linux/vmalloc.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
 #include <net/sock.h>
 #include <net/tcp.h>
 #include <scsi/scsi_proto.h>
@@ -126,12 +126,12 @@ int init_se_kmem_caches(void)
 	}

 	target_completion_wq = alloc_workqueue("target_completion",
-					       WQ_MEM_RECLAIM, 0);
+					       WQ_MEM_RECLAIM | WQ_PERCPU, 0);
 	if (!target_completion_wq)
 		goto out_free_lba_map_mem_cache;

 	target_submission_wq = alloc_workqueue("target_submission",
-					       WQ_MEM_RECLAIM, 0);
+					       WQ_MEM_RECLAIM | WQ_PERCPU, 0);
 	if (!target_submission_wq)
 		goto out_free_completion_wq;

@@ -220,12 +220,53 @@ void transport_subsystem_check_init(void)
 	sub_api_initialized = 1;
 }

-static void target_release_sess_cmd_refcnt(struct percpu_ref *ref)
+static void target_release_cmd_refcnt(struct percpu_ref *ref)
 {
-	struct se_session *sess = container_of(ref, typeof(*sess), cmd_count);
+	struct target_cmd_counter *cmd_cnt = container_of(ref,
+							  typeof(*cmd_cnt),
+							  refcnt);
+	wake_up(&cmd_cnt->refcnt_wq);
+}
+
+struct target_cmd_counter *target_alloc_cmd_counter(void)
+{
+	struct target_cmd_counter *cmd_cnt;
+	int rc;
+
+	cmd_cnt = kzalloc(sizeof(*cmd_cnt), GFP_KERNEL);
+	if (!cmd_cnt)
+		return NULL;
+
+	init_completion(&cmd_cnt->stop_done);
+	init_waitqueue_head(&cmd_cnt->refcnt_wq);
+	atomic_set(&cmd_cnt->stopped, 0);
+
+	rc = percpu_ref_init(&cmd_cnt->refcnt, target_release_cmd_refcnt, 0,
+			     GFP_KERNEL);
+	if (rc)
+		goto free_cmd_cnt;
+
+	return cmd_cnt;

-	wake_up(&sess->cmd_count_wq);
+free_cmd_cnt:
+	kfree(cmd_cnt);
+	return NULL;
 }
+EXPORT_SYMBOL_GPL(target_alloc_cmd_counter);
+
+void target_free_cmd_counter(struct target_cmd_counter *cmd_cnt)
+{
+	/*
+	 * Drivers like loop do not call target_stop_session during session
+	 * shutdown so we have to drop the ref taken at init time here.
+	 */
+	if (!atomic_read(&cmd_cnt->stopped))
+		percpu_ref_put(&cmd_cnt->refcnt);
+
+	percpu_ref_exit(&cmd_cnt->refcnt);
+	kfree(cmd_cnt);
+}
+EXPORT_SYMBOL_GPL(target_free_cmd_counter);

 /**
  * transport_init_session - initialize a session object
@@ -233,32 +274,14 @@ static void target_release_sess_cmd_refcnt(struct percpu_ref *ref)
  *
  * The caller must have zero-initialized @se_sess before calling this function.
  */
-int transport_init_session(struct se_session *se_sess)
+void transport_init_session(struct se_session *se_sess)
 {
 	INIT_LIST_HEAD(&se_sess->sess_list);
 	INIT_LIST_HEAD(&se_sess->sess_acl_list);
 	spin_lock_init(&se_sess->sess_cmd_lock);
-	init_waitqueue_head(&se_sess->cmd_count_wq);
-	init_completion(&se_sess->stop_done);
-	atomic_set(&se_sess->stopped, 0);
-	return percpu_ref_init(&se_sess->cmd_count,
-			       target_release_sess_cmd_refcnt, 0, GFP_KERNEL);
 }
 EXPORT_SYMBOL(transport_init_session);

-void transport_uninit_session(struct se_session *se_sess)
-{
-	/*
-	 * Drivers like iscsi and loop do not call target_stop_session
-	 * during session shutdown so we have to drop the ref taken at init
-	 * time here.
-	 */
-	if (!atomic_read(&se_sess->stopped))
-		percpu_ref_put(&se_sess->cmd_count);
-
-	percpu_ref_exit(&se_sess->cmd_count);
-}
-
 /**
  * transport_alloc_session - allocate a session object and initialize it
  * @sup_prot_ops: bitmask that defines which T10-PI modes are supported.
@@ -266,7 +289,6 @@ void transport_uninit_session(struct se_session *se_sess)
 struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops)
 {
 	struct se_session *se_sess;
-	int ret;

 	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
 	if (!se_sess) {
@@ -274,11 +296,7 @@ struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops)
 		       " se_sess_cache\n");
 		return ERR_PTR(-ENOMEM);
 	}
-	ret = transport_init_session(se_sess);
-	if (ret < 0) {
-		kmem_cache_free(se_sess_cache, se_sess);
-		return ERR_PTR(ret);
-	}
+	transport_init_session(se_sess);
 	se_sess->sup_prot_ops = sup_prot_ops;

 	return se_sess;
@@ -444,8 +462,13 @@ target_setup_session(struct se_portal_group *tpg,
 		     int (*callback)(struct se_portal_group *,
				     struct se_session *, void *))
 {
+	struct target_cmd_counter *cmd_cnt;
 	struct se_session *sess;
+	int rc;

+	cmd_cnt = target_alloc_cmd_counter();
+	if (!cmd_cnt)
+		return ERR_PTR(-ENOMEM);
 	/*
 	 * If the fabric driver is using percpu-ida based pre allocation
 	 * of I/O descriptor tags, go ahead and perform that setup now..
@@ -455,29 +478,38 @@ target_setup_session(struct se_portal_group *tpg,
 	else
 		sess = transport_alloc_session(prot_op);

-	if (IS_ERR(sess))
-		return sess;
+	if (IS_ERR(sess)) {
+		rc = PTR_ERR(sess);
+		goto free_cnt;
+	}
+	sess->cmd_cnt = cmd_cnt;

 	sess->se_node_acl = core_tpg_check_initiator_node_acl(tpg,
					(unsigned char *)initiatorname);
 	if (!sess->se_node_acl) {
-		transport_free_session(sess);
-		return ERR_PTR(-EACCES);
+		rc = -EACCES;
+		goto free_sess;
 	}
 	/*
 	 * Go ahead and perform any remaining fabric setup that is
 	 * required before transport_register_session().
 	 */
 	if (callback != NULL) {
-		int rc = callback(tpg, sess, private);
-		if (rc) {
-			transport_free_session(sess);
-			return ERR_PTR(rc);
-		}
+		rc = callback(tpg, sess, private);
+		if (rc)
+			goto free_sess;
 	}

 	transport_register_session(tpg, sess->se_node_acl, sess, private);
 	return sess;
+
+free_sess:
+	transport_free_session(sess);
+	return ERR_PTR(rc);
+
+free_cnt:
+	target_free_cmd_counter(cmd_cnt);
+	return ERR_PTR(rc);
 }
 EXPORT_SYMBOL(target_setup_session);
@@ -602,7 +634,8 @@ void transport_free_session(struct se_session *se_sess)
 		sbitmap_queue_free(&se_sess->sess_tag_pool);
 		kvfree(se_sess->sess_cmd_map);
 	}
-	transport_uninit_session(se_sess);
+	if (se_sess->cmd_cnt)
+		target_free_cmd_counter(se_sess->cmd_cnt);
 	kmem_cache_free(se_sess_cache, se_sess);
 }
 EXPORT_SYMBOL(transport_free_session);
@@ -1412,14 +1445,12 @@ target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
  *
  * Preserves the value of @cmd->tag.
  */
-void __target_init_cmd(
-	struct se_cmd *cmd,
-	const struct target_core_fabric_ops *tfo,
-	struct se_session *se_sess,
-	u32 data_length,
-	int data_direction,
-	int task_attr,
-	unsigned char *sense_buffer, u64 unpacked_lun)
+void __target_init_cmd(struct se_cmd *cmd,
+		       const struct target_core_fabric_ops *tfo,
+		       struct se_session *se_sess, u32 data_length,
+		       int data_direction, int task_attr,
+		       unsigned char *sense_buffer, u64 unpacked_lun,
+		       struct target_cmd_counter *cmd_cnt)
 {
 	INIT_LIST_HEAD(&cmd->se_delayed_node);
 	INIT_LIST_HEAD(&cmd->se_qf_node);
@@ -1439,6 +1470,7 @@ void __target_init_cmd(
 	cmd->sam_task_attr = task_attr;
 	cmd->sense_buffer = sense_buffer;
 	cmd->orig_fe_lun = unpacked_lun;
+	cmd->cmd_cnt = cmd_cnt;

 	if (!(cmd->se_cmd_flags & SCF_USE_CPUID))
 		cmd->cpuid = raw_smp_processor_id();
@@ -1539,22 +1571,49 @@ target_cmd_parse_cdb(struct se_cmd *cmd)
 		return ret;

 	cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
-	atomic_long_inc(&cmd->se_lun->lun_stats.cmd_pdus);
+	/*
+	 * If this is the xcopy_lun then we won't have lun_stats since we
+	 * can't export them.
+	 */
+	if (cmd->se_lun->lun_stats)
+		this_cpu_inc(cmd->se_lun->lun_stats->cmd_pdus);
 	return 0;
 }
 EXPORT_SYMBOL(target_cmd_parse_cdb);

-/*
- * Used by fabric module frontends to queue tasks directly.
- * May only be used from process context.
- */
-int transport_handle_cdb_direct(
-	struct se_cmd *cmd)
+static int __target_submit(struct se_cmd *cmd)
 {
 	sense_reason_t ret;

 	might_sleep();

+	/*
+	 * Check if we need to delay processing because of ALUA
+	 * Active/NonOptimized primary access state..
+	 */
+	core_alua_check_nonop_delay(cmd);
+
+	if (cmd->t_data_nents != 0) {
+		/*
+		 * This is primarily a hack for udev and tcm loop which sends
+		 * INQUIRYs with a single page and expects the data to be
+		 * cleared.
+		 */
+		if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
+		    cmd->data_direction == DMA_FROM_DEVICE) {
+			struct scatterlist *sgl = cmd->t_data_sg;
+			unsigned char *buf = NULL;
+
+			BUG_ON(!sgl);
+
+			buf = kmap_local_page(sg_page(sgl));
+			if (buf) {
+				memset(buf + sgl->offset, 0, sgl->length);
+				kunmap_local(buf);
+			}
+		}
+	}
+
 	if (!cmd->se_lun) {
 		dump_stack();
 		pr_err("cmd->se_lun is NULL\n");
@@ -1582,7 +1641,6 @@ int transport_handle_cdb_direct(
 	transport_generic_request_failure(cmd, ret);
 	return 0;
 }
-EXPORT_SYMBOL(transport_handle_cdb_direct);

 sense_reason_t
 transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
@@ -1658,7 +1716,8 @@ int target_init_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
 	 * target_core_fabric_ops->queue_status() callback
 	 */
 	__target_init_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, data_length,
-			  data_dir, task_attr, sense, unpacked_lun);
+			  data_dir, task_attr, sense, unpacked_lun,
+			  se_sess->cmd_cnt);

 	/*
 	 * Obtain struct se_cmd->cmd_kref reference. A second kref_get here is
@@ -1749,53 +1808,6 @@ generic_fail:
 EXPORT_SYMBOL_GPL(target_submit_prep);

 /**
- * target_submit - perform final initialization and submit cmd to LIO core
- * @se_cmd: command descriptor to submit
- *
- * target_submit_prep must have been called on the cmd, and this must be
- * called from process context.
- */
-void target_submit(struct se_cmd *se_cmd)
-{
-	struct scatterlist *sgl = se_cmd->t_data_sg;
-	unsigned char *buf = NULL;
-
-	might_sleep();
-
-	if (se_cmd->t_data_nents != 0) {
-		BUG_ON(!sgl);
-		/*
-		 * A work-around for tcm_loop as some userspace code via
-		 * scsi-generic do not memset their associated read buffers,
-		 * so go ahead and do that here for type non-data CDBs.  Also
-		 * note that this is currently guaranteed to be a single SGL
-		 * for this case by target core in target_setup_cmd_from_cdb()
-		 * -> transport_generic_cmd_sequencer().
-		 */
-		if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
-		    se_cmd->data_direction == DMA_FROM_DEVICE) {
-			if (sgl)
-				buf = kmap(sg_page(sgl)) + sgl->offset;
-
-			if (buf) {
-				memset(buf, 0, sgl->length);
-				kunmap(sg_page(sgl));
-			}
-		}
-
-	}
-
-	/*
-	 * Check if we need to delay processing because of ALUA
-	 * Active/NonOptimized primary access state..
-	 */
-	core_alua_check_nonop_delay(se_cmd);
-
-	transport_handle_cdb_direct(se_cmd);
-}
-EXPORT_SYMBOL_GPL(target_submit);
-
-/**
  * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
  *
  * @se_cmd: command descriptor to submit
@@ -1890,7 +1902,7 @@ void target_queued_submit_work(struct work_struct *work)
 			se_plug = target_plug_device(se_dev);
 		}

-		target_submit(se_cmd);
+		__target_submit(se_cmd);
 	}

 	if (se_plug)
@@ -1901,7 +1913,7 @@ void target_queued_submit_work(struct work_struct *work)
  * target_queue_submission - queue the cmd to run on the LIO workqueue
  * @se_cmd: command descriptor to submit
  */
-void target_queue_submission(struct se_cmd *se_cmd)
+static void target_queue_submission(struct se_cmd *se_cmd)
 {
 	struct se_device *se_dev = se_cmd->se_dev;
 	int cpu = se_cmd->cpuid;
@@ -1911,7 +1923,35 @@ void target_queue_submission(struct se_cmd *se_cmd)
 	llist_add(&se_cmd->se_cmd_list, &sq->cmd_list);
 	queue_work_on(cpu, target_submission_wq, &sq->work);
 }
-EXPORT_SYMBOL_GPL(target_queue_submission);
+
+/**
+ * target_submit - perform final initialization and submit cmd to LIO core
+ * @se_cmd: command descriptor to submit
+ *
+ * target_submit_prep or something similar must have been called on the cmd,
+ * and this must be called from process context.
+ */
+int target_submit(struct se_cmd *se_cmd)
+{
+	const struct target_core_fabric_ops *tfo = se_cmd->se_sess->se_tpg->se_tpg_tfo;
+	struct se_dev_attrib *da = &se_cmd->se_dev->dev_attrib;
+	u8 submit_type;
+
+	if (da->submit_type == TARGET_FABRIC_DEFAULT_SUBMIT)
+		submit_type = tfo->default_submit_type;
+	else if (da->submit_type == TARGET_DIRECT_SUBMIT &&
+		 tfo->direct_submit_supp)
+		submit_type = TARGET_DIRECT_SUBMIT;
+	else
+		submit_type = TARGET_QUEUE_SUBMIT;
+
+	if (submit_type == TARGET_DIRECT_SUBMIT)
+		return __target_submit(se_cmd);
+
+	target_queue_submission(se_cmd);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(target_submit);

 static void target_complete_tmr_failure(struct work_struct *work)
 {
@@ -1953,7 +1993,8 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
 	BUG_ON(!se_tpg);

 	__target_init_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
-			  0, DMA_NONE, TCM_SIMPLE_TAG, sense, unpacked_lun);
+			  0, DMA_NONE, TCM_SIMPLE_TAG, sense, unpacked_lun,
+			  se_sess->cmd_cnt);
 	/*
 	 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req
 	 * allocation failure.
@@ -2177,6 +2218,7 @@ static int target_write_prot_action(struct se_cmd *cmd)
 static bool target_handle_task_attr(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
+	unsigned long flags;

 	if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
 		return false;
@@ -2189,13 +2231,10 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
 	 */
 	switch (cmd->sam_task_attr) {
 	case TCM_HEAD_TAG:
-		atomic_inc_mb(&dev->non_ordered);
 		pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n",
			 cmd->t_task_cdb[0]);
 		return false;
 	case TCM_ORDERED_TAG:
-		atomic_inc_mb(&dev->delayed_cmd_count);
-
 		pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n",
			 cmd->t_task_cdb[0]);
 		break;
@@ -2203,29 +2242,29 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
 		/*
 		 * For SIMPLE and UNTAGGED Task Attribute commands
 		 */
-		atomic_inc_mb(&dev->non_ordered);
-
-		if (atomic_read(&dev->delayed_cmd_count) == 0)
+retry:
+		if (percpu_ref_tryget_live(&dev->non_ordered))
 			return false;
+
 		break;
 	}

-	if (cmd->sam_task_attr != TCM_ORDERED_TAG) {
-		atomic_inc_mb(&dev->delayed_cmd_count);
-		/*
-		 * We will account for this when we dequeue from the delayed
-		 * list.
-		 */
-		atomic_dec_mb(&dev->non_ordered);
+	spin_lock_irqsave(&dev->delayed_cmd_lock, flags);
+	if (cmd->sam_task_attr == TCM_SIMPLE_TAG &&
+	    !percpu_ref_is_dying(&dev->non_ordered)) {
+		spin_unlock_irqrestore(&dev->delayed_cmd_lock, flags);
+		/* We raced with the last ordered completion so retry. */
+		goto retry;
+	} else if (!percpu_ref_is_dying(&dev->non_ordered)) {
+		percpu_ref_kill(&dev->non_ordered);
 	}

-	spin_lock_irq(&cmd->t_state_lock);
+	spin_lock(&cmd->t_state_lock);
 	cmd->transport_state &= ~CMD_T_SENT;
-	spin_unlock_irq(&cmd->t_state_lock);
+	spin_unlock(&cmd->t_state_lock);

-	spin_lock(&dev->delayed_cmd_lock);
 	list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
-	spin_unlock(&dev->delayed_cmd_lock);
+	spin_unlock_irqrestore(&dev->delayed_cmd_lock, flags);

 	pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD listn",
		 cmd->t_task_cdb[0], cmd->sam_task_attr);
@@ -2277,41 +2316,52 @@ void target_do_delayed_work(struct work_struct *work)
 	while (!dev->ordered_sync_in_progress) {
 		struct se_cmd *cmd;

-		if (list_empty(&dev->delayed_cmd_list))
+		/*
+		 * We can be woken up early/late due to races or the
+		 * extra wake up we do when adding commands to the list.
+		 * We check for both cases here.
+		 */
+		if (list_empty(&dev->delayed_cmd_list) ||
+		    !percpu_ref_is_zero(&dev->non_ordered))
 			break;

 		cmd = list_entry(dev->delayed_cmd_list.next,
				 struct se_cmd, se_delayed_node);
+		cmd->se_cmd_flags |= SCF_TASK_ORDERED_SYNC;
+		cmd->transport_state |= CMD_T_SENT;

-		if (cmd->sam_task_attr == TCM_ORDERED_TAG) {
-			/*
-			 * Check if we started with:
-			 * [ordered] [simple] [ordered]
-			 * and we are now at the last ordered so we have to wait
-			 * for the simple cmd.
-			 */
-			if (atomic_read(&dev->non_ordered) > 0)
-				break;
-
-			dev->ordered_sync_in_progress = true;
-		}
+		dev->ordered_sync_in_progress = true;

 		list_del(&cmd->se_delayed_node);
-		atomic_dec_mb(&dev->delayed_cmd_count);
 		spin_unlock(&dev->delayed_cmd_lock);

-		if (cmd->sam_task_attr != TCM_ORDERED_TAG)
-			atomic_inc_mb(&dev->non_ordered);
-
-		cmd->transport_state |= CMD_T_SENT;
-
 		__target_execute_cmd(cmd, true);

 		spin_lock(&dev->delayed_cmd_lock);
 	}
 	spin_unlock(&dev->delayed_cmd_lock);
 }

+static void transport_complete_ordered_sync(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->delayed_cmd_lock, flags);
+	dev->dev_cur_ordered_id++;
+
+	pr_debug("Incremented dev_cur_ordered_id: %u for type %d\n",
+		 dev->dev_cur_ordered_id, cmd->sam_task_attr);
+
+	dev->ordered_sync_in_progress = false;
+
+	if (list_empty(&dev->delayed_cmd_list))
+		percpu_ref_resurrect(&dev->non_ordered);
+	else
+		schedule_work(&dev->delayed_cmd_work);
+
+	spin_unlock_irqrestore(&dev->delayed_cmd_lock, flags);
+}
+
 /*
  * Called from I/O completion to determine which dormant/delayed
  * and ordered cmds need to have their tasks added to the execution queue.
@@ -2324,30 +2374,24 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
 		return;

 	if (!(cmd->se_cmd_flags & SCF_TASK_ATTR_SET))
-		goto restart;
-
-	if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
-		atomic_dec_mb(&dev->non_ordered);
-		dev->dev_cur_ordered_id++;
-	} else if (cmd->sam_task_attr == TCM_HEAD_TAG) {
-		atomic_dec_mb(&dev->non_ordered);
-		dev->dev_cur_ordered_id++;
-		pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n",
-			 dev->dev_cur_ordered_id);
-	} else if (cmd->sam_task_attr == TCM_ORDERED_TAG) {
-		spin_lock(&dev->delayed_cmd_lock);
-		dev->ordered_sync_in_progress = false;
-		spin_unlock(&dev->delayed_cmd_lock);
+		return;

-		dev->dev_cur_ordered_id++;
-		pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
-			 dev->dev_cur_ordered_id);
-	}
 	cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET;

-restart:
-	if (atomic_read(&dev->delayed_cmd_count) > 0)
-		schedule_work(&dev->delayed_cmd_work);
+	if (cmd->se_cmd_flags & SCF_TASK_ORDERED_SYNC) {
+		transport_complete_ordered_sync(cmd);
+		return;
+	}
+
+	switch (cmd->sam_task_attr) {
+	case TCM_SIMPLE_TAG:
+		percpu_ref_put(&dev->non_ordered);
+		break;
+	case TCM_ORDERED_TAG:
+		/* All ordered should have been executed as sync */
+		WARN_ON(1);
+		break;
+	}
 }

 static void transport_complete_qf(struct se_cmd *cmd)
@@ -2558,8 +2602,9 @@ queue_rsp:
 		    !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL))
 			goto queue_status;

-		atomic_long_add(cmd->data_length,
-				&cmd->se_lun->lun_stats.tx_data_octets);
+		if (cmd->se_lun->lun_stats)
+			this_cpu_add(cmd->se_lun->lun_stats->tx_data_octets,
+				     cmd->data_length);
 		/*
 		 * Perform READ_STRIP of PI using software emulation when
 		 * backend had PI enabled, if the transport will not be
@@ -2582,14 +2627,16 @@ queue_rsp:
 			goto queue_full;
 		break;
 	case DMA_TO_DEVICE:
-		atomic_long_add(cmd->data_length,
-				&cmd->se_lun->lun_stats.rx_data_octets);
+		if (cmd->se_lun->lun_stats)
+			this_cpu_add(cmd->se_lun->lun_stats->rx_data_octets,
+				     cmd->data_length);
 		/*
 		 * Check if we need to send READ payload for BIDI-COMMAND
 		 */
 		if (cmd->se_cmd_flags & SCF_BIDI) {
-			atomic_long_add(cmd->data_length,
-					&cmd->se_lun->lun_stats.tx_data_octets);
+			if (cmd->se_lun->lun_stats)
+				this_cpu_add(cmd->se_lun->lun_stats->tx_data_octets,
+					     cmd->data_length);
 			ret = cmd->se_tfo->queue_data_in(cmd);
 			if (ret)
 				goto queue_full;
@@ -2957,7 +3004,6 @@ EXPORT_SYMBOL(transport_generic_free_cmd);
  */
 int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
 {
-	struct se_session *se_sess = se_cmd->se_sess;
 	int ret = 0;

 	/*
@@ -2970,9 +3016,14 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
 		se_cmd->se_cmd_flags |= SCF_ACK_KREF;
 	}

-	if (!percpu_ref_tryget_live(&se_sess->cmd_count))
-		ret = -ESHUTDOWN;
-
+	/*
+	 * Users like xcopy do not use counters since they never do a stop
+	 * and wait.
+	 */
+	if (se_cmd->cmd_cnt) {
+		if (!percpu_ref_tryget_live(&se_cmd->cmd_cnt->refcnt))
+			ret = -ESHUTDOWN;
+	}
 	if (ret && ack_kref)
 		target_put_sess_cmd(se_cmd);

@@ -2993,7 +3044,7 @@ static void target_free_cmd_mem(struct se_cmd *cmd)
 static void target_release_cmd_kref(struct kref *kref)
 {
 	struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
-	struct se_session *se_sess = se_cmd->se_sess;
+	struct target_cmd_counter *cmd_cnt = se_cmd->cmd_cnt;
 	struct completion *free_compl = se_cmd->free_compl;
 	struct completion *abrt_compl = se_cmd->abrt_compl;

@@ -3004,7 +3055,8 @@ static void target_release_cmd_kref(struct kref *kref)
 	if (abrt_compl)
 		complete(abrt_compl);

-	percpu_ref_put(&se_sess->cmd_count);
+	if (cmd_cnt)
+		percpu_ref_put(&cmd_cnt->refcnt);
 }

 /**
@@ -3123,46 +3175,67 @@ void target_show_cmd(const char *pfx, struct se_cmd *cmd)
 }
 EXPORT_SYMBOL(target_show_cmd);

-static void target_stop_session_confirm(struct percpu_ref *ref)
+static void target_stop_cmd_counter_confirm(struct percpu_ref *ref)
+{
+	struct target_cmd_counter *cmd_cnt = container_of(ref,
+							  struct target_cmd_counter,
+							  refcnt);
+	complete_all(&cmd_cnt->stop_done);
+}
+
+/**
+ * target_stop_cmd_counter - Stop new IO from being added to the counter.
+ * @cmd_cnt: counter to stop
+ */
+void target_stop_cmd_counter(struct target_cmd_counter *cmd_cnt)
 {
-	struct se_session *se_sess = container_of(ref, struct se_session,
-						  cmd_count);
-	complete_all(&se_sess->stop_done);
+	pr_debug("Stopping command counter.\n");
+	if (!atomic_cmpxchg(&cmd_cnt->stopped, 0, 1))
+		percpu_ref_kill_and_confirm(&cmd_cnt->refcnt,
+					    target_stop_cmd_counter_confirm);
 }
+EXPORT_SYMBOL_GPL(target_stop_cmd_counter);

 /**
  * target_stop_session - Stop new IO from being queued on the session.
- * @se_sess:	session to stop
+ * @se_sess: session to stop
  */
 void target_stop_session(struct se_session *se_sess)
 {
-	pr_debug("Stopping session queue.\n");
-	if (atomic_cmpxchg(&se_sess->stopped, 0, 1) == 0)
-		percpu_ref_kill_and_confirm(&se_sess->cmd_count,
-					    target_stop_session_confirm);
+	target_stop_cmd_counter(se_sess->cmd_cnt);
 }
 EXPORT_SYMBOL(target_stop_session);

 /**
- * target_wait_for_sess_cmds - Wait for outstanding commands
- * @se_sess: session to wait for active I/O
+ * target_wait_for_cmds - Wait for outstanding cmds.
+ * @cmd_cnt: counter to wait for active I/O for.
  */
-void target_wait_for_sess_cmds(struct se_session *se_sess)
+void target_wait_for_cmds(struct target_cmd_counter *cmd_cnt)
 {
 	int ret;

-	WARN_ON_ONCE(!atomic_read(&se_sess->stopped));
+	WARN_ON_ONCE(!atomic_read(&cmd_cnt->stopped));

 	do {
 		pr_debug("Waiting for running cmds to complete.\n");
-		ret = wait_event_timeout(se_sess->cmd_count_wq,
-				percpu_ref_is_zero(&se_sess->cmd_count),
-				180 * HZ);
+		ret = wait_event_timeout(cmd_cnt->refcnt_wq,
+					 percpu_ref_is_zero(&cmd_cnt->refcnt),
+					 180 * HZ);
 	} while (ret <= 0);

-	wait_for_completion(&se_sess->stop_done);
+	wait_for_completion(&cmd_cnt->stop_done);
 	pr_debug("Waiting for cmds done.\n");
 }
+EXPORT_SYMBOL_GPL(target_wait_for_cmds);
+
+/**
+ * target_wait_for_sess_cmds - Wait for outstanding commands
+ * @se_sess: session to wait for active I/O
+ */
+void target_wait_for_sess_cmds(struct se_session *se_sess)
+{
+	target_wait_for_cmds(se_sess->cmd_cnt);
+}
 EXPORT_SYMBOL(target_wait_for_sess_cmds);

 /*
@@ -3531,8 +3604,7 @@ static void target_tmr_work(struct work_struct *work)
 		tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
					 TMR_FUNCTION_REJECTED;
 		if (tmr->response == TMR_FUNCTION_COMPLETE) {
-			target_ua_allocate_lun(cmd->se_sess->se_node_acl,
-					       cmd->orig_fe_lun, 0x29,
+			target_dev_ua_allocate(dev, 0x29,
					       ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED);
 		}
 		break;
@@ -3568,6 +3640,10 @@ int transport_generic_handle_tmr(
 	unsigned long flags;
 	bool aborted = false;

+	spin_lock_irqsave(&cmd->se_dev->se_tmr_lock, flags);
+	list_add_tail(&cmd->se_tmr_req->tmr_list, &cmd->se_dev->dev_tmr_list);
+	spin_unlock_irqrestore(&cmd->se_dev->se_tmr_lock, flags);
+
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
 	if (cmd->transport_state & CMD_T_ABORTED) {
 		aborted = true;
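Note on the command-counter rework above: the per-session percpu_ref (se_sess->cmd_count) becomes a standalone struct target_cmd_counter, so submitters such as xcopy, which never stop and wait, can run with no counter at all (se_cmd->cmd_cnt == NULL). The sketch below shows the lifecycle the new exported helpers imply; it is illustrative only, and the my_fabric_* names are assumptions, not part of this patch:

```c
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

/* Hypothetical fabric driver that manages its own session rather than
 * using target_setup_session(): attach a counter so the session gets
 * stop-and-wait semantics on shutdown. */
static int my_fabric_init_session(struct se_session *se_sess)
{
	transport_init_session(se_sess);	/* can no longer fail */

	se_sess->cmd_cnt = target_alloc_cmd_counter();
	if (!se_sess->cmd_cnt)
		return -ENOMEM;
	return 0;
}

static void my_fabric_shutdown_session(struct se_session *se_sess)
{
	/* Kills cmd_cnt->refcnt, so target_get_sess_cmd() fails new
	 * commands with -ESHUTDOWN. */
	target_stop_session(se_sess);
	/* Blocks until cmd_cnt->refcnt drops to zero. */
	target_wait_for_sess_cmds(se_sess);
	/* Frees se_sess->cmd_cnt, if set, via target_free_cmd_counter(). */
	transport_free_session(se_sess);
}
```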
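target_submit() also changes role: rather than fabrics calling transport_handle_cdb_direct() or target_queue_submission() themselves, target_submit() now picks direct execution (__target_submit()) or workqueue submission (target_queue_submission(), now static) from the device's submit_type attribute and the fabric's default_submit_type/direct_submit_supp declarations. A rough sketch of a fabric receive path under this API, again with hypothetical my_fabric_* glue around the exported target_init_cmd()/target_submit_prep() helpers:

```c
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

static void my_fabric_handle_cmd(struct se_session *se_sess,
				 struct se_cmd *se_cmd, unsigned char *cdb,
				 u64 unpacked_lun, u32 data_length,
				 int task_attr, int data_dir,
				 unsigned char *sense)
{
	/* Takes the cmd_cnt reference; fails if the counter was stopped. */
	if (target_init_cmd(se_cmd, se_sess, sense, unpacked_lun, data_length,
			    task_attr, data_dir, TARGET_SCF_ACK_KREF))
		return;

	/* No pre-allocated SGLs in this example; the core allocates them. */
	if (target_submit_prep(se_cmd, cdb, NULL, 0, NULL, 0, NULL, 0,
			       GFP_KERNEL))
		return;

	/* Runs __target_submit() in this context for direct submit, or
	 * queues the cmd to the per-CPU target_submission_wq. */
	target_submit(se_cmd);
}
```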
