Diffstat (limited to 'drivers/target/target_core_device.c')
-rw-r--r--  drivers/target/target_core_device.c  267
1 file changed, 151 insertions(+), 116 deletions(-)
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 44bb380e7390..8ccb8541db1c 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -21,7 +21,7 @@
 #include <linux/in.h>
 #include <linux/export.h>
 #include <linux/t10-pi.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
 #include <net/sock.h>
 #include <net/tcp.h>
 #include <scsi/scsi_common.h>
@@ -37,7 +37,6 @@
 #include "target_core_ua.h"
 
 static DEFINE_MUTEX(device_mutex);
-static LIST_HEAD(device_list);
 static DEFINE_IDR(devices_idr);
 
 static struct se_hba *lun0_hba;
@@ -56,14 +55,14 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd)
 	rcu_read_lock();
 	deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
 	if (deve) {
-		atomic_long_inc(&deve->total_cmds);
+		this_cpu_inc(deve->stats->total_cmds);
 		if (se_cmd->data_direction == DMA_TO_DEVICE)
-			atomic_long_add(se_cmd->data_length,
-					&deve->write_bytes);
+			this_cpu_add(deve->stats->write_bytes,
+				     se_cmd->data_length);
 		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
-			atomic_long_add(se_cmd->data_length,
-					&deve->read_bytes);
+			this_cpu_add(deve->stats->read_bytes,
+				     se_cmd->data_length);
 
 		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
 		    deve->lun_access_ro) {
@@ -75,7 +74,7 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd)
 			return TCM_WRITE_PROTECTED;
 		}
 
-		se_lun = rcu_dereference(deve->se_lun);
+		se_lun = deve->se_lun;
 
 		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
 			se_lun = NULL;
@@ -127,14 +126,14 @@ out_unlock:
 	 * target_core_fabric_configfs.c:target_fabric_port_release
 	 */
 	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
-	atomic_long_inc(&se_cmd->se_dev->num_cmds);
+	this_cpu_inc(se_cmd->se_dev->stats->total_cmds);
 	if (se_cmd->data_direction == DMA_TO_DEVICE)
-		atomic_long_add(se_cmd->data_length,
-				&se_cmd->se_dev->write_bytes);
+		this_cpu_add(se_cmd->se_dev->stats->write_bytes,
+			     se_cmd->data_length);
 	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
-		atomic_long_add(se_cmd->data_length,
-				&se_cmd->se_dev->read_bytes);
+		this_cpu_add(se_cmd->se_dev->stats->read_bytes,
+			     se_cmd->data_length);
 
 	return ret;
 }
@@ -147,12 +146,11 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd)
 	struct se_session *se_sess = se_cmd->se_sess;
 	struct se_node_acl *nacl = se_sess->se_node_acl;
 	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
-	unsigned long flags;
 
 	rcu_read_lock();
 	deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
 	if (deve) {
-		se_lun = rcu_dereference(deve->se_lun);
+		se_lun = deve->se_lun;
 
 		if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
 			se_lun = NULL;
@@ -178,10 +176,6 @@ out_unlock:
 	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
 	se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);
 
-	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
-	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
-	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);
-
 	return 0;
 }
 EXPORT_SYMBOL(transport_lookup_tmr_lun);
@@ -216,14 +210,14 @@ struct se_dev_entry *core_get_se_deve_from_rtpi(
 	rcu_read_lock();
 	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
-		lun = rcu_dereference(deve->se_lun);
+		lun = deve->se_lun;
 		if (!lun) {
 			pr_err("%s device entries device pointer is"
 				" NULL, but Initiator has access.\n",
 				tpg->se_tpg_tfo->fabric_name);
 			continue;
 		}
-		if (lun->lun_rtpi != rtpi)
+		if (lun->lun_tpg->tpg_rtpi != rtpi)
 			continue;
 
 		kref_get(&deve->pr_kref);
@@ -243,11 +237,8 @@ void core_free_device_list_for_node(
 	struct se_dev_entry *deve;
 
 	mutex_lock(&nacl->lun_entry_mutex);
-	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
-		struct se_lun *lun = rcu_dereference_check(deve->se_lun,
-					lockdep_is_held(&nacl->lun_entry_mutex));
-		core_disable_device_list_for_node(lun, deve, nacl, tpg);
-	}
+	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
+		core_disable_device_list_for_node(deve->se_lun, deve, nacl, tpg);
 	mutex_unlock(&nacl->lun_entry_mutex);
 }
 
@@ -287,6 +278,25 @@ void target_pr_kref_release(struct kref *kref)
 	complete(&deve->pr_comp);
 }
 
+/*
+ * Establish UA condition on SCSI device - all LUNs
+ */
+void target_dev_ua_allocate(struct se_device *dev, u8 asc, u8 ascq)
+{
+	struct se_dev_entry *se_deve;
+	struct se_lun *lun;
+
+	spin_lock(&dev->se_port_lock);
+	list_for_each_entry(lun, &dev->dev_sep_list, lun_dev_link) {
+
+		spin_lock(&lun->lun_deve_lock);
+		list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link)
+			core_scsi3_ua_allocate(se_deve, asc, ascq);
+		spin_unlock(&lun->lun_deve_lock);
+	}
+	spin_unlock(&dev->se_port_lock);
+}
+
 static void
 target_luns_data_has_changed(struct se_node_acl *nacl, struct se_dev_entry *new,
 			     bool skip_new)
@@ -312,6 +322,7 @@ int core_enable_device_list_for_node(
 	struct se_portal_group *tpg)
 {
 	struct se_dev_entry *orig, *new;
+	int ret = 0;
 
 	new = kzalloc(sizeof(*new), GFP_KERNEL);
 	if (!new) {
@@ -319,6 +330,12 @@ int core_enable_device_list_for_node(
 		return -ENOMEM;
 	}
 
+	new->stats = alloc_percpu(struct se_dev_entry_io_stats);
+	if (!new->stats) {
+		ret = -ENOMEM;
+		goto free_deve;
+	}
+
 	spin_lock_init(&new->ua_lock);
 	INIT_LIST_HEAD(&new->ua_list);
 	INIT_LIST_HEAD(&new->lun_link);
@@ -334,16 +351,15 @@ int core_enable_device_list_for_node(
 	mutex_lock(&nacl->lun_entry_mutex);
 	orig = target_nacl_find_deve(nacl, mapped_lun);
 	if (orig && orig->se_lun) {
-		struct se_lun *orig_lun = rcu_dereference_check(orig->se_lun,
-					lockdep_is_held(&nacl->lun_entry_mutex));
+		struct se_lun *orig_lun = orig->se_lun;
 
 		if (orig_lun != lun) {
 			pr_err("Existing orig->se_lun doesn't match new lun"
 			       " for dynamic -> explicit NodeACL conversion:"
 			       " %s\n", nacl->initiatorname);
 			mutex_unlock(&nacl->lun_entry_mutex);
-			kfree(new);
-			return -EINVAL;
+			ret = -EINVAL;
+			goto free_stats;
 		}
 
 		if (orig->se_lun_acl != NULL) {
 			pr_warn_ratelimited("Detected existing explicit"
@@ -351,12 +367,12 @@ int core_enable_device_list_for_node(
 				" mapped_lun: %llu, failing\n",
 				 nacl->initiatorname, mapped_lun);
 			mutex_unlock(&nacl->lun_entry_mutex);
-			kfree(new);
-			return -EINVAL;
+			ret = -EINVAL;
+			goto free_stats;
 		}
 
-		rcu_assign_pointer(new->se_lun, lun);
-		rcu_assign_pointer(new->se_lun_acl, lun_acl);
+		new->se_lun = lun;
+		new->se_lun_acl = lun_acl;
 		hlist_del_rcu(&orig->link);
 		hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
 		mutex_unlock(&nacl->lun_entry_mutex);
@@ -374,8 +390,8 @@ int core_enable_device_list_for_node(
 		return 0;
 	}
 
-	rcu_assign_pointer(new->se_lun, lun);
-	rcu_assign_pointer(new->se_lun_acl, lun_acl);
+	new->se_lun = lun;
+	new->se_lun_acl = lun_acl;
 	hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
 	mutex_unlock(&nacl->lun_entry_mutex);
 
@@ -385,6 +401,20 @@ int core_enable_device_list_for_node(
 	target_luns_data_has_changed(nacl, new, true);
 
 	return 0;
+
+free_stats:
+	free_percpu(new->stats);
+free_deve:
+	kfree(new);
+	return ret;
+}
+
+static void target_free_dev_entry(struct rcu_head *head)
+{
+	struct se_dev_entry *deve = container_of(head, struct se_dev_entry,
+						 rcu_head);
+	free_percpu(deve->stats);
+	kfree(deve);
 }
 
 void core_disable_device_list_for_node(
@@ -434,10 +464,7 @@ void core_disable_device_list_for_node(
 	kref_put(&orig->pr_kref, target_pr_kref_release);
 	wait_for_completion(&orig->pr_comp);
 
-	rcu_assign_pointer(orig->se_lun, NULL);
-	rcu_assign_pointer(orig->se_lun_acl, NULL);
-
-	kfree_rcu(orig, rcu_head);
+	call_rcu(&orig->rcu_head, target_free_dev_entry);
 
 	core_scsi3_free_pr_reg_from_nacl(dev, nacl);
 	target_luns_data_has_changed(nacl, NULL, false);
@@ -457,10 +484,7 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
 		mutex_lock(&nacl->lun_entry_mutex);
 		hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
-			struct se_lun *tmp_lun = rcu_dereference_check(deve->se_lun,
-					lockdep_is_held(&nacl->lun_entry_mutex));
-
-			if (lun != tmp_lun)
+			if (lun != deve->se_lun)
 				continue;
 
 			core_disable_device_list_for_node(lun, deve, nacl, tpg);
@@ -470,47 +494,6 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
 	mutex_unlock(&tpg->acl_node_mutex);
 }
 
-int core_alloc_rtpi(struct se_lun *lun, struct se_device *dev)
-{
-	struct se_lun *tmp;
-
-	spin_lock(&dev->se_port_lock);
-	if (dev->export_count == 0x0000ffff) {
-		pr_warn("Reached dev->dev_port_count =="
-			" 0x0000ffff\n");
-		spin_unlock(&dev->se_port_lock);
-		return -ENOSPC;
-	}
-again:
-	/*
-	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
-	 * Here is the table from spc4r17 section 7.7.3.8.
-	 *
-	 * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
-	 *
-	 * Code		Description
-	 * 0h		Reserved
-	 * 1h		Relative port 1, historically known as port A
-	 * 2h		Relative port 2, historically known as port B
-	 * 3h to FFFFh	Relative port 3 through 65 535
-	 */
-	lun->lun_rtpi = dev->dev_rpti_counter++;
-	if (!lun->lun_rtpi)
-		goto again;
-
-	list_for_each_entry(tmp, &dev->dev_sep_list, lun_dev_link) {
-		/*
-		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
-		 * for 16-bit wrap..
-		 */
-		if (lun->lun_rtpi == tmp->lun_rtpi)
-			goto again;
-	}
-	spin_unlock(&dev->se_port_lock);
-
-	return 0;
-}
-
 static void se_release_vpd_for_dev(struct se_device *dev)
 {
 	struct t10_vpd *vpd, *vpd_tmp;
@@ -717,6 +700,18 @@ static void scsi_dump_inquiry(struct se_device *dev)
 	pr_debug("  Type:   %s ", scsi_device_type(device_type));
 }
 
+static void target_non_ordered_release(struct percpu_ref *ref)
+{
+	struct se_device *dev = container_of(ref, struct se_device,
+					     non_ordered);
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->delayed_cmd_lock, flags);
+	if (!list_empty(&dev->delayed_cmd_list))
+		schedule_work(&dev->delayed_cmd_work);
+	spin_unlock_irqrestore(&dev->delayed_cmd_lock, flags);
+}
+
 struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
 {
 	struct se_device *dev;
@@ -727,11 +722,13 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
 	if (!dev)
 		return NULL;
 
+	dev->stats = alloc_percpu(struct se_dev_io_stats);
+	if (!dev->stats)
+		goto free_device;
+
 	dev->queues = kcalloc(nr_cpu_ids, sizeof(*dev->queues), GFP_KERNEL);
-	if (!dev->queues) {
-		dev->transport->free_device(dev);
-		return NULL;
-	}
+	if (!dev->queues)
+		goto free_stats;
 
 	dev->queue_cnt = nr_cpu_ids;
 	for (i = 0; i < dev->queue_cnt; i++) {
@@ -745,6 +742,10 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
 		INIT_WORK(&q->sq.work, target_queued_submit_work);
 	}
 
+	if (percpu_ref_init(&dev->non_ordered, target_non_ordered_release,
+			    PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
+		goto free_queues;
+
 	dev->se_hba = hba;
 	dev->transport = hba->backend->ops;
 	dev->transport_flags = dev->transport->transport_flags_default;
@@ -773,6 +774,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
 	spin_lock_init(&dev->t10_alua.lba_map_lock);
 
 	INIT_WORK(&dev->delayed_cmd_work, target_do_delayed_work);
+	mutex_init(&dev->lun_reset_mutex);
 
 	dev->t10_wwn.t10_dev = dev;
 	/*
@@ -795,6 +797,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
 	dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
 	dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
 	dev->dev_attrib.emulate_pr = DA_EMULATE_PR;
+	dev->dev_attrib.emulate_rsoc = DA_EMULATE_RSOC;
 	dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
 	dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
 	dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
@@ -809,7 +812,9 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
 	dev->dev_attrib.unmap_zeroes_data =
 		DA_UNMAP_ZEROES_DATA_DEFAULT;
 	dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
+	dev->dev_attrib.submit_type = TARGET_FABRIC_DEFAULT_SUBMIT;
 
+	/* Skip allocating lun_stats since we can't export them. */
 	xcopy_lun = &dev->xcopy_lun;
 	rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
 	init_completion(&xcopy_lun->lun_shutdown_comp);
@@ -819,41 +824,64 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
 	xcopy_lun->lun_tpg = &xcopy_pt_tpg;
 
 	/* Preload the default INQUIRY const values */
-	strlcpy(dev->t10_wwn.vendor, "LIO-ORG", sizeof(dev->t10_wwn.vendor));
-	strlcpy(dev->t10_wwn.model, dev->transport->inquiry_prod,
+	strscpy(dev->t10_wwn.vendor, "LIO-ORG", sizeof(dev->t10_wwn.vendor));
+	strscpy(dev->t10_wwn.model, dev->transport->inquiry_prod,
 		sizeof(dev->t10_wwn.model));
-	strlcpy(dev->t10_wwn.revision, dev->transport->inquiry_rev,
+	strscpy(dev->t10_wwn.revision, dev->transport->inquiry_rev,
 		sizeof(dev->t10_wwn.revision));
 
 	return dev;
+
+free_queues:
+	kfree(dev->queues);
+free_stats:
+	free_percpu(dev->stats);
+free_device:
+	hba->backend->ops->free_device(dev);
+	return NULL;
 }
 
+void target_configure_write_atomic_from_bdev(struct se_dev_attrib *attrib,
+					     struct block_device *bdev)
+{
+	struct request_queue *q = bdev_get_queue(bdev);
+	int block_size = bdev_logical_block_size(bdev);
+
+	if (!bdev_can_atomic_write(bdev))
+		return;
+
+	attrib->atomic_max_len = queue_atomic_write_max_bytes(q) / block_size;
+	attrib->atomic_granularity = attrib->atomic_alignment =
+		queue_atomic_write_unit_min_bytes(q) / block_size;
+	attrib->atomic_max_with_boundary = 0;
+	attrib->atomic_max_boundary = 0;
+}
+EXPORT_SYMBOL_GPL(target_configure_write_atomic_from_bdev);
+
 /*
- * Check if the underlying struct block_device request_queue supports
- * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
- * in ATA and we need to set TPE=1
+ * Check if the underlying struct block_device supports discard and if yes
+ * configure the UNMAP parameters.
  */
-bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
-				       struct request_queue *q)
+bool target_configure_unmap_from_bdev(struct se_dev_attrib *attrib,
+				      struct block_device *bdev)
 {
-	int block_size = queue_logical_block_size(q);
+	int block_size = bdev_logical_block_size(bdev);
 
-	if (!blk_queue_discard(q))
+	if (!bdev_max_discard_sectors(bdev))
 		return false;
 
 	attrib->max_unmap_lba_count =
-		q->limits.max_discard_sectors >> (ilog2(block_size) - 9);
+		bdev_max_discard_sectors(bdev) >> (ilog2(block_size) - 9);
 	/*
 	 * Currently hardcoded to 1 in Linux/SCSI code..
 	 */
 	attrib->max_unmap_block_desc_count = 1;
-	attrib->unmap_granularity = q->limits.discard_granularity / block_size;
-	attrib->unmap_granularity_alignment = q->limits.discard_alignment /
-		block_size;
-	attrib->unmap_zeroes_data = !!(q->limits.max_write_zeroes_sectors);
+	attrib->unmap_granularity = bdev_discard_granularity(bdev) / block_size;
+	attrib->unmap_granularity_alignment =
+		bdev_discard_alignment(bdev) / block_size;
 	return true;
 }
-EXPORT_SYMBOL(target_configure_unmap_from_queue);
+EXPORT_SYMBOL(target_configure_unmap_from_bdev);
 
 /*
  * Convert from blocksize advertised to the initiator to the 512 byte
@@ -875,7 +903,6 @@ sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
 EXPORT_SYMBOL(target_to_linux_sector);
 
 struct devices_idr_iter {
-	struct config_item *prev_item;
 	int (*fn)(struct se_device *dev, void *data);
 	void *data;
 };
@@ -885,11 +912,9 @@ static int target_devices_idr_iter(int id, void *p, void *data)
 {
 	struct devices_idr_iter *iter = data;
 	struct se_device *dev = p;
+	struct config_item *item;
 	int ret;
 
-	config_item_put(iter->prev_item);
-	iter->prev_item = NULL;
-
 	/*
 	 * We add the device early to the idr, so it can be used
 	 * by backend modules during configuration. We do not want
@@ -899,12 +924,13 @@ static int target_devices_idr_iter(int id, void *p, void *data)
 	if (!target_dev_configured(dev))
 		return 0;
 
-	iter->prev_item = config_item_get_unless_zero(&dev->dev_group.cg_item);
-	if (!iter->prev_item)
+	item = config_item_get_unless_zero(&dev->dev_group.cg_item);
+	if (!item)
 		return 0;
 	mutex_unlock(&device_mutex);
 
 	ret = iter->fn(dev, iter->data);
+	config_item_put(item);
 
 	mutex_lock(&device_mutex);
 	return ret;
@@ -927,7 +953,6 @@ int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
 	mutex_lock(&device_mutex);
 	ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter);
 	mutex_unlock(&device_mutex);
-	config_item_put(iter.prev_item);
 	return ret;
 }
 
@@ -962,6 +987,12 @@ int target_configure_device(struct se_device *dev)
 	ret = dev->transport->configure_device(dev);
 	if (ret)
 		goto out_free_index;
+
+	if (dev->transport->configure_unmap &&
+	    dev->transport->configure_unmap(dev)) {
+		pr_debug("Discard support available, but disabled by default.\n");
+	}
+
 	/*
	 * XXX: there is not much point to have two different values here..
 	 */
@@ -1014,6 +1045,9 @@ void target_free_device(struct se_device *dev)
 	WARN_ON(!list_empty(&dev->dev_sep_list));
 
+	percpu_ref_exit(&dev->non_ordered);
+	cancel_work_sync(&dev->delayed_cmd_work);
+
 	if (target_dev_configured(dev)) {
 		dev->transport->destroy_device(dev);
@@ -1035,6 +1069,7 @@ void target_free_device(struct se_device *dev)
 		dev->transport->free_prot(dev);
 
 	kfree(dev->queues);
+	free_percpu(dev->stats);
 	dev->transport->free_device(dev);
 }
 
@@ -1112,8 +1147,8 @@ passthrough_parse_cdb(struct se_cmd *cmd,
 	if (!dev->dev_attrib.emulate_pr &&
 	    ((cdb[0] == PERSISTENT_RESERVE_IN) ||
 	     (cdb[0] == PERSISTENT_RESERVE_OUT) ||
-	     (cdb[0] == RELEASE || cdb[0] == RELEASE_10) ||
-	     (cdb[0] == RESERVE || cdb[0] == RESERVE_10))) {
+	     (cdb[0] == RELEASE_6 || cdb[0] == RELEASE_10) ||
+	     (cdb[0] == RESERVE_6 || cdb[0] == RESERVE_10))) {
 		return TCM_UNSUPPORTED_SCSI_OPCODE;
 	}
 
@@ -1135,7 +1170,7 @@ passthrough_parse_cdb(struct se_cmd *cmd,
 		return target_cmd_size_check(cmd, size);
 	}
 
-	if (cdb[0] == RELEASE || cdb[0] == RELEASE_10) {
+	if (cdb[0] == RELEASE_6 || cdb[0] == RELEASE_10) {
 		cmd->execute_cmd = target_scsi2_reservation_release;
 		if (cdb[0] == RELEASE_10)
 			size = get_unaligned_be16(&cdb[7]);
@@ -1143,7 +1178,7 @@ passthrough_parse_cdb(struct se_cmd *cmd,
 			size = cmd->data_length;
 		return target_cmd_size_check(cmd, size);
 	}
-	if (cdb[0] == RESERVE || cdb[0] == RESERVE_10) {
+	if (cdb[0] == RESERVE_6 || cdb[0] == RESERVE_10) {
 		cmd->execute_cmd = target_scsi2_reservation_reserve;
 		if (cdb[0] == RESERVE_10)
 			size = get_unaligned_be16(&cdb[7]);
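Note on the per-CPU statistics pattern: the bulk of this diff converts the I/O accounting in transport_lookup_cmd_lun() from atomic_long_t counters to per-CPU counters (alloc_percpu() to allocate, this_cpu_inc()/this_cpu_add() to update, free_percpu() to release). Updates on the command path then touch only CPU-local memory instead of bouncing a shared atomic cacheline between CPUs; the cost moves to the reader, which must sum the per-CPU slots. A minimal sketch of the pattern in kernel C follows; struct demo_stats and the demo_* helpers are hypothetical names for illustration, not code from this patch:

	#include <linux/cpumask.h>
	#include <linux/errno.h>
	#include <linux/percpu.h>
	#include <linux/types.h>

	/* Hypothetical stand-in for the se_dev_io_stats used above. */
	struct demo_stats {
		u64 total_cmds;
		u64 read_bytes;
	};

	static struct demo_stats __percpu *stats;

	static int demo_alloc(void)
	{
		stats = alloc_percpu(struct demo_stats);
		return stats ? 0 : -ENOMEM;
	}

	/* Hot path: CPU-local add, no shared cacheline contention. */
	static void demo_account_read(u32 bytes)
	{
		this_cpu_inc(stats->total_cmds);
		this_cpu_add(stats->read_bytes, bytes);
	}

	/* Slow path (e.g. a configfs show): sum the per-CPU slots.
	 * The total is approximate while I/O is in flight, which is
	 * acceptable for statistics.
	 */
	static u64 demo_total_cmds(void)
	{
		u64 sum = 0;
		int cpu;

		for_each_possible_cpu(cpu)
			sum += per_cpu_ptr(stats, cpu)->total_cmds;
		return sum;
	}

	static void demo_free(void)
	{
		free_percpu(stats);
	}

The same ownership change explains why kfree_rcu(orig, rcu_head) becomes call_rcu() with a target_free_dev_entry() callback: a callback can free_percpu() the embedded stats before kfree()ing the entry, which kfree_rcu() cannot do.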

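The new dev->non_ordered percpu_ref follows the usual kill/drain/revive life cycle: commands take a reference on the fast path, and when the queue must be quiesced the ref is killed, so the release callback (target_non_ordered_release() above, which kicks delayed_cmd_work) runs once the last reference drops; PERCPU_REF_ALLOW_REINIT permits bringing the ref back afterwards. A bare-bones sketch of that cycle, again with hypothetical demo_* names rather than the target code:

	#include <linux/gfp.h>
	#include <linux/percpu-refcount.h>

	static struct percpu_ref inflight;

	/* Runs once the ref is dead and the last reference is dropped. */
	static void inflight_drained(struct percpu_ref *ref)
	{
		/* Safe to run work that needed all users gone. */
	}

	static int demo_init(void)
	{
		/* ALLOW_REINIT: the ref may be revived after a kill. */
		return percpu_ref_init(&inflight, inflight_drained,
				       PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
	}

	/* Fast path: CPU-local get while the ref is live. */
	static bool demo_io_start(void)
	{
		return percpu_ref_tryget_live(&inflight);
	}

	static void demo_io_done(void)
	{
		percpu_ref_put(&inflight);
	}

	static void demo_quiesce(void)
	{
		/* New tryget_live() calls fail from here on;
		 * inflight_drained() fires when the last ref is put.
		 */
		percpu_ref_kill(&inflight);
	}

	static void demo_resume(void)
	{
		/* Valid only because of PERCPU_REF_ALLOW_REINIT. */
		percpu_ref_resurrect(&inflight);
	}

The matching teardown is percpu_ref_exit(), which the diff adds to target_free_device().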