Diffstat (limited to 'drivers/target/target_core_device.c')
-rw-r--r-- drivers/target/target_core_device.c | 191
1 file changed, 106 insertions(+), 85 deletions(-)
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index f6e58410ec3f..8ccb8541db1c 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -21,7 +21,7 @@
#include <linux/in.h>
#include <linux/export.h>
#include <linux/t10-pi.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_common.h>
@@ -37,7 +37,6 @@
#include "target_core_ua.h"
static DEFINE_MUTEX(device_mutex);
-static LIST_HEAD(device_list);
static DEFINE_IDR(devices_idr);
static struct se_hba *lun0_hba;
@@ -56,14 +55,14 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd)
rcu_read_lock();
deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
if (deve) {
- atomic_long_inc(&deve->total_cmds);
+ this_cpu_inc(deve->stats->total_cmds);
if (se_cmd->data_direction == DMA_TO_DEVICE)
- atomic_long_add(se_cmd->data_length,
- &deve->write_bytes);
+ this_cpu_add(deve->stats->write_bytes,
+ se_cmd->data_length);
else if (se_cmd->data_direction == DMA_FROM_DEVICE)
- atomic_long_add(se_cmd->data_length,
- &deve->read_bytes);
+ this_cpu_add(deve->stats->read_bytes,
+ se_cmd->data_length);
if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
deve->lun_access_ro) {
@@ -127,14 +126,14 @@ out_unlock:
* target_core_fabric_configfs.c:target_fabric_port_release
*/
se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
- atomic_long_inc(&se_cmd->se_dev->num_cmds);
+ this_cpu_inc(se_cmd->se_dev->stats->total_cmds);
if (se_cmd->data_direction == DMA_TO_DEVICE)
- atomic_long_add(se_cmd->data_length,
- &se_cmd->se_dev->write_bytes);
+ this_cpu_add(se_cmd->se_dev->stats->write_bytes,
+ se_cmd->data_length);
else if (se_cmd->data_direction == DMA_FROM_DEVICE)
- atomic_long_add(se_cmd->data_length,
- &se_cmd->se_dev->read_bytes);
+ this_cpu_add(se_cmd->se_dev->stats->read_bytes,
+ se_cmd->data_length);
return ret;
}
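
The conversion above only touches the write side of the counters; reading them back means summing across CPUs. A minimal sketch of that aggregation, assuming the struct se_dev_io_stats fields implied by the call sites in this hunk (the helper name is illustrative; the real read side lives in the target statistics code, not in this file):

/*
 * Illustrative only: sum a per-CPU counter such as stats->total_cmds.
 * The field layout of struct se_dev_io_stats is assumed from the
 * this_cpu_inc()/this_cpu_add() call sites above.
 */
static unsigned long target_sum_total_cmds(struct se_device *dev)
{
	unsigned long sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += per_cpu_ptr(dev->stats, cpu)->total_cmds;

	return sum;
}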
@@ -147,7 +146,6 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd)
struct se_session *se_sess = se_cmd->se_sess;
struct se_node_acl *nacl = se_sess->se_node_acl;
struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
- unsigned long flags;
rcu_read_lock();
deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
@@ -178,10 +176,6 @@ out_unlock:
se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);
- spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
- list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
- spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);
-
return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);
@@ -223,7 +217,7 @@ struct se_dev_entry *core_get_se_deve_from_rtpi(
tpg->se_tpg_tfo->fabric_name);
continue;
}
- if (lun->lun_rtpi != rtpi)
+ if (lun->lun_tpg->tpg_rtpi != rtpi)
continue;
kref_get(&deve->pr_kref);
@@ -328,6 +322,7 @@ int core_enable_device_list_for_node(
struct se_portal_group *tpg)
{
struct se_dev_entry *orig, *new;
+ int ret = 0;
new = kzalloc(sizeof(*new), GFP_KERNEL);
if (!new) {
@@ -335,6 +330,12 @@ int core_enable_device_list_for_node(
return -ENOMEM;
}
+ new->stats = alloc_percpu(struct se_dev_entry_io_stats);
+ if (!new->stats) {
+ ret = -ENOMEM;
+ goto free_deve;
+ }
+
spin_lock_init(&new->ua_lock);
INIT_LIST_HEAD(&new->ua_list);
INIT_LIST_HEAD(&new->lun_link);
@@ -357,8 +358,8 @@ int core_enable_device_list_for_node(
" for dynamic -> explicit NodeACL conversion:"
" %s\n", nacl->initiatorname);
mutex_unlock(&nacl->lun_entry_mutex);
- kfree(new);
- return -EINVAL;
+ ret = -EINVAL;
+ goto free_stats;
}
if (orig->se_lun_acl != NULL) {
pr_warn_ratelimited("Detected existing explicit"
@@ -366,8 +367,8 @@ int core_enable_device_list_for_node(
" mapped_lun: %llu, failing\n",
nacl->initiatorname, mapped_lun);
mutex_unlock(&nacl->lun_entry_mutex);
- kfree(new);
- return -EINVAL;
+ ret = -EINVAL;
+ goto free_stats;
}
new->se_lun = lun;
@@ -400,6 +401,20 @@ int core_enable_device_list_for_node(
target_luns_data_has_changed(nacl, new, true);
return 0;
+
+free_stats:
+ free_percpu(new->stats);
+free_deve:
+ kfree(new);
+ return ret;
+}
+
+static void target_free_dev_entry(struct rcu_head *head)
+{
+ struct se_dev_entry *deve = container_of(head, struct se_dev_entry,
+ rcu_head);
+ free_percpu(deve->stats);
+ kfree(deve);
}
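
kfree_rcu() can only free the se_dev_entry object itself; now that the entry owns a separately allocated percpu stats region, teardown has to go through a call_rcu() callback (target_free_dev_entry() above) so both allocations outlive any RCU reader. A sketch of the reader-side pattern the grace period protects, mirroring the lookup paths earlier in this file (the helper name is illustrative):

/*
 * Illustrative reader: deve and deve->stats are only valid inside the
 * RCU read-side critical section, which is why the percpu region must
 * not be freed before the grace period expires.
 */
static void target_count_cmd(struct se_node_acl *nacl, u64 mapped_lun)
{
	struct se_dev_entry *deve;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, mapped_lun);
	if (deve)
		this_cpu_inc(deve->stats->total_cmds);
	rcu_read_unlock();
}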
void core_disable_device_list_for_node(
@@ -449,7 +464,7 @@ void core_disable_device_list_for_node(
kref_put(&orig->pr_kref, target_pr_kref_release);
wait_for_completion(&orig->pr_comp);
- kfree_rcu(orig, rcu_head);
+ call_rcu(&orig->rcu_head, target_free_dev_entry);
core_scsi3_free_pr_reg_from_nacl(dev, nacl);
target_luns_data_has_changed(nacl, NULL, false);
@@ -479,47 +494,6 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
mutex_unlock(&tpg->acl_node_mutex);
}
-int core_alloc_rtpi(struct se_lun *lun, struct se_device *dev)
-{
- struct se_lun *tmp;
-
- spin_lock(&dev->se_port_lock);
- if (dev->export_count == 0x0000ffff) {
- pr_warn("Reached dev->dev_port_count =="
- " 0x0000ffff\n");
- spin_unlock(&dev->se_port_lock);
- return -ENOSPC;
- }
-again:
- /*
- * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
- * Here is the table from spc4r17 section 7.7.3.8.
- *
- * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
- *
- * Code Description
- * 0h Reserved
- * 1h Relative port 1, historically known as port A
- * 2h Relative port 2, historically known as port B
- * 3h to FFFFh Relative port 3 through 65 535
- */
- lun->lun_rtpi = dev->dev_rpti_counter++;
- if (!lun->lun_rtpi)
- goto again;
-
- list_for_each_entry(tmp, &dev->dev_sep_list, lun_dev_link) {
- /*
- * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
- * for 16-bit wrap..
- */
- if (lun->lun_rtpi == tmp->lun_rtpi)
- goto again;
- }
- spin_unlock(&dev->se_port_lock);
-
- return 0;
-}
-
static void se_release_vpd_for_dev(struct se_device *dev)
{
struct t10_vpd *vpd, *vpd_tmp;
@@ -726,6 +700,18 @@ static void scsi_dump_inquiry(struct se_device *dev)
pr_debug(" Type: %s ", scsi_device_type(device_type));
}
+static void target_non_ordered_release(struct percpu_ref *ref)
+{
+ struct se_device *dev = container_of(ref, struct se_device,
+ non_ordered);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->delayed_cmd_lock, flags);
+ if (!list_empty(&dev->delayed_cmd_list))
+ schedule_work(&dev->delayed_cmd_work);
+ spin_unlock_irqrestore(&dev->delayed_cmd_lock, flags);
+}
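
target_non_ordered_release() runs once the killed dev->non_ordered ref drops to zero, which is what kicks the delayed (ordered) command worker. A hedged sketch of the expected get/put pairing; the actual call sites are in the ordered-command handling elsewhere in this series, and the helper names here are illustrative:

/*
 * Illustrative pairing for dev->non_ordered: non-ordered commands take
 * a reference while in flight; an ORDERED command kills the ref, and
 * the release callback above fires once the last in-flight command
 * drops its reference.
 */
static bool target_start_non_ordered(struct se_device *dev)
{
	return percpu_ref_tryget_live(&dev->non_ordered);
}

static void target_end_non_ordered(struct se_device *dev)
{
	percpu_ref_put(&dev->non_ordered);
}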
+
struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
struct se_device *dev;
@@ -736,11 +722,13 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
if (!dev)
return NULL;
+ dev->stats = alloc_percpu(struct se_dev_io_stats);
+ if (!dev->stats)
+ goto free_device;
+
dev->queues = kcalloc(nr_cpu_ids, sizeof(*dev->queues), GFP_KERNEL);
- if (!dev->queues) {
- dev->transport->free_device(dev);
- return NULL;
- }
+ if (!dev->queues)
+ goto free_stats;
dev->queue_cnt = nr_cpu_ids;
for (i = 0; i < dev->queue_cnt; i++) {
@@ -754,6 +742,10 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
INIT_WORK(&q->sq.work, target_queued_submit_work);
}
+ if (percpu_ref_init(&dev->non_ordered, target_non_ordered_release,
+ PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
+ goto free_queues;
+
dev->se_hba = hba;
dev->transport = hba->backend->ops;
dev->transport_flags = dev->transport->transport_flags_default;
@@ -782,6 +774,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
spin_lock_init(&dev->t10_alua.lba_map_lock);
INIT_WORK(&dev->delayed_cmd_work, target_do_delayed_work);
+ mutex_init(&dev->lun_reset_mutex);
dev->t10_wwn.t10_dev = dev;
/*
@@ -819,7 +812,9 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
dev->dev_attrib.unmap_zeroes_data =
DA_UNMAP_ZEROES_DATA_DEFAULT;
dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
+ dev->dev_attrib.submit_type = TARGET_FABRIC_DEFAULT_SUBMIT;
+ /* Skip allocating lun_stats since we can't export them. */
xcopy_lun = &dev->xcopy_lun;
rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
init_completion(&xcopy_lun->lun_shutdown_comp);
@@ -829,21 +824,46 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
xcopy_lun->lun_tpg = &xcopy_pt_tpg;
/* Preload the default INQUIRY const values */
- strlcpy(dev->t10_wwn.vendor, "LIO-ORG", sizeof(dev->t10_wwn.vendor));
- strlcpy(dev->t10_wwn.model, dev->transport->inquiry_prod,
+ strscpy(dev->t10_wwn.vendor, "LIO-ORG", sizeof(dev->t10_wwn.vendor));
+ strscpy(dev->t10_wwn.model, dev->transport->inquiry_prod,
sizeof(dev->t10_wwn.model));
- strlcpy(dev->t10_wwn.revision, dev->transport->inquiry_rev,
+ strscpy(dev->t10_wwn.revision, dev->transport->inquiry_rev,
sizeof(dev->t10_wwn.revision));
return dev;
+
+free_queues:
+ kfree(dev->queues);
+free_stats:
+ free_percpu(dev->stats);
+free_device:
+ hba->backend->ops->free_device(dev);
+ return NULL;
+}
+
+void target_configure_write_atomic_from_bdev(struct se_dev_attrib *attrib,
+ struct block_device *bdev)
+{
+ struct request_queue *q = bdev_get_queue(bdev);
+ int block_size = bdev_logical_block_size(bdev);
+
+ if (!bdev_can_atomic_write(bdev))
+ return;
+
+ attrib->atomic_max_len = queue_atomic_write_max_bytes(q) / block_size;
+ attrib->atomic_granularity = attrib->atomic_alignment =
+ queue_atomic_write_unit_min_bytes(q) / block_size;
+ attrib->atomic_max_with_boundary = 0;
+ attrib->atomic_max_boundary = 0;
}
+EXPORT_SYMBOL_GPL(target_configure_write_atomic_from_bdev);
/*
* Check if the underlying struct block_device supports discard and if yes
* configure the UNMAP parameters.
*/
-bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
- struct block_device *bdev)
+bool target_configure_unmap_from_bdev(struct se_dev_attrib *attrib,
+ struct block_device *bdev)
{
int block_size = bdev_logical_block_size(bdev);
@@ -861,7 +881,7 @@ bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
bdev_discard_alignment(bdev) / block_size;
return true;
}
-EXPORT_SYMBOL(target_configure_unmap_from_queue);
+EXPORT_SYMBOL(target_configure_unmap_from_bdev);
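
A hedged usage sketch for the renamed and newly exported helpers, modeled on how a block-device-backed backend such as iblock might wire them up during device configuration (the surrounding function and variable names are assumptions):

/*
 * Illustrative backend hook: both helpers take the backing bdev
 * directly, hence the *_from_bdev naming.
 */
static void backend_setup_bdev_limits(struct se_device *dev,
				      struct block_device *bd)
{
	struct se_dev_attrib *attrib = &dev->dev_attrib;

	/* Returns false when the bdev does not support discard. */
	if (!target_configure_unmap_from_bdev(attrib, bd))
		pr_debug("backing bdev does not support UNMAP\n");

	/* Internally a no-op when bdev_can_atomic_write() fails. */
	target_configure_write_atomic_from_bdev(attrib, bd);
}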
/*
* Convert from blocksize advertised to the initiator to the 512 byte
@@ -883,7 +903,6 @@ sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
EXPORT_SYMBOL(target_to_linux_sector);
struct devices_idr_iter {
- struct config_item *prev_item;
int (*fn)(struct se_device *dev, void *data);
void *data;
};
@@ -893,11 +912,9 @@ static int target_devices_idr_iter(int id, void *p, void *data)
{
struct devices_idr_iter *iter = data;
struct se_device *dev = p;
+ struct config_item *item;
int ret;
- config_item_put(iter->prev_item);
- iter->prev_item = NULL;
-
/*
* We add the device early to the idr, so it can be used
* by backend modules during configuration. We do not want
@@ -907,12 +924,13 @@ static int target_devices_idr_iter(int id, void *p, void *data)
if (!target_dev_configured(dev))
return 0;
- iter->prev_item = config_item_get_unless_zero(&dev->dev_group.cg_item);
- if (!iter->prev_item)
+ item = config_item_get_unless_zero(&dev->dev_group.cg_item);
+ if (!item)
return 0;
mutex_unlock(&device_mutex);
ret = iter->fn(dev, iter->data);
+ config_item_put(item);
mutex_lock(&device_mutex);
return ret;
@@ -935,7 +953,6 @@ int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
mutex_lock(&device_mutex);
ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter);
mutex_unlock(&device_mutex);
- config_item_put(iter.prev_item);
return ret;
}
@@ -1028,6 +1045,9 @@ void target_free_device(struct se_device *dev)
WARN_ON(!list_empty(&dev->dev_sep_list));
+ percpu_ref_exit(&dev->non_ordered);
+ cancel_work_sync(&dev->delayed_cmd_work);
+
if (target_dev_configured(dev)) {
dev->transport->destroy_device(dev);
@@ -1049,6 +1069,7 @@ void target_free_device(struct se_device *dev)
dev->transport->free_prot(dev);
kfree(dev->queues);
+ free_percpu(dev->stats);
dev->transport->free_device(dev);
}
@@ -1126,8 +1147,8 @@ passthrough_parse_cdb(struct se_cmd *cmd,
if (!dev->dev_attrib.emulate_pr &&
((cdb[0] == PERSISTENT_RESERVE_IN) ||
(cdb[0] == PERSISTENT_RESERVE_OUT) ||
- (cdb[0] == RELEASE || cdb[0] == RELEASE_10) ||
- (cdb[0] == RESERVE || cdb[0] == RESERVE_10))) {
+ (cdb[0] == RELEASE_6 || cdb[0] == RELEASE_10) ||
+ (cdb[0] == RESERVE_6 || cdb[0] == RESERVE_10))) {
return TCM_UNSUPPORTED_SCSI_OPCODE;
}
@@ -1149,7 +1170,7 @@ passthrough_parse_cdb(struct se_cmd *cmd,
return target_cmd_size_check(cmd, size);
}
- if (cdb[0] == RELEASE || cdb[0] == RELEASE_10) {
+ if (cdb[0] == RELEASE_6 || cdb[0] == RELEASE_10) {
cmd->execute_cmd = target_scsi2_reservation_release;
if (cdb[0] == RELEASE_10)
size = get_unaligned_be16(&cdb[7]);
@@ -1157,7 +1178,7 @@ passthrough_parse_cdb(struct se_cmd *cmd,
size = cmd->data_length;
return target_cmd_size_check(cmd, size);
}
- if (cdb[0] == RESERVE || cdb[0] == RESERVE_10) {
+ if (cdb[0] == RESERVE_6 || cdb[0] == RESERVE_10) {
cmd->execute_cmd = target_scsi2_reservation_reserve;
if (cdb[0] == RESERVE_10)
size = get_unaligned_be16(&cdb[7]);