author     Linus Torvalds <torvalds@linux-foundation.org>   2020-10-14 15:15:35 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2020-10-14 15:15:35 -0700
commit     55e0500eb5c0440a3d43074edbd8db3e95851b66 (patch)
tree       874b9da7a764df298441242ce79b9fd89c2910df /drivers/target
parent     4815519ed0af833884ce9c288183bf1ae3cb9caa (diff)
parent     69f4ec1edb136d2d2511d1ef96f94ef0aeecefdf (diff)
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
 "The usual driver updates (ufs, qla2xxx, tcmu, ibmvfc, lpfc, smartpqi,
  hisi_sas, qedi, qedf, mpt3sas) and minor bug fixes.

  There are only three core changes: adding sense codes, cleaning up
  noretry and adding an option for limitless retries"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (226 commits)
  scsi: hisi_sas: Recover PHY state according to the status before reset
  scsi: hisi_sas: Filter out new PHY up events during suspend
  scsi: hisi_sas: Add device link between SCSI devices and hisi_hba
  scsi: hisi_sas: Add check for methods _PS0 and _PR0
  scsi: hisi_sas: Add controller runtime PM support for v3 hw
  scsi: hisi_sas: Switch to new framework to support suspend and resume
  scsi: hisi_sas: Use hisi_hba->cq_nvecs for calling calling synchronize_irq()
  scsi: qedf: Remove redundant assignment to variable 'rc'
  scsi: lpfc: Remove unneeded variable 'status' in lpfc_fcp_cpu_map_store()
  scsi: snic: Convert to use DEFINE_SEQ_ATTRIBUTE macro
  scsi: qla4xxx: Delete unneeded variable 'status' in qla4xxx_process_ddb_changed
  scsi: sun_esp: Use module_platform_driver to simplify the code
  scsi: sun3x_esp: Use module_platform_driver to simplify the code
  scsi: sni_53c710: Use module_platform_driver to simplify the code
  scsi: qlogicpti: Use module_platform_driver to simplify the code
  scsi: mac_esp: Use module_platform_driver to simplify the code
  scsi: jazz_esp: Use module_platform_driver to simplify the code
  scsi: mvumi: Fix error return in mvumi_io_attach()
  scsi: lpfc: Drop nodelist reference on error in lpfc_gen_req()
  scsi: be2iscsi: Fix a theoretical leak in beiscsi_create_eqs()
  ...
Diffstat (limited to 'drivers/target')
-rw-r--r--  drivers/target/iscsi/iscsi_target.c      2
-rw-r--r--  drivers/target/target_core_user.c      344
2 files changed, 172 insertions, 174 deletions
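
The target_core_user.c changes below rework how the driver builds the iovec list for its data area: tcmu_get_empty_block() and the new new_block_to_iov() fold consecutive data block indices (dbis) into a single iovec and only open a new one when a gap appears (dbi != prev_dbi + 1). The following is a minimal userspace sketch of that coalescing idea, not kernel code; DATA_OFF, the block indices and the helper name block_to_iov() are illustrative assumptions.

#include <stdio.h>
#include <stdint.h>
#include <sys/uio.h>

#define DATA_BLOCK_SIZE 4096
#define DATA_OFF        0x10000		/* illustrative data-area offset */

/*
 * Fold one data block into the iov list: an adjacent block index grows the
 * current iovec, a gap (dbi != prev_dbi + 1) opens a new one, mirroring the
 * idea of new_block_to_iov() in the patch.
 */
static int block_to_iov(struct iovec **iov, int *iov_cnt, int prev_dbi,
			int dbi, size_t len)
{
	if (dbi != prev_dbi + 1) {
		if (prev_dbi >= 0)	/* not the first block: move to next iovec */
			(*iov)++;
		(*iov)->iov_base = (void *)(uintptr_t)(DATA_OFF +
						       dbi * DATA_BLOCK_SIZE);
		(*iov)->iov_len = 0;
		(*iov_cnt)++;
	}
	(*iov)->iov_len += len;
	return dbi;
}

int main(void)
{
	int dbis[] = { 3, 4, 5, 7 };	/* 3,4,5 are adjacent; 7 starts a gap */
	size_t remain = 3 * DATA_BLOCK_SIZE + 100;
	struct iovec iovs[4] = { { 0 } }, *iov = iovs;
	int i, iov_cnt = 0, prev_dbi = -2;  /* -2: prev_dbi + 1 is never a valid dbi */

	for (i = 0; i < 4 && remain > 0; i++) {
		size_t len = remain < DATA_BLOCK_SIZE ? remain : DATA_BLOCK_SIZE;

		prev_dbi = block_to_iov(&iov, &iov_cnt, prev_dbi, dbis[i], len);
		remain -= len;
	}

	for (i = 0; i < iov_cnt; i++)	/* expect 2 iovecs: 12288 and 100 bytes */
		printf("iov[%d]: base=%p len=%zu\n",
		       i, iovs[i].iov_base, iovs[i].iov_len);
	return 0;
}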
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 7b56fe9f1062..f77e5eee6b80 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -4529,7 +4529,6 @@ int iscsit_logout_post_handler(
iscsit_logout_post_handler_closesession(conn);
break;
}
- ret = 0;
break;
case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION:
if (conn->cid == cmd->logout_cid) {
@@ -4540,7 +4539,6 @@ int iscsit_logout_post_handler(
iscsit_logout_post_handler_samecid(conn);
break;
}
- ret = 0;
} else {
switch (cmd->logout_response) {
case ISCSI_LOGOUT_SUCCESS:
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 9b7592350502..4c960b66de8e 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -177,9 +177,12 @@ struct tcmu_cmd {
/* Can't use se_cmd when cleaning up expired cmds, because if
cmd has been completed then accessing se_cmd is off limits */
uint32_t dbi_cnt;
+ uint32_t dbi_bidi_cnt;
uint32_t dbi_cur;
uint32_t *dbi;
+ uint32_t data_len_bidi;
+
unsigned long deadline;
#define TCMU_CMD_BIT_EXPIRED 0
@@ -242,7 +245,7 @@ static int tcmu_set_global_max_data_area(const char *str,
static int tcmu_get_global_max_data_area(char *buffer,
const struct kernel_param *kp)
{
- return sprintf(buffer, "%d", TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks));
+ return sprintf(buffer, "%d\n", TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks));
}
static const struct kernel_param_ops tcmu_global_max_data_area_op = {
@@ -492,15 +495,16 @@ static void tcmu_cmd_free_data(struct tcmu_cmd *tcmu_cmd, uint32_t len)
clear_bit(tcmu_cmd->dbi[i], udev->data_bitmap);
}
-static inline bool tcmu_get_empty_block(struct tcmu_dev *udev,
- struct tcmu_cmd *tcmu_cmd)
+static inline int tcmu_get_empty_block(struct tcmu_dev *udev,
+ struct tcmu_cmd *tcmu_cmd,
+ int prev_dbi, int *iov_cnt)
{
struct page *page;
int ret, dbi;
dbi = find_first_zero_bit(udev->data_bitmap, udev->dbi_thresh);
if (dbi == udev->dbi_thresh)
- return false;
+ return -1;
page = radix_tree_lookup(&udev->data_blocks, dbi);
if (!page) {
@@ -524,24 +528,30 @@ static inline bool tcmu_get_empty_block(struct tcmu_dev *udev,
set_bit(dbi, udev->data_bitmap);
tcmu_cmd_set_dbi(tcmu_cmd, dbi);
- return true;
+ if (dbi != prev_dbi + 1)
+ *iov_cnt += 1;
+
+ return dbi;
err_insert:
__free_page(page);
err_alloc:
atomic_dec(&global_db_count);
- return false;
+ return -1;
}
-static bool tcmu_get_empty_blocks(struct tcmu_dev *udev,
- struct tcmu_cmd *tcmu_cmd)
+static int tcmu_get_empty_blocks(struct tcmu_dev *udev,
+ struct tcmu_cmd *tcmu_cmd, int dbi_cnt)
{
- int i;
+ /* start value of dbi + 1 must not be a valid dbi */
+ int dbi = -2;
+ int i, iov_cnt = 0;
- for (i = tcmu_cmd->dbi_cur; i < tcmu_cmd->dbi_cnt; i++) {
- if (!tcmu_get_empty_block(udev, tcmu_cmd))
- return false;
+ for (i = 0; i < dbi_cnt; i++) {
+ dbi = tcmu_get_empty_block(udev, tcmu_cmd, dbi, &iov_cnt);
+ if (dbi < 0)
+ return -1;
}
- return true;
+ return iov_cnt;
}
static inline struct page *
@@ -558,25 +568,58 @@ static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd)
kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
}
-static inline size_t tcmu_cmd_get_data_length(struct tcmu_cmd *tcmu_cmd)
+static inline void tcmu_cmd_set_block_cnts(struct tcmu_cmd *cmd)
{
- struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
- size_t data_length = round_up(se_cmd->data_length, DATA_BLOCK_SIZE);
+ int i, len;
+ struct se_cmd *se_cmd = cmd->se_cmd;
+
+ cmd->dbi_cnt = DIV_ROUND_UP(se_cmd->data_length, DATA_BLOCK_SIZE);
if (se_cmd->se_cmd_flags & SCF_BIDI) {
BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
- data_length += round_up(se_cmd->t_bidi_data_sg->length,
- DATA_BLOCK_SIZE);
+ for (i = 0, len = 0; i < se_cmd->t_bidi_data_nents; i++)
+ len += se_cmd->t_bidi_data_sg[i].length;
+ cmd->dbi_bidi_cnt = DIV_ROUND_UP(len, DATA_BLOCK_SIZE);
+ cmd->dbi_cnt += cmd->dbi_bidi_cnt;
+ cmd->data_len_bidi = len;
}
+}
+
+static int new_block_to_iov(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
+ struct iovec **iov, int prev_dbi, int *remain)
+{
+ /* Get the next dbi */
+ int dbi = tcmu_cmd_get_dbi(cmd);
+ /* Do not add more than DATA_BLOCK_SIZE to iov */
+ int len = min_t(int, DATA_BLOCK_SIZE, *remain);
- return data_length;
+ *remain -= len;
+ /*
+ * The following code will gather and map the blocks to the same iovec
+ * when the blocks are all next to each other.
+ */
+ if (dbi != prev_dbi + 1) {
+ /* dbi is not next to previous dbi, so start new iov */
+ if (prev_dbi >= 0)
+ (*iov)++;
+ /* write offset relative to mb_addr */
+ (*iov)->iov_base = (void __user *)
+ (udev->data_off + dbi * DATA_BLOCK_SIZE);
+ }
+ (*iov)->iov_len += len;
+
+ return dbi;
}
-static inline uint32_t tcmu_cmd_get_block_cnt(struct tcmu_cmd *tcmu_cmd)
+static void tcmu_setup_iovs(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
+ struct iovec **iov, int data_length)
{
- size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
+ /* start value of dbi + 1 must not be a valid dbi */
+ int dbi = -2;
- return data_length / DATA_BLOCK_SIZE;
+ /* We prepare the IOVs for DMA_FROM_DEVICE transfer direction */
+ while (data_length > 0)
+ dbi = new_block_to_iov(udev, cmd, iov, dbi, &data_length);
}
static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
@@ -593,8 +636,7 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
tcmu_cmd->se_cmd = se_cmd;
tcmu_cmd->tcmu_dev = udev;
- tcmu_cmd_reset_dbi_cur(tcmu_cmd);
- tcmu_cmd->dbi_cnt = tcmu_cmd_get_block_cnt(tcmu_cmd);
+ tcmu_cmd_set_block_cnts(tcmu_cmd);
tcmu_cmd->dbi = kcalloc(tcmu_cmd->dbi_cnt, sizeof(uint32_t),
GFP_NOIO);
if (!tcmu_cmd->dbi) {
@@ -644,46 +686,22 @@ static inline size_t head_to_end(size_t head, size_t size)
return size - head;
}
-static inline void new_iov(struct iovec **iov, int *iov_cnt)
-{
- struct iovec *iovec;
-
- if (*iov_cnt != 0)
- (*iov)++;
- (*iov_cnt)++;
-
- iovec = *iov;
- memset(iovec, 0, sizeof(struct iovec));
-}
-
#define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size)
-/* offset is relative to mb_addr */
-static inline size_t get_block_offset_user(struct tcmu_dev *dev,
- int dbi, int remaining)
-{
- return dev->data_off + dbi * DATA_BLOCK_SIZE +
- DATA_BLOCK_SIZE - remaining;
-}
-
-static inline size_t iov_tail(struct iovec *iov)
-{
- return (size_t)iov->iov_base + iov->iov_len;
-}
-
-static void scatter_data_area(struct tcmu_dev *udev,
- struct tcmu_cmd *tcmu_cmd, struct scatterlist *data_sg,
- unsigned int data_nents, struct iovec **iov,
- int *iov_cnt, bool copy_data)
+static void scatter_data_area(struct tcmu_dev *udev, struct tcmu_cmd *tcmu_cmd,
+ struct iovec **iov)
{
- int i, dbi;
+ struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
+ /* start value of dbi + 1 must not be a valid dbi */
+ int i, dbi = -2;
int block_remaining = 0;
+ int data_len = se_cmd->data_length;
void *from, *to = NULL;
- size_t copy_bytes, to_offset, offset;
+ size_t copy_bytes, offset;
struct scatterlist *sg;
- struct page *page;
+ struct page *page = NULL;
- for_each_sg(data_sg, sg, data_nents, i) {
+ for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, i) {
int sg_remaining = sg->length;
from = kmap_atomic(sg_page(sg)) + sg->offset;
while (sg_remaining > 0) {
@@ -693,50 +711,19 @@ static void scatter_data_area(struct tcmu_dev *udev,
kunmap_atomic(to);
}
- block_remaining = DATA_BLOCK_SIZE;
- dbi = tcmu_cmd_get_dbi(tcmu_cmd);
+ /* get next dbi and add to IOVs */
+ dbi = new_block_to_iov(udev, tcmu_cmd, iov, dbi,
+ &data_len);
page = tcmu_get_block_page(udev, dbi);
to = kmap_atomic(page);
+ block_remaining = DATA_BLOCK_SIZE;
}
- /*
- * Covert to virtual offset of the ring data area.
- */
- to_offset = get_block_offset_user(udev, dbi,
- block_remaining);
-
- /*
- * The following code will gather and map the blocks
- * to the same iovec when the blocks are all next to
- * each other.
- */
copy_bytes = min_t(size_t, sg_remaining,
block_remaining);
- if (*iov_cnt != 0 &&
- to_offset == iov_tail(*iov)) {
- /*
- * Will append to the current iovec, because
- * the current block page is next to the
- * previous one.
- */
- (*iov)->iov_len += copy_bytes;
- } else {
- /*
- * Will allocate a new iovec because we are
- * first time here or the current block page
- * is not next to the previous one.
- */
- new_iov(iov, iov_cnt);
- (*iov)->iov_base = (void __user *)to_offset;
- (*iov)->iov_len = copy_bytes;
- }
-
- if (copy_data) {
- offset = DATA_BLOCK_SIZE - block_remaining;
- memcpy(to + offset,
- from + sg->length - sg_remaining,
- copy_bytes);
- }
+ offset = DATA_BLOCK_SIZE - block_remaining;
+ memcpy(to + offset, from + sg->length - sg_remaining,
+ copy_bytes);
sg_remaining -= copy_bytes;
block_remaining -= copy_bytes;
@@ -767,13 +754,12 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
data_sg = se_cmd->t_data_sg;
data_nents = se_cmd->t_data_nents;
} else {
-
/*
* For bidi case, the first count blocks are for Data-Out
* buffer blocks, and before gathering the Data-In buffer
- * the Data-Out buffer blocks should be discarded.
+ * the Data-Out buffer blocks should be skipped.
*/
- count = DIV_ROUND_UP(se_cmd->data_length, DATA_BLOCK_SIZE);
+ count = cmd->dbi_cnt - cmd->dbi_bidi_cnt;
data_sg = se_cmd->t_bidi_data_sg;
data_nents = se_cmd->t_bidi_data_nents;
@@ -821,17 +807,13 @@ static inline size_t spc_bitmap_free(unsigned long *bitmap, uint32_t thresh)
}
/*
- * We can't queue a command until we have space available on the cmd ring *and*
- * space available on the data area.
+ * We can't queue a command until we have space available on the cmd ring.
*
* Called with ring lock held.
*/
-static bool is_ring_space_avail(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
- size_t cmd_size, size_t data_needed)
+static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size)
{
struct tcmu_mailbox *mb = udev->mb_addr;
- uint32_t blocks_needed = (data_needed + DATA_BLOCK_SIZE - 1)
- / DATA_BLOCK_SIZE;
size_t space, cmd_needed;
u32 cmd_head;
@@ -854,29 +836,54 @@ static bool is_ring_space_avail(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
udev->cmdr_last_cleaned, udev->cmdr_size);
return false;
}
+ return true;
+}
- if (!data_needed)
- return true;
+/*
+ * We have to allocate data buffers before we can queue a command.
+ * Returns -1 on error (not enough space) or number of needed iovs on success
+ *
+ * Called with ring lock held.
+ */
+static int tcmu_alloc_data_space(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
+ int *iov_bidi_cnt)
+{
+ int space, iov_cnt = 0, ret = 0;
+
+ if (!cmd->dbi_cnt)
+ goto wr_iov_cnts;
/* try to check and get the data blocks as needed */
space = spc_bitmap_free(udev->data_bitmap, udev->dbi_thresh);
- if ((space * DATA_BLOCK_SIZE) < data_needed) {
+ if (space < cmd->dbi_cnt) {
unsigned long blocks_left =
(udev->max_blocks - udev->dbi_thresh) + space;
- if (blocks_left < blocks_needed) {
- pr_debug("no data space: only %lu available, but ask for %zu\n",
+ if (blocks_left < cmd->dbi_cnt) {
+ pr_debug("no data space: only %lu available, but ask for %lu\n",
blocks_left * DATA_BLOCK_SIZE,
- data_needed);
- return false;
+ cmd->dbi_cnt * DATA_BLOCK_SIZE);
+ return -1;
}
- udev->dbi_thresh += blocks_needed;
+ udev->dbi_thresh += cmd->dbi_cnt;
if (udev->dbi_thresh > udev->max_blocks)
udev->dbi_thresh = udev->max_blocks;
}
- return tcmu_get_empty_blocks(udev, cmd);
+ iov_cnt = tcmu_get_empty_blocks(udev, cmd,
+ cmd->dbi_cnt - cmd->dbi_bidi_cnt);
+ if (iov_cnt < 0)
+ return -1;
+
+ if (cmd->dbi_bidi_cnt) {
+ ret = tcmu_get_empty_blocks(udev, cmd, cmd->dbi_bidi_cnt);
+ if (ret < 0)
+ return -1;
+ }
+wr_iov_cnts:
+ *iov_bidi_cnt = ret;
+ return iov_cnt + ret;
}
static inline size_t tcmu_cmd_get_base_cmd_size(size_t iov_cnt)
@@ -986,11 +993,11 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
struct tcmu_mailbox *mb = udev->mb_addr;
struct tcmu_cmd_entry *entry;
struct iovec *iov;
- int iov_cnt, cmd_id;
+ int iov_cnt, iov_bidi_cnt, cmd_id;
uint32_t cmd_head;
uint64_t cdb_off;
- bool copy_to_data_area;
- size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
+ /* size of data buffer needed */
+ size_t data_length = (size_t)tcmu_cmd->dbi_cnt * DATA_BLOCK_SIZE;
*scsi_err = TCM_NO_SENSE;
@@ -1004,42 +1011,54 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
return -1;
}
+ if (!list_empty(&udev->qfull_queue))
+ goto queue;
+
+ if (data_length > udev->data_size) {
+ pr_warn("TCMU: Request of size %zu is too big for %zu data area\n",
+ data_length, udev->data_size);
+ *scsi_err = TCM_INVALID_CDB_FIELD;
+ return -1;
+ }
+
+ iov_cnt = tcmu_alloc_data_space(udev, tcmu_cmd, &iov_bidi_cnt);
+ if (iov_cnt < 0)
+ goto free_and_queue;
+
/*
* Must be a certain minimum size for response sense info, but
* also may be larger if the iov array is large.
- *
- * We prepare as many iovs as possbile for potential uses here,
- * because it's expensive to tell how many regions are freed in
- * the bitmap & global data pool, as the size calculated here
- * will only be used to do the checks.
- *
- * The size will be recalculated later as actually needed to save
- * cmd area memories.
*/
- base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt);
+ base_command_size = tcmu_cmd_get_base_cmd_size(iov_cnt);
command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);
- if (!list_empty(&udev->qfull_queue))
- goto queue;
-
- if ((command_size > (udev->cmdr_size / 2)) ||
- data_length > udev->data_size) {
- pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu "
- "cmd ring/data area\n", command_size, data_length,
- udev->cmdr_size, udev->data_size);
+ if (command_size > (udev->cmdr_size / 2)) {
+ pr_warn("TCMU: Request of size %zu is too big for %u cmd ring\n",
+ command_size, udev->cmdr_size);
+ tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur);
*scsi_err = TCM_INVALID_CDB_FIELD;
return -1;
}
- if (!is_ring_space_avail(udev, tcmu_cmd, command_size, data_length)) {
+ if (!is_ring_space_avail(udev, command_size))
/*
* Don't leave commands partially setup because the unmap
* thread might need the blocks to make forward progress.
*/
- tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur);
- tcmu_cmd_reset_dbi_cur(tcmu_cmd);
- goto queue;
+ goto free_and_queue;
+
+ cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT);
+ if (cmd_id < 0) {
+ pr_err("tcmu: Could not allocate cmd id.\n");
+
+ tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
+ *scsi_err = TCM_OUT_OF_RESOURCES;
+ return -1;
}
+ tcmu_cmd->cmd_id = cmd_id;
+
+ pr_debug("allocated cmd id %u for cmd %p dev %s\n", tcmu_cmd->cmd_id,
+ tcmu_cmd, udev->name);
cmd_head = ring_insert_padding(udev, command_size);
@@ -1047,52 +1066,29 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
memset(entry, 0, command_size);
tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);
- /* Handle allocating space from the data area */
+ /* prepare iov list and copy data to data area if necessary */
tcmu_cmd_reset_dbi_cur(tcmu_cmd);
iov = &entry->req.iov[0];
- iov_cnt = 0;
- copy_to_data_area = (se_cmd->data_direction == DMA_TO_DEVICE
- || se_cmd->se_cmd_flags & SCF_BIDI);
- scatter_data_area(udev, tcmu_cmd, se_cmd->t_data_sg,
- se_cmd->t_data_nents, &iov, &iov_cnt,
- copy_to_data_area);
- entry->req.iov_cnt = iov_cnt;
+
+ if (se_cmd->data_direction == DMA_TO_DEVICE ||
+ se_cmd->se_cmd_flags & SCF_BIDI)
+ scatter_data_area(udev, tcmu_cmd, &iov);
+ else
+ tcmu_setup_iovs(udev, tcmu_cmd, &iov, se_cmd->data_length);
+
+ entry->req.iov_cnt = iov_cnt - iov_bidi_cnt;
/* Handle BIDI commands */
- iov_cnt = 0;
if (se_cmd->se_cmd_flags & SCF_BIDI) {
iov++;
- scatter_data_area(udev, tcmu_cmd, se_cmd->t_bidi_data_sg,
- se_cmd->t_bidi_data_nents, &iov, &iov_cnt,
- false);
- }
- entry->req.iov_bidi_cnt = iov_cnt;
-
- cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT);
- if (cmd_id < 0) {
- pr_err("tcmu: Could not allocate cmd id.\n");
-
- tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
- *scsi_err = TCM_OUT_OF_RESOURCES;
- return -1;
+ tcmu_setup_iovs(udev, tcmu_cmd, &iov, tcmu_cmd->data_len_bidi);
+ entry->req.iov_bidi_cnt = iov_bidi_cnt;
}
- tcmu_cmd->cmd_id = cmd_id;
-
- pr_debug("allocated cmd id %u for cmd %p dev %s\n", tcmu_cmd->cmd_id,
- tcmu_cmd, udev->name);
tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out, &udev->cmd_timer);
entry->hdr.cmd_id = tcmu_cmd->cmd_id;
- /*
- * Recalaulate the command's base size and size according
- * to the actual needs
- */
- base_command_size = tcmu_cmd_get_base_cmd_size(entry->req.iov_cnt +
- entry->req.iov_bidi_cnt);
- command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);
-
tcmu_hdr_set_len(&entry->hdr.len_op, command_size);
/* All offsets relative to mb_addr, not start of entry! */
@@ -1111,6 +1107,10 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
return 0;
+free_and_queue:
+ tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur);
+ tcmu_cmd_reset_dbi_cur(tcmu_cmd);
+
queue:
if (add_to_qfull_queue(tcmu_cmd)) {
*scsi_err = TCM_OUT_OF_RESOURCES;
@@ -1145,7 +1145,7 @@ queue_tmr_ring(struct tcmu_dev *udev, struct tcmu_tmr *tmr)
cmd_size = round_up(sizeof(*entry) + id_list_sz, TCMU_OP_ALIGN_SIZE);
if (!list_empty(&udev->tmr_queue) ||
- !is_ring_space_avail(udev, NULL, cmd_size, 0)) {
+ !is_ring_space_avail(udev, cmd_size)) {
list_add_tail(&tmr->queue_entry, &udev->tmr_queue);
pr_debug("adding tmr %p on dev %s to TMR ring space wait queue\n",
tmr, udev->name);
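
For the bidi handling touched above (tcmu_cmd_set_block_cnts() and gather_data_area()), the patch keeps the Data-Out block indices at the start of the dbi array and appends the dbi_bidi_cnt Data-In indices after them, so the gather path skips the first dbi_cnt - dbi_bidi_cnt entries. Below is a tiny standalone sketch of that layout, with illustrative values rather than the kernel's data structures:

#include <stdio.h>

/*
 * Illustrative layout: Data-Out block indices first, then the
 * dbi_bidi_cnt Data-In (bidi) indices, so the gather step skips the
 * first dbi_cnt - dbi_bidi_cnt entries before reading Data-In blocks.
 */
int main(void)
{
	int dbi[] = { 3, 4, 5, 7, 8 };	/* 3 Data-Out blocks, then 2 bidi blocks */
	int dbi_cnt = 5, dbi_bidi_cnt = 2;
	int count = dbi_cnt - dbi_bidi_cnt;	/* Data-Out blocks to skip */
	int i;

	for (i = count; i < dbi_cnt; i++)
		printf("gather Data-In from data block %d\n", dbi[i]);
	return 0;
}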