Diffstat (limited to 'drivers/target/target_core_user.c')
-rw-r--r-- drivers/target/target_core_user.c | 1569
1 file changed, 1080 insertions(+), 489 deletions(-)
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 560bfec933bc..3fd963612775 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -8,19 +8,19 @@
#include <linux/spinlock.h>
#include <linux/module.h>
-#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/parser.h>
#include <linux/vmalloc.h>
#include <linux/uio_driver.h>
-#include <linux/radix-tree.h>
+#include <linux/xarray.h>
#include <linux/stringify.h>
#include <linux/bitops.h>
#include <linux/highmem.h>
#include <linux/configfs.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
+#include <linux/pagemap.h>
#include <net/genetlink.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
@@ -61,25 +61,27 @@
#define TCMU_TIME_OUT (30 * MSEC_PER_SEC)
-/* For cmd area, the size is fixed 8MB */
-#define CMDR_SIZE (8 * 1024 * 1024)
+/* For mailbox plus cmd ring, the size is fixed 8MB */
+#define MB_CMDR_SIZE_DEF (8 * 1024 * 1024)
+/* Offset of cmd ring is size of mailbox */
+#define CMDR_OFF ((__u32)sizeof(struct tcmu_mailbox))
+#define CMDR_SIZE_DEF (MB_CMDR_SIZE_DEF - CMDR_OFF)
/*
- * For data area, the block size is PAGE_SIZE and
- * the total size is 256K * PAGE_SIZE.
+ * For data area, the default block size is PAGE_SIZE and
+ * the default total size is 256K * PAGE_SIZE.
*/
-#define DATA_BLOCK_SIZE PAGE_SIZE
-#define DATA_BLOCK_SHIFT PAGE_SHIFT
-#define DATA_BLOCK_BITS_DEF (256 * 1024)
+#define DATA_PAGES_PER_BLK_DEF 1
+#define DATA_AREA_PAGES_DEF (256 * 1024)
-#define TCMU_MBS_TO_BLOCKS(_mbs) (_mbs << (20 - DATA_BLOCK_SHIFT))
-#define TCMU_BLOCKS_TO_MBS(_blocks) (_blocks >> (20 - DATA_BLOCK_SHIFT))
+#define TCMU_MBS_TO_PAGES(_mbs) ((size_t)_mbs << (20 - PAGE_SHIFT))
+#define TCMU_PAGES_TO_MBS(_pages) (_pages >> (20 - PAGE_SHIFT))
/*
* Default number of global data blocks(512K * PAGE_SIZE)
* when the unmap thread will be started.
*/
-#define TCMU_GLOBAL_MAX_BLOCKS_DEF (512 * 1024)
+#define TCMU_GLOBAL_MAX_PAGES_DEF (512 * 1024)
static u8 tcmu_kern_cmd_reply_supported;
static u8 tcmu_netlink_blocked;
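
The unit of accounting is now pages rather than fixed-size blocks, and the MB conversions above are pure shifts. A minimal userspace sketch of the arithmetic, assuming 4 KiB pages (PAGE_SHIFT = 12; in the kernel this is per-architecture):

#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT 12	/* assumption: 4 KiB pages */

#define TCMU_MBS_TO_PAGES(_mbs)   ((size_t)(_mbs) << (20 - PAGE_SHIFT))
#define TCMU_PAGES_TO_MBS(_pages) ((_pages) >> (20 - PAGE_SHIFT))

int main(void)
{
	/* 1 MiB = 2^20 bytes = 2^(20 - PAGE_SHIFT) pages */
	printf("1024 MB -> %zu pages\n", TCMU_MBS_TO_PAGES(1024));
	printf("262144 pages -> %zu MB\n", TCMU_PAGES_TO_MBS((size_t)262144));
	/* default data area: 256K pages, reclaim threshold: 512K pages */
	printf("defaults: %zu MB data area, %zu MB global limit\n",
	       TCMU_PAGES_TO_MBS((size_t)(256 * 1024)),
	       TCMU_PAGES_TO_MBS((size_t)(512 * 1024)));
	return 0;
}

Under the 4 KiB assumption, the default data area (256K pages) is 1 GiB and the global reclaim threshold (512K pages) is 2 GiB.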
@@ -111,6 +113,7 @@ struct tcmu_dev {
struct kref kref;
struct se_device se_dev;
+ struct se_dev_plug se_plug;
char *name;
struct se_hba *hba;
@@ -118,32 +121,39 @@ struct tcmu_dev {
#define TCMU_DEV_BIT_OPEN 0
#define TCMU_DEV_BIT_BROKEN 1
#define TCMU_DEV_BIT_BLOCKED 2
+#define TCMU_DEV_BIT_TMR_NOTIFY 3
+#define TCMU_DEV_BIT_PLUGGED 4
unsigned long flags;
struct uio_info uio_info;
struct inode *inode;
- struct tcmu_mailbox *mb_addr;
uint64_t dev_size;
+
+ struct tcmu_mailbox *mb_addr;
+ void *cmdr;
u32 cmdr_size;
u32 cmdr_last_cleaned;
/* Offset of data area from start of mb */
/* Must add data_off and mb_addr to get the address */
size_t data_off;
- size_t data_size;
+ int data_area_mb;
uint32_t max_blocks;
- size_t ring_size;
+ size_t mmap_pages;
struct mutex cmdr_lock;
struct list_head qfull_queue;
+ struct list_head tmr_queue;
uint32_t dbi_max;
uint32_t dbi_thresh;
unsigned long *data_bitmap;
- struct radix_tree_root data_blocks;
+ struct xarray data_pages;
+ uint32_t data_pages_per_blk;
+ uint32_t data_blk_size;
- struct idr commands;
+ struct xarray commands;
struct timer_list cmd_timer;
unsigned int cmd_time_out;
@@ -163,8 +173,6 @@ struct tcmu_dev {
#define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev)
-#define CMDR_OFF sizeof(struct tcmu_mailbox)
-
struct tcmu_cmd {
struct se_cmd *se_cmd;
struct tcmu_dev *tcmu_dev;
@@ -175,15 +183,27 @@ struct tcmu_cmd {
/* Can't use se_cmd when cleaning up expired cmds, because if
cmd has been completed then accessing se_cmd is off limits */
uint32_t dbi_cnt;
+ uint32_t dbi_bidi_cnt;
uint32_t dbi_cur;
uint32_t *dbi;
+ uint32_t data_len_bidi;
+
unsigned long deadline;
#define TCMU_CMD_BIT_EXPIRED 0
-#define TCMU_CMD_BIT_INFLIGHT 1
+#define TCMU_CMD_BIT_KEEP_BUF 1
unsigned long flags;
};
+
+struct tcmu_tmr {
+ struct list_head queue_entry;
+
+ uint8_t tmr_type;
+ uint32_t tmr_cmd_cnt;
+ int16_t tmr_cmd_ids[] __counted_by(tmr_cmd_cnt);
+};
+
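
struct tcmu_tmr ends in a flexible array of command ids sized at allocation time; the driver later allocates it with struct_size(tmr, tmr_cmd_ids, cmd_cnt) in tcmu_tmr_notify(). A hypothetical userspace sketch of the same pattern (the kernel's struct_size() additionally saturates on overflow, which the open-coded helper below does not):

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

struct tmr {
	uint8_t  tmr_type;
	uint32_t tmr_cmd_cnt;
	int16_t  tmr_cmd_ids[];	/* flexible array member */
};

/* open-coded equivalent of struct_size(tmr, tmr_cmd_ids, cnt) */
static size_t tmr_size(uint32_t cnt)
{
	return sizeof(struct tmr) + (size_t)cnt * sizeof(int16_t);
}

int main(void)
{
	uint32_t cnt = 3;
	struct tmr *tmr = malloc(tmr_size(cnt));
	uint32_t i;

	if (!tmr)
		return 1;
	tmr->tmr_type = 1;	/* illustrative value */
	tmr->tmr_cmd_cnt = cnt;
	for (i = 0; i < cnt; i++)
		tmr->tmr_cmd_ids[i] = (int16_t)(100 + i);
	printf("allocated %zu bytes for %u cmd ids\n", tmr_size(cnt), cnt);
	free(tmr);
	return 0;
}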
/*
* To avoid dead lock the mutex lock order should always be:
*
@@ -202,9 +222,9 @@ static LIST_HEAD(timed_out_udevs);
static struct kmem_cache *tcmu_cmd_cache;
-static atomic_t global_db_count = ATOMIC_INIT(0);
+static atomic_t global_page_count = ATOMIC_INIT(0);
static struct delayed_work tcmu_unmap_work;
-static int tcmu_global_max_blocks = TCMU_GLOBAL_MAX_BLOCKS_DEF;
+static int tcmu_global_max_pages = TCMU_GLOBAL_MAX_PAGES_DEF;
static int tcmu_set_global_max_data_area(const char *str,
const struct kernel_param *kp)
@@ -220,8 +240,8 @@ static int tcmu_set_global_max_data_area(const char *str,
return -EINVAL;
}
- tcmu_global_max_blocks = TCMU_MBS_TO_BLOCKS(max_area_mb);
- if (atomic_read(&global_db_count) > tcmu_global_max_blocks)
+ tcmu_global_max_pages = TCMU_MBS_TO_PAGES(max_area_mb);
+ if (atomic_read(&global_page_count) > tcmu_global_max_pages)
schedule_delayed_work(&tcmu_unmap_work, 0);
else
cancel_delayed_work_sync(&tcmu_unmap_work);
@@ -232,7 +252,7 @@ static int tcmu_set_global_max_data_area(const char *str,
static int tcmu_get_global_max_data_area(char *buffer,
const struct kernel_param *kp)
{
- return sprintf(buffer, "%d", TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks));
+ return sprintf(buffer, "%d\n", TCMU_PAGES_TO_MBS(tcmu_global_max_pages));
}
static const struct kernel_param_ops tcmu_global_max_data_area_op = {
@@ -341,7 +361,7 @@ static const struct genl_multicast_group tcmu_mcgrps[] = {
[TCMU_MCGRP_CONFIG] = { .name = "config", },
};
-static struct nla_policy tcmu_attr_policy[TCMU_ATTR_MAX+1] = {
+static const struct nla_policy tcmu_attr_policy[TCMU_ATTR_MAX + 1] = {
[TCMU_ATTR_DEVICE] = { .type = NLA_STRING },
[TCMU_ATTR_MINOR] = { .type = NLA_U32 },
[TCMU_ATTR_CMD_STATUS] = { .type = NLA_S32 },
@@ -426,7 +446,7 @@ static int tcmu_genl_set_features(struct sk_buff *skb, struct genl_info *info)
return 0;
}
-static const struct genl_ops tcmu_genl_ops[] = {
+static const struct genl_small_ops tcmu_genl_ops[] = {
{
.cmd = TCMU_CMD_SET_FEATURES,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
@@ -464,8 +484,9 @@ static struct genl_family tcmu_genl_family __ro_after_init = {
.mcgrps = tcmu_mcgrps,
.n_mcgrps = ARRAY_SIZE(tcmu_mcgrps),
.netnsok = true,
- .ops = tcmu_genl_ops,
- .n_ops = ARRAY_SIZE(tcmu_genl_ops),
+ .small_ops = tcmu_genl_ops,
+ .n_small_ops = ARRAY_SIZE(tcmu_genl_ops),
+ .resv_start_op = TCMU_CMD_SET_FEATURES + 1,
};
#define tcmu_cmd_set_dbi_cur(cmd, index) ((cmd)->dbi_cur = (index))
@@ -482,62 +503,70 @@ static void tcmu_cmd_free_data(struct tcmu_cmd *tcmu_cmd, uint32_t len)
clear_bit(tcmu_cmd->dbi[i], udev->data_bitmap);
}
-static inline bool tcmu_get_empty_block(struct tcmu_dev *udev,
- struct tcmu_cmd *tcmu_cmd)
+static inline int tcmu_get_empty_block(struct tcmu_dev *udev,
+ struct tcmu_cmd *tcmu_cmd,
+ int prev_dbi, int length, int *iov_cnt)
{
+ XA_STATE(xas, &udev->data_pages, 0);
struct page *page;
- int ret, dbi;
+ int i, cnt, dbi, dpi;
+ int page_cnt = DIV_ROUND_UP(length, PAGE_SIZE);
dbi = find_first_zero_bit(udev->data_bitmap, udev->dbi_thresh);
if (dbi == udev->dbi_thresh)
- return false;
-
- page = radix_tree_lookup(&udev->data_blocks, dbi);
- if (!page) {
- if (atomic_add_return(1, &global_db_count) >
- tcmu_global_max_blocks)
- schedule_delayed_work(&tcmu_unmap_work, 0);
+ return -1;
- /* try to get new page from the mm */
- page = alloc_page(GFP_NOIO);
+ dpi = dbi * udev->data_pages_per_blk;
+ /* Count the number of already allocated pages */
+ xas_set(&xas, dpi);
+ rcu_read_lock();
+ for (cnt = 0; xas_next(&xas) && cnt < page_cnt;)
+ cnt++;
+ rcu_read_unlock();
+
+ for (i = cnt; i < page_cnt; i++) {
+ /* try to get new zeroed page from the mm */
+ page = alloc_page(GFP_NOIO | __GFP_ZERO);
if (!page)
- goto err_alloc;
+ break;
- ret = radix_tree_insert(&udev->data_blocks, dbi, page);
- if (ret)
- goto err_insert;
+ if (xa_store(&udev->data_pages, dpi + i, page, GFP_NOIO)) {
+ __free_page(page);
+ break;
+ }
}
+ if (atomic_add_return(i - cnt, &global_page_count) >
+ tcmu_global_max_pages)
+ schedule_delayed_work(&tcmu_unmap_work, 0);
- if (dbi > udev->dbi_max)
+ if (i && dbi > udev->dbi_max)
udev->dbi_max = dbi;
set_bit(dbi, udev->data_bitmap);
tcmu_cmd_set_dbi(tcmu_cmd, dbi);
- return true;
-err_insert:
- __free_page(page);
-err_alloc:
- atomic_dec(&global_db_count);
- return false;
+ if (dbi != prev_dbi + 1)
+ *iov_cnt += 1;
+
+ return i == page_cnt ? dbi : -1;
}
-static bool tcmu_get_empty_blocks(struct tcmu_dev *udev,
- struct tcmu_cmd *tcmu_cmd)
+static int tcmu_get_empty_blocks(struct tcmu_dev *udev,
+ struct tcmu_cmd *tcmu_cmd, int length)
{
- int i;
+ /* start value of dbi + 1 must not be a valid dbi */
+ int dbi = -2;
+ int blk_data_len, iov_cnt = 0;
+ uint32_t blk_size = udev->data_blk_size;
- for (i = tcmu_cmd->dbi_cur; i < tcmu_cmd->dbi_cnt; i++) {
- if (!tcmu_get_empty_block(udev, tcmu_cmd))
- return false;
+ for (; length > 0; length -= blk_size) {
+ blk_data_len = min_t(uint32_t, length, blk_size);
+ dbi = tcmu_get_empty_block(udev, tcmu_cmd, dbi, blk_data_len,
+ &iov_cnt);
+ if (dbi < 0)
+ return -1;
}
- return true;
-}
-
-static inline struct page *
-tcmu_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
-{
- return radix_tree_lookup(&udev->data_blocks, dbi);
+ return iov_cnt;
}
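
With data_pages_per_blk pages per block, block index dbi covers page indices [dbi * ppb, (dbi + 1) * ppb), and tcmu_get_empty_block() first counts pages that survived earlier commands before allocating the remainder. A toy sketch of that arithmetic, with a plain array standing in for the xarray and ppb = 4 as an assumed example value:

#include <stdio.h>

#define PPB    4	/* assumption: data_pages_per_blk = 4 */
#define NPAGES 64

int main(void)
{
	int pages[NPAGES] = { 0 };	/* nonzero = page already allocated */
	int dbi = 3, page_cnt = 3;	/* want 3 pages in block 3 */
	int dpi = dbi * PPB;		/* first page index of that block */
	int cnt, i;

	pages[dpi] = 1;	/* pretend one page survived an earlier command */

	/* count pages already present, like the xas_next() loop */
	for (cnt = 0; cnt < page_cnt && pages[dpi + cnt]; cnt++)
		;
	/* allocate the rest, like alloc_page() + xa_store() */
	for (i = cnt; i < page_cnt; i++)
		pages[dpi + i] = 1;

	printf("block %d -> pages [%d..%d), reused %d, newly allocated %d\n",
	       dbi, dpi, dpi + PPB, cnt, i - cnt);
	return 0;
}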
static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd)
@@ -546,25 +575,59 @@ static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd)
kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
}
-static inline size_t tcmu_cmd_get_data_length(struct tcmu_cmd *tcmu_cmd)
+static inline void tcmu_cmd_set_block_cnts(struct tcmu_cmd *cmd)
{
- struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
- size_t data_length = round_up(se_cmd->data_length, DATA_BLOCK_SIZE);
+ int i, len;
+ struct se_cmd *se_cmd = cmd->se_cmd;
+ uint32_t blk_size = cmd->tcmu_dev->data_blk_size;
+
+ cmd->dbi_cnt = DIV_ROUND_UP(se_cmd->data_length, blk_size);
if (se_cmd->se_cmd_flags & SCF_BIDI) {
BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
- data_length += round_up(se_cmd->t_bidi_data_sg->length,
- DATA_BLOCK_SIZE);
+ for (i = 0, len = 0; i < se_cmd->t_bidi_data_nents; i++)
+ len += se_cmd->t_bidi_data_sg[i].length;
+ cmd->dbi_bidi_cnt = DIV_ROUND_UP(len, blk_size);
+ cmd->dbi_cnt += cmd->dbi_bidi_cnt;
+ cmd->data_len_bidi = len;
}
+}
- return data_length;
+static int new_block_to_iov(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
+ struct iovec **iov, int prev_dbi, int len)
+{
+ /* Get the next dbi */
+ int dbi = tcmu_cmd_get_dbi(cmd);
+
+ /* Do not add more than udev->data_blk_size to iov */
+ len = min_t(int, len, udev->data_blk_size);
+
+ /*
+ * The following code will gather and map the blocks to the same iovec
+ * when the blocks are all next to each other.
+ */
+ if (dbi != prev_dbi + 1) {
+ /* dbi is not next to previous dbi, so start new iov */
+ if (prev_dbi >= 0)
+ (*iov)++;
+ /* write offset relative to mb_addr */
+ (*iov)->iov_base = (void __user *)
+ (udev->data_off + dbi * udev->data_blk_size);
+ }
+ (*iov)->iov_len += len;
+
+ return dbi;
}
-static inline uint32_t tcmu_cmd_get_block_cnt(struct tcmu_cmd *tcmu_cmd)
+static void tcmu_setup_iovs(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
+ struct iovec **iov, int data_length)
{
- size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
+ /* start value of dbi + 1 must not be a valid dbi */
+ int dbi = -2;
- return data_length / DATA_BLOCK_SIZE;
+ /* We prepare the IOVs for DMA_FROM_DEVICE transfer direction */
+ for (; data_length > 0; data_length -= udev->data_blk_size)
+ dbi = new_block_to_iov(udev, cmd, iov, dbi, data_length);
}
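
new_block_to_iov() starts from the sentinel dbi = -2 so the first block can never look adjacent to a predecessor, and merges consecutive blocks into one iovec. A self-contained sketch of the coalescing (BLK_SIZE is an assumed example value, and the driver additionally adds data_off to each base):

#include <stdio.h>

#define BLK_SIZE 8192L	/* assumption: data_blk_size */

struct iov { long base; long len; };

int main(void)
{
	int dbis[] = { 2, 3, 4, 7, 8 };	/* blocks handed to one command */
	int n = sizeof(dbis) / sizeof(dbis[0]);
	struct iov iovs[8] = { { 0, 0 } };
	struct iov *iov = &iovs[0], *p;
	int prev = -2;	/* start value: prev + 1 is never a valid dbi */
	int i;

	for (i = 0; i < n; i++) {
		int dbi = dbis[i];

		if (dbi != prev + 1) {	/* not adjacent: start a new iovec */
			if (prev >= 0)
				iov++;
			/* offset in data area; the driver adds data_off */
			iov->base = dbi * BLK_SIZE;
		}
		iov->len += BLK_SIZE;	/* adjacent: just extend */
		prev = dbi;
	}
	for (p = &iovs[0]; p <= iov; p++)
		printf("iov: off=%ld len=%ld\n", p->base, p->len);
	/* two iovecs: blocks 2-4 merged, blocks 7-8 merged */
	return 0;
}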
static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
@@ -581,8 +644,7 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
tcmu_cmd->se_cmd = se_cmd;
tcmu_cmd->tcmu_dev = udev;
- tcmu_cmd_reset_dbi_cur(tcmu_cmd);
- tcmu_cmd->dbi_cnt = tcmu_cmd_get_block_cnt(tcmu_cmd);
+ tcmu_cmd_set_block_cnts(tcmu_cmd);
tcmu_cmd->dbi = kcalloc(tcmu_cmd->dbi_cnt, sizeof(uint32_t),
GFP_NOIO);
if (!tcmu_cmd->dbi) {
@@ -601,7 +663,7 @@ static inline void tcmu_flush_dcache_range(void *vaddr, size_t size)
size = round_up(size+offset, PAGE_SIZE);
while (size) {
- flush_dcache_page(virt_to_page(start));
+ flush_dcache_page(vmalloc_to_page(start));
start += PAGE_SIZE;
size -= PAGE_SIZE;
}
@@ -632,172 +694,117 @@ static inline size_t head_to_end(size_t head, size_t size)
return size - head;
}
-static inline void new_iov(struct iovec **iov, int *iov_cnt)
-{
- struct iovec *iovec;
-
- if (*iov_cnt != 0)
- (*iov)++;
- (*iov_cnt)++;
-
- iovec = *iov;
- memset(iovec, 0, sizeof(struct iovec));
-}
-
#define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size)
-/* offset is relative to mb_addr */
-static inline size_t get_block_offset_user(struct tcmu_dev *dev,
- int dbi, int remaining)
-{
- return dev->data_off + dbi * DATA_BLOCK_SIZE +
- DATA_BLOCK_SIZE - remaining;
-}
-
-static inline size_t iov_tail(struct iovec *iov)
-{
- return (size_t)iov->iov_base + iov->iov_len;
-}
+#define TCMU_SG_TO_DATA_AREA 1
+#define TCMU_DATA_AREA_TO_SG 2
-static void scatter_data_area(struct tcmu_dev *udev,
- struct tcmu_cmd *tcmu_cmd, struct scatterlist *data_sg,
- unsigned int data_nents, struct iovec **iov,
- int *iov_cnt, bool copy_data)
+static inline void tcmu_copy_data(struct tcmu_dev *udev,
+ struct tcmu_cmd *tcmu_cmd, uint32_t direction,
+ struct scatterlist *sg, unsigned int sg_nents,
+ struct iovec **iov, size_t data_len)
{
- int i, dbi;
- int block_remaining = 0;
- void *from, *to = NULL;
- size_t copy_bytes, to_offset, offset;
- struct scatterlist *sg;
+ /* start value of dbi + 1 must not be a valid dbi */
+ int dbi = -2;
+ size_t page_remaining, cp_len;
+ int page_cnt, page_inx, dpi;
+ struct sg_mapping_iter sg_iter;
+ unsigned int sg_flags;
struct page *page;
+ void *data_page_start, *data_addr;
- for_each_sg(data_sg, sg, data_nents, i) {
- int sg_remaining = sg->length;
- from = kmap_atomic(sg_page(sg)) + sg->offset;
- while (sg_remaining > 0) {
- if (block_remaining == 0) {
- if (to)
- kunmap_atomic(to);
-
- block_remaining = DATA_BLOCK_SIZE;
- dbi = tcmu_cmd_get_dbi(tcmu_cmd);
- page = tcmu_get_block_page(udev, dbi);
- to = kmap_atomic(page);
- }
-
- /*
- * Covert to virtual offset of the ring data area.
- */
- to_offset = get_block_offset_user(udev, dbi,
- block_remaining);
-
- /*
- * The following code will gather and map the blocks
- * to the same iovec when the blocks are all next to
- * each other.
- */
- copy_bytes = min_t(size_t, sg_remaining,
- block_remaining);
- if (*iov_cnt != 0 &&
- to_offset == iov_tail(*iov)) {
- /*
- * Will append to the current iovec, because
- * the current block page is next to the
- * previous one.
- */
- (*iov)->iov_len += copy_bytes;
- } else {
- /*
- * Will allocate a new iovec because we are
- * first time here or the current block page
- * is not next to the previous one.
- */
- new_iov(iov, iov_cnt);
- (*iov)->iov_base = (void __user *)to_offset;
- (*iov)->iov_len = copy_bytes;
- }
+ if (direction == TCMU_SG_TO_DATA_AREA)
+ sg_flags = SG_MITER_ATOMIC | SG_MITER_FROM_SG;
+ else
+ sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;
+ sg_miter_start(&sg_iter, sg, sg_nents, sg_flags);
- if (copy_data) {
- offset = DATA_BLOCK_SIZE - block_remaining;
- memcpy(to + offset,
- from + sg->length - sg_remaining,
- copy_bytes);
- tcmu_flush_dcache_range(to, copy_bytes);
+ while (data_len) {
+ if (direction == TCMU_SG_TO_DATA_AREA)
+ dbi = new_block_to_iov(udev, tcmu_cmd, iov, dbi,
+ data_len);
+ else
+ dbi = tcmu_cmd_get_dbi(tcmu_cmd);
+
+ page_cnt = DIV_ROUND_UP(data_len, PAGE_SIZE);
+ if (page_cnt > udev->data_pages_per_blk)
+ page_cnt = udev->data_pages_per_blk;
+
+ dpi = dbi * udev->data_pages_per_blk;
+ for (page_inx = 0; page_inx < page_cnt && data_len;
+ page_inx++, dpi++) {
+ page = xa_load(&udev->data_pages, dpi);
+
+ if (direction == TCMU_DATA_AREA_TO_SG)
+ flush_dcache_page(page);
+ data_page_start = kmap_atomic(page);
+ page_remaining = PAGE_SIZE;
+
+ while (page_remaining && data_len) {
+ if (!sg_miter_next(&sg_iter)) {
+ /* set length to 0 to abort outer loop */
+ data_len = 0;
+ pr_debug("%s: aborting data copy due to exhausted sg_list\n",
+ __func__);
+ break;
+ }
+ cp_len = min3(sg_iter.length, page_remaining,
+ data_len);
+
+ data_addr = data_page_start +
+ PAGE_SIZE - page_remaining;
+ if (direction == TCMU_SG_TO_DATA_AREA)
+ memcpy(data_addr, sg_iter.addr, cp_len);
+ else
+ memcpy(sg_iter.addr, data_addr, cp_len);
+
+ data_len -= cp_len;
+ page_remaining -= cp_len;
+ sg_iter.consumed = cp_len;
}
+ sg_miter_stop(&sg_iter);
- sg_remaining -= copy_bytes;
- block_remaining -= copy_bytes;
+ kunmap_atomic(data_page_start);
+ if (direction == TCMU_SG_TO_DATA_AREA)
+ flush_dcache_page(page);
}
- kunmap_atomic(from - sg->offset);
}
+}
+
+static void scatter_data_area(struct tcmu_dev *udev, struct tcmu_cmd *tcmu_cmd,
+ struct iovec **iov)
+{
+ struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
- if (to)
- kunmap_atomic(to);
+ tcmu_copy_data(udev, tcmu_cmd, TCMU_SG_TO_DATA_AREA, se_cmd->t_data_sg,
+ se_cmd->t_data_nents, iov, se_cmd->data_length);
}
-static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
+static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *tcmu_cmd,
bool bidi, uint32_t read_len)
{
- struct se_cmd *se_cmd = cmd->se_cmd;
- int i, dbi;
- int block_remaining = 0;
- void *from = NULL, *to;
- size_t copy_bytes, offset;
- struct scatterlist *sg, *data_sg;
- struct page *page;
+ struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
+ struct scatterlist *data_sg;
unsigned int data_nents;
- uint32_t count = 0;
if (!bidi) {
data_sg = se_cmd->t_data_sg;
data_nents = se_cmd->t_data_nents;
} else {
-
/*
* For bidi case, the first count blocks are for Data-Out
* buffer blocks, and before gathering the Data-In buffer
- * the Data-Out buffer blocks should be discarded.
+ * the Data-Out buffer blocks should be skipped.
*/
- count = DIV_ROUND_UP(se_cmd->data_length, DATA_BLOCK_SIZE);
+ tcmu_cmd_set_dbi_cur(tcmu_cmd,
+ tcmu_cmd->dbi_cnt - tcmu_cmd->dbi_bidi_cnt);
data_sg = se_cmd->t_bidi_data_sg;
data_nents = se_cmd->t_bidi_data_nents;
}
- tcmu_cmd_set_dbi_cur(cmd, count);
-
- for_each_sg(data_sg, sg, data_nents, i) {
- int sg_remaining = sg->length;
- to = kmap_atomic(sg_page(sg)) + sg->offset;
- while (sg_remaining > 0 && read_len > 0) {
- if (block_remaining == 0) {
- if (from)
- kunmap_atomic(from);
-
- block_remaining = DATA_BLOCK_SIZE;
- dbi = tcmu_cmd_get_dbi(cmd);
- page = tcmu_get_block_page(udev, dbi);
- from = kmap_atomic(page);
- }
- copy_bytes = min_t(size_t, sg_remaining,
- block_remaining);
- if (read_len < copy_bytes)
- copy_bytes = read_len;
- offset = DATA_BLOCK_SIZE - block_remaining;
- tcmu_flush_dcache_range(from, copy_bytes);
- memcpy(to + sg->length - sg_remaining, from + offset,
- copy_bytes);
-
- sg_remaining -= copy_bytes;
- block_remaining -= copy_bytes;
- read_len -= copy_bytes;
- }
- kunmap_atomic(to - sg->offset);
- if (read_len == 0)
- break;
- }
- if (from)
- kunmap_atomic(from);
+ tcmu_copy_data(udev, tcmu_cmd, TCMU_DATA_AREA_TO_SG, data_sg,
+ data_nents, NULL, read_len);
}
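
tcmu_copy_data() advances a scatterlist iterator and a data-area page cursor in lockstep, copying min3(sg chunk, page remainder, data remainder) per step. A userspace analogue with plain buffers in place of sg_miter and kmap_atomic(), using a deliberately tiny page size:

#include <stdio.h>
#include <string.h>
#include <stddef.h>

#define PAGE_SZ 8	/* assumption: tiny page size for demonstration */

static size_t min3(size_t a, size_t b, size_t c)
{
	size_t m = a < b ? a : b;

	return m < c ? m : c;
}

int main(void)
{
	/* "scatterlist": two segments of different lengths */
	char seg0[] = "HelloScat", seg1[] = "terCopy";
	struct { char *p; size_t len; } sg[2] = {
		{ seg0, strlen(seg0) }, { seg1, strlen(seg1) },
	};
	char pages[4][PAGE_SZ];			/* stand-in data-area pages */
	size_t data_len = sg[0].len + sg[1].len;
	size_t sg_i = 0, sg_off = 0;		/* like sg_miter state */
	size_t pg = 0, pg_off = 0;		/* data-area cursor */

	while (data_len) {
		size_t cp = min3(sg[sg_i].len - sg_off,
				 PAGE_SZ - pg_off, data_len);

		memcpy(&pages[pg][pg_off], sg[sg_i].p + sg_off, cp);
		sg_off += cp;
		pg_off += cp;
		data_len -= cp;
		if (sg_off == sg[sg_i].len) {	/* like sg_miter_next() */
			sg_i++;
			sg_off = 0;
		}
		if (pg_off == PAGE_SZ) {	/* next data-area page */
			pg++;
			pg_off = 0;
		}
	}
	printf("filled %zu full page(s), %zu bytes in the next\n", pg, pg_off);
	return 0;
}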
static inline size_t spc_bitmap_free(unsigned long *bitmap, uint32_t thresh)
@@ -806,17 +813,13 @@ static inline size_t spc_bitmap_free(unsigned long *bitmap, uint32_t thresh)
}
/*
- * We can't queue a command until we have space available on the cmd ring *and*
- * space available on the data area.
+ * We can't queue a command until we have space available on the cmd ring.
*
* Called with ring lock held.
*/
-static bool is_ring_space_avail(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
- size_t cmd_size, size_t data_needed)
+static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size)
{
struct tcmu_mailbox *mb = udev->mb_addr;
- uint32_t blocks_needed = (data_needed + DATA_BLOCK_SIZE - 1)
- / DATA_BLOCK_SIZE;
size_t space, cmd_needed;
u32 cmd_head;
@@ -839,26 +842,53 @@ static bool is_ring_space_avail(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
udev->cmdr_last_cleaned, udev->cmdr_size);
return false;
}
+ return true;
+}
+
+/*
+ * We have to allocate data buffers before we can queue a command.
+ * Returns -1 on error (not enough space) or number of needed iovs on success
+ *
+ * Called with ring lock held.
+ */
+static int tcmu_alloc_data_space(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
+ int *iov_bidi_cnt)
+{
+ int space, iov_cnt = 0, ret = 0;
+
+ if (!cmd->dbi_cnt)
+ goto wr_iov_cnts;
/* try to check and get the data blocks as needed */
space = spc_bitmap_free(udev->data_bitmap, udev->dbi_thresh);
- if ((space * DATA_BLOCK_SIZE) < data_needed) {
+ if (space < cmd->dbi_cnt) {
unsigned long blocks_left =
(udev->max_blocks - udev->dbi_thresh) + space;
- if (blocks_left < blocks_needed) {
- pr_debug("no data space: only %lu available, but ask for %zu\n",
- blocks_left * DATA_BLOCK_SIZE,
- data_needed);
- return false;
+ if (blocks_left < cmd->dbi_cnt) {
+ pr_debug("no data space: only %lu available, but ask for %u\n",
+ blocks_left * udev->data_blk_size,
+ cmd->dbi_cnt * udev->data_blk_size);
+ return -1;
}
- udev->dbi_thresh += blocks_needed;
+ udev->dbi_thresh += cmd->dbi_cnt;
if (udev->dbi_thresh > udev->max_blocks)
udev->dbi_thresh = udev->max_blocks;
}
- return tcmu_get_empty_blocks(udev, cmd);
+ iov_cnt = tcmu_get_empty_blocks(udev, cmd, cmd->se_cmd->data_length);
+ if (iov_cnt < 0)
+ return -1;
+
+ if (cmd->dbi_bidi_cnt) {
+ ret = tcmu_get_empty_blocks(udev, cmd, cmd->data_len_bidi);
+ if (ret < 0)
+ return -1;
+ }
+wr_iov_cnts:
+ *iov_bidi_cnt = ret;
+ return iov_cnt + ret;
}
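
The allocator only scans the bitmap below dbi_thresh and grows the threshold on demand, counting never-used blocks above it as still available. A sketch of that accounting with assumed example numbers:

#include <stdio.h>

int main(void)
{
	unsigned max_blocks = 100, dbi_thresh = 20;
	unsigned space = 5;	/* free bits found below dbi_thresh */
	unsigned dbi_cnt = 12;	/* blocks the command needs */

	if (space < dbi_cnt) {
		/* blocks never handed out yet, plus free ones below thresh */
		unsigned blocks_left = (max_blocks - dbi_thresh) + space;

		if (blocks_left < dbi_cnt) {
			printf("no data space\n");
			return 1;
		}
		dbi_thresh += dbi_cnt;
		if (dbi_thresh > max_blocks)
			dbi_thresh = max_blocks;
	}
	printf("dbi_thresh grown to %u\n", dbi_thresh);	/* 32 */
	return 0;
}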
static inline size_t tcmu_cmd_get_base_cmd_size(size_t iov_cnt)
@@ -922,6 +952,53 @@ static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd)
return 0;
}
+static uint32_t ring_insert_padding(struct tcmu_dev *udev, size_t cmd_size)
+{
+ struct tcmu_cmd_entry_hdr *hdr;
+ struct tcmu_mailbox *mb = udev->mb_addr;
+ uint32_t cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
+
+ /* Insert a PAD if end-of-ring space is too small */
+ if (head_to_end(cmd_head, udev->cmdr_size) < cmd_size) {
+ size_t pad_size = head_to_end(cmd_head, udev->cmdr_size);
+
+ hdr = udev->cmdr + cmd_head;
+ tcmu_hdr_set_op(&hdr->len_op, TCMU_OP_PAD);
+ tcmu_hdr_set_len(&hdr->len_op, pad_size);
+ hdr->cmd_id = 0; /* not used for PAD */
+ hdr->kflags = 0;
+ hdr->uflags = 0;
+ tcmu_flush_dcache_range(hdr, sizeof(*hdr));
+
+ UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size);
+ tcmu_flush_dcache_range(mb, sizeof(*mb));
+
+ cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
+ WARN_ON(cmd_head != 0);
+ }
+
+ return cmd_head;
+}
+
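
ring_insert_padding() emits a PAD entry whenever the contiguous space up to the end of the ring is smaller than the next entry, after which the head wraps to offset 0. The arithmetic, with assumed example sizes:

#include <stdio.h>

static unsigned head_to_end(unsigned head, unsigned size)
{
	return size - head;
}

int main(void)
{
	unsigned cmdr_size = 4096;
	unsigned cmd_head = 3900;	/* ring head, already wrapped into range */
	unsigned cmd_size = 512;	/* next entry does not fit contiguously */

	if (head_to_end(cmd_head, cmdr_size) < cmd_size) {
		unsigned pad = head_to_end(cmd_head, cmdr_size);

		printf("PAD of %u bytes at offset %u\n", pad, cmd_head);
		/* UPDATE_HEAD: advance and wrap */
		cmd_head = (cmd_head + pad) % cmdr_size;
	}
	printf("new cmd_head = %u (entry now starts at ring offset 0)\n",
	       cmd_head);
	return 0;
}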
+static void tcmu_unplug_device(struct se_dev_plug *se_plug)
+{
+ struct se_device *se_dev = se_plug->se_dev;
+ struct tcmu_dev *udev = TCMU_DEV(se_dev);
+
+ clear_bit(TCMU_DEV_BIT_PLUGGED, &udev->flags);
+ uio_event_notify(&udev->uio_info);
+}
+
+static struct se_dev_plug *tcmu_plug_device(struct se_device *se_dev)
+{
+ struct tcmu_dev *udev = TCMU_DEV(se_dev);
+
+ if (!test_and_set_bit(TCMU_DEV_BIT_PLUGGED, &udev->flags))
+ return &udev->se_plug;
+
+ return NULL;
+}
+
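
Plugging turns per-command wakeups into one wakeup per batch: only the queuer that wins test_and_set_bit() gets a plug handle, and the matching unplug issues the single uio_event_notify(). A tiny C11 sketch of the gate:

#include <stdio.h>
#include <stdbool.h>
#include <stdatomic.h>

static atomic_flag plugged = ATOMIC_FLAG_INIT;

static bool plug(void)		/* like tcmu_plug_device() */
{
	/* only the first caller since the last unplug gets a handle */
	return !atomic_flag_test_and_set(&plugged);
}

static void unplug(void)	/* like tcmu_unplug_device() */
{
	atomic_flag_clear(&plugged);
	printf("uio_event_notify(): one wakeup for the whole batch\n");
}

int main(void)
{
	bool first = plug();	/* true: this queuer will unplug later */
	bool later = plug();	/* false: command queued, no extra wakeup */

	printf("first=%d later=%d\n", first, later);
	if (first)
		unplug();
	return 0;
}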
/**
* queue_cmd_ring - queue cmd to ring or internally
* @tcmu_cmd: cmd to queue
@@ -937,14 +1014,15 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
size_t base_command_size, command_size;
- struct tcmu_mailbox *mb;
+ struct tcmu_mailbox *mb = udev->mb_addr;
struct tcmu_cmd_entry *entry;
struct iovec *iov;
- int iov_cnt, cmd_id;
- uint32_t cmd_head;
+ int iov_cnt, iov_bidi_cnt;
+ uint32_t cmd_id, cmd_head;
uint64_t cdb_off;
- bool copy_to_data_area;
- size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
+ uint32_t blk_size = udev->data_blk_size;
+ /* size of data buffer needed */
+ size_t data_length = (size_t)tcmu_cmd->dbi_cnt * blk_size;
*scsi_err = TCM_NO_SENSE;
@@ -958,114 +1036,84 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
return -1;
}
+ if (!list_empty(&udev->qfull_queue))
+ goto queue;
+
+ if (data_length > (size_t)udev->max_blocks * blk_size) {
+ pr_warn("TCMU: Request of size %zu is too big for %zu data area\n",
+ data_length, (size_t)udev->max_blocks * blk_size);
+ *scsi_err = TCM_INVALID_CDB_FIELD;
+ return -1;
+ }
+
+ iov_cnt = tcmu_alloc_data_space(udev, tcmu_cmd, &iov_bidi_cnt);
+ if (iov_cnt < 0)
+ goto free_and_queue;
+
/*
* Must be a certain minimum size for response sense info, but
* also may be larger if the iov array is large.
- *
- * We prepare as many iovs as possbile for potential uses here,
- * because it's expensive to tell how many regions are freed in
- * the bitmap & global data pool, as the size calculated here
- * will only be used to do the checks.
- *
- * The size will be recalculated later as actually needed to save
- * cmd area memories.
*/
- base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt);
+ base_command_size = tcmu_cmd_get_base_cmd_size(iov_cnt);
command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);
- if (!list_empty(&udev->qfull_queue))
- goto queue;
-
- mb = udev->mb_addr;
- cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
- if ((command_size > (udev->cmdr_size / 2)) ||
- data_length > udev->data_size) {
- pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu "
- "cmd ring/data area\n", command_size, data_length,
- udev->cmdr_size, udev->data_size);
+ if (command_size > (udev->cmdr_size / 2)) {
+ pr_warn("TCMU: Request of size %zu is too big for %u cmd ring\n",
+ command_size, udev->cmdr_size);
+ tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur);
*scsi_err = TCM_INVALID_CDB_FIELD;
return -1;
}
- if (!is_ring_space_avail(udev, tcmu_cmd, command_size, data_length)) {
+ if (!is_ring_space_avail(udev, command_size))
/*
* Don't leave commands partially setup because the unmap
* thread might need the blocks to make forward progress.
*/
- tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur);
- tcmu_cmd_reset_dbi_cur(tcmu_cmd);
- goto queue;
- }
+ goto free_and_queue;
- /* Insert a PAD if end-of-ring space is too small */
- if (head_to_end(cmd_head, udev->cmdr_size) < command_size) {
- size_t pad_size = head_to_end(cmd_head, udev->cmdr_size);
+ if (xa_alloc(&udev->commands, &cmd_id, tcmu_cmd, XA_LIMIT(1, 0xffff),
+ GFP_NOWAIT) < 0) {
+ pr_err("tcmu: Could not allocate cmd id.\n");
- entry = (void *) mb + CMDR_OFF + cmd_head;
- tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_PAD);
- tcmu_hdr_set_len(&entry->hdr.len_op, pad_size);
- entry->hdr.cmd_id = 0; /* not used for PAD */
- entry->hdr.kflags = 0;
- entry->hdr.uflags = 0;
- tcmu_flush_dcache_range(entry, sizeof(entry->hdr));
+ tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
+ *scsi_err = TCM_OUT_OF_RESOURCES;
+ return -1;
+ }
+ tcmu_cmd->cmd_id = cmd_id;
- UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size);
- tcmu_flush_dcache_range(mb, sizeof(*mb));
+ pr_debug("allocated cmd id %u for cmd %p dev %s\n", tcmu_cmd->cmd_id,
+ tcmu_cmd, udev->name);
- cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
- WARN_ON(cmd_head != 0);
- }
+ cmd_head = ring_insert_padding(udev, command_size);
- entry = (void *) mb + CMDR_OFF + cmd_head;
+ entry = udev->cmdr + cmd_head;
memset(entry, 0, command_size);
tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);
- /* Handle allocating space from the data area */
+ /* prepare iov list and copy data to data area if necessary */
tcmu_cmd_reset_dbi_cur(tcmu_cmd);
iov = &entry->req.iov[0];
- iov_cnt = 0;
- copy_to_data_area = (se_cmd->data_direction == DMA_TO_DEVICE
- || se_cmd->se_cmd_flags & SCF_BIDI);
- scatter_data_area(udev, tcmu_cmd, se_cmd->t_data_sg,
- se_cmd->t_data_nents, &iov, &iov_cnt,
- copy_to_data_area);
- entry->req.iov_cnt = iov_cnt;
+
+ if (se_cmd->data_direction == DMA_TO_DEVICE ||
+ se_cmd->se_cmd_flags & SCF_BIDI)
+ scatter_data_area(udev, tcmu_cmd, &iov);
+ else
+ tcmu_setup_iovs(udev, tcmu_cmd, &iov, se_cmd->data_length);
+
+ entry->req.iov_cnt = iov_cnt - iov_bidi_cnt;
/* Handle BIDI commands */
- iov_cnt = 0;
if (se_cmd->se_cmd_flags & SCF_BIDI) {
iov++;
- scatter_data_area(udev, tcmu_cmd, se_cmd->t_bidi_data_sg,
- se_cmd->t_bidi_data_nents, &iov, &iov_cnt,
- false);
+ tcmu_setup_iovs(udev, tcmu_cmd, &iov, tcmu_cmd->data_len_bidi);
+ entry->req.iov_bidi_cnt = iov_bidi_cnt;
}
- entry->req.iov_bidi_cnt = iov_cnt;
-
- cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT);
- if (cmd_id < 0) {
- pr_err("tcmu: Could not allocate cmd id.\n");
-
- tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
- *scsi_err = TCM_OUT_OF_RESOURCES;
- return -1;
- }
- tcmu_cmd->cmd_id = cmd_id;
-
- pr_debug("allocated cmd id %u for cmd %p dev %s\n", tcmu_cmd->cmd_id,
- tcmu_cmd, udev->name);
tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out, &udev->cmd_timer);
entry->hdr.cmd_id = tcmu_cmd->cmd_id;
- /*
- * Recalaulate the command's base size and size according
- * to the actual needs
- */
- base_command_size = tcmu_cmd_get_base_cmd_size(entry->req.iov_cnt +
- entry->req.iov_bidi_cnt);
- command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);
-
tcmu_hdr_set_len(&entry->hdr.len_op, command_size);
/* All offsets relative to mb_addr, not start of entry! */
@@ -1078,13 +1126,16 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
tcmu_flush_dcache_range(mb, sizeof(*mb));
list_add_tail(&tcmu_cmd->queue_entry, &udev->inflight_queue);
- set_bit(TCMU_CMD_BIT_INFLIGHT, &tcmu_cmd->flags);
- /* TODO: only if FLUSH and FUA? */
- uio_event_notify(&udev->uio_info);
+ if (!test_bit(TCMU_DEV_BIT_PLUGGED, &udev->flags))
+ uio_event_notify(&udev->uio_info);
return 0;
+free_and_queue:
+ tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur);
+ tcmu_cmd_reset_dbi_cur(tcmu_cmd);
+
queue:
if (add_to_qfull_queue(tcmu_cmd)) {
*scsi_err = TCM_OUT_OF_RESOURCES;
@@ -1094,32 +1145,185 @@ queue:
return 1;
}
+/**
+ * queue_tmr_ring - queue tmr info to ring or internally
+ * @udev: related tcmu_dev
+ * @tmr: tcmu_tmr containing tmr info to queue
+ *
+ * Returns:
+ * 0 success
+ * 1 internally queued to wait for ring memory to free.
+ */
+static int
+queue_tmr_ring(struct tcmu_dev *udev, struct tcmu_tmr *tmr)
+{
+ struct tcmu_tmr_entry *entry;
+ int cmd_size;
+ int id_list_sz;
+ struct tcmu_mailbox *mb = udev->mb_addr;
+ uint32_t cmd_head;
+
+ if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
+ goto out_free;
+
+ id_list_sz = sizeof(tmr->tmr_cmd_ids[0]) * tmr->tmr_cmd_cnt;
+ cmd_size = round_up(sizeof(*entry) + id_list_sz, TCMU_OP_ALIGN_SIZE);
+
+ if (!list_empty(&udev->tmr_queue) ||
+ !is_ring_space_avail(udev, cmd_size)) {
+ list_add_tail(&tmr->queue_entry, &udev->tmr_queue);
+ pr_debug("adding tmr %p on dev %s to TMR ring space wait queue\n",
+ tmr, udev->name);
+ return 1;
+ }
+
+ cmd_head = ring_insert_padding(udev, cmd_size);
+
+ entry = udev->cmdr + cmd_head;
+ memset(entry, 0, cmd_size);
+ tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_TMR);
+ tcmu_hdr_set_len(&entry->hdr.len_op, cmd_size);
+ entry->tmr_type = tmr->tmr_type;
+ entry->cmd_cnt = tmr->tmr_cmd_cnt;
+ memcpy(&entry->cmd_ids[0], &tmr->tmr_cmd_ids[0], id_list_sz);
+ tcmu_flush_dcache_range(entry, cmd_size);
+
+ UPDATE_HEAD(mb->cmd_head, cmd_size, udev->cmdr_size);
+ tcmu_flush_dcache_range(mb, sizeof(*mb));
+
+ uio_event_notify(&udev->uio_info);
+
+out_free:
+ kfree(tmr);
+
+ return 0;
+}
+
static sense_reason_t
tcmu_queue_cmd(struct se_cmd *se_cmd)
{
struct se_device *se_dev = se_cmd->se_dev;
struct tcmu_dev *udev = TCMU_DEV(se_dev);
struct tcmu_cmd *tcmu_cmd;
- sense_reason_t scsi_ret;
- int ret;
+ sense_reason_t scsi_ret = TCM_CHECK_CONDITION_ABORT_CMD;
+ int ret = -1;
tcmu_cmd = tcmu_alloc_cmd(se_cmd);
if (!tcmu_cmd)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
mutex_lock(&udev->cmdr_lock);
- ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
- mutex_unlock(&udev->cmdr_lock);
+ if (!(se_cmd->transport_state & CMD_T_ABORTED))
+ ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
if (ret < 0)
tcmu_free_cmd(tcmu_cmd);
+ else
+ se_cmd->priv = tcmu_cmd;
+ mutex_unlock(&udev->cmdr_lock);
return scsi_ret;
}
-static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *entry)
+static void tcmu_set_next_deadline(struct list_head *queue,
+ struct timer_list *timer)
+{
+ struct tcmu_cmd *cmd;
+
+ if (!list_empty(queue)) {
+ cmd = list_first_entry(queue, struct tcmu_cmd, queue_entry);
+ mod_timer(timer, cmd->deadline);
+ } else
+ timer_delete(timer);
+}
+
+static int
+tcmu_tmr_type(enum tcm_tmreq_table tmf)
+{
+ switch (tmf) {
+ case TMR_ABORT_TASK: return TCMU_TMR_ABORT_TASK;
+ case TMR_ABORT_TASK_SET: return TCMU_TMR_ABORT_TASK_SET;
+ case TMR_CLEAR_ACA: return TCMU_TMR_CLEAR_ACA;
+ case TMR_CLEAR_TASK_SET: return TCMU_TMR_CLEAR_TASK_SET;
+ case TMR_LUN_RESET: return TCMU_TMR_LUN_RESET;
+ case TMR_TARGET_WARM_RESET: return TCMU_TMR_TARGET_WARM_RESET;
+ case TMR_TARGET_COLD_RESET: return TCMU_TMR_TARGET_COLD_RESET;
+ case TMR_LUN_RESET_PRO: return TCMU_TMR_LUN_RESET_PRO;
+ default: return TCMU_TMR_UNKNOWN;
+ }
+}
+
+static void
+tcmu_tmr_notify(struct se_device *se_dev, enum tcm_tmreq_table tmf,
+ struct list_head *cmd_list)
+{
+ int i = 0, cmd_cnt = 0;
+ bool unqueued = false;
+ struct tcmu_cmd *cmd;
+ struct se_cmd *se_cmd;
+ struct tcmu_tmr *tmr;
+ struct tcmu_dev *udev = TCMU_DEV(se_dev);
+
+ mutex_lock(&udev->cmdr_lock);
+
+ /* First we check for aborted commands in qfull_queue */
+ list_for_each_entry(se_cmd, cmd_list, state_list) {
+ i++;
+ if (!se_cmd->priv)
+ continue;
+ cmd = se_cmd->priv;
+ /* Commands on qfull queue have no id yet */
+ if (cmd->cmd_id) {
+ cmd_cnt++;
+ continue;
+ }
+ pr_debug("Removing aborted command %p from queue on dev %s.\n",
+ cmd, udev->name);
+
+ list_del_init(&cmd->queue_entry);
+ tcmu_free_cmd(cmd);
+ se_cmd->priv = NULL;
+ target_complete_cmd(se_cmd, SAM_STAT_TASK_ABORTED);
+ unqueued = true;
+ }
+ if (unqueued)
+ tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
+
+ if (!test_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags))
+ goto unlock;
+
+ pr_debug("TMR event %d on dev %s, aborted cmds %d, afflicted cmd_ids %d\n",
+ tcmu_tmr_type(tmf), udev->name, i, cmd_cnt);
+
+ tmr = kmalloc(struct_size(tmr, tmr_cmd_ids, cmd_cnt), GFP_NOIO);
+ if (!tmr)
+ goto unlock;
+
+ tmr->tmr_type = tcmu_tmr_type(tmf);
+ tmr->tmr_cmd_cnt = cmd_cnt;
+
+ if (cmd_cnt != 0) {
+ cmd_cnt = 0;
+ list_for_each_entry(se_cmd, cmd_list, state_list) {
+ if (!se_cmd->priv)
+ continue;
+ cmd = se_cmd->priv;
+ if (cmd->cmd_id)
+ tmr->tmr_cmd_ids[cmd_cnt++] = cmd->cmd_id;
+ }
+ }
+
+ queue_tmr_ring(udev, tmr);
+
+unlock:
+ mutex_unlock(&udev->cmdr_lock);
+}
+
+static bool tcmu_handle_completion(struct tcmu_cmd *cmd,
+ struct tcmu_cmd_entry *entry, bool keep_buf)
{
struct se_cmd *se_cmd = cmd->se_cmd;
struct tcmu_dev *udev = cmd->tcmu_dev;
bool read_len_valid = false;
+ bool ret = true;
uint32_t read_len;
/*
@@ -1130,6 +1334,13 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
WARN_ON_ONCE(se_cmd);
goto out;
}
+ if (test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags)) {
+ pr_err("cmd_id %u already completed with KEEP_BUF, ring is broken\n",
+ entry->hdr.cmd_id);
+ set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
+ ret = false;
+ goto out;
+ }
list_del_init(&cmd->queue_entry);
@@ -1170,6 +1381,7 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
}
done:
+ se_cmd->priv = NULL;
if (read_len_valid) {
pr_debug("read_len = %d\n", read_len);
target_complete_cmd_with_length(cmd->se_cmd,
@@ -1178,39 +1390,65 @@ done:
target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);
out:
- cmd->se_cmd = NULL;
- tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
- tcmu_free_cmd(cmd);
+ if (!keep_buf) {
+ tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
+ tcmu_free_cmd(cmd);
+ } else {
+ /*
+ * Keep this command after completion, since userspace still
+ * needs the data buffer. Mark it with TCMU_CMD_BIT_KEEP_BUF
+ * and reset potential TCMU_CMD_BIT_EXPIRED, so we don't accept
+ * a second completion later.
+ * Userspace can free the buffer later by writing the cmd_id
+ * to new action attribute free_kept_buf.
+ */
+ clear_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
+ set_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags);
+ }
+ return ret;
}
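
With KEEP_BUF the command outlives its completion: EXPIRED is cleared, KEEP_BUF is set, and a second completion for the same id must be treated as a broken ring. A bitmask sketch of those transitions (the bit values are illustrative):

#include <stdio.h>

#define CMD_BIT_EXPIRED  (1u << 0)
#define CMD_BIT_KEEP_BUF (1u << 1)

static int complete(unsigned *flags, int keep_buf)
{
	if (*flags & CMD_BIT_KEEP_BUF) {
		printf("already completed with KEEP_BUF: ring broken\n");
		return -1;
	}
	if (keep_buf) {
		*flags &= ~CMD_BIT_EXPIRED;	/* no 2nd timeout-free */
		*flags |= CMD_BIT_KEEP_BUF;	/* buffer held until
						   free_kept_buf */
	}
	return 0;
}

int main(void)
{
	unsigned flags = 0;

	complete(&flags, 1);	/* first completion keeps the buffer */
	complete(&flags, 1);	/* duplicate completion is rejected */
	return 0;
}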
-static void tcmu_set_next_deadline(struct list_head *queue,
- struct timer_list *timer)
+static int tcmu_run_tmr_queue(struct tcmu_dev *udev)
{
- struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
- unsigned long deadline = 0;
+ struct tcmu_tmr *tmr, *tmp;
+ LIST_HEAD(tmrs);
- list_for_each_entry_safe(tcmu_cmd, tmp_cmd, queue, queue_entry) {
- if (!time_after(jiffies, tcmu_cmd->deadline)) {
- deadline = tcmu_cmd->deadline;
- break;
+ if (list_empty(&udev->tmr_queue))
+ return 1;
+
+ pr_debug("running %s's tmr queue\n", udev->name);
+
+ list_splice_init(&udev->tmr_queue, &tmrs);
+
+ list_for_each_entry_safe(tmr, tmp, &tmrs, queue_entry) {
+ list_del_init(&tmr->queue_entry);
+
+ pr_debug("removing tmr %p on dev %s from queue\n",
+ tmr, udev->name);
+
+ if (queue_tmr_ring(udev, tmr)) {
+ pr_debug("ran out of space during tmr queue run\n");
+ /*
+ * tmr was requeued, so just put all tmrs back in
+ * the queue
+ */
+ list_splice_tail(&tmrs, &udev->tmr_queue);
+ return 0;
}
}
- if (deadline)
- mod_timer(timer, deadline);
- else
- del_timer(timer);
+ return 1;
}
-static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
+static bool tcmu_handle_completions(struct tcmu_dev *udev)
{
struct tcmu_mailbox *mb;
struct tcmu_cmd *cmd;
- int handled = 0;
+ bool free_space = false;
if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
pr_err("ring broken, not handling completions\n");
- return 0;
+ return false;
}
mb = udev->mb_addr;
@@ -1218,11 +1456,22 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) {
- struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;
+ struct tcmu_cmd_entry *entry = udev->cmdr + udev->cmdr_last_cleaned;
+ bool keep_buf;
- tcmu_flush_dcache_range(entry, sizeof(*entry));
+ /*
+ * Flush max. up to end of cmd ring since current entry might
+ * be a padding that is shorter than sizeof(*entry)
+ */
+ size_t ring_left = head_to_end(udev->cmdr_last_cleaned,
+ udev->cmdr_size);
+ tcmu_flush_dcache_range(entry, ring_left < sizeof(*entry) ?
+ ring_left : sizeof(*entry));
+
+ free_space = true;
- if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD) {
+ if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD ||
+ tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_TMR) {
UPDATE_HEAD(udev->cmdr_last_cleaned,
tcmu_hdr_get_len(entry->hdr.len_op),
udev->cmdr_size);
@@ -1230,53 +1479,53 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
}
WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD);
- cmd = idr_remove(&udev->commands, entry->hdr.cmd_id);
+ keep_buf = !!(entry->hdr.uflags & TCMU_UFLAG_KEEP_BUF);
+ if (keep_buf)
+ cmd = xa_load(&udev->commands, entry->hdr.cmd_id);
+ else
+ cmd = xa_erase(&udev->commands, entry->hdr.cmd_id);
if (!cmd) {
pr_err("cmd_id %u not found, ring is broken\n",
entry->hdr.cmd_id);
set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
- break;
+ return false;
}
- tcmu_handle_completion(cmd, entry);
+ if (!tcmu_handle_completion(cmd, entry, keep_buf))
+ break;
UPDATE_HEAD(udev->cmdr_last_cleaned,
tcmu_hdr_get_len(entry->hdr.len_op),
udev->cmdr_size);
-
- handled++;
}
+ if (free_space)
+ free_space = tcmu_run_tmr_queue(udev);
- if (mb->cmd_tail == mb->cmd_head) {
- /* no more pending commands */
- del_timer(&udev->cmd_timer);
-
- if (list_empty(&udev->qfull_queue)) {
- /*
- * no more pending or waiting commands so try to
- * reclaim blocks if needed.
- */
- if (atomic_read(&global_db_count) >
- tcmu_global_max_blocks)
- schedule_delayed_work(&tcmu_unmap_work, 0);
- }
- } else if (udev->cmd_time_out) {
- tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);
+ if (atomic_read(&global_page_count) > tcmu_global_max_pages &&
+ xa_empty(&udev->commands) && list_empty(&udev->qfull_queue)) {
+ /*
+ * Allocated blocks exceeded global block limit, currently no
+ * more pending or waiting commands so try to reclaim blocks.
+ */
+ schedule_delayed_work(&tcmu_unmap_work, 0);
}
+ if (udev->cmd_time_out)
+ tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);
- return handled;
+ return free_space;
}
static void tcmu_check_expired_ring_cmd(struct tcmu_cmd *cmd)
{
struct se_cmd *se_cmd;
- if (!time_after(jiffies, cmd->deadline))
+ if (!time_after_eq(jiffies, cmd->deadline))
return;
set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
list_del_init(&cmd->queue_entry);
se_cmd = cmd->se_cmd;
+ se_cmd->priv = NULL;
cmd->se_cmd = NULL;
pr_debug("Timing out inflight cmd %u on dev %s.\n",
@@ -1289,7 +1538,7 @@ static void tcmu_check_expired_queue_cmd(struct tcmu_cmd *cmd)
{
struct se_cmd *se_cmd;
- if (!time_after(jiffies, cmd->deadline))
+ if (!time_after_eq(jiffies, cmd->deadline))
return;
pr_debug("Timing out queued cmd %p on dev %s.\n",
@@ -1299,6 +1548,7 @@ static void tcmu_check_expired_queue_cmd(struct tcmu_cmd *cmd)
se_cmd = cmd->se_cmd;
tcmu_free_cmd(cmd);
+ se_cmd->priv = NULL;
target_complete_cmd(se_cmd, SAM_STAT_TASK_SET_FULL);
}
@@ -1314,7 +1564,7 @@ static void tcmu_device_timedout(struct tcmu_dev *udev)
static void tcmu_cmd_timedout(struct timer_list *t)
{
- struct tcmu_dev *udev = from_timer(udev, t, cmd_timer);
+ struct tcmu_dev *udev = timer_container_of(udev, t, cmd_timer);
pr_debug("%s cmd timeout has expired\n", udev->name);
tcmu_device_timedout(udev);
@@ -1322,7 +1572,7 @@ static void tcmu_cmd_timedout(struct timer_list *t)
static void tcmu_qfull_timedout(struct timer_list *t)
{
- struct tcmu_dev *udev = from_timer(udev, t, qfull_timer);
+ struct tcmu_dev *udev = timer_container_of(udev, t, qfull_timer);
pr_debug("%s qfull timeout has expired\n", udev->name);
tcmu_device_timedout(udev);
@@ -1367,23 +1617,136 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
udev->cmd_time_out = TCMU_TIME_OUT;
udev->qfull_time_out = -1;
- udev->max_blocks = DATA_BLOCK_BITS_DEF;
+ udev->data_pages_per_blk = DATA_PAGES_PER_BLK_DEF;
+ udev->max_blocks = DATA_AREA_PAGES_DEF / udev->data_pages_per_blk;
+ udev->cmdr_size = CMDR_SIZE_DEF;
+ udev->data_area_mb = TCMU_PAGES_TO_MBS(DATA_AREA_PAGES_DEF);
+
mutex_init(&udev->cmdr_lock);
INIT_LIST_HEAD(&udev->node);
INIT_LIST_HEAD(&udev->timedout_entry);
INIT_LIST_HEAD(&udev->qfull_queue);
+ INIT_LIST_HEAD(&udev->tmr_queue);
INIT_LIST_HEAD(&udev->inflight_queue);
- idr_init(&udev->commands);
+ xa_init_flags(&udev->commands, XA_FLAGS_ALLOC1);
timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0);
timer_setup(&udev->cmd_timer, tcmu_cmd_timedout, 0);
- INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL);
+ xa_init(&udev->data_pages);
return &udev->se_dev;
}
+static void tcmu_dev_call_rcu(struct rcu_head *p)
+{
+ struct se_device *dev = container_of(p, struct se_device, rcu_head);
+ struct tcmu_dev *udev = TCMU_DEV(dev);
+
+ kfree(udev->uio_info.name);
+ kfree(udev->name);
+ kfree(udev);
+}
+
+static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
+{
+ if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags) ||
+ test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags)) {
+ kmem_cache_free(tcmu_cmd_cache, cmd);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static u32 tcmu_blocks_release(struct tcmu_dev *udev, unsigned long first,
+ unsigned long last)
+{
+ struct page *page;
+ unsigned long dpi;
+ u32 pages_freed = 0;
+
+ first = first * udev->data_pages_per_blk;
+ last = (last + 1) * udev->data_pages_per_blk - 1;
+ xa_for_each_range(&udev->data_pages, dpi, page, first, last) {
+ xa_erase(&udev->data_pages, dpi);
+ /*
+ * While reaching here there may be page faults occurring on
+ * the to-be-released pages. A race condition may occur if
+ * unmap_mapping_range() is called before page faults on these
+ * pages have completed; a valid but stale map is created.
+ *
+ * If another command subsequently runs and needs to extend
+ * dbi_thresh, it may reuse the slot corresponding to the
+ * previous page in data_bitmap. Though we will allocate a new
+ * page for the slot in data_area, no page fault will happen
+ * because we have a valid map. Therefore the command's data
+ * will be lost.
+ *
+ * We lock and unlock pages that are to be released to ensure
+ * all page faults have completed. This way
+ * unmap_mapping_range() can ensure stale maps are cleanly
+ * removed.
+ */
+ lock_page(page);
+ unlock_page(page);
+ __free_page(page);
+ pages_freed++;
+ }
+
+ atomic_sub(pages_freed, &global_page_count);
+
+ return pages_freed;
+}
+
+static void tcmu_remove_all_queued_tmr(struct tcmu_dev *udev)
+{
+ struct tcmu_tmr *tmr, *tmp;
+
+ list_for_each_entry_safe(tmr, tmp, &udev->tmr_queue, queue_entry) {
+ list_del_init(&tmr->queue_entry);
+ kfree(tmr);
+ }
+}
+
+static void tcmu_dev_kref_release(struct kref *kref)
+{
+ struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref);
+ struct se_device *dev = &udev->se_dev;
+ struct tcmu_cmd *cmd;
+ bool all_expired = true;
+ unsigned long i;
+
+ vfree(udev->mb_addr);
+ udev->mb_addr = NULL;
+
+ spin_lock_bh(&timed_out_udevs_lock);
+ if (!list_empty(&udev->timedout_entry))
+ list_del(&udev->timedout_entry);
+ spin_unlock_bh(&timed_out_udevs_lock);
+
+ /* Upper layer should drain all requests before calling this */
+ mutex_lock(&udev->cmdr_lock);
+ xa_for_each(&udev->commands, i, cmd) {
+ if (tcmu_check_and_free_pending_cmd(cmd) != 0)
+ all_expired = false;
+ }
+ /* There can be left over TMR cmds. Remove them. */
+ tcmu_remove_all_queued_tmr(udev);
+ if (!list_empty(&udev->qfull_queue))
+ all_expired = false;
+ xa_destroy(&udev->commands);
+ WARN_ON(!all_expired);
+
+ tcmu_blocks_release(udev, 0, udev->dbi_max);
+ bitmap_free(udev->data_bitmap);
+ mutex_unlock(&udev->cmdr_lock);
+
+ pr_debug("dev_kref_release\n");
+
+ call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
+}
+
static void run_qfull_queue(struct tcmu_dev *udev, bool fail)
{
struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
@@ -1412,6 +1775,7 @@ static void run_qfull_queue(struct tcmu_dev *udev, bool fail)
* removed then LIO core will do the right thing and
* fail the retry.
*/
+ tcmu_cmd->se_cmd->priv = NULL;
target_complete_cmd(tcmu_cmd->se_cmd, SAM_STAT_BUSY);
tcmu_free_cmd(tcmu_cmd);
continue;
@@ -1425,6 +1789,7 @@ static void run_qfull_queue(struct tcmu_dev *udev, bool fail)
* Ignore scsi_ret for now. target_complete_cmd
* drops it.
*/
+ tcmu_cmd->se_cmd->priv = NULL;
target_complete_cmd(tcmu_cmd->se_cmd,
SAM_STAT_CHECK_CONDITION);
tcmu_free_cmd(tcmu_cmd);
@@ -1447,8 +1812,8 @@ static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
mutex_lock(&udev->cmdr_lock);
- tcmu_handle_completions(udev);
- run_qfull_queue(udev, false);
+ if (tcmu_handle_completions(udev))
+ run_qfull_queue(udev, false);
mutex_unlock(&udev->cmdr_lock);
return 0;
@@ -1471,13 +1836,15 @@ static int tcmu_find_mem_index(struct vm_area_struct *vma)
return -1;
}
-static struct page *tcmu_try_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
+static struct page *tcmu_try_get_data_page(struct tcmu_dev *udev, uint32_t dpi)
{
struct page *page;
mutex_lock(&udev->cmdr_lock);
- page = tcmu_get_block_page(udev, dbi);
+ page = xa_load(&udev->data_pages, dpi);
if (likely(page)) {
+ get_page(page);
+ lock_page(page);
mutex_unlock(&udev->cmdr_lock);
return page;
}
@@ -1486,12 +1853,30 @@ static struct page *tcmu_try_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
* Userspace messed up and passed in an address not in the
* data iov passed to it.
*/
- pr_err("Invalid addr to data block mapping (dbi %u) on device %s\n",
- dbi, udev->name);
- page = NULL;
+ pr_err("Invalid addr to data page mapping (dpi %u) on device %s\n",
+ dpi, udev->name);
mutex_unlock(&udev->cmdr_lock);
- return page;
+ return NULL;
+}
+
+static void tcmu_vma_open(struct vm_area_struct *vma)
+{
+ struct tcmu_dev *udev = vma->vm_private_data;
+
+ pr_debug("vma_open\n");
+
+ kref_get(&udev->kref);
+}
+
+static void tcmu_vma_close(struct vm_area_struct *vma)
+{
+ struct tcmu_dev *udev = vma->vm_private_data;
+
+ pr_debug("vma_close\n");
+
+ /* release ref from tcmu_vma_open */
+ kref_put(&udev->kref, tcmu_dev_kref_release);
}
static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf)
@@ -1501,6 +1886,7 @@ static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf)
struct page *page;
unsigned long offset;
void *addr;
+ vm_fault_t ret = 0;
int mi = tcmu_find_mem_index(vmf->vma);
if (mi < 0)
@@ -1516,22 +1902,25 @@ static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf)
/* For the vmalloc()ed cmd area pages */
addr = (void *)(unsigned long)info->mem[mi].addr + offset;
page = vmalloc_to_page(addr);
+ get_page(page);
} else {
- uint32_t dbi;
+ uint32_t dpi;
/* For the dynamically growing data area pages */
- dbi = (offset - udev->data_off) / DATA_BLOCK_SIZE;
- page = tcmu_try_get_block_page(udev, dbi);
+ dpi = (offset - udev->data_off) / PAGE_SIZE;
+ page = tcmu_try_get_data_page(udev, dpi);
if (!page)
return VM_FAULT_SIGBUS;
+ ret = VM_FAULT_LOCKED;
}
- get_page(page);
vmf->page = page;
- return 0;
+ return ret;
}
static const struct vm_operations_struct tcmu_vm_ops = {
+ .open = tcmu_vma_open,
+ .close = tcmu_vma_close,
.fault = tcmu_vma_fault,
};
@@ -1539,15 +1928,17 @@ static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma)
{
struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_ops = &tcmu_vm_ops;
vma->vm_private_data = udev;
/* Ensure the mmap is exactly the right size */
- if (vma_pages(vma) != (udev->ring_size >> PAGE_SHIFT))
+ if (vma_pages(vma) != udev->mmap_pages)
return -EINVAL;
+ tcmu_vma_open(vma);
+
return 0;
}
@@ -1560,90 +1951,52 @@ static int tcmu_open(struct uio_info *info, struct inode *inode)
return -EBUSY;
udev->inode = inode;
- kref_get(&udev->kref);
pr_debug("open\n");
return 0;
}
-static void tcmu_dev_call_rcu(struct rcu_head *p)
-{
- struct se_device *dev = container_of(p, struct se_device, rcu_head);
- struct tcmu_dev *udev = TCMU_DEV(dev);
-
- kfree(udev->uio_info.name);
- kfree(udev->name);
- kfree(udev);
-}
-
-static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
-{
- if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
- kmem_cache_free(tcmu_cmd_cache, cmd);
- return 0;
- }
- return -EINVAL;
-}
-
-static void tcmu_blocks_release(struct radix_tree_root *blocks,
- int start, int end)
-{
- int i;
- struct page *page;
-
- for (i = start; i < end; i++) {
- page = radix_tree_delete(blocks, i);
- if (page) {
- __free_page(page);
- atomic_dec(&global_db_count);
- }
- }
-}
-
-static void tcmu_dev_kref_release(struct kref *kref)
+static int tcmu_release(struct uio_info *info, struct inode *inode)
{
- struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref);
- struct se_device *dev = &udev->se_dev;
+ struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
struct tcmu_cmd *cmd;
- bool all_expired = true;
- int i;
+ unsigned long i;
+ bool freed = false;
- vfree(udev->mb_addr);
- udev->mb_addr = NULL;
+ mutex_lock(&udev->cmdr_lock);
- spin_lock_bh(&timed_out_udevs_lock);
- if (!list_empty(&udev->timedout_entry))
- list_del(&udev->timedout_entry);
- spin_unlock_bh(&timed_out_udevs_lock);
+ xa_for_each(&udev->commands, i, cmd) {
+ /* Cmds with KEEP_BUF set are no longer on the ring, but
+ * userspace still holds the data buffer. If userspace closes
+ * we implicitly free these cmds and buffers, since after new
+ * open the (new ?) userspace cannot find the cmd in the ring
+ * and thus never will release the buffer by writing cmd_id to
+ * free_kept_buf action attribute.
+ */
+ if (!test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags))
+ continue;
+ pr_debug("removing KEEP_BUF cmd %u on dev %s from ring\n",
+ cmd->cmd_id, udev->name);
+ freed = true;
- /* Upper layer should drain all requests before calling this */
- mutex_lock(&udev->cmdr_lock);
- idr_for_each_entry(&udev->commands, cmd, i) {
- if (tcmu_check_and_free_pending_cmd(cmd) != 0)
- all_expired = false;
+ xa_erase(&udev->commands, i);
+ tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
+ tcmu_free_cmd(cmd);
}
- if (!list_empty(&udev->qfull_queue))
- all_expired = false;
- idr_destroy(&udev->commands);
- WARN_ON(!all_expired);
+ /*
+ * We only freed data space, not ring space. Therefore we don't call
+ * run_tmr_queue, but call run_qfull_queue if tmr_queue is empty.
+ */
+ if (freed && list_empty(&udev->tmr_queue))
+ run_qfull_queue(udev, false);
- tcmu_blocks_release(&udev->data_blocks, 0, udev->dbi_max + 1);
- bitmap_free(udev->data_bitmap);
mutex_unlock(&udev->cmdr_lock);
- call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
-}
-
-static int tcmu_release(struct uio_info *info, struct inode *inode)
-{
- struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
-
clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags);
pr_debug("close\n");
- /* release ref from open */
- kref_put(&udev->kref, tcmu_dev_kref_release);
+
return 0;
}
@@ -1777,7 +2130,7 @@ static int tcmu_netlink_event_send(struct tcmu_dev *udev,
}
ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0,
- TCMU_MCGRP_CONFIG, GFP_KERNEL);
+ TCMU_MCGRP_CONFIG);
/* Wait during an add as the listener may not be up yet */
if (ret == 0 ||
@@ -1846,6 +2199,7 @@ static int tcmu_configure_device(struct se_device *dev)
struct tcmu_dev *udev = TCMU_DEV(dev);
struct uio_info *info;
struct tcmu_mailbox *mb;
+ size_t data_size;
int ret = 0;
ret = tcmu_update_uio_info(udev);
@@ -1862,34 +2216,38 @@ static int tcmu_configure_device(struct se_device *dev)
goto err_bitmap_alloc;
}
- udev->mb_addr = vzalloc(CMDR_SIZE);
- if (!udev->mb_addr) {
+ mb = vzalloc(udev->cmdr_size + CMDR_OFF);
+ if (!mb) {
ret = -ENOMEM;
goto err_vzalloc;
}
/* mailbox fits in first part of CMDR space */
- udev->cmdr_size = CMDR_SIZE - CMDR_OFF;
- udev->data_off = CMDR_SIZE;
- udev->data_size = udev->max_blocks * DATA_BLOCK_SIZE;
+ udev->mb_addr = mb;
+ udev->cmdr = (void *)mb + CMDR_OFF;
+ udev->data_off = udev->cmdr_size + CMDR_OFF;
+ data_size = TCMU_MBS_TO_PAGES(udev->data_area_mb) << PAGE_SHIFT;
+ udev->mmap_pages = (data_size + udev->cmdr_size + CMDR_OFF) >> PAGE_SHIFT;
+ udev->data_blk_size = udev->data_pages_per_blk * PAGE_SIZE;
udev->dbi_thresh = 0; /* Default in Idle state */
/* Initialise the mailbox of the ring buffer */
- mb = udev->mb_addr;
mb->version = TCMU_MAILBOX_VERSION;
- mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC | TCMU_MAILBOX_FLAG_CAP_READ_LEN;
+ mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC |
+ TCMU_MAILBOX_FLAG_CAP_READ_LEN |
+ TCMU_MAILBOX_FLAG_CAP_TMR |
+ TCMU_MAILBOX_FLAG_CAP_KEEP_BUF;
mb->cmdr_off = CMDR_OFF;
mb->cmdr_size = udev->cmdr_size;
WARN_ON(!PAGE_ALIGNED(udev->data_off));
- WARN_ON(udev->data_size % PAGE_SIZE);
- WARN_ON(udev->data_size % DATA_BLOCK_SIZE);
+ WARN_ON(data_size % PAGE_SIZE);
info->version = __stringify(TCMU_MAILBOX_VERSION);
info->mem[0].name = "tcm-user command & data buffer";
info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr;
- info->mem[0].size = udev->ring_size = udev->data_size + CMDR_SIZE;
+ info->mem[0].size = data_size + udev->cmdr_size + CMDR_OFF;
info->mem[0].memtype = UIO_MEM_NONE;
info->irqcontrol = tcmu_irqcontrol;
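
The single uio region is now laid out as mailbox, then command ring, then data area, so data_off and mmap_pages fall out of cmdr_size and the configured data area size. A sketch of the layout math (4 KiB pages and the CMDR_OFF value are assumptions; CMDR_OFF is really sizeof(struct tcmu_mailbox)):

#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT   12				/* assumption: 4 KiB pages */
#define MB_CMDR_SIZE (8u * 1024 * 1024)		/* default mailbox + cmd ring */
#define CMDR_OFF     232	/* illustrative stand-in for the mailbox size */

int main(void)
{
	size_t cmdr_size = MB_CMDR_SIZE - CMDR_OFF;
	size_t data_area_mb = 1024;	/* default with 4 KiB pages */
	size_t data_size = data_area_mb << 20;
	size_t data_off = cmdr_size + CMDR_OFF;
	size_t mmap_pages = (data_size + cmdr_size + CMDR_OFF) >> PAGE_SHIFT;

	printf("data_off   = %zu (data area starts after mailbox + ring)\n",
	       data_off);
	printf("mmap_pages = %zu (checked against vma_pages() in tcmu_mmap)\n",
	       mmap_pages);
	return 0;
}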
@@ -1963,8 +2321,8 @@ static void tcmu_destroy_device(struct se_device *dev)
{
struct tcmu_dev *udev = TCMU_DEV(dev);
- del_timer_sync(&udev->cmd_timer);
- del_timer_sync(&udev->qfull_timer);
+ timer_delete_sync(&udev->cmd_timer);
+ timer_delete_sync(&udev->qfull_timer);
mutex_lock(&root_udev_mutex);
list_del(&udev->node);
@@ -2005,19 +2363,24 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
{
struct tcmu_mailbox *mb;
struct tcmu_cmd *cmd;
- int i;
+ unsigned long i;
mutex_lock(&udev->cmdr_lock);
- idr_for_each_entry(&udev->commands, cmd, i) {
- pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n",
- cmd->cmd_id, udev->name,
- test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags));
-
- idr_remove(&udev->commands, i);
- if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
+ xa_for_each(&udev->commands, i, cmd) {
+ pr_debug("removing cmd %u on dev %s from ring %s\n",
+ cmd->cmd_id, udev->name,
+ test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags) ?
+ "(is expired)" :
+ (test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags) ?
+ "(is keep buffer)" : ""));
+
+ xa_erase(&udev->commands, i);
+ if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags) &&
+ !test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags)) {
WARN_ON(!cmd->se_cmd);
list_del_init(&cmd->queue_entry);
+ cmd->se_cmd->priv = NULL;
if (err_level == 1) {
/*
* Userspace was not able to start the
@@ -2045,7 +2408,16 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
tcmu_flush_dcache_range(mb, sizeof(*mb));
clear_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
- del_timer(&udev->cmd_timer);
+ timer_delete(&udev->cmd_timer);
+
+ /*
+ * The ring is empty, and the qfull queue never contains aborted
+ * commands, so the TMRs in the tmr queue do not refer to any
+ * relevant cmd_ids. After a ring reset userspace should do a
+ * fresh start, so even a LUN RESET message is no longer relevant.
+ * Therefore remove all TMRs from the tmr queue.
+ */
+ tcmu_remove_all_queued_tmr(udev);
run_qfull_queue(udev, false);
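The loop above drains the commands xarray with xa_for_each()/xa_erase(), replacing the old idr_for_each_entry()/idr_remove() pair. A minimal, self-contained module sketch of that drain pattern (illustrative only, not the driver's code):

    #include <linux/module.h>
    #include <linux/xarray.h>
    #include <linux/slab.h>

    static DEFINE_XARRAY_ALLOC1(demo_cmds);    /* IDs allocated from 1 upward */

    static int __init demo_init(void)
    {
        unsigned long i;
        int *entry, *e;
        u32 id;

        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
            return -ENOMEM;
        *entry = 42;
        if (xa_alloc(&demo_cmds, &id, entry, XA_LIMIT(1, 0xffff), GFP_KERNEL)) {
            kfree(entry);
            return -ENOMEM;
        }

        /* Erasing the current index inside xa_for_each() is safe. */
        xa_for_each(&demo_cmds, i, e) {
            xa_erase(&demo_cmds, i);
            kfree(e);
        }
        return 0;
    }

    static void __exit demo_exit(void)
    {
        xa_destroy(&demo_cmds);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");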
@@ -2054,16 +2426,19 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
enum {
Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors,
- Opt_nl_reply_supported, Opt_max_data_area_mb, Opt_err,
+ Opt_nl_reply_supported, Opt_max_data_area_mb, Opt_data_pages_per_blk,
+ Opt_cmd_ring_size_mb, Opt_err,
};
-static match_table_t tokens = {
+static const match_table_t tokens = {
{Opt_dev_config, "dev_config=%s"},
{Opt_dev_size, "dev_size=%s"},
{Opt_hw_block_size, "hw_block_size=%d"},
{Opt_hw_max_sectors, "hw_max_sectors=%d"},
{Opt_nl_reply_supported, "nl_reply_supported=%d"},
{Opt_max_data_area_mb, "max_data_area_mb=%d"},
+ {Opt_data_pages_per_blk, "data_pages_per_blk=%d"},
+ {Opt_cmd_ring_size_mb, "cmd_ring_size_mb=%d"},
{Opt_err, NULL}
};
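These tokens arrive as a comma-separated string written to the device's configfs control file. A userspace sketch follows; the configfs path and the option values are assumptions chosen for illustration, not mandated by the driver:

    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/sys/kernel/config/target/core/user_0/test/control", "w");

        if (!f)
            return 1;
        /* Tokens are matched against the table above by match_token(). */
        fputs("max_data_area_mb=1024,data_pages_per_blk=4,cmd_ring_size_mb=8", f);
        return fclose(f) != 0;
    }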
@@ -2090,6 +2465,7 @@ static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib)
static int tcmu_set_max_blocks_param(struct tcmu_dev *udev, substring_t *arg)
{
int val, ret;
+ uint32_t pages_per_blk = udev->data_pages_per_blk;
ret = match_int(arg, &val);
if (ret < 0) {
@@ -2097,11 +2473,20 @@ static int tcmu_set_max_blocks_param(struct tcmu_dev *udev, substring_t *arg)
ret);
return ret;
}
-
if (val <= 0) {
pr_err("Invalid max_data_area %d.\n", val);
return -EINVAL;
}
+ if (val > TCMU_PAGES_TO_MBS(tcmu_global_max_pages)) {
+ pr_err("%d is too large. Adjusting max_data_area_mb to global limit of %u\n",
+ val, TCMU_PAGES_TO_MBS(tcmu_global_max_pages));
+ val = TCMU_PAGES_TO_MBS(tcmu_global_max_pages);
+ }
+ if (TCMU_MBS_TO_PAGES(val) < pages_per_blk) {
+ pr_err("Invalid max_data_area %d (%zu pages): smaller than data_pages_per_blk (%u pages).\n",
+ val, TCMU_MBS_TO_PAGES(val), pages_per_blk);
+ return -EINVAL;
+ }
mutex_lock(&udev->cmdr_lock);
if (udev->data_bitmap) {
@@ -2110,11 +2495,75 @@ static int tcmu_set_max_blocks_param(struct tcmu_dev *udev, substring_t *arg)
goto unlock;
}
- udev->max_blocks = TCMU_MBS_TO_BLOCKS(val);
- if (udev->max_blocks > tcmu_global_max_blocks) {
- pr_err("%d is too large. Adjusting max_data_area_mb to global limit of %u\n",
- val, TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks));
- udev->max_blocks = tcmu_global_max_blocks;
+ udev->data_area_mb = val;
+ udev->max_blocks = TCMU_MBS_TO_PAGES(val) / pages_per_blk;
+
+unlock:
+ mutex_unlock(&udev->cmdr_lock);
+ return ret;
+}
+
+static int tcmu_set_data_pages_per_blk(struct tcmu_dev *udev, substring_t *arg)
+{
+ int val, ret;
+
+ ret = match_int(arg, &val);
+ if (ret < 0) {
+ pr_err("match_int() failed for data_pages_per_blk=. Error %d.\n",
+ ret);
+ return ret;
+ }
+
+ if (val > TCMU_MBS_TO_PAGES(udev->data_area_mb)) {
+ pr_err("Invalid data_pages_per_blk %d: greater than max_data_area_mb %d -> %zd pages).\n",
+ val, udev->data_area_mb,
+ TCMU_MBS_TO_PAGES(udev->data_area_mb));
+ return -EINVAL;
+ }
+
+ mutex_lock(&udev->cmdr_lock);
+ if (udev->data_bitmap) {
+ pr_err("Cannot set data_pages_per_blk after it has been enabled.\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ udev->data_pages_per_blk = val;
+ udev->max_blocks = TCMU_MBS_TO_PAGES(udev->data_area_mb) / val;
+
+unlock:
+ mutex_unlock(&udev->cmdr_lock);
+ return ret;
+}
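To make the conversion concrete (illustrative values, 4 KiB pages, so PAGE_SHIFT is 12): max_data_area_mb=1024 gives TCMU_MBS_TO_PAGES(1024) = 1024 << 8 = 262144 pages, and with data_pages_per_blk=4 that yields max_blocks = 262144 / 4 = 65536 data blocks of 16 KiB each.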
+
+static int tcmu_set_cmd_ring_size(struct tcmu_dev *udev, substring_t *arg)
+{
+ int val, ret;
+
+ ret = match_int(arg, &val);
+ if (ret < 0) {
+ pr_err("match_int() failed for cmd_ring_size_mb=. Error %d.\n",
+ ret);
+ return ret;
+ }
+
+ if (val <= 0) {
+ pr_err("Invalid cmd_ring_size_mb %d.\n", val);
+ return -EINVAL;
+ }
+
+ mutex_lock(&udev->cmdr_lock);
+ if (udev->data_bitmap) {
+ pr_err("Cannot set cmd_ring_size_mb after it has been enabled.\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ udev->cmdr_size = (val << 20) - CMDR_OFF;
+ if (val > (MB_CMDR_SIZE_DEF >> 20)) {
+ pr_err("%d is too large. Adjusting cmd_ring_size_mb to global limit of %u\n",
+ val, (MB_CMDR_SIZE_DEF >> 20));
+ udev->cmdr_size = CMDR_SIZE_DEF;
}
unlock:
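Worked example (illustrative): cmd_ring_size_mb=2 sets cmdr_size to (2 << 20) - CMDR_OFF, i.e. 2 MiB minus the mailbox header, while any value above the 8 MiB default (MB_CMDR_SIZE_DEF >> 20) is capped back to CMDR_SIZE_DEF.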
@@ -2173,6 +2622,12 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
case Opt_max_data_area_mb:
ret = tcmu_set_max_blocks_param(udev, &args[0]);
break;
+ case Opt_data_pages_per_blk:
+ ret = tcmu_set_data_pages_per_blk(udev, &args[0]);
+ break;
+ case Opt_cmd_ring_size_mb:
+ ret = tcmu_set_cmd_ring_size(udev, &args[0]);
+ break;
default:
break;
}
@@ -2193,8 +2648,10 @@ static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b)
bl = sprintf(b + bl, "Config: %s ",
udev->dev_config[0] ? udev->dev_config : "NULL");
bl += sprintf(b + bl, "Size: %llu ", udev->dev_size);
- bl += sprintf(b + bl, "MaxDataAreaMB: %u\n",
- TCMU_BLOCKS_TO_MBS(udev->max_blocks));
+ bl += sprintf(b + bl, "MaxDataAreaMB: %u ", udev->data_area_mb);
+ bl += sprintf(b + bl, "DataPagesPerBlk: %u ", udev->data_pages_per_blk);
+ bl += sprintf(b + bl, "CmdRingSizeMB: %u\n",
+ (udev->cmdr_size + CMDR_OFF) >> 20);
return bl;
}
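With the two new fields appended, the params line might read as follows (values illustrative only):

    Config: NULL Size: 2147483648 MaxDataAreaMB: 1024 DataPagesPerBlk: 4 CmdRingSizeMB: 8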
@@ -2288,11 +2745,32 @@ static ssize_t tcmu_max_data_area_mb_show(struct config_item *item, char *page)
struct se_dev_attrib, da_group);
struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
- return snprintf(page, PAGE_SIZE, "%u\n",
- TCMU_BLOCKS_TO_MBS(udev->max_blocks));
+ return snprintf(page, PAGE_SIZE, "%u\n", udev->data_area_mb);
}
CONFIGFS_ATTR_RO(tcmu_, max_data_area_mb);
+static ssize_t tcmu_data_pages_per_blk_show(struct config_item *item,
+ char *page)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+
+ return snprintf(page, PAGE_SIZE, "%u\n", udev->data_pages_per_blk);
+}
+CONFIGFS_ATTR_RO(tcmu_, data_pages_per_blk);
+
+static ssize_t tcmu_cmd_ring_size_mb_show(struct config_item *item, char *page)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+
+ return snprintf(page, PAGE_SIZE, "%u\n",
+ (udev->cmdr_size + CMDR_OFF) >> 20);
+}
+CONFIGFS_ATTR_RO(tcmu_, cmd_ring_size_mb);
+
static ssize_t tcmu_dev_config_show(struct config_item *item, char *page)
{
struct se_dev_attrib *da = container_of(to_config_group(item),
@@ -2342,14 +2820,14 @@ static ssize_t tcmu_dev_config_store(struct config_item *item, const char *page,
pr_err("Unable to reconfigure device\n");
return ret;
}
- strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN);
+ strscpy(udev->dev_config, page, TCMU_CONFIG_LEN);
ret = tcmu_update_uio_info(udev);
if (ret)
return ret;
return count;
}
- strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN);
+ strscpy(udev->dev_config, page, TCMU_CONFIG_LEN);
return count;
}
@@ -2493,6 +2971,39 @@ static ssize_t tcmu_emulate_write_cache_store(struct config_item *item,
}
CONFIGFS_ATTR(tcmu_, emulate_write_cache);
+static ssize_t tcmu_tmr_notification_show(struct config_item *item, char *page)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+
+ return snprintf(page, PAGE_SIZE, "%i\n",
+ test_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags));
+}
+
+static ssize_t tcmu_tmr_notification_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+ u8 val;
+ int ret;
+
+ ret = kstrtou8(page, 0, &val);
+ if (ret < 0)
+ return ret;
+ if (val > 1)
+ return -EINVAL;
+
+ if (val)
+ set_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags);
+ else
+ clear_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags);
+ return count;
+}
+CONFIGFS_ATTR(tcmu_, tmr_notification);
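A userspace sketch for toggling the new attribute; the configfs path here is an assumption based on the usual target layout:

    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/sys/kernel/config/target/core/user_0/test/attrib/tmr_notification", "w");

        if (!f)
            return 1;
        fputs("1", f);      /* kstrtou8() above accepts only 0 or 1 */
        return fclose(f) != 0;
    }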
+
static ssize_t tcmu_block_dev_show(struct config_item *item, char *page)
{
struct se_device *se_dev = container_of(to_config_group(item),
@@ -2567,13 +3078,75 @@ static ssize_t tcmu_reset_ring_store(struct config_item *item, const char *page,
}
CONFIGFS_ATTR_WO(tcmu_, reset_ring);
+static ssize_t tcmu_free_kept_buf_store(struct config_item *item, const char *page,
+ size_t count)
+{
+ struct se_device *se_dev = container_of(to_config_group(item),
+ struct se_device,
+ dev_action_group);
+ struct tcmu_dev *udev = TCMU_DEV(se_dev);
+ struct tcmu_cmd *cmd;
+ u16 cmd_id;
+ int ret;
+
+ if (!target_dev_configured(&udev->se_dev)) {
+ pr_err("Device is not configured.\n");
+ return -EINVAL;
+ }
+
+ ret = kstrtou16(page, 0, &cmd_id);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&udev->cmdr_lock);
+
+ {
+ XA_STATE(xas, &udev->commands, cmd_id);
+
+ xas_lock(&xas);
+ cmd = xas_load(&xas);
+ if (!cmd) {
+ pr_err("free_kept_buf: cmd_id %d not found\n", cmd_id);
+ count = -EINVAL;
+ xas_unlock(&xas);
+ goto out_unlock;
+ }
+ if (!test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags)) {
+ pr_err("free_kept_buf: cmd_id %d was not completed with KEEP_BUF\n",
+ cmd_id);
+ count = -EINVAL;
+ xas_unlock(&xas);
+ goto out_unlock;
+ }
+ xas_store(&xas, NULL);
+ xas_unlock(&xas);
+ }
+
+ tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
+ tcmu_free_cmd(cmd);
+ /*
+ * We only freed data space, not ring space. Therefore we don't call
+ * run_tmr_queue, but call run_qfull_queue if tmr_queue is empty.
+ */
+ if (list_empty(&udev->tmr_queue))
+ run_qfull_queue(udev, false);
+
+out_unlock:
+ mutex_unlock(&udev->cmdr_lock);
+ return count;
+}
+CONFIGFS_ATTR_WO(tcmu_, free_kept_buf);
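After completing a command with the keep-buffer flag set, userspace returns the data buffer by writing the cmd_id to the new action file. A hedged sketch, where the configfs path and the cmd_id are assumptions for illustration:

    #include <stdio.h>

    static int free_kept_buf(unsigned int cmd_id)
    {
        FILE *f = fopen("/sys/kernel/config/target/core/user_0/test/action/free_kept_buf", "w");

        if (!f)
            return -1;
        fprintf(f, "%u", cmd_id);   /* parsed by kstrtou16() above */
        return fclose(f);
    }

    int main(void)
    {
        return free_kept_buf(42) ? 1 : 0;
    }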
+
static struct configfs_attribute *tcmu_attrib_attrs[] = {
&tcmu_attr_cmd_time_out,
&tcmu_attr_qfull_time_out,
&tcmu_attr_max_data_area_mb,
+ &tcmu_attr_data_pages_per_blk,
+ &tcmu_attr_cmd_ring_size_mb,
&tcmu_attr_dev_config,
&tcmu_attr_dev_size,
&tcmu_attr_emulate_write_cache,
+ &tcmu_attr_tmr_notification,
&tcmu_attr_nl_reply_supported,
NULL,
};
@@ -2583,6 +3156,7 @@ static struct configfs_attribute **tcmu_attrs;
static struct configfs_attribute *tcmu_action_attrs[] = {
&tcmu_attr_block_dev,
&tcmu_attr_reset_ring,
+ &tcmu_attr_free_kept_buf,
NULL,
};
@@ -2598,7 +3172,10 @@ static struct target_backend_ops tcmu_ops = {
.configure_device = tcmu_configure_device,
.destroy_device = tcmu_destroy_device,
.free_device = tcmu_free_device,
+ .unplug_device = tcmu_unplug_device,
+ .plug_device = tcmu_plug_device,
.parse_cdb = tcmu_parse_cdb,
+ .tmr_notify = tcmu_tmr_notify,
.set_configfs_dev_params = tcmu_set_configfs_dev_params,
.show_configfs_dev_params = tcmu_show_configfs_dev_params,
.get_device_type = sbc_get_device_type,
@@ -2610,9 +3187,10 @@ static void find_free_blocks(void)
{
struct tcmu_dev *udev;
loff_t off;
- u32 start, end, block, total_freed = 0;
+ u32 pages_freed, total_pages_freed = 0;
+ u32 start, end, block, total_blocks_freed = 0;
- if (atomic_read(&global_db_count) <= tcmu_global_max_blocks)
+ if (atomic_read(&global_page_count) <= tcmu_global_max_pages)
return;
mutex_lock(&root_udev_mutex);
@@ -2625,7 +3203,8 @@ static void find_free_blocks(void)
}
/* Try to complete the finished commands first */
- tcmu_handle_completions(udev);
+ if (tcmu_handle_completions(udev))
+ run_qfull_queue(udev, false);
/* Skip the udevs in idle */
if (!udev->dbi_thresh) {
@@ -2651,21 +3230,33 @@ static void find_free_blocks(void)
udev->dbi_max = block;
}
+ /*
+ * Release the block pages.
+ *
+ * Also note that since tcmu_vma_fault() gets an extra page
+ * refcount, tcmu_blocks_release() won't free pages if pages
+ * are mapped. This means it is safe to call
+ * tcmu_blocks_release() before unmap_mapping_range() which
+ * drops the refcount of any pages it unmaps and thus releases
+ * them.
+ */
+ pages_freed = tcmu_blocks_release(udev, start, end - 1);
+
/* Truncate the mapped data area starting at off */
- off = udev->data_off + start * DATA_BLOCK_SIZE;
+ off = udev->data_off + (loff_t)start * udev->data_blk_size;
unmap_mapping_range(udev->inode->i_mapping, off, 0, 1);
- /* Release the block pages */
- tcmu_blocks_release(&udev->data_blocks, start, end);
mutex_unlock(&udev->cmdr_lock);
- total_freed += end - start;
- pr_debug("Freed %u blocks (total %u) from %s.\n", end - start,
- total_freed, udev->name);
+ total_pages_freed += pages_freed;
+ total_blocks_freed += end - start;
+ pr_debug("Freed %u pages (total %u) from %u blocks (total %u) from %s.\n",
+ pages_freed, total_pages_freed, end - start,
+ total_blocks_freed, udev->name);
}
mutex_unlock(&root_udev_mutex);
- if (atomic_read(&global_db_count) > tcmu_global_max_blocks)
+ if (atomic_read(&global_page_count) > tcmu_global_max_pages)
schedule_delayed_work(&tcmu_unmap_work, msecs_to_jiffies(5000));
}
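Worked example of the truncation offset (illustrative, 4 KiB pages): with an 8 MiB mailbox-plus-ring (data_off = 8 MiB) and data_pages_per_blk=4 (data_blk_size = 16 KiB), freeing from block start=512 unmaps from off = 8 MiB + 512 * 16 KiB = 16 MiB onward.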