Diffstat (limited to 'drivers/target')
-rw-r--r--  drivers/target/Kconfig | 17
-rw-r--r--  drivers/target/Makefile | 6
-rw-r--r--  drivers/target/iscsi/Kconfig | 18
-rw-r--r--  drivers/target/iscsi/Makefile | 3
-rw-r--r--  drivers/target/iscsi/cxgbit/Kconfig | 8
-rw-r--r--  drivers/target/iscsi/cxgbit/Makefile | 8
-rw-r--r--  drivers/target/iscsi/cxgbit/cxgbit.h | 351
-rw-r--r--  drivers/target/iscsi/cxgbit/cxgbit_cm.c | 2018
-rw-r--r--  drivers/target/iscsi/cxgbit/cxgbit_ddp.c | 330
-rw-r--r--  drivers/target/iscsi/cxgbit/cxgbit_lro.h | 73
-rw-r--r--  drivers/target/iscsi/cxgbit/cxgbit_main.c | 746
-rw-r--r--  drivers/target/iscsi/cxgbit/cxgbit_target.c | 1654
-rw-r--r--  drivers/target/iscsi/iscsi_target.c | 2522
-rw-r--r--  drivers/target/iscsi/iscsi_target.h | 56
-rw-r--r--  drivers/target/iscsi/iscsi_target_auth.c | 526
-rw-r--r--  drivers/target/iscsi/iscsi_target_auth.h | 26
-rw-r--r--  drivers/target/iscsi/iscsi_target_configfs.c | 1745
-rw-r--r--  drivers/target/iscsi/iscsi_target_configfs.h | 7
-rw-r--r--  drivers/target/iscsi/iscsi_target_core.h | 876
-rw-r--r--  drivers/target/iscsi/iscsi_target_datain_values.c | 47
-rw-r--r--  drivers/target/iscsi/iscsi_target_datain_values.h | 14
-rw-r--r--  drivers/target/iscsi/iscsi_target_device.c | 35
-rw-r--r--  drivers/target/iscsi/iscsi_target_device.h | 8
-rw-r--r--  drivers/target/iscsi/iscsi_target_erl0.c | 214
-rw-r--r--  drivers/target/iscsi/iscsi_target_erl0.h | 27
-rw-r--r--  drivers/target/iscsi/iscsi_target_erl1.c | 246
-rw-r--r--  drivers/target/iscsi/iscsi_target_erl1.h | 46
-rw-r--r--  drivers/target/iscsi/iscsi_target_erl2.c | 125
-rw-r--r--  drivers/target/iscsi/iscsi_target_erl2.h | 26
-rw-r--r--  drivers/target/iscsi/iscsi_target_login.c | 938
-rw-r--r--  drivers/target/iscsi/iscsi_target_login.h | 29
-rw-r--r--  drivers/target/iscsi/iscsi_target_nego.c | 774
-rw-r--r--  drivers/target/iscsi/iscsi_target_nego.h | 18
-rw-r--r--  drivers/target/iscsi/iscsi_target_nodeattrib.c | 26
-rw-r--r--  drivers/target/iscsi/iscsi_target_nodeattrib.h | 9
-rw-r--r--  drivers/target/iscsi/iscsi_target_parameters.c | 499
-rw-r--r--  drivers/target/iscsi/iscsi_target_parameters.h | 32
-rw-r--r--  drivers/target/iscsi/iscsi_target_seq_pdu_list.c | 62
-rw-r--r--  drivers/target/iscsi/iscsi_target_seq_pdu_list.h | 14
-rw-r--r--  drivers/target/iscsi/iscsi_target_stat.c | 749
-rw-r--r--  drivers/target/iscsi/iscsi_target_stat.h | 64
-rw-r--r--  drivers/target/iscsi/iscsi_target_tmr.c | 107
-rw-r--r--  drivers/target/iscsi/iscsi_target_tmr.h | 19
-rw-r--r--  drivers/target/iscsi/iscsi_target_tpg.c | 276
-rw-r--r--  drivers/target/iscsi/iscsi_target_tpg.h | 32
-rw-r--r--  drivers/target/iscsi/iscsi_target_tq.c | 544
-rw-r--r--  drivers/target/iscsi/iscsi_target_tq.h | 87
-rw-r--r--  drivers/target/iscsi/iscsi_target_transport.c | 9
-rw-r--r--  drivers/target/iscsi/iscsi_target_util.c | 754
-rw-r--r--  drivers/target/iscsi/iscsi_target_util.h | 110
-rw-r--r--  drivers/target/loopback/Kconfig | 2
-rw-r--r--  drivers/target/loopback/Makefile | 1
-rw-r--r--  drivers/target/loopback/tcm_loop.c | 1132
-rw-r--r--  drivers/target/loopback/tcm_loop.h | 33
-rw-r--r--  drivers/target/sbp/Kconfig | 1
-rw-r--r--  drivers/target/sbp/Makefile | 1
-rw-r--r--  drivers/target/sbp/sbp_target.c | 665
-rw-r--r--  drivers/target/sbp/sbp_target.h | 12
-rw-r--r--  drivers/target/target_core_alua.c | 1343
-rw-r--r--  drivers/target/target_core_alua.h | 74
-rw-r--r--  drivers/target/target_core_configfs.c | 3329
-rw-r--r--  drivers/target/target_core_device.c | 1797
-rw-r--r--  drivers/target/target_core_fabric_configfs.c | 982
-rw-r--r--  drivers/target/target_core_fabric_lib.c | 463
-rw-r--r--  drivers/target/target_core_file.c | 670
-rw-r--r--  drivers/target/target_core_file.h | 12
-rw-r--r--  drivers/target/target_core_hba.c | 128
-rw-r--r--  drivers/target/target_core_iblock.c | 864
-rw-r--r--  drivers/target/target_core_iblock.h | 22
-rw-r--r--  drivers/target/target_core_internal.h | 168
-rw-r--r--  drivers/target/target_core_pr.c | 1336
-rw-r--r--  drivers/target/target_core_pr.h | 26
-rw-r--r--  drivers/target/target_core_pscsi.c | 609
-rw-r--r--  drivers/target/target_core_pscsi.h | 23
-rw-r--r--  drivers/target/target_core_rd.c | 392
-rw-r--r--  drivers/target/target_core_rd.h | 10
-rw-r--r--  drivers/target/target_core_sbc.c | 1213
-rw-r--r--  drivers/target/target_core_spc.c | 1674
-rw-r--r--  drivers/target/target_core_stat.c | 1725
-rw-r--r--  drivers/target/target_core_tmr.c | 374
-rw-r--r--  drivers/target/target_core_tpg.c | 955
-rw-r--r--  drivers/target/target_core_transport.c | 3364
-rw-r--r--  drivers/target/target_core_ua.c | 154
-rw-r--r--  drivers/target/target_core_ua.h | 15
-rw-r--r--  drivers/target/target_core_user.c | 3393
-rw-r--r--  drivers/target/target_core_xcopy.c | 1041
-rw-r--r--  drivers/target/target_core_xcopy.h | 68
-rw-r--r--  drivers/target/tcm_fc/Kconfig | 1
-rw-r--r--  drivers/target/tcm_fc/Makefile | 1
-rw-r--r--  drivers/target/tcm_fc/tcm_fc.h | 39
-rw-r--r--  drivers/target/tcm_fc/tfc_cmd.c | 153
-rw-r--r--  drivers/target/tcm_fc/tfc_conf.c | 405
-rw-r--r--  drivers/target/tcm_fc/tfc_io.c | 59
-rw-r--r--  drivers/target/tcm_fc/tfc_sess.c | 154
-rw-r--r--  drivers/target/tcm_remote/Kconfig | 8
-rw-r--r--  drivers/target/tcm_remote/Makefile | 2
-rw-r--r--  drivers/target/tcm_remote/tcm_remote.c | 268
-rw-r--r--  drivers/target/tcm_remote/tcm_remote.h | 20
98 files changed, 28583 insertions, 17528 deletions
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig
index 18303686eb58..92641d39126a 100644
--- a/drivers/target/Kconfig
+++ b/drivers/target/Kconfig
@@ -1,8 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
menuconfig TARGET_CORE
tristate "Generic Target Core Mod (TCM) and ConfigFS Infrastructure"
- depends on SCSI && BLOCK
+ depends on BLOCK
select CONFIGFS_FS
+ select CRC_T10DIF
+ select SCSI_COMMON
+ select SGL_ALLOC
default n
help
Say Y or M here to enable the TCM Storage Engine and ConfigFS enabled
@@ -13,6 +17,7 @@ if TARGET_CORE
config TCM_IBLOCK
tristate "TCM/IBLOCK Subsystem Plugin for Linux/BLOCK"
+ select BLK_DEV_INTEGRITY
help
Say Y here to enable the TCM/IBLOCK subsystem plugin for non-buffered
access to Linux/Block devices using BIO
@@ -25,13 +30,23 @@ config TCM_FILEIO
config TCM_PSCSI
tristate "TCM/pSCSI Subsystem Plugin for Linux/SCSI"
+ depends on SCSI
help
Say Y here to enable the TCM/pSCSI subsystem plugin for non-buffered
passthrough access to Linux/SCSI device
+config TCM_USER2
+ tristate "TCM/USER Subsystem Plugin for Linux"
+ depends on UIO && NET
+ help
+ Say Y here to enable the TCM/USER subsystem plugin for a userspace
+ process to handle requests. This is version 2 of the ABI; version 1
+ is obsolete.
+
source "drivers/target/loopback/Kconfig"
source "drivers/target/tcm_fc/Kconfig"
source "drivers/target/iscsi/Kconfig"
source "drivers/target/sbp/Kconfig"
+source "drivers/target/tcm_remote/Kconfig"
endif
diff --git a/drivers/target/Makefile b/drivers/target/Makefile
index 9fdcb561422f..431b84abfb94 100644
--- a/drivers/target/Makefile
+++ b/drivers/target/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
target_core_mod-y := target_core_configfs.o \
target_core_device.o \
@@ -13,7 +14,8 @@ target_core_mod-y := target_core_configfs.o \
target_core_spc.o \
target_core_ua.o \
target_core_rd.o \
- target_core_stat.o
+ target_core_stat.o \
+ target_core_xcopy.o
obj-$(CONFIG_TARGET_CORE) += target_core_mod.o
@@ -21,9 +23,11 @@ obj-$(CONFIG_TARGET_CORE) += target_core_mod.o
obj-$(CONFIG_TCM_IBLOCK) += target_core_iblock.o
obj-$(CONFIG_TCM_FILEIO) += target_core_file.o
obj-$(CONFIG_TCM_PSCSI) += target_core_pscsi.o
+obj-$(CONFIG_TCM_USER2) += target_core_user.o
# Fabric modules
obj-$(CONFIG_LOOPBACK_TARGET) += loopback/
obj-$(CONFIG_TCM_FC) += tcm_fc/
obj-$(CONFIG_ISCSI_TARGET) += iscsi/
obj-$(CONFIG_SBP_TARGET) += sbp/
+obj-$(CONFIG_REMOTE_TARGET) += tcm_remote/
diff --git a/drivers/target/iscsi/Kconfig b/drivers/target/iscsi/Kconfig
index 8345fb457a40..70d76f3dd693 100644
--- a/drivers/target/iscsi/Kconfig
+++ b/drivers/target/iscsi/Kconfig
@@ -1,9 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0-only
config ISCSI_TARGET
- tristate "Linux-iSCSI.org iSCSI Target Mode Stack"
- depends on NET
+ tristate "SCSI Target Mode Stack"
+ depends on INET
+ select CRC32
select CRYPTO
- select CRYPTO_CRC32C
- select CRYPTO_CRC32C_INTEL if X86
+ select CRYPTO_HASH
help
- Say M here to enable the ConfigFS enabled Linux-iSCSI.org iSCSI
- Target Mode Stack.
+ Say M to enable the SCSI target mode stack. A SCSI target mode stack
+ is software that makes local storage available over a storage network
+ to a SCSI initiator system. The supported storage network technologies
+ include iSCSI, Fibre Channel and the SCSI RDMA Protocol (SRP).
+ Configuration of the SCSI target mode stack happens through configfs.
+
+source "drivers/target/iscsi/cxgbit/Kconfig"
diff --git a/drivers/target/iscsi/Makefile b/drivers/target/iscsi/Makefile
index 13a92403fe3e..8c9ae96b760d 100644
--- a/drivers/target/iscsi/Makefile
+++ b/drivers/target/iscsi/Makefile
@@ -1,6 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
iscsi_target_mod-y += iscsi_target_parameters.o \
iscsi_target_seq_pdu_list.o \
- iscsi_target_tq.o \
iscsi_target_auth.o \
iscsi_target_datain_values.o \
iscsi_target_device.o \
@@ -19,3 +19,4 @@ iscsi_target_mod-y += iscsi_target_parameters.o \
iscsi_target_transport.o
obj-$(CONFIG_ISCSI_TARGET) += iscsi_target_mod.o
+obj-$(CONFIG_ISCSI_TARGET_CXGB4) += cxgbit/
diff --git a/drivers/target/iscsi/cxgbit/Kconfig b/drivers/target/iscsi/cxgbit/Kconfig
new file mode 100644
index 000000000000..bdeefa75f29e
--- /dev/null
+++ b/drivers/target/iscsi/cxgbit/Kconfig
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config ISCSI_TARGET_CXGB4
+ tristate "Chelsio iSCSI target offload driver"
+ depends on ISCSI_TARGET && CHELSIO_T4 && INET
+ select CHELSIO_LIB
+ help
+ To compile this driver as a module, choose M here: the module
+ will be called cxgbit.
diff --git a/drivers/target/iscsi/cxgbit/Makefile b/drivers/target/iscsi/cxgbit/Makefile
new file mode 100644
index 000000000000..0dcaf2006f78
--- /dev/null
+++ b/drivers/target/iscsi/cxgbit/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+ccflags-y := -I $(srctree)/drivers/net/ethernet/chelsio/cxgb4
+ccflags-y += -I $(srctree)/drivers/net/ethernet/chelsio/libcxgb
+ccflags-y += -I $(srctree)/drivers/target/iscsi
+
+obj-$(CONFIG_ISCSI_TARGET_CXGB4) += cxgbit.o
+
+cxgbit-y := cxgbit_main.o cxgbit_cm.o cxgbit_target.o cxgbit_ddp.o
diff --git a/drivers/target/iscsi/cxgbit/cxgbit.h b/drivers/target/iscsi/cxgbit/cxgbit.h
new file mode 100644
index 000000000000..aff727629663
--- /dev/null
+++ b/drivers/target/iscsi/cxgbit/cxgbit.h
@@ -0,0 +1,351 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2016 Chelsio Communications, Inc.
+ */
+
+#ifndef __CXGBIT_H__
+#define __CXGBIT_H__
+
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/idr.h>
+#include <linux/completion.h>
+#include <linux/netdevice.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/inet.h>
+#include <linux/wait.h>
+#include <linux/kref.h>
+#include <linux/timer.h>
+#include <linux/io.h>
+
+#include <asm/byteorder.h>
+
+#include <net/net_namespace.h>
+
+#include <target/iscsi/iscsi_transport.h>
+#include <iscsi_target_parameters.h>
+#include <iscsi_target_login.h>
+
+#include "t4_regs.h"
+#include "t4_msg.h"
+#include "cxgb4.h"
+#include "cxgb4_uld.h"
+#include "l2t.h"
+#include "libcxgb_ppm.h"
+#include "cxgbit_lro.h"
+
+extern struct mutex cdev_list_lock;
+extern struct list_head cdev_list_head;
+struct cxgbit_np;
+
+struct cxgbit_sock;
+
+struct cxgbit_cmd {
+ struct scatterlist sg;
+ struct cxgbi_task_tag_info ttinfo;
+ bool setup_ddp;
+ bool release;
+};
+
+#define CXGBIT_MAX_ISO_PAYLOAD \
+ min_t(u32, MAX_SKB_FRAGS * PAGE_SIZE, 65535)
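+
+/*
+ * Illustrative note (not part of the original commit): with the typical
+ * MAX_SKB_FRAGS of 17 and 4 KiB pages, MAX_SKB_FRAGS * PAGE_SIZE is
+ * 69632, so the min_t() above clamps the ISO payload to the 65535-byte
+ * limit of 16-bit length fields; on smaller-page configurations the
+ * fragment budget itself becomes the bound.
+ */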
+
+struct cxgbit_iso_info {
+ u8 flags;
+ u32 mpdu;
+ u32 len;
+ u32 burst_len;
+};
+
+enum cxgbit_skcb_flags {
+ SKCBF_TX_NEED_HDR = (1 << 0), /* packet needs a header */
+ SKCBF_TX_FLAG_COMPL = (1 << 1), /* wr completion flag */
+ SKCBF_TX_ISO = (1 << 2), /* iso cpl in tx skb */
+ SKCBF_RX_LRO = (1 << 3), /* lro skb */
+};
+
+struct cxgbit_skb_rx_cb {
+ u8 opcode;
+ void *pdu_cb;
+ void (*backlog_fn)(struct cxgbit_sock *, struct sk_buff *);
+};
+
+struct cxgbit_skb_tx_cb {
+ u8 submode;
+ u32 extra_len;
+};
+
+union cxgbit_skb_cb {
+ struct {
+ u8 flags;
+ union {
+ struct cxgbit_skb_tx_cb tx;
+ struct cxgbit_skb_rx_cb rx;
+ };
+ };
+
+ struct {
+ /* This member must be first. */
+ struct l2t_skb_cb l2t;
+ struct sk_buff *wr_next;
+ };
+};
+
+#define CXGBIT_SKB_CB(skb) ((union cxgbit_skb_cb *)&((skb)->cb[0]))
+#define cxgbit_skcb_flags(skb) (CXGBIT_SKB_CB(skb)->flags)
+#define cxgbit_skcb_submode(skb) (CXGBIT_SKB_CB(skb)->tx.submode)
+#define cxgbit_skcb_tx_wr_next(skb) (CXGBIT_SKB_CB(skb)->wr_next)
+#define cxgbit_skcb_tx_extralen(skb) (CXGBIT_SKB_CB(skb)->tx.extra_len)
+#define cxgbit_skcb_rx_opcode(skb) (CXGBIT_SKB_CB(skb)->rx.opcode)
+#define cxgbit_skcb_rx_backlog_fn(skb) (CXGBIT_SKB_CB(skb)->rx.backlog_fn)
+#define cxgbit_rx_pdu_cb(skb) (CXGBIT_SKB_CB(skb)->rx.pdu_cb)
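+
+/*
+ * Illustrative note (not part of the original commit): union
+ * cxgbit_skb_cb lives in skb->cb[], the fixed 48-byte scratch area of
+ * struct sk_buff, so everything above - including the embedded
+ * struct l2t_skb_cb plus the wr_next pointer - must fit in 48 bytes.
+ * The accessor macros above are the only way this driver touches it.
+ */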
+
+static inline void *cplhdr(struct sk_buff *skb)
+{
+ return skb->data;
+}
+
+enum cxgbit_cdev_flags {
+ CDEV_STATE_UP = 0,
+ CDEV_ISO_ENABLE,
+ CDEV_DDP_ENABLE,
+};
+
+#define NP_INFO_HASH_SIZE 32
+
+struct np_info {
+ struct np_info *next;
+ struct cxgbit_np *cnp;
+ unsigned int stid;
+};
+
+struct cxgbit_list_head {
+ struct list_head list;
+ /* device lock */
+ spinlock_t lock;
+};
+
+struct cxgbit_device {
+ struct list_head list;
+ struct cxgb4_lld_info lldi;
+ struct np_info *np_hash_tab[NP_INFO_HASH_SIZE];
+ /* np lock */
+ spinlock_t np_lock;
+ u8 selectq[MAX_NPORTS][2];
+ struct cxgbit_list_head cskq;
+ u32 mdsl;
+ struct kref kref;
+ unsigned long flags;
+};
+
+struct cxgbit_wr_wait {
+ struct completion completion;
+ int ret;
+};
+
+enum cxgbit_csk_state {
+ CSK_STATE_IDLE = 0,
+ CSK_STATE_LISTEN,
+ CSK_STATE_CONNECTING,
+ CSK_STATE_ESTABLISHED,
+ CSK_STATE_ABORTING,
+ CSK_STATE_CLOSING,
+ CSK_STATE_MORIBUND,
+ CSK_STATE_DEAD,
+};
+
+enum cxgbit_csk_flags {
+ CSK_TX_DATA_SENT = 0,
+ CSK_LOGIN_PDU_DONE,
+ CSK_LOGIN_DONE,
+ CSK_DDP_ENABLE,
+ CSK_ABORT_RPL_WAIT,
+};
+
+struct cxgbit_sock_common {
+ struct cxgbit_device *cdev;
+ struct sockaddr_storage local_addr;
+ struct sockaddr_storage remote_addr;
+ struct cxgbit_wr_wait wr_wait;
+ enum cxgbit_csk_state state;
+ unsigned long flags;
+};
+
+struct cxgbit_np {
+ struct cxgbit_sock_common com;
+ wait_queue_head_t accept_wait;
+ struct iscsi_np *np;
+ struct completion accept_comp;
+ struct list_head np_accept_list;
+ /* np accept lock */
+ spinlock_t np_accept_lock;
+ struct kref kref;
+ unsigned int stid;
+};
+
+struct cxgbit_sock {
+ struct cxgbit_sock_common com;
+ struct cxgbit_np *cnp;
+ struct iscsit_conn *conn;
+ struct l2t_entry *l2t;
+ struct dst_entry *dst;
+ struct list_head list;
+ struct sk_buff_head rxq;
+ struct sk_buff_head txq;
+ struct sk_buff_head ppodq;
+ struct sk_buff_head backlogq;
+ struct sk_buff_head skbq;
+ struct sk_buff *wr_pending_head;
+ struct sk_buff *wr_pending_tail;
+ struct sk_buff *skb;
+ struct sk_buff *lro_skb;
+ struct sk_buff *lro_hskb;
+ struct list_head accept_node;
+ /* socket lock */
+ spinlock_t lock;
+ wait_queue_head_t waitq;
+ bool lock_owner;
+ struct kref kref;
+ u32 max_iso_npdu;
+ u32 wr_cred;
+ u32 wr_una_cred;
+ u32 wr_max_cred;
+ u32 snd_una;
+ u32 tid;
+ u32 snd_nxt;
+ u32 rcv_nxt;
+ u32 smac_idx;
+ u32 tx_chan;
+ u32 mtu;
+ u32 write_seq;
+ u32 rx_credits;
+ u32 snd_win;
+ u32 rcv_win;
+ u16 mss;
+ u16 emss;
+ u16 plen;
+ u16 rss_qid;
+ u16 txq_idx;
+ u16 ctrlq_idx;
+ u8 tos;
+ u8 port_id;
+#define CXGBIT_SUBMODE_HCRC 0x1
+#define CXGBIT_SUBMODE_DCRC 0x2
+ u8 submode;
+#ifdef CONFIG_CHELSIO_T4_DCB
+ u8 dcb_priority;
+#endif
+ u8 snd_wscale;
+};
+
+void _cxgbit_free_cdev(struct kref *kref);
+void _cxgbit_free_csk(struct kref *kref);
+void _cxgbit_free_cnp(struct kref *kref);
+
+static inline void cxgbit_get_cdev(struct cxgbit_device *cdev)
+{
+ kref_get(&cdev->kref);
+}
+
+static inline void cxgbit_put_cdev(struct cxgbit_device *cdev)
+{
+ kref_put(&cdev->kref, _cxgbit_free_cdev);
+}
+
+static inline void cxgbit_get_csk(struct cxgbit_sock *csk)
+{
+ kref_get(&csk->kref);
+}
+
+static inline void cxgbit_put_csk(struct cxgbit_sock *csk)
+{
+ kref_put(&csk->kref, _cxgbit_free_csk);
+}
+
+static inline void cxgbit_get_cnp(struct cxgbit_np *cnp)
+{
+ kref_get(&cnp->kref);
+}
+
+static inline void cxgbit_put_cnp(struct cxgbit_np *cnp)
+{
+ kref_put(&cnp->kref, _cxgbit_free_cnp);
+}
+
+static inline void cxgbit_sock_reset_wr_list(struct cxgbit_sock *csk)
+{
+ csk->wr_pending_tail = NULL;
+ csk->wr_pending_head = NULL;
+}
+
+static inline struct sk_buff *cxgbit_sock_peek_wr(const struct cxgbit_sock *csk)
+{
+ return csk->wr_pending_head;
+}
+
+static inline void
+cxgbit_sock_enqueue_wr(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+ cxgbit_skcb_tx_wr_next(skb) = NULL;
+
+ skb_get(skb);
+
+ if (!csk->wr_pending_head)
+ csk->wr_pending_head = skb;
+ else
+ cxgbit_skcb_tx_wr_next(csk->wr_pending_tail) = skb;
+ csk->wr_pending_tail = skb;
+}
+
+static inline struct sk_buff *cxgbit_sock_dequeue_wr(struct cxgbit_sock *csk)
+{
+ struct sk_buff *skb = csk->wr_pending_head;
+
+ if (likely(skb)) {
+ csk->wr_pending_head = cxgbit_skcb_tx_wr_next(skb);
+ cxgbit_skcb_tx_wr_next(skb) = NULL;
+ }
+ return skb;
+}
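+
+/*
+ * Illustrative note (not part of the original commit): the three
+ * helpers above implement a singly linked FIFO of work requests
+ * threaded through each skb's control block. Enqueue takes an extra
+ * reference via skb_get() so the skb survives until the hardware acks
+ * it; whoever dequeues (completion or teardown) drops that reference
+ * with kfree_skb().
+ */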
+
+typedef void (*cxgbit_cplhandler_func)(struct cxgbit_device *,
+ struct sk_buff *);
+
+int cxgbit_setup_np(struct iscsi_np *, struct sockaddr_storage *);
+int cxgbit_setup_conn_digest(struct cxgbit_sock *);
+int cxgbit_accept_np(struct iscsi_np *, struct iscsit_conn *);
+void cxgbit_free_np(struct iscsi_np *);
+void cxgbit_abort_conn(struct cxgbit_sock *csk);
+void cxgbit_free_conn(struct iscsit_conn *);
+extern cxgbit_cplhandler_func cxgbit_cplhandlers[NUM_CPL_CMDS];
+int cxgbit_get_login_rx(struct iscsit_conn *, struct iscsi_login *);
+int cxgbit_rx_data_ack(struct cxgbit_sock *);
+int cxgbit_l2t_send(struct cxgbit_device *, struct sk_buff *,
+ struct l2t_entry *);
+void cxgbit_push_tx_frames(struct cxgbit_sock *);
+int cxgbit_put_login_tx(struct iscsit_conn *, struct iscsi_login *, u32);
+int cxgbit_xmit_pdu(struct iscsit_conn *, struct iscsit_cmd *,
+ struct iscsi_datain_req *, const void *, u32);
+void cxgbit_get_r2t_ttt(struct iscsit_conn *, struct iscsit_cmd *,
+ struct iscsi_r2t *);
+u32 cxgbit_send_tx_flowc_wr(struct cxgbit_sock *);
+int cxgbit_ofld_send(struct cxgbit_device *, struct sk_buff *);
+void cxgbit_get_rx_pdu(struct iscsit_conn *);
+int cxgbit_validate_params(struct iscsit_conn *);
+struct cxgbit_device *cxgbit_find_device(struct net_device *, u8 *);
+
+/* DDP */
+int cxgbit_ddp_init(struct cxgbit_device *);
+int cxgbit_setup_conn_pgidx(struct cxgbit_sock *, u32);
+int cxgbit_reserve_ttt(struct cxgbit_sock *, struct iscsit_cmd *);
+void cxgbit_unmap_cmd(struct iscsit_conn *, struct iscsit_cmd *);
+
+static inline
+struct cxgbi_ppm *cdev2ppm(struct cxgbit_device *cdev)
+{
+ return (struct cxgbi_ppm *)(*cdev->lldi.iscsi_ppm);
+}
+#endif /* __CXGBIT_H__ */
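
The wr_pending_head/wr_pending_tail helpers above implement a small singly
linked FIFO threaded through each skb's control block: cxgbit_sock_enqueue_wr()
appends at the tail, cxgbit_sock_dequeue_wr() pops from the head. Below is a
minimal, self-contained sketch of the same list discipline in plain C; the
struct and function names are illustrative stand-ins, not driver code.

	#include <stdio.h>

	struct wr {                      /* stand-in for struct sk_buff */
		int id;
		struct wr *wr_next;      /* cxgbit_skcb_tx_wr_next() analogue */
	};

	struct conn {                    /* stand-in for struct cxgbit_sock */
		struct wr *wr_pending_head;
		struct wr *wr_pending_tail;
	};

	static void enqueue_wr(struct conn *c, struct wr *w)
	{
		w->wr_next = NULL;
		if (!c->wr_pending_head)         /* empty list: new head */
			c->wr_pending_head = w;
		else                             /* otherwise append at tail */
			c->wr_pending_tail->wr_next = w;
		c->wr_pending_tail = w;
	}

	static struct wr *dequeue_wr(struct conn *c)
	{
		struct wr *w = c->wr_pending_head;

		if (w) {                         /* pop head, unlink */
			c->wr_pending_head = w->wr_next;
			w->wr_next = NULL;
		}
		return w;
	}

	int main(void)
	{
		struct conn c = { NULL, NULL };
		struct wr a = { 1, NULL }, b = { 2, NULL };
		struct wr *w;

		enqueue_wr(&c, &a);
		enqueue_wr(&c, &b);
		while ((w = dequeue_wr(&c)))
			printf("completed wr %d\n", w->id);  /* 1, then 2 */
		return 0;
	}
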
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
new file mode 100644
index 000000000000..d9204c590d9a
--- /dev/null
+++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
@@ -0,0 +1,2018 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016 Chelsio Communications, Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <linux/skbuff.h>
+#include <linux/timer.h>
+#include <linux/notifier.h>
+#include <linux/inetdevice.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/if_vlan.h>
+
+#include <net/neighbour.h>
+#include <net/netevent.h>
+#include <net/route.h>
+#include <net/tcp.h>
+#include <net/ip6_route.h>
+#include <net/addrconf.h>
+
+#include <libcxgb_cm.h>
+#include "cxgbit.h"
+#include "clip_tbl.h"
+
+static void cxgbit_init_wr_wait(struct cxgbit_wr_wait *wr_waitp)
+{
+ wr_waitp->ret = 0;
+ reinit_completion(&wr_waitp->completion);
+}
+
+static void
+cxgbit_wake_up(struct cxgbit_wr_wait *wr_waitp, const char *func, u8 ret)
+{
+ if (ret == CPL_ERR_NONE)
+ wr_waitp->ret = 0;
+ else
+ wr_waitp->ret = -EIO;
+
+ if (wr_waitp->ret)
+ pr_err("%s: err:%u", func, ret);
+
+ complete(&wr_waitp->completion);
+}
+
+static int
+cxgbit_wait_for_reply(struct cxgbit_device *cdev,
+ struct cxgbit_wr_wait *wr_waitp, u32 tid, u32 timeout,
+ const char *func)
+{
+ int ret;
+
+ if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
+ wr_waitp->ret = -EIO;
+ goto out;
+ }
+
+ ret = wait_for_completion_timeout(&wr_waitp->completion, timeout * HZ);
+ if (!ret) {
+ pr_info("%s - Device %s not responding tid %u\n",
+ func, pci_name(cdev->lldi.pdev), tid);
+ wr_waitp->ret = -ETIMEDOUT;
+ }
+out:
+ if (wr_waitp->ret)
+ pr_info("%s: FW reply %d tid %u\n",
+ pci_name(cdev->lldi.pdev), wr_waitp->ret, tid);
+ return wr_waitp->ret;
+}
+
+static int cxgbit_np_hashfn(const struct cxgbit_np *cnp)
+{
+ return ((unsigned long)cnp >> 10) & (NP_INFO_HASH_SIZE - 1);
+}
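+
+/*
+ * Illustrative note (not part of the original commit): the hash key is
+ * the cnp pointer itself; shifting right by 10 before masking takes the
+ * bucket index from middle address bits rather than the low,
+ * alignment-constrained ones, spreading entries across the 32
+ * NP_INFO_HASH_SIZE buckets.
+ */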
+
+static struct np_info *
+cxgbit_np_hash_add(struct cxgbit_device *cdev, struct cxgbit_np *cnp,
+ unsigned int stid)
+{
+ struct np_info *p = kzalloc(sizeof(*p), GFP_KERNEL);
+
+ if (p) {
+ int bucket = cxgbit_np_hashfn(cnp);
+
+ p->cnp = cnp;
+ p->stid = stid;
+ spin_lock(&cdev->np_lock);
+ p->next = cdev->np_hash_tab[bucket];
+ cdev->np_hash_tab[bucket] = p;
+ spin_unlock(&cdev->np_lock);
+ }
+
+ return p;
+}
+
+static int
+cxgbit_np_hash_find(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
+{
+ int stid = -1, bucket = cxgbit_np_hashfn(cnp);
+ struct np_info *p;
+
+ spin_lock(&cdev->np_lock);
+ for (p = cdev->np_hash_tab[bucket]; p; p = p->next) {
+ if (p->cnp == cnp) {
+ stid = p->stid;
+ break;
+ }
+ }
+ spin_unlock(&cdev->np_lock);
+
+ return stid;
+}
+
+static int cxgbit_np_hash_del(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
+{
+ int stid = -1, bucket = cxgbit_np_hashfn(cnp);
+ struct np_info *p, **prev = &cdev->np_hash_tab[bucket];
+
+ spin_lock(&cdev->np_lock);
+ for (p = *prev; p; prev = &p->next, p = p->next) {
+ if (p->cnp == cnp) {
+ stid = p->stid;
+ *prev = p->next;
+ kfree(p);
+ break;
+ }
+ }
+ spin_unlock(&cdev->np_lock);
+
+ return stid;
+}
+
+void _cxgbit_free_cnp(struct kref *kref)
+{
+ struct cxgbit_np *cnp;
+
+ cnp = container_of(kref, struct cxgbit_np, kref);
+ kfree(cnp);
+}
+
+static int
+cxgbit_create_server6(struct cxgbit_device *cdev, unsigned int stid,
+ struct cxgbit_np *cnp)
+{
+ struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
+ &cnp->com.local_addr;
+ int addr_type;
+ int ret;
+
+ pr_debug("%s: dev = %s; stid = %u; sin6_port = %u\n",
+ __func__, cdev->lldi.ports[0]->name, stid, sin6->sin6_port);
+
+ addr_type = ipv6_addr_type((const struct in6_addr *)
+ &sin6->sin6_addr);
+ if (addr_type != IPV6_ADDR_ANY) {
+ ret = cxgb4_clip_get(cdev->lldi.ports[0],
+ (const u32 *)&sin6->sin6_addr.s6_addr, 1);
+ if (ret) {
+ pr_err("Unable to find clip table entry. laddr %pI6. Error:%d.\n",
+ sin6->sin6_addr.s6_addr, ret);
+ return -ENOMEM;
+ }
+ }
+
+ cxgbit_get_cnp(cnp);
+ cxgbit_init_wr_wait(&cnp->com.wr_wait);
+
+ ret = cxgb4_create_server6(cdev->lldi.ports[0],
+ stid, &sin6->sin6_addr,
+ sin6->sin6_port,
+ cdev->lldi.rxq_ids[0]);
+ if (!ret)
+ ret = cxgbit_wait_for_reply(cdev, &cnp->com.wr_wait,
+ 0, 10, __func__);
+ else if (ret > 0)
+ ret = net_xmit_errno(ret);
+ else
+ cxgbit_put_cnp(cnp);
+
+ if (ret) {
+ if (ret != -ETIMEDOUT)
+ cxgb4_clip_release(cdev->lldi.ports[0],
+ (const u32 *)&sin6->sin6_addr.s6_addr, 1);
+
+ pr_err("create server6 err %d stid %d laddr %pI6 lport %d\n",
+ ret, stid, sin6->sin6_addr.s6_addr,
+ ntohs(sin6->sin6_port));
+ }
+
+ return ret;
+}
+
+static int
+cxgbit_create_server4(struct cxgbit_device *cdev, unsigned int stid,
+ struct cxgbit_np *cnp)
+{
+ struct sockaddr_in *sin = (struct sockaddr_in *)
+ &cnp->com.local_addr;
+ int ret;
+
+ pr_debug("%s: dev = %s; stid = %u; sin_port = %u\n",
+ __func__, cdev->lldi.ports[0]->name, stid, sin->sin_port);
+
+ cxgbit_get_cnp(cnp);
+ cxgbit_init_wr_wait(&cnp->com.wr_wait);
+
+ ret = cxgb4_create_server(cdev->lldi.ports[0],
+ stid, sin->sin_addr.s_addr,
+ sin->sin_port, 0,
+ cdev->lldi.rxq_ids[0]);
+ if (!ret)
+ ret = cxgbit_wait_for_reply(cdev,
+ &cnp->com.wr_wait,
+ 0, 10, __func__);
+ else if (ret > 0)
+ ret = net_xmit_errno(ret);
+ else
+ cxgbit_put_cnp(cnp);
+
+ if (ret)
+ pr_err("create server failed err %d stid %d laddr %pI4 lport %d\n",
+ ret, stid, &sin->sin_addr, ntohs(sin->sin_port));
+ return ret;
+}
+
+struct cxgbit_device *cxgbit_find_device(struct net_device *ndev, u8 *port_id)
+{
+ struct cxgbit_device *cdev;
+ u8 i;
+
+ list_for_each_entry(cdev, &cdev_list_head, list) {
+ struct cxgb4_lld_info *lldi = &cdev->lldi;
+
+ for (i = 0; i < lldi->nports; i++) {
+ if (lldi->ports[i] == ndev) {
+ if (port_id)
+ *port_id = i;
+ return cdev;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+static struct net_device *cxgbit_get_real_dev(struct net_device *ndev)
+{
+ if (ndev->priv_flags & IFF_BONDING) {
+ pr_err("Bond devices are not supported. Interface:%s\n",
+ ndev->name);
+ return NULL;
+ }
+
+ if (is_vlan_dev(ndev))
+ return vlan_dev_real_dev(ndev);
+
+ return ndev;
+}
+
+static struct net_device *cxgbit_ipv4_netdev(__be32 saddr)
+{
+ struct net_device *ndev;
+
+ ndev = __ip_dev_find(&init_net, saddr, false);
+ if (!ndev)
+ return NULL;
+
+ return cxgbit_get_real_dev(ndev);
+}
+
+static struct net_device *cxgbit_ipv6_netdev(struct in6_addr *addr6)
+{
+ struct net_device *ndev = NULL;
+ bool found = false;
+
+ if (IS_ENABLED(CONFIG_IPV6)) {
+ for_each_netdev_rcu(&init_net, ndev)
+ if (ipv6_chk_addr(&init_net, addr6, ndev, 1)) {
+ found = true;
+ break;
+ }
+ }
+ if (!found)
+ return NULL;
+ return cxgbit_get_real_dev(ndev);
+}
+
+static struct cxgbit_device *cxgbit_find_np_cdev(struct cxgbit_np *cnp)
+{
+ struct sockaddr_storage *sockaddr = &cnp->com.local_addr;
+ int ss_family = sockaddr->ss_family;
+ struct net_device *ndev = NULL;
+ struct cxgbit_device *cdev = NULL;
+
+ rcu_read_lock();
+ if (ss_family == AF_INET) {
+ struct sockaddr_in *sin;
+
+ sin = (struct sockaddr_in *)sockaddr;
+ ndev = cxgbit_ipv4_netdev(sin->sin_addr.s_addr);
+ } else if (ss_family == AF_INET6) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)sockaddr;
+ ndev = cxgbit_ipv6_netdev(&sin6->sin6_addr);
+ }
+ if (!ndev)
+ goto out;
+
+ cdev = cxgbit_find_device(ndev, NULL);
+out:
+ rcu_read_unlock();
+ return cdev;
+}
+
+static bool cxgbit_inaddr_any(struct cxgbit_np *cnp)
+{
+ struct sockaddr_storage *sockaddr = &cnp->com.local_addr;
+ int ss_family = sockaddr->ss_family;
+ int addr_type;
+
+ if (ss_family == AF_INET) {
+ struct sockaddr_in *sin;
+
+ sin = (struct sockaddr_in *)sockaddr;
+ if (sin->sin_addr.s_addr == htonl(INADDR_ANY))
+ return true;
+ } else if (ss_family == AF_INET6) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)sockaddr;
+ addr_type = ipv6_addr_type((const struct in6_addr *)
+ &sin6->sin6_addr);
+ if (addr_type == IPV6_ADDR_ANY)
+ return true;
+ }
+ return false;
+}
+
+static int
+__cxgbit_setup_cdev_np(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
+{
+ int stid, ret;
+ int ss_family = cnp->com.local_addr.ss_family;
+
+ if (!test_bit(CDEV_STATE_UP, &cdev->flags))
+ return -EINVAL;
+
+ stid = cxgb4_alloc_stid(cdev->lldi.tids, ss_family, cnp);
+ if (stid < 0)
+ return -EINVAL;
+
+ if (!cxgbit_np_hash_add(cdev, cnp, stid)) {
+ cxgb4_free_stid(cdev->lldi.tids, stid, ss_family);
+ return -EINVAL;
+ }
+
+ if (ss_family == AF_INET)
+ ret = cxgbit_create_server4(cdev, stid, cnp);
+ else
+ ret = cxgbit_create_server6(cdev, stid, cnp);
+
+ if (ret) {
+ if (ret != -ETIMEDOUT)
+ cxgb4_free_stid(cdev->lldi.tids, stid,
+ ss_family);
+ cxgbit_np_hash_del(cdev, cnp);
+ return ret;
+ }
+ return ret;
+}
+
+static int cxgbit_setup_cdev_np(struct cxgbit_np *cnp)
+{
+ struct cxgbit_device *cdev;
+ int ret = -1;
+
+ mutex_lock(&cdev_list_lock);
+ cdev = cxgbit_find_np_cdev(cnp);
+ if (!cdev)
+ goto out;
+
+ if (cxgbit_np_hash_find(cdev, cnp) >= 0)
+ goto out;
+
+ if (__cxgbit_setup_cdev_np(cdev, cnp))
+ goto out;
+
+ cnp->com.cdev = cdev;
+ ret = 0;
+out:
+ mutex_unlock(&cdev_list_lock);
+ return ret;
+}
+
+static int cxgbit_setup_all_np(struct cxgbit_np *cnp)
+{
+ struct cxgbit_device *cdev;
+ int ret;
+ u32 count = 0;
+
+ mutex_lock(&cdev_list_lock);
+ list_for_each_entry(cdev, &cdev_list_head, list) {
+ if (cxgbit_np_hash_find(cdev, cnp) >= 0) {
+ mutex_unlock(&cdev_list_lock);
+ return -1;
+ }
+ }
+
+ list_for_each_entry(cdev, &cdev_list_head, list) {
+ ret = __cxgbit_setup_cdev_np(cdev, cnp);
+ if (ret == -ETIMEDOUT)
+ break;
+ if (ret != 0)
+ continue;
+ count++;
+ }
+ mutex_unlock(&cdev_list_lock);
+
+ return count ? 0 : -1;
+}
+
+int cxgbit_setup_np(struct iscsi_np *np, struct sockaddr_storage *ksockaddr)
+{
+ struct cxgbit_np *cnp;
+ int ret;
+
+ if ((ksockaddr->ss_family != AF_INET) &&
+ (ksockaddr->ss_family != AF_INET6))
+ return -EINVAL;
+
+ cnp = kzalloc(sizeof(*cnp), GFP_KERNEL);
+ if (!cnp)
+ return -ENOMEM;
+
+ init_waitqueue_head(&cnp->accept_wait);
+ init_completion(&cnp->com.wr_wait.completion);
+ init_completion(&cnp->accept_comp);
+ INIT_LIST_HEAD(&cnp->np_accept_list);
+ spin_lock_init(&cnp->np_accept_lock);
+ kref_init(&cnp->kref);
+ memcpy(&np->np_sockaddr, ksockaddr,
+ sizeof(struct sockaddr_storage));
+ memcpy(&cnp->com.local_addr, &np->np_sockaddr,
+ sizeof(cnp->com.local_addr));
+
+ cnp->np = np;
+ cnp->com.cdev = NULL;
+
+ if (cxgbit_inaddr_any(cnp))
+ ret = cxgbit_setup_all_np(cnp);
+ else
+ ret = cxgbit_setup_cdev_np(cnp);
+
+ if (ret) {
+ cxgbit_put_cnp(cnp);
+ return -EINVAL;
+ }
+
+ np->np_context = cnp;
+ cnp->com.state = CSK_STATE_LISTEN;
+ return 0;
+}
+
+static void
+cxgbit_set_conn_info(struct iscsi_np *np, struct iscsit_conn *conn,
+ struct cxgbit_sock *csk)
+{
+ conn->login_family = np->np_sockaddr.ss_family;
+ conn->login_sockaddr = csk->com.remote_addr;
+ conn->local_sockaddr = csk->com.local_addr;
+}
+
+int cxgbit_accept_np(struct iscsi_np *np, struct iscsit_conn *conn)
+{
+ struct cxgbit_np *cnp = np->np_context;
+ struct cxgbit_sock *csk;
+ int ret = 0;
+
+accept_wait:
+ ret = wait_for_completion_interruptible(&cnp->accept_comp);
+ if (ret)
+ return -ENODEV;
+
+ spin_lock_bh(&np->np_thread_lock);
+ if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
+ spin_unlock_bh(&np->np_thread_lock);
+ /*
+ * No point in stalling here when np_thread
+ * is in state RESET/SHUTDOWN/EXIT - bail
+ */
+ return -ENODEV;
+ }
+ spin_unlock_bh(&np->np_thread_lock);
+
+ spin_lock_bh(&cnp->np_accept_lock);
+ if (list_empty(&cnp->np_accept_list)) {
+ spin_unlock_bh(&cnp->np_accept_lock);
+ goto accept_wait;
+ }
+
+ csk = list_first_entry(&cnp->np_accept_list,
+ struct cxgbit_sock,
+ accept_node);
+
+ list_del_init(&csk->accept_node);
+ spin_unlock_bh(&cnp->np_accept_lock);
+ conn->context = csk;
+ csk->conn = conn;
+
+ cxgbit_set_conn_info(np, conn, csk);
+ return 0;
+}
+
+static int
+__cxgbit_free_cdev_np(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
+{
+ int stid, ret;
+ bool ipv6 = false;
+
+ stid = cxgbit_np_hash_del(cdev, cnp);
+ if (stid < 0)
+ return -EINVAL;
+ if (!test_bit(CDEV_STATE_UP, &cdev->flags))
+ return -EINVAL;
+
+ if (cnp->np->np_sockaddr.ss_family == AF_INET6)
+ ipv6 = true;
+
+ cxgbit_get_cnp(cnp);
+ cxgbit_init_wr_wait(&cnp->com.wr_wait);
+ ret = cxgb4_remove_server(cdev->lldi.ports[0], stid,
+ cdev->lldi.rxq_ids[0], ipv6);
+
+ if (ret > 0)
+ ret = net_xmit_errno(ret);
+
+ if (ret) {
+ cxgbit_put_cnp(cnp);
+ return ret;
+ }
+
+ ret = cxgbit_wait_for_reply(cdev, &cnp->com.wr_wait,
+ 0, 10, __func__);
+ if (ret == -ETIMEDOUT)
+ return ret;
+
+ if (ipv6 && cnp->com.cdev) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)&cnp->com.local_addr;
+ cxgb4_clip_release(cdev->lldi.ports[0],
+ (const u32 *)&sin6->sin6_addr.s6_addr,
+ 1);
+ }
+
+ cxgb4_free_stid(cdev->lldi.tids, stid,
+ cnp->com.local_addr.ss_family);
+ return 0;
+}
+
+static void cxgbit_free_all_np(struct cxgbit_np *cnp)
+{
+ struct cxgbit_device *cdev;
+ int ret;
+
+ mutex_lock(&cdev_list_lock);
+ list_for_each_entry(cdev, &cdev_list_head, list) {
+ ret = __cxgbit_free_cdev_np(cdev, cnp);
+ if (ret == -ETIMEDOUT)
+ break;
+ }
+ mutex_unlock(&cdev_list_lock);
+}
+
+static void cxgbit_free_cdev_np(struct cxgbit_np *cnp)
+{
+ struct cxgbit_device *cdev;
+ bool found = false;
+
+ mutex_lock(&cdev_list_lock);
+ list_for_each_entry(cdev, &cdev_list_head, list) {
+ if (cdev == cnp->com.cdev) {
+ found = true;
+ break;
+ }
+ }
+ if (!found)
+ goto out;
+
+ __cxgbit_free_cdev_np(cdev, cnp);
+out:
+ mutex_unlock(&cdev_list_lock);
+}
+
+static void __cxgbit_free_conn(struct cxgbit_sock *csk);
+
+void cxgbit_free_np(struct iscsi_np *np)
+{
+ struct cxgbit_np *cnp = np->np_context;
+ struct cxgbit_sock *csk, *tmp;
+
+ cnp->com.state = CSK_STATE_DEAD;
+ if (cnp->com.cdev)
+ cxgbit_free_cdev_np(cnp);
+ else
+ cxgbit_free_all_np(cnp);
+
+ spin_lock_bh(&cnp->np_accept_lock);
+ list_for_each_entry_safe(csk, tmp, &cnp->np_accept_list, accept_node) {
+ list_del_init(&csk->accept_node);
+ __cxgbit_free_conn(csk);
+ }
+ spin_unlock_bh(&cnp->np_accept_lock);
+
+ np->np_context = NULL;
+ cxgbit_put_cnp(cnp);
+}
+
+static void cxgbit_send_halfclose(struct cxgbit_sock *csk)
+{
+ struct sk_buff *skb;
+ u32 len = roundup(sizeof(struct cpl_close_con_req), 16);
+
+ skb = alloc_skb(len, GFP_ATOMIC);
+ if (!skb)
+ return;
+
+ cxgb_mk_close_con_req(skb, len, csk->tid, csk->txq_idx,
+ NULL, NULL);
+
+ cxgbit_skcb_flags(skb) |= SKCBF_TX_FLAG_COMPL;
+ __skb_queue_tail(&csk->txq, skb);
+ cxgbit_push_tx_frames(csk);
+}
+
+static void cxgbit_arp_failure_discard(void *handle, struct sk_buff *skb)
+{
+ struct cxgbit_sock *csk = handle;
+
+ pr_debug("%s cxgbit_device %p\n", __func__, handle);
+ kfree_skb(skb);
+ cxgbit_put_csk(csk);
+}
+
+static void cxgbit_abort_arp_failure(void *handle, struct sk_buff *skb)
+{
+ struct cxgbit_device *cdev = handle;
+ struct cpl_abort_req *req = cplhdr(skb);
+
+ pr_debug("%s cdev %p\n", __func__, cdev);
+ req->cmd = CPL_ABORT_NO_RST;
+ cxgbit_ofld_send(cdev, skb);
+}
+
+static int cxgbit_send_abort_req(struct cxgbit_sock *csk)
+{
+ struct sk_buff *skb;
+ u32 len = roundup(sizeof(struct cpl_abort_req), 16);
+
+ pr_debug("%s: csk %p tid %u; state %d\n",
+ __func__, csk, csk->tid, csk->com.state);
+
+ __skb_queue_purge(&csk->txq);
+
+ if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags))
+ cxgbit_send_tx_flowc_wr(csk);
+
+ skb = __skb_dequeue(&csk->skbq);
+ cxgb_mk_abort_req(skb, len, csk->tid, csk->txq_idx,
+ csk->com.cdev, cxgbit_abort_arp_failure);
+
+ return cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
+}
+
+static void
+__cxgbit_abort_conn(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+ __kfree_skb(skb);
+
+ if (csk->com.state != CSK_STATE_ESTABLISHED)
+ goto no_abort;
+
+ set_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags);
+ csk->com.state = CSK_STATE_ABORTING;
+
+ cxgbit_send_abort_req(csk);
+
+ return;
+
+no_abort:
+ cxgbit_wake_up(&csk->com.wr_wait, __func__, CPL_ERR_NONE);
+ cxgbit_put_csk(csk);
+}
+
+void cxgbit_abort_conn(struct cxgbit_sock *csk)
+{
+ struct sk_buff *skb = alloc_skb(0, GFP_KERNEL | __GFP_NOFAIL);
+
+ cxgbit_get_csk(csk);
+ cxgbit_init_wr_wait(&csk->com.wr_wait);
+
+ spin_lock_bh(&csk->lock);
+ if (csk->lock_owner) {
+ cxgbit_skcb_rx_backlog_fn(skb) = __cxgbit_abort_conn;
+ __skb_queue_tail(&csk->backlogq, skb);
+ } else {
+ __cxgbit_abort_conn(csk, skb);
+ }
+ spin_unlock_bh(&csk->lock);
+
+ cxgbit_wait_for_reply(csk->com.cdev, &csk->com.wr_wait,
+ csk->tid, 600, __func__);
+}
+
+static void __cxgbit_free_conn(struct cxgbit_sock *csk)
+{
+ struct iscsit_conn *conn = csk->conn;
+ bool release = false;
+
+ pr_debug("%s: state %d\n",
+ __func__, csk->com.state);
+
+ spin_lock_bh(&csk->lock);
+ switch (csk->com.state) {
+ case CSK_STATE_ESTABLISHED:
+ if (conn && (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)) {
+ csk->com.state = CSK_STATE_CLOSING;
+ cxgbit_send_halfclose(csk);
+ } else {
+ csk->com.state = CSK_STATE_ABORTING;
+ cxgbit_send_abort_req(csk);
+ }
+ break;
+ case CSK_STATE_CLOSING:
+ csk->com.state = CSK_STATE_MORIBUND;
+ cxgbit_send_halfclose(csk);
+ break;
+ case CSK_STATE_DEAD:
+ release = true;
+ break;
+ default:
+ pr_err("%s: csk %p; state %d\n",
+ __func__, csk, csk->com.state);
+ }
+ spin_unlock_bh(&csk->lock);
+
+ if (release)
+ cxgbit_put_csk(csk);
+}
+
+void cxgbit_free_conn(struct iscsit_conn *conn)
+{
+ __cxgbit_free_conn(conn->context);
+}
+
+static void cxgbit_set_emss(struct cxgbit_sock *csk, u16 opt)
+{
+ csk->emss = csk->com.cdev->lldi.mtus[TCPOPT_MSS_G(opt)] -
+ ((csk->com.remote_addr.ss_family == AF_INET) ?
+ sizeof(struct iphdr) : sizeof(struct ipv6hdr)) -
+ sizeof(struct tcphdr);
+ csk->mss = csk->emss;
+ if (TCPOPT_TSTAMP_G(opt))
+ csk->emss -= round_up(TCPOLEN_TIMESTAMP, 4);
+ if (csk->emss < 128)
+ csk->emss = 128;
+ if (csk->emss & 7)
+ pr_info("Warning: misaligned mtu idx %u mss %u emss=%u\n",
+ TCPOPT_MSS_G(opt), csk->mss, csk->emss);
+ pr_debug("%s mss_idx %u mss %u emss=%u\n", __func__, TCPOPT_MSS_G(opt),
+ csk->mss, csk->emss);
+}
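+
+/*
+ * Illustrative example (not part of the original commit): for a
+ * 1500-byte MTU entry over IPv4 the computation above gives
+ * emss = 1500 - 20 (IP) - 20 (TCP) = 1460, reduced to 1448 once the
+ * 12-byte (rounded-up) timestamp option is subtracted; the 128-byte
+ * floor only matters for pathologically small MTU table entries.
+ */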
+
+static void cxgbit_free_skb(struct cxgbit_sock *csk)
+{
+ struct sk_buff *skb;
+
+ __skb_queue_purge(&csk->txq);
+ __skb_queue_purge(&csk->rxq);
+ __skb_queue_purge(&csk->backlogq);
+ __skb_queue_purge(&csk->ppodq);
+ __skb_queue_purge(&csk->skbq);
+
+ while ((skb = cxgbit_sock_dequeue_wr(csk)))
+ kfree_skb(skb);
+
+ __kfree_skb(csk->lro_hskb);
+}
+
+void _cxgbit_free_csk(struct kref *kref)
+{
+ struct cxgbit_sock *csk;
+ struct cxgbit_device *cdev;
+
+ csk = container_of(kref, struct cxgbit_sock, kref);
+
+ pr_debug("%s csk %p state %d\n", __func__, csk, csk->com.state);
+
+ if (csk->com.local_addr.ss_family == AF_INET6) {
+ struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
+ &csk->com.local_addr;
+ cxgb4_clip_release(csk->com.cdev->lldi.ports[0],
+ (const u32 *)
+ &sin6->sin6_addr.s6_addr, 1);
+ }
+
+ cxgb4_remove_tid(csk->com.cdev->lldi.tids, 0, csk->tid,
+ csk->com.local_addr.ss_family);
+ dst_release(csk->dst);
+ cxgb4_l2t_release(csk->l2t);
+
+ cdev = csk->com.cdev;
+ spin_lock_bh(&cdev->cskq.lock);
+ list_del(&csk->list);
+ spin_unlock_bh(&cdev->cskq.lock);
+
+ cxgbit_free_skb(csk);
+ cxgbit_put_cnp(csk->cnp);
+ cxgbit_put_cdev(cdev);
+
+ kfree(csk);
+}
+
+static void cxgbit_set_tcp_window(struct cxgbit_sock *csk, struct port_info *pi)
+{
+ unsigned int linkspeed;
+ u8 scale;
+
+ linkspeed = pi->link_cfg.speed;
+ scale = linkspeed / SPEED_10000;
+
+#define CXGBIT_10G_RCV_WIN (256 * 1024)
+ csk->rcv_win = CXGBIT_10G_RCV_WIN;
+ if (scale)
+ csk->rcv_win *= scale;
+ csk->rcv_win = min(csk->rcv_win, RCV_BUFSIZ_M << 10);
+
+#define CXGBIT_10G_SND_WIN (256 * 1024)
+ csk->snd_win = CXGBIT_10G_SND_WIN;
+ if (scale)
+ csk->snd_win *= scale;
+ csk->snd_win = min(csk->snd_win, 512U * 1024);
+
+ pr_debug("%s snd_win %d rcv_win %d\n",
+ __func__, csk->snd_win, csk->rcv_win);
+}
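+
+/*
+ * Illustrative example (not part of the original commit): the windows
+ * scale linearly with link speed in 10 Gb/s units, so a 40G port asks
+ * for 4 * 256 KiB = 1 MiB, subject to the caps above (RCV_BUFSIZ_M << 10
+ * on the receive side, 512 KiB on the send side).
+ */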
+
+#ifdef CONFIG_CHELSIO_T4_DCB
+static u8 cxgbit_get_iscsi_dcb_state(struct net_device *ndev)
+{
+ return ndev->dcbnl_ops->getstate(ndev);
+}
+
+static int cxgbit_select_priority(int pri_mask)
+{
+ if (!pri_mask)
+ return 0;
+
+ return (ffs(pri_mask) - 1);
+}
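+
+/*
+ * Illustrative example (not part of the original commit): for
+ * pri_mask 0x8, ffs() returns 4, so the selected priority is 3 - the
+ * lowest set bit wins when several priorities are enabled.
+ */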
+
+static u8 cxgbit_get_iscsi_dcb_priority(struct net_device *ndev, u16 local_port)
+{
+ int ret;
+ u8 caps;
+
+ struct dcb_app iscsi_dcb_app = {
+ .protocol = local_port
+ };
+
+ ret = (int)ndev->dcbnl_ops->getcap(ndev, DCB_CAP_ATTR_DCBX, &caps);
+
+ if (ret)
+ return 0;
+
+ if (caps & DCB_CAP_DCBX_VER_IEEE) {
+ iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_STREAM;
+ ret = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);
+ if (!ret) {
+ iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY;
+ ret = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);
+ }
+ } else if (caps & DCB_CAP_DCBX_VER_CEE) {
+ iscsi_dcb_app.selector = DCB_APP_IDTYPE_PORTNUM;
+
+ ret = dcb_getapp(ndev, &iscsi_dcb_app);
+ }
+
+ pr_info("iSCSI priority is set to %u\n", cxgbit_select_priority(ret));
+
+ return cxgbit_select_priority(ret);
+}
+#endif
+
+static int
+cxgbit_offload_init(struct cxgbit_sock *csk, int iptype, __u8 *peer_ip,
+ u16 local_port, struct dst_entry *dst,
+ struct cxgbit_device *cdev)
+{
+ struct neighbour *n;
+ int ret, step;
+ struct net_device *ndev;
+ u16 rxq_idx, port_id;
+#ifdef CONFIG_CHELSIO_T4_DCB
+ u8 priority = 0;
+#endif
+
+ n = dst_neigh_lookup(dst, peer_ip);
+ if (!n)
+ return -ENODEV;
+
+ rcu_read_lock();
+ if (!(n->nud_state & NUD_VALID))
+ neigh_event_send(n, NULL);
+
+ ret = -ENOMEM;
+ if (n->dev->flags & IFF_LOOPBACK) {
+ if (iptype == 4)
+ ndev = cxgbit_ipv4_netdev(*(__be32 *)peer_ip);
+ else if (IS_ENABLED(CONFIG_IPV6))
+ ndev = cxgbit_ipv6_netdev((struct in6_addr *)peer_ip);
+ else
+ ndev = NULL;
+
+ if (!ndev) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t,
+ n, ndev, 0);
+ if (!csk->l2t)
+ goto out;
+ csk->mtu = ndev->mtu;
+ csk->tx_chan = cxgb4_port_chan(ndev);
+ csk->smac_idx =
+ ((struct port_info *)netdev_priv(ndev))->smt_idx;
+ step = cdev->lldi.ntxq /
+ cdev->lldi.nchan;
+ csk->txq_idx = cxgb4_port_idx(ndev) * step;
+ step = cdev->lldi.nrxq /
+ cdev->lldi.nchan;
+ csk->ctrlq_idx = cxgb4_port_idx(ndev);
+ csk->rss_qid = cdev->lldi.rxq_ids[
+ cxgb4_port_idx(ndev) * step];
+ csk->port_id = cxgb4_port_idx(ndev);
+ cxgbit_set_tcp_window(csk,
+ (struct port_info *)netdev_priv(ndev));
+ } else {
+ ndev = cxgbit_get_real_dev(n->dev);
+ if (!ndev) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+#ifdef CONFIG_CHELSIO_T4_DCB
+ if (cxgbit_get_iscsi_dcb_state(ndev))
+ priority = cxgbit_get_iscsi_dcb_priority(ndev,
+ local_port);
+
+ csk->dcb_priority = priority;
+
+ csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, priority);
+#else
+ csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, 0);
+#endif
+ if (!csk->l2t)
+ goto out;
+ port_id = cxgb4_port_idx(ndev);
+ csk->mtu = dst_mtu(dst);
+ csk->tx_chan = cxgb4_port_chan(ndev);
+ csk->smac_idx =
+ ((struct port_info *)netdev_priv(ndev))->smt_idx;
+ step = cdev->lldi.ntxq /
+ cdev->lldi.nports;
+ csk->txq_idx = (port_id * step) +
+ (cdev->selectq[port_id][0]++ % step);
+ csk->ctrlq_idx = cxgb4_port_idx(ndev);
+ step = cdev->lldi.nrxq /
+ cdev->lldi.nports;
+ rxq_idx = (port_id * step) +
+ (cdev->selectq[port_id][1]++ % step);
+ csk->rss_qid = cdev->lldi.rxq_ids[rxq_idx];
+ csk->port_id = port_id;
+ cxgbit_set_tcp_window(csk,
+ (struct port_info *)netdev_priv(ndev));
+ }
+ ret = 0;
+out:
+ rcu_read_unlock();
+ neigh_release(n);
+ return ret;
+}
+
+int cxgbit_ofld_send(struct cxgbit_device *cdev, struct sk_buff *skb)
+{
+ int ret = 0;
+
+ if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
+ kfree_skb(skb);
+ pr_err("%s - device not up - dropping\n", __func__);
+ return -EIO;
+ }
+
+ ret = cxgb4_ofld_send(cdev->lldi.ports[0], skb);
+ if (ret < 0)
+ kfree_skb(skb);
+ return ret < 0 ? ret : 0;
+}
+
+static void cxgbit_release_tid(struct cxgbit_device *cdev, u32 tid)
+{
+ u32 len = roundup(sizeof(struct cpl_tid_release), 16);
+ struct sk_buff *skb;
+
+ skb = alloc_skb(len, GFP_ATOMIC);
+ if (!skb)
+ return;
+
+ cxgb_mk_tid_release(skb, len, tid, 0);
+ cxgbit_ofld_send(cdev, skb);
+}
+
+int
+cxgbit_l2t_send(struct cxgbit_device *cdev, struct sk_buff *skb,
+ struct l2t_entry *l2e)
+{
+ int ret = 0;
+
+ if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
+ kfree_skb(skb);
+ pr_err("%s - device not up - dropping\n", __func__);
+ return -EIO;
+ }
+
+ ret = cxgb4_l2t_send(cdev->lldi.ports[0], skb, l2e);
+ if (ret < 0)
+ kfree_skb(skb);
+ return ret < 0 ? ret : 0;
+}
+
+static void cxgbit_send_rx_credits(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+ if (csk->com.state != CSK_STATE_ESTABLISHED) {
+ __kfree_skb(skb);
+ return;
+ }
+
+ cxgbit_ofld_send(csk->com.cdev, skb);
+}
+
+/*
+ * CPL connection rx data ack: host -> adapter.
+ * Send RX credits through an RX_DATA_ACK CPL message.
+ * Returns 0 on success, -1 if the skb allocation fails.
+ */
+int cxgbit_rx_data_ack(struct cxgbit_sock *csk)
+{
+ struct sk_buff *skb;
+ u32 len = roundup(sizeof(struct cpl_rx_data_ack), 16);
+ u32 credit_dack;
+
+ skb = alloc_skb(len, GFP_KERNEL);
+ if (!skb)
+ return -1;
+
+ credit_dack = RX_DACK_CHANGE_F | RX_DACK_MODE_V(3) |
+ RX_CREDITS_V(csk->rx_credits);
+
+ cxgb_mk_rx_data_ack(skb, len, csk->tid, csk->ctrlq_idx,
+ credit_dack);
+
+ csk->rx_credits = 0;
+
+ spin_lock_bh(&csk->lock);
+ if (csk->lock_owner) {
+ cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_send_rx_credits;
+ __skb_queue_tail(&csk->backlogq, skb);
+ spin_unlock_bh(&csk->lock);
+ return 0;
+ }
+
+ cxgbit_send_rx_credits(csk, skb);
+ spin_unlock_bh(&csk->lock);
+
+ return 0;
+}
+
+#define FLOWC_WR_NPARAMS_MIN 9
+#define FLOWC_WR_NPARAMS_MAX 11
+static int cxgbit_alloc_csk_skb(struct cxgbit_sock *csk)
+{
+ struct sk_buff *skb;
+ u32 len, flowclen;
+ u8 i;
+
+ flowclen = offsetof(struct fw_flowc_wr,
+ mnemval[FLOWC_WR_NPARAMS_MAX]);
+
+ len = max_t(u32, sizeof(struct cpl_abort_req),
+ sizeof(struct cpl_abort_rpl));
+
+ len = max(len, flowclen);
+ len = roundup(len, 16);
+
+ for (i = 0; i < 3; i++) {
+ skb = alloc_skb(len, GFP_ATOMIC);
+ if (!skb)
+ goto out;
+ __skb_queue_tail(&csk->skbq, skb);
+ }
+
+ skb = alloc_skb(LRO_SKB_MIN_HEADROOM, GFP_ATOMIC);
+ if (!skb)
+ goto out;
+
+ memset(skb->data, 0, LRO_SKB_MIN_HEADROOM);
+ csk->lro_hskb = skb;
+
+ return 0;
+out:
+ __skb_queue_purge(&csk->skbq);
+ return -ENOMEM;
+}
+
+static void
+cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req)
+{
+ struct sk_buff *skb;
+ const struct tcphdr *tcph;
+ struct cpl_t5_pass_accept_rpl *rpl5;
+ struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi;
+ unsigned int len = roundup(sizeof(*rpl5), 16);
+ unsigned int mtu_idx;
+ u64 opt0;
+ u32 opt2, hlen;
+ u32 wscale;
+ u32 win;
+
+ pr_debug("%s csk %p tid %u\n", __func__, csk, csk->tid);
+
+ skb = alloc_skb(len, GFP_ATOMIC);
+ if (!skb) {
+ cxgbit_put_csk(csk);
+ return;
+ }
+
+ rpl5 = __skb_put_zero(skb, len);
+
+ INIT_TP_WR(rpl5, csk->tid);
+ OPCODE_TID(rpl5) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
+ csk->tid));
+ cxgb_best_mtu(csk->com.cdev->lldi.mtus, csk->mtu, &mtu_idx,
+ req->tcpopt.tstamp,
+ (csk->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
+ wscale = cxgb_compute_wscale(csk->rcv_win);
+ /*
+ * Specify the largest window that will fit in opt0. The
+ * remainder will be specified in the rx_data_ack.
+ */
+ win = csk->rcv_win >> 10;
+ if (win > RCV_BUFSIZ_M)
+ win = RCV_BUFSIZ_M;
+ opt0 = TCAM_BYPASS_F |
+ WND_SCALE_V(wscale) |
+ MSS_IDX_V(mtu_idx) |
+ L2T_IDX_V(csk->l2t->idx) |
+ TX_CHAN_V(csk->tx_chan) |
+ SMAC_SEL_V(csk->smac_idx) |
+ DSCP_V(csk->tos >> 2) |
+ ULP_MODE_V(ULP_MODE_ISCSI) |
+ RCV_BUFSIZ_V(win);
+
+ opt2 = RX_CHANNEL_V(0) |
+ RSS_QUEUE_VALID_F | RSS_QUEUE_V(csk->rss_qid);
+
+ if (!is_t5(lldi->adapter_type))
+ opt2 |= RX_FC_DISABLE_F;
+
+ if (req->tcpopt.tstamp)
+ opt2 |= TSTAMPS_EN_F;
+ if (req->tcpopt.sack)
+ opt2 |= SACK_EN_F;
+ if (wscale)
+ opt2 |= WND_SCALE_EN_F;
+
+ hlen = ntohl(req->hdr_len);
+
+ if (is_t5(lldi->adapter_type))
+ tcph = (struct tcphdr *)((u8 *)(req + 1) +
+ ETH_HDR_LEN_G(hlen) + IP_HDR_LEN_G(hlen));
+ else
+ tcph = (struct tcphdr *)((u8 *)(req + 1) +
+ T6_ETH_HDR_LEN_G(hlen) + T6_IP_HDR_LEN_G(hlen));
+
+ if (tcph->ece && tcph->cwr)
+ opt2 |= CCTRL_ECN_V(1);
+
+ opt2 |= CONG_CNTRL_V(CONG_ALG_NEWRENO);
+
+ opt2 |= T5_ISS_F;
+ rpl5->iss = cpu_to_be32((get_random_u32() & ~7UL) - 1);
+
+ opt2 |= T5_OPT_2_VALID_F;
+
+ rpl5->opt0 = cpu_to_be64(opt0);
+ rpl5->opt2 = cpu_to_be32(opt2);
+ set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->ctrlq_idx);
+ t4_set_arp_err_handler(skb, csk, cxgbit_arp_failure_discard);
+ cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
+}
+
+static void
+cxgbit_pass_accept_req(struct cxgbit_device *cdev, struct sk_buff *skb)
+{
+ struct cxgbit_sock *csk = NULL;
+ struct cxgbit_np *cnp;
+ struct cpl_pass_accept_req *req = cplhdr(skb);
+ unsigned int stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
+ struct tid_info *t = cdev->lldi.tids;
+ unsigned int tid = GET_TID(req);
+ u16 peer_mss = ntohs(req->tcpopt.mss);
+ unsigned short hdrs;
+
+ struct dst_entry *dst;
+ __u8 local_ip[16], peer_ip[16];
+ __be16 local_port, peer_port;
+ int ret;
+ int iptype;
+
+ pr_debug("%s: cdev = %p; stid = %u; tid = %u\n",
+ __func__, cdev, stid, tid);
+
+ cnp = lookup_stid(t, stid);
+ if (!cnp) {
+ pr_err("%s connect request on invalid stid %d\n",
+ __func__, stid);
+ goto rel_skb;
+ }
+
+ if (cnp->com.state != CSK_STATE_LISTEN) {
+ pr_err("%s - listening parent not in CSK_STATE_LISTEN\n",
+ __func__);
+ goto reject;
+ }
+
+ csk = lookup_tid(t, tid);
+ if (csk) {
+ pr_err("%s csk not null tid %u\n",
+ __func__, tid);
+ goto rel_skb;
+ }
+
+ cxgb_get_4tuple(req, cdev->lldi.adapter_type, &iptype, local_ip,
+ peer_ip, &local_port, &peer_port);
+
+ /* Find output route */
+ if (iptype == 4) {
+ pr_debug("%s parent sock %p tid %u laddr %pI4 raddr %pI4 "
+ "lport %d rport %d peer_mss %d\n"
+ , __func__, cnp, tid,
+ local_ip, peer_ip, ntohs(local_port),
+ ntohs(peer_port), peer_mss);
+ dst = cxgb_find_route(&cdev->lldi, cxgbit_get_real_dev,
+ *(__be32 *)local_ip,
+ *(__be32 *)peer_ip,
+ local_port, peer_port,
+ PASS_OPEN_TOS_G(ntohl(req->tos_stid)));
+ } else {
+ pr_debug("%s parent sock %p tid %u laddr %pI6 raddr %pI6 "
+ "lport %d rport %d peer_mss %d\n"
+ , __func__, cnp, tid,
+ local_ip, peer_ip, ntohs(local_port),
+ ntohs(peer_port), peer_mss);
+ dst = cxgb_find_route6(&cdev->lldi, cxgbit_get_real_dev,
+ local_ip, peer_ip,
+ local_port, peer_port,
+ PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
+ ((struct sockaddr_in6 *)
+ &cnp->com.local_addr)->sin6_scope_id);
+ }
+ if (!dst) {
+ pr_err("%s - failed to find dst entry!\n",
+ __func__);
+ goto reject;
+ }
+
+ csk = kzalloc(sizeof(*csk), GFP_ATOMIC);
+ if (!csk) {
+ dst_release(dst);
+ goto rel_skb;
+ }
+
+ ret = cxgbit_offload_init(csk, iptype, peer_ip, ntohs(local_port),
+ dst, cdev);
+ if (ret) {
+ pr_err("%s - failed to allocate l2t entry!\n",
+ __func__);
+ dst_release(dst);
+ kfree(csk);
+ goto reject;
+ }
+
+ kref_init(&csk->kref);
+ init_completion(&csk->com.wr_wait.completion);
+
+ INIT_LIST_HEAD(&csk->accept_node);
+
+ hdrs = (iptype == 4 ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) +
+ sizeof(struct tcphdr) + (req->tcpopt.tstamp ? 12 : 0);
+ if (peer_mss && csk->mtu > (peer_mss + hdrs))
+ csk->mtu = peer_mss + hdrs;
+
+ csk->com.state = CSK_STATE_CONNECTING;
+ csk->com.cdev = cdev;
+ csk->cnp = cnp;
+ csk->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
+ csk->dst = dst;
+ csk->tid = tid;
+ csk->wr_cred = cdev->lldi.wr_cred -
+ DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16);
+ csk->wr_max_cred = csk->wr_cred;
+ csk->wr_una_cred = 0;
+
+ if (iptype == 4) {
+ struct sockaddr_in *sin = (struct sockaddr_in *)
+ &csk->com.local_addr;
+ sin->sin_family = AF_INET;
+ sin->sin_port = local_port;
+ sin->sin_addr.s_addr = *(__be32 *)local_ip;
+
+ sin = (struct sockaddr_in *)&csk->com.remote_addr;
+ sin->sin_family = AF_INET;
+ sin->sin_port = peer_port;
+ sin->sin_addr.s_addr = *(__be32 *)peer_ip;
+ } else {
+ struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
+ &csk->com.local_addr;
+
+ sin6->sin6_family = PF_INET6;
+ sin6->sin6_port = local_port;
+ memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
+ cxgb4_clip_get(cdev->lldi.ports[0],
+ (const u32 *)&sin6->sin6_addr.s6_addr,
+ 1);
+
+ sin6 = (struct sockaddr_in6 *)&csk->com.remote_addr;
+ sin6->sin6_family = PF_INET6;
+ sin6->sin6_port = peer_port;
+ memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16);
+ }
+
+ skb_queue_head_init(&csk->rxq);
+ skb_queue_head_init(&csk->txq);
+ skb_queue_head_init(&csk->ppodq);
+ skb_queue_head_init(&csk->backlogq);
+ skb_queue_head_init(&csk->skbq);
+ cxgbit_sock_reset_wr_list(csk);
+ spin_lock_init(&csk->lock);
+ init_waitqueue_head(&csk->waitq);
+ csk->lock_owner = false;
+
+ if (cxgbit_alloc_csk_skb(csk)) {
+ dst_release(dst);
+ kfree(csk);
+ goto rel_skb;
+ }
+
+ cxgbit_get_cnp(cnp);
+ cxgbit_get_cdev(cdev);
+
+ spin_lock(&cdev->cskq.lock);
+ list_add_tail(&csk->list, &cdev->cskq.list);
+ spin_unlock(&cdev->cskq.lock);
+ cxgb4_insert_tid(t, csk, tid, csk->com.local_addr.ss_family);
+ cxgbit_pass_accept_rpl(csk, req);
+ goto rel_skb;
+
+reject:
+ cxgbit_release_tid(cdev, tid);
+rel_skb:
+ __kfree_skb(skb);
+}
+
+static u32
+cxgbit_tx_flowc_wr_credits(struct cxgbit_sock *csk, u32 *nparamsp,
+ u32 *flowclenp)
+{
+ u32 nparams, flowclen16, flowclen;
+
+ nparams = FLOWC_WR_NPARAMS_MIN;
+
+ if (csk->snd_wscale)
+ nparams++;
+
+#ifdef CONFIG_CHELSIO_T4_DCB
+ nparams++;
+#endif
+ flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
+ flowclen16 = DIV_ROUND_UP(flowclen, 16);
+ flowclen = flowclen16 * 16;
+ /*
+ * Return the number of 16-byte credits used by the flowc request.
+ * Pass back the nparams and actual flowc length if requested.
+ */
+ if (nparamsp)
+ *nparamsp = nparams;
+ if (flowclenp)
+ *flowclenp = flowclen;
+ return flowclen16;
+}
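+
+/*
+ * Illustrative example (not part of the original commit), assuming the
+ * usual firmware ABI layout of an 8-byte fw_flowc_wr header followed by
+ * 8-byte mnemval entries: nparams = 9 gives flowclen = 8 + 9 * 8 = 80
+ * bytes, flowclen16 = DIV_ROUND_UP(80, 16) = 5 credits, and a final
+ * flowclen of 80 (already 16-byte aligned).
+ */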
+
+u32 cxgbit_send_tx_flowc_wr(struct cxgbit_sock *csk)
+{
+ struct cxgbit_device *cdev = csk->com.cdev;
+ struct fw_flowc_wr *flowc;
+ u32 nparams, flowclen16, flowclen;
+ struct sk_buff *skb;
+ u8 index;
+
+#ifdef CONFIG_CHELSIO_T4_DCB
+ u16 vlan = ((struct l2t_entry *)csk->l2t)->vlan;
+#endif
+
+ flowclen16 = cxgbit_tx_flowc_wr_credits(csk, &nparams, &flowclen);
+
+ skb = __skb_dequeue(&csk->skbq);
+ flowc = __skb_put_zero(skb, flowclen);
+
+ flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
+ FW_FLOWC_WR_NPARAMS_V(nparams));
+ flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(flowclen16) |
+ FW_WR_FLOWID_V(csk->tid));
+ flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
+ flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V
+ (csk->com.cdev->lldi.pf));
+ flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
+ flowc->mnemval[1].val = cpu_to_be32(csk->tx_chan);
+ flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
+ flowc->mnemval[2].val = cpu_to_be32(csk->tx_chan);
+ flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
+ flowc->mnemval[3].val = cpu_to_be32(csk->rss_qid);
+ flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
+ flowc->mnemval[4].val = cpu_to_be32(csk->snd_nxt);
+ flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
+ flowc->mnemval[5].val = cpu_to_be32(csk->rcv_nxt);
+ flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
+ flowc->mnemval[6].val = cpu_to_be32(csk->snd_win);
+ flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
+ flowc->mnemval[7].val = cpu_to_be32(csk->emss);
+
+ flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
+ if (test_bit(CDEV_ISO_ENABLE, &cdev->flags))
+ flowc->mnemval[8].val = cpu_to_be32(CXGBIT_MAX_ISO_PAYLOAD);
+ else
+ flowc->mnemval[8].val = cpu_to_be32(16384);
+
+ index = 9;
+
+ if (csk->snd_wscale) {
+ flowc->mnemval[index].mnemonic = FW_FLOWC_MNEM_RCV_SCALE;
+ flowc->mnemval[index].val = cpu_to_be32(csk->snd_wscale);
+ index++;
+ }
+
+#ifdef CONFIG_CHELSIO_T4_DCB
+ flowc->mnemval[index].mnemonic = FW_FLOWC_MNEM_DCBPRIO;
+ if (vlan == VLAN_NONE) {
+ pr_warn("csk %u without VLAN Tag on DCB Link\n", csk->tid);
+ flowc->mnemval[index].val = cpu_to_be32(0);
+ } else
+ flowc->mnemval[index].val = cpu_to_be32(
+ (vlan & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT);
+#endif
+
+ pr_debug("%s: csk %p; tx_chan = %u; rss_qid = %u; snd_seq = %u;"
+ " rcv_seq = %u; snd_win = %u; emss = %u\n",
+ __func__, csk, csk->tx_chan, csk->rss_qid, csk->snd_nxt,
+ csk->rcv_nxt, csk->snd_win, csk->emss);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
+ cxgbit_ofld_send(csk->com.cdev, skb);
+ return flowclen16;
+}
+
+static int
+cxgbit_send_tcb_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+ spin_lock_bh(&csk->lock);
+ if (unlikely(csk->com.state != CSK_STATE_ESTABLISHED)) {
+ spin_unlock_bh(&csk->lock);
+ pr_err("%s: csk 0x%p, tid %u, state %u\n",
+ __func__, csk, csk->tid, csk->com.state);
+ __kfree_skb(skb);
+ return -1;
+ }
+
+ cxgbit_get_csk(csk);
+ cxgbit_init_wr_wait(&csk->com.wr_wait);
+ cxgbit_ofld_send(csk->com.cdev, skb);
+ spin_unlock_bh(&csk->lock);
+
+ return 0;
+}
+
+int cxgbit_setup_conn_digest(struct cxgbit_sock *csk)
+{
+ struct sk_buff *skb;
+ struct cpl_set_tcb_field *req;
+ u8 hcrc = csk->submode & CXGBIT_SUBMODE_HCRC;
+ u8 dcrc = csk->submode & CXGBIT_SUBMODE_DCRC;
+ unsigned int len = roundup(sizeof(*req), 16);
+ int ret;
+
+ skb = alloc_skb(len, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ /* set up ulp submode */
+ req = __skb_put_zero(skb, len);
+
+ INIT_TP_WR(req, csk->tid);
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
+ req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
+ req->word_cookie = htons(0);
+ req->mask = cpu_to_be64(0x3 << 4);
+ req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
+ (dcrc ? ULP_CRC_DATA : 0)) << 4);
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx);
+
+ if (cxgbit_send_tcb_skb(csk, skb))
+ return -1;
+
+ ret = cxgbit_wait_for_reply(csk->com.cdev,
+ &csk->com.wr_wait,
+ csk->tid, 5, __func__);
+ if (ret)
+ return -1;
+
+ return 0;
+}
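+
+/*
+ * Illustrative note (not part of the original commit): this helper and
+ * cxgbit_setup_conn_pgidx() below both poke fields in the connection's
+ * hardware TCB via CPL_SET_TCB_FIELD - here the two ULP-submode CRC
+ * bits at bit 4 (mask 0x3 << 4), below the DDP page-size index at
+ * bit 8 - and each then blocks waiting for the firmware's reply.
+ */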
+
+int cxgbit_setup_conn_pgidx(struct cxgbit_sock *csk, u32 pg_idx)
+{
+ struct sk_buff *skb;
+ struct cpl_set_tcb_field *req;
+ unsigned int len = roundup(sizeof(*req), 16);
+ int ret;
+
+ skb = alloc_skb(len, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ req = __skb_put_zero(skb, len);
+
+ INIT_TP_WR(req, csk->tid);
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
+ req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
+ req->word_cookie = htons(0);
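+	/* TCB bits 9:8 select the DDP page-size index */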
+ req->mask = cpu_to_be64(0x3 << 8);
+ req->val = cpu_to_be64(pg_idx << 8);
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx);
+
+ if (cxgbit_send_tcb_skb(csk, skb))
+ return -1;
+
+ ret = cxgbit_wait_for_reply(csk->com.cdev,
+ &csk->com.wr_wait,
+ csk->tid, 5, __func__);
+ if (ret)
+ return -1;
+
+ return 0;
+}
+
+static void
+cxgbit_pass_open_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
+{
+ struct cpl_pass_open_rpl *rpl = cplhdr(skb);
+ struct tid_info *t = cdev->lldi.tids;
+ unsigned int stid = GET_TID(rpl);
+ struct cxgbit_np *cnp = lookup_stid(t, stid);
+
+ pr_debug("%s: cnp = %p; stid = %u; status = %d\n",
+ __func__, cnp, stid, rpl->status);
+
+ if (!cnp) {
+ pr_info("%s stid %d lookup failure\n", __func__, stid);
+ goto rel_skb;
+ }
+
+ cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
+ cxgbit_put_cnp(cnp);
+rel_skb:
+ __kfree_skb(skb);
+}
+
+static void
+cxgbit_close_listsrv_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
+{
+ struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
+ struct tid_info *t = cdev->lldi.tids;
+ unsigned int stid = GET_TID(rpl);
+ struct cxgbit_np *cnp = lookup_stid(t, stid);
+
+ pr_debug("%s: cnp = %p; stid = %u; status = %d\n",
+ __func__, cnp, stid, rpl->status);
+
+ if (!cnp) {
+ pr_info("%s stid %d lookup failure\n", __func__, stid);
+ goto rel_skb;
+ }
+
+ cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
+ cxgbit_put_cnp(cnp);
+rel_skb:
+ __kfree_skb(skb);
+}
+
+static void
+cxgbit_pass_establish(struct cxgbit_device *cdev, struct sk_buff *skb)
+{
+ struct cpl_pass_establish *req = cplhdr(skb);
+ struct tid_info *t = cdev->lldi.tids;
+ unsigned int tid = GET_TID(req);
+ struct cxgbit_sock *csk;
+ struct cxgbit_np *cnp;
+ u16 tcp_opt = be16_to_cpu(req->tcp_opt);
+ u32 snd_isn = be32_to_cpu(req->snd_isn);
+ u32 rcv_isn = be32_to_cpu(req->rcv_isn);
+
+ csk = lookup_tid(t, tid);
+ if (unlikely(!csk)) {
+ pr_err("can't find connection for tid %u.\n", tid);
+ goto rel_skb;
+ }
+ cnp = csk->cnp;
+
+ pr_debug("%s: csk %p; tid %u; cnp %p\n",
+ __func__, csk, tid, cnp);
+
+ csk->write_seq = snd_isn;
+ csk->snd_una = snd_isn;
+ csk->snd_nxt = snd_isn;
+
+ csk->rcv_nxt = rcv_isn;
+
+ csk->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt);
+ cxgbit_set_emss(csk, tcp_opt);
+ dst_confirm(csk->dst);
+ csk->com.state = CSK_STATE_ESTABLISHED;
+ spin_lock_bh(&cnp->np_accept_lock);
+ list_add_tail(&csk->accept_node, &cnp->np_accept_list);
+ spin_unlock_bh(&cnp->np_accept_lock);
+ complete(&cnp->accept_comp);
+rel_skb:
+ __kfree_skb(skb);
+}
+
+static void cxgbit_queue_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+ cxgbit_skcb_flags(skb) = 0;
+ spin_lock_bh(&csk->rxq.lock);
+ __skb_queue_tail(&csk->rxq, skb);
+ spin_unlock_bh(&csk->rxq.lock);
+ wake_up(&csk->waitq);
+}
+
+static void cxgbit_peer_close(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+ pr_debug("%s: csk %p; tid %u; state %d\n",
+ __func__, csk, csk->tid, csk->com.state);
+
+ switch (csk->com.state) {
+ case CSK_STATE_ESTABLISHED:
+ csk->com.state = CSK_STATE_CLOSING;
+ cxgbit_queue_rx_skb(csk, skb);
+ return;
+ case CSK_STATE_CLOSING:
+ /* simultaneous close */
+ csk->com.state = CSK_STATE_MORIBUND;
+ break;
+ case CSK_STATE_MORIBUND:
+ csk->com.state = CSK_STATE_DEAD;
+ cxgbit_put_csk(csk);
+ break;
+ case CSK_STATE_ABORTING:
+ break;
+ default:
+ pr_info("%s: cpl_peer_close in bad state %d\n",
+ __func__, csk->com.state);
+ }
+
+ __kfree_skb(skb);
+}
+
+static void cxgbit_close_con_rpl(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+ pr_debug("%s: csk %p; tid %u; state %d\n",
+ __func__, csk, csk->tid, csk->com.state);
+
+ switch (csk->com.state) {
+ case CSK_STATE_CLOSING:
+ csk->com.state = CSK_STATE_MORIBUND;
+ break;
+ case CSK_STATE_MORIBUND:
+ csk->com.state = CSK_STATE_DEAD;
+ cxgbit_put_csk(csk);
+ break;
+ case CSK_STATE_ABORTING:
+ case CSK_STATE_DEAD:
+ break;
+ default:
+ pr_info("%s: cpl_close_con_rpl in bad state %d\n",
+ __func__, csk->com.state);
+ }
+
+ __kfree_skb(skb);
+}
+
+static void cxgbit_abort_req_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+ struct cpl_abort_req_rss *hdr = cplhdr(skb);
+ unsigned int tid = GET_TID(hdr);
+ struct sk_buff *rpl_skb;
+ bool release = false;
+ bool wakeup_thread = false;
+ u32 len = roundup(sizeof(struct cpl_abort_rpl), 16);
+
+ pr_debug("%s: csk %p; tid %u; state %d\n",
+ __func__, csk, tid, csk->com.state);
+
+ if (cxgb_is_neg_adv(hdr->status)) {
+		pr_err("%s: got negative advice %d on tid %u\n",
+		       __func__, hdr->status, tid);
+ goto rel_skb;
+ }
+
+ switch (csk->com.state) {
+ case CSK_STATE_CONNECTING:
+ case CSK_STATE_MORIBUND:
+ csk->com.state = CSK_STATE_DEAD;
+ release = true;
+ break;
+ case CSK_STATE_ESTABLISHED:
+ csk->com.state = CSK_STATE_DEAD;
+ wakeup_thread = true;
+ break;
+ case CSK_STATE_CLOSING:
+ csk->com.state = CSK_STATE_DEAD;
+ if (!csk->conn)
+ release = true;
+ break;
+ case CSK_STATE_ABORTING:
+ break;
+ default:
+ pr_info("%s: cpl_abort_req_rss in bad state %d\n",
+ __func__, csk->com.state);
+ csk->com.state = CSK_STATE_DEAD;
+ }
+
+ __skb_queue_purge(&csk->txq);
+
+ if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags))
+ cxgbit_send_tx_flowc_wr(csk);
+
+ rpl_skb = __skb_dequeue(&csk->skbq);
+
+ cxgb_mk_abort_rpl(rpl_skb, len, csk->tid, csk->txq_idx);
+ cxgbit_ofld_send(csk->com.cdev, rpl_skb);
+
+ if (wakeup_thread) {
+ cxgbit_queue_rx_skb(csk, skb);
+ return;
+ }
+
+ if (release)
+ cxgbit_put_csk(csk);
+rel_skb:
+ __kfree_skb(skb);
+}
+
+static void cxgbit_abort_rpl_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+ struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
+
+ pr_debug("%s: csk %p; tid %u; state %d\n",
+ __func__, csk, csk->tid, csk->com.state);
+
+ switch (csk->com.state) {
+ case CSK_STATE_ABORTING:
+ csk->com.state = CSK_STATE_DEAD;
+ if (test_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags))
+ cxgbit_wake_up(&csk->com.wr_wait, __func__,
+ rpl->status);
+ cxgbit_put_csk(csk);
+ break;
+ default:
+ pr_info("%s: cpl_abort_rpl_rss in state %d\n",
+ __func__, csk->com.state);
+ }
+
+ __kfree_skb(skb);
+}
+
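+/*
+ * Sanity-check the credit accounting: the credits still held by the
+ * driver plus the credits consumed by every work request pending on the
+ * wire (stashed in each skb's csum field) must add up to the
+ * connection's maximum.
+ */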
+static bool cxgbit_credit_err(const struct cxgbit_sock *csk)
+{
+ const struct sk_buff *skb = csk->wr_pending_head;
+ u32 credit = 0;
+
+ if (unlikely(csk->wr_cred > csk->wr_max_cred)) {
+ pr_err("csk 0x%p, tid %u, credit %u > %u\n",
+ csk, csk->tid, csk->wr_cred, csk->wr_max_cred);
+ return true;
+ }
+
+ while (skb) {
+ credit += (__force u32)skb->csum;
+ skb = cxgbit_skcb_tx_wr_next(skb);
+ }
+
+ if (unlikely((csk->wr_cred + credit) != csk->wr_max_cred)) {
+ pr_err("csk 0x%p, tid %u, credit %u + %u != %u.\n",
+ csk, csk->tid, csk->wr_cred,
+ credit, csk->wr_max_cred);
+
+ return true;
+ }
+
+ return false;
+}
+
+static void cxgbit_fw4_ack(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+ struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)cplhdr(skb);
+ u32 credits = rpl->credits;
+ u32 snd_una = ntohl(rpl->snd_una);
+
+ csk->wr_cred += credits;
+ if (csk->wr_una_cred > (csk->wr_max_cred - csk->wr_cred))
+ csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred;
+
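+	/* walk the pending-WR queue; each skb's csum field records the
+	 * credits that work request consumed when it was sent
+	 */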
+ while (credits) {
+ struct sk_buff *p = cxgbit_sock_peek_wr(csk);
+ u32 csum;
+
+ if (unlikely(!p)) {
+ pr_err("csk 0x%p,%u, cr %u,%u+%u, empty.\n",
+ csk, csk->tid, credits,
+ csk->wr_cred, csk->wr_una_cred);
+ break;
+ }
+
+ csum = (__force u32)p->csum;
+ if (unlikely(credits < csum)) {
+ pr_warn("csk 0x%p,%u, cr %u,%u+%u, < %u.\n",
+ csk, csk->tid,
+ credits, csk->wr_cred, csk->wr_una_cred,
+ csum);
+ p->csum = (__force __wsum)(csum - credits);
+ break;
+ }
+
+ cxgbit_sock_dequeue_wr(csk);
+ credits -= csum;
+ kfree_skb(p);
+ }
+
+ if (unlikely(cxgbit_credit_err(csk))) {
+ cxgbit_queue_rx_skb(csk, skb);
+ return;
+ }
+
+ if (rpl->seq_vld & CPL_FW4_ACK_FLAGS_SEQVAL) {
+ if (unlikely(before(snd_una, csk->snd_una))) {
+ pr_warn("csk 0x%p,%u, snd_una %u/%u.",
+ csk, csk->tid, snd_una,
+ csk->snd_una);
+ goto rel_skb;
+ }
+
+ if (csk->snd_una != snd_una) {
+ csk->snd_una = snd_una;
+ dst_confirm(csk->dst);
+ }
+ }
+
+ if (skb_queue_len(&csk->txq))
+ cxgbit_push_tx_frames(csk);
+
+rel_skb:
+ __kfree_skb(skb);
+}
+
+static void cxgbit_set_tcb_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
+{
+ struct cxgbit_sock *csk;
+ struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data;
+ unsigned int tid = GET_TID(rpl);
+ struct cxgb4_lld_info *lldi = &cdev->lldi;
+ struct tid_info *t = lldi->tids;
+
+ csk = lookup_tid(t, tid);
+	if (unlikely(!csk)) {
+		pr_err("can't find connection for tid %u.\n", tid);
+		goto rel_skb;
+	}
+
+	cxgbit_wake_up(&csk->com.wr_wait, __func__, rpl->status);
+
+ cxgbit_put_csk(csk);
+rel_skb:
+ __kfree_skb(skb);
+}
+
+static void cxgbit_rx_data(struct cxgbit_device *cdev, struct sk_buff *skb)
+{
+ struct cxgbit_sock *csk;
+ struct cpl_rx_data *cpl = cplhdr(skb);
+ unsigned int tid = GET_TID(cpl);
+ struct cxgb4_lld_info *lldi = &cdev->lldi;
+ struct tid_info *t = lldi->tids;
+
+ csk = lookup_tid(t, tid);
+ if (unlikely(!csk)) {
+ pr_err("can't find conn. for tid %u.\n", tid);
+ goto rel_skb;
+ }
+
+ cxgbit_queue_rx_skb(csk, skb);
+ return;
+rel_skb:
+ __kfree_skb(skb);
+}
+
+static void
+__cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+ spin_lock(&csk->lock);
+ if (csk->lock_owner) {
+ __skb_queue_tail(&csk->backlogq, skb);
+ spin_unlock(&csk->lock);
+ return;
+ }
+
+ cxgbit_skcb_rx_backlog_fn(skb)(csk, skb);
+ spin_unlock(&csk->lock);
+}
+
+static void cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+ cxgbit_get_csk(csk);
+ __cxgbit_process_rx_cpl(csk, skb);
+ cxgbit_put_csk(csk);
+}
+
+static void cxgbit_rx_cpl(struct cxgbit_device *cdev, struct sk_buff *skb)
+{
+ struct cxgbit_sock *csk;
+ struct cpl_tx_data *cpl = cplhdr(skb);
+ struct cxgb4_lld_info *lldi = &cdev->lldi;
+ struct tid_info *t = lldi->tids;
+ unsigned int tid = GET_TID(cpl);
+ u8 opcode = cxgbit_skcb_rx_opcode(skb);
+ bool ref = true;
+
+ switch (opcode) {
+ case CPL_FW4_ACK:
+ cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_fw4_ack;
+ ref = false;
+ break;
+ case CPL_PEER_CLOSE:
+ cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_peer_close;
+ break;
+ case CPL_CLOSE_CON_RPL:
+ cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_close_con_rpl;
+ break;
+ case CPL_ABORT_REQ_RSS:
+ cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_abort_req_rss;
+ break;
+ case CPL_ABORT_RPL_RSS:
+ cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_abort_rpl_rss;
+ break;
+ default:
+ goto rel_skb;
+ }
+
+ csk = lookup_tid(t, tid);
+ if (unlikely(!csk)) {
+ pr_err("can't find conn. for tid %u.\n", tid);
+ goto rel_skb;
+ }
+
+ if (ref)
+ cxgbit_process_rx_cpl(csk, skb);
+ else
+ __cxgbit_process_rx_cpl(csk, skb);
+
+ return;
+rel_skb:
+ __kfree_skb(skb);
+}
+
+cxgbit_cplhandler_func cxgbit_cplhandlers[NUM_CPL_CMDS] = {
+ [CPL_PASS_OPEN_RPL] = cxgbit_pass_open_rpl,
+ [CPL_CLOSE_LISTSRV_RPL] = cxgbit_close_listsrv_rpl,
+ [CPL_PASS_ACCEPT_REQ] = cxgbit_pass_accept_req,
+ [CPL_PASS_ESTABLISH] = cxgbit_pass_establish,
+ [CPL_SET_TCB_RPL] = cxgbit_set_tcb_rpl,
+ [CPL_RX_DATA] = cxgbit_rx_data,
+ [CPL_FW4_ACK] = cxgbit_rx_cpl,
+ [CPL_PEER_CLOSE] = cxgbit_rx_cpl,
+ [CPL_CLOSE_CON_RPL] = cxgbit_rx_cpl,
+ [CPL_ABORT_REQ_RSS] = cxgbit_rx_cpl,
+ [CPL_ABORT_RPL_RSS] = cxgbit_rx_cpl,
+};
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
new file mode 100644
index 000000000000..17fd0d8cc490
--- /dev/null
+++ b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
@@ -0,0 +1,330 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016 Chelsio Communications, Inc.
+ */
+
+#include "cxgbit.h"
+
+static void
+cxgbit_set_one_ppod(struct cxgbi_pagepod *ppod,
+ struct cxgbi_task_tag_info *ttinfo,
+ struct scatterlist **sg_pp, unsigned int *sg_off)
+{
+ struct scatterlist *sg = sg_pp ? *sg_pp : NULL;
+ unsigned int offset = sg_off ? *sg_off : 0;
+ dma_addr_t addr = 0UL;
+ unsigned int len = 0;
+ int i;
+
+ memcpy(ppod, &ttinfo->hdr, sizeof(struct cxgbi_pagepod_hdr));
+
+ if (sg) {
+ addr = sg_dma_address(sg);
+ len = sg_dma_len(sg);
+ }
+
+ for (i = 0; i < PPOD_PAGES_MAX; i++) {
+ if (sg) {
+ ppod->addr[i] = cpu_to_be64(addr + offset);
+ offset += PAGE_SIZE;
+ if (offset == (len + sg->offset)) {
+ offset = 0;
+ sg = sg_next(sg);
+ if (sg) {
+ addr = sg_dma_address(sg);
+ len = sg_dma_len(sg);
+ }
+ }
+ } else {
+ ppod->addr[i] = 0ULL;
+ }
+ }
+
+ /*
+ * the fifth address needs to be repeated in the next ppod, so do
+ * not move sg
+ */
+ if (sg_pp) {
+ *sg_pp = sg;
+ *sg_off = offset;
+ }
+
+ if (offset == len) {
+ offset = 0;
+ if (sg) {
+ sg = sg_next(sg);
+ if (sg)
+ addr = sg_dma_address(sg);
+ }
+ }
+ ppod->addr[i] = sg ? cpu_to_be64(addr + offset) : 0ULL;
+}
+
+static struct sk_buff *
+cxgbit_ppod_init_idata(struct cxgbit_device *cdev, struct cxgbi_ppm *ppm,
+ unsigned int idx, unsigned int npods, unsigned int tid)
+{
+ struct ulp_mem_io *req;
+ struct ulptx_idata *idata;
+ unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit;
+ unsigned int dlen = npods << PPOD_SIZE_SHIFT;
+ unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) +
+ sizeof(struct ulptx_idata) + dlen, 16);
+ struct sk_buff *skb;
+
+ skb = alloc_skb(wr_len, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
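+	/* build a ULP_TX memory-write WR that carries the ppods as
+	 * immediate data; lengths and addresses are in 32-byte units
+	 */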
+ req = __skb_put(skb, wr_len);
+ INIT_ULPTX_WR(req, wr_len, 0, tid);
+ req->wr.wr_hi = htonl(FW_WR_OP_V(FW_ULPTX_WR) |
+ FW_WR_ATOMIC_V(0));
+ req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
+ ULP_MEMIO_ORDER_V(0) |
+ T5_ULP_MEMIO_IMM_V(1));
+ req->dlen = htonl(ULP_MEMIO_DATA_LEN_V(dlen >> 5));
+ req->lock_addr = htonl(ULP_MEMIO_ADDR_V(pm_addr >> 5));
+ req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));
+
+ idata = (struct ulptx_idata *)(req + 1);
+ idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
+ idata->len = htonl(dlen);
+
+ return skb;
+}
+
+static int
+cxgbit_ppod_write_idata(struct cxgbi_ppm *ppm, struct cxgbit_sock *csk,
+ struct cxgbi_task_tag_info *ttinfo, unsigned int idx,
+ unsigned int npods, struct scatterlist **sg_pp,
+ unsigned int *sg_off)
+{
+ struct cxgbit_device *cdev = csk->com.cdev;
+ struct sk_buff *skb;
+ struct ulp_mem_io *req;
+ struct ulptx_idata *idata;
+ struct cxgbi_pagepod *ppod;
+ unsigned int i;
+
+ skb = cxgbit_ppod_init_idata(cdev, ppm, idx, npods, csk->tid);
+ if (!skb)
+ return -ENOMEM;
+
+ req = (struct ulp_mem_io *)skb->data;
+ idata = (struct ulptx_idata *)(req + 1);
+ ppod = (struct cxgbi_pagepod *)(idata + 1);
+
+ for (i = 0; i < npods; i++, ppod++)
+ cxgbit_set_one_ppod(ppod, ttinfo, sg_pp, sg_off);
+
+ __skb_queue_tail(&csk->ppodq, skb);
+
+ return 0;
+}
+
+static int
+cxgbit_ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbit_sock *csk,
+ struct cxgbi_task_tag_info *ttinfo)
+{
+ unsigned int pidx = ttinfo->idx;
+ unsigned int npods = ttinfo->npods;
+ unsigned int i, cnt;
+ struct scatterlist *sg = ttinfo->sgl;
+ unsigned int offset = 0;
+ int ret = 0;
+
+ for (i = 0; i < npods; i += cnt, pidx += cnt) {
+ cnt = npods - i;
+
+ if (cnt > ULPMEM_IDATA_MAX_NPPODS)
+ cnt = ULPMEM_IDATA_MAX_NPPODS;
+
+ ret = cxgbit_ppod_write_idata(ppm, csk, ttinfo, pidx, cnt,
+ &sg, &offset);
+ if (ret < 0)
+ break;
+ }
+
+ return ret;
+}
+
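+/*
+ * DDP needs a page-aligned buffer layout: every entry must start on a
+ * 4-byte boundary, only the first entry may have a non-zero offset, and
+ * every entry but the last must end exactly on a page boundary.
+ */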
+static int cxgbit_ddp_sgl_check(struct scatterlist *sg,
+ unsigned int nents)
+{
+ unsigned int last_sgidx = nents - 1;
+ unsigned int i;
+
+ for (i = 0; i < nents; i++, sg = sg_next(sg)) {
+ unsigned int len = sg->length + sg->offset;
+
+ if ((sg->offset & 0x3) || (i && sg->offset) ||
+ ((i != last_sgidx) && (len != PAGE_SIZE))) {
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int
+cxgbit_ddp_reserve(struct cxgbit_sock *csk, struct cxgbi_task_tag_info *ttinfo,
+ unsigned int xferlen)
+{
+ struct cxgbit_device *cdev = csk->com.cdev;
+ struct cxgbi_ppm *ppm = cdev2ppm(cdev);
+ struct scatterlist *sgl = ttinfo->sgl;
+ unsigned int sgcnt = ttinfo->nents;
+ unsigned int sg_offset = sgl->offset;
+ int ret;
+
+ if ((xferlen < DDP_THRESHOLD) || (!sgcnt)) {
+ pr_debug("ppm 0x%p, pgidx %u, xfer %u, sgcnt %u, NO ddp.\n",
+ ppm, ppm->tformat.pgsz_idx_dflt,
+ xferlen, ttinfo->nents);
+ return -EINVAL;
+ }
+
+ if (cxgbit_ddp_sgl_check(sgl, sgcnt) < 0)
+ return -EINVAL;
+
+ ttinfo->nr_pages = (xferlen + sgl->offset +
+ (1 << PAGE_SHIFT) - 1) >> PAGE_SHIFT;
+
+ /*
+ * the ddp tag will be used for the ttt in the outgoing r2t pdu
+ */
+ ret = cxgbi_ppm_ppods_reserve(ppm, ttinfo->nr_pages, 0, &ttinfo->idx,
+ &ttinfo->tag, 0);
+ if (ret < 0)
+ return ret;
+ ttinfo->npods = ret;
+
+ sgl->offset = 0;
+ ret = dma_map_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE);
+ sgl->offset = sg_offset;
+ if (!ret) {
+		pr_debug("%s: xfer %u, sgl %u dma mapping err.\n",
+			 __func__, xferlen, sgcnt);
+ goto rel_ppods;
+ }
+
+ cxgbi_ppm_make_ppod_hdr(ppm, ttinfo->tag, csk->tid, sgl->offset,
+ xferlen, &ttinfo->hdr);
+
+ ret = cxgbit_ddp_set_map(ppm, csk, ttinfo);
+ if (ret < 0) {
+ __skb_queue_purge(&csk->ppodq);
+ dma_unmap_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE);
+ goto rel_ppods;
+ }
+
+ return 0;
+
+rel_ppods:
+ cxgbi_ppm_ppod_release(ppm, ttinfo->idx);
+ return -EINVAL;
+}
+
+void
+cxgbit_get_r2t_ttt(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
+ struct iscsi_r2t *r2t)
+{
+ struct cxgbit_sock *csk = conn->context;
+ struct cxgbit_device *cdev = csk->com.cdev;
+ struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd);
+ struct cxgbi_task_tag_info *ttinfo = &ccmd->ttinfo;
+ int ret;
+
+ if ((!ccmd->setup_ddp) ||
+ (!test_bit(CSK_DDP_ENABLE, &csk->com.flags)))
+ goto out;
+
+ ccmd->setup_ddp = false;
+
+ ttinfo->sgl = cmd->se_cmd.t_data_sg;
+ ttinfo->nents = cmd->se_cmd.t_data_nents;
+
+ ret = cxgbit_ddp_reserve(csk, ttinfo, cmd->se_cmd.data_length);
+ if (ret < 0) {
+ pr_debug("csk 0x%p, cmd 0x%p, xfer len %u, sgcnt %u no ddp.\n",
+ csk, cmd, cmd->se_cmd.data_length, ttinfo->nents);
+
+ ttinfo->sgl = NULL;
+ ttinfo->nents = 0;
+ } else {
+ ccmd->release = true;
+ }
+out:
+ pr_debug("cdev 0x%p, cmd 0x%p, tag 0x%x\n", cdev, cmd, ttinfo->tag);
+ r2t->targ_xfer_tag = ttinfo->tag;
+}
+
+void cxgbit_unmap_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd)
+{
+ struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd);
+
+ if (ccmd->release) {
+ if (cmd->se_cmd.se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
+ put_page(sg_page(&ccmd->sg));
+ } else {
+ struct cxgbit_sock *csk = conn->context;
+ struct cxgbit_device *cdev = csk->com.cdev;
+ struct cxgbi_ppm *ppm = cdev2ppm(cdev);
+ struct cxgbi_task_tag_info *ttinfo = &ccmd->ttinfo;
+
+ /* Abort the TCP conn if DDP is not complete to
+ * avoid any possibility of DDP after freeing
+ * the cmd.
+ */
+ if (unlikely(cmd->write_data_done !=
+ cmd->se_cmd.data_length))
+ cxgbit_abort_conn(csk);
+
+ if (unlikely(ttinfo->sgl)) {
+ dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl,
+ ttinfo->nents, DMA_FROM_DEVICE);
+ ttinfo->nents = 0;
+ ttinfo->sgl = NULL;
+ }
+ cxgbi_ppm_ppod_release(ppm, ttinfo->idx);
+ }
+ ccmd->release = false;
+ }
+}
+
+int cxgbit_ddp_init(struct cxgbit_device *cdev)
+{
+ struct cxgb4_lld_info *lldi = &cdev->lldi;
+ struct net_device *ndev = cdev->lldi.ports[0];
+ struct cxgbi_tag_format tformat;
+ int ret, i;
+
+ if (!lldi->vr->iscsi.size) {
+ pr_warn("%s, iscsi NOT enabled, check config!\n", ndev->name);
+ return -EACCES;
+ }
+
+ memset(&tformat, 0, sizeof(struct cxgbi_tag_format));
+ for (i = 0; i < 4; i++)
+ tformat.pgsz_order[i] = (lldi->iscsi_pgsz_order >> (i << 3))
+ & 0xF;
+ cxgbi_tagmask_check(lldi->iscsi_tagmask, &tformat);
+
+ ret = cxgbi_ppm_init(lldi->iscsi_ppm, cdev->lldi.ports[0],
+ cdev->lldi.pdev, &cdev->lldi, &tformat,
+ lldi->vr->iscsi.size, lldi->iscsi_llimit,
+ lldi->vr->iscsi.start, 2,
+ lldi->vr->ppod_edram.start,
+ lldi->vr->ppod_edram.size);
+ if (ret >= 0) {
+ struct cxgbi_ppm *ppm = (struct cxgbi_ppm *)(*lldi->iscsi_ppm);
+
+ if ((ppm->tformat.pgsz_idx_dflt < DDP_PGIDX_MAX) &&
+ (ppm->ppmax >= 1024))
+ set_bit(CDEV_DDP_ENABLE, &cdev->flags);
+ ret = 0;
+ }
+
+ return ret;
+}
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_lro.h b/drivers/target/iscsi/cxgbit/cxgbit_lro.h
new file mode 100644
index 000000000000..dcaed3a1d23f
--- /dev/null
+++ b/drivers/target/iscsi/cxgbit/cxgbit_lro.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2016 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ */
+
+#ifndef __CXGBIT_LRO_H__
+#define __CXGBIT_LRO_H__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+
+#define LRO_FLUSH_LEN_MAX 65535
+
+struct cxgbit_lro_cb {
+ struct cxgbit_sock *csk;
+ u32 pdu_totallen;
+ u32 offset;
+ u8 pdu_idx;
+ bool complete;
+};
+
+enum cxgbit_pducb_flags {
+ PDUCBF_RX_HDR = (1 << 0), /* received pdu header */
+ PDUCBF_RX_DATA = (1 << 1), /* received pdu payload */
+ PDUCBF_RX_STATUS = (1 << 2), /* received ddp status */
+ PDUCBF_RX_DATA_DDPD = (1 << 3), /* pdu payload ddp'd */
+ PDUCBF_RX_DDP_CMP = (1 << 4), /* ddp completion */
+ PDUCBF_RX_HCRC_ERR = (1 << 5), /* header digest error */
+ PDUCBF_RX_DCRC_ERR = (1 << 6), /* data digest error */
+};
+
+struct cxgbit_lro_pdu_cb {
+ u8 flags;
+ u8 frags;
+ u8 hfrag_idx;
+ u8 nr_dfrags;
+ u8 dfrag_idx;
+ bool complete;
+ u32 seq;
+ u32 pdulen;
+ u32 hlen;
+ u32 dlen;
+ u32 doffset;
+ u32 ddigest;
+ void *hdr;
+};
+
+#define LRO_SKB_MAX_HEADROOM \
+ (sizeof(struct cxgbit_lro_cb) + \
+ (MAX_SKB_FRAGS * sizeof(struct cxgbit_lro_pdu_cb)))
+
+#define LRO_SKB_MIN_HEADROOM \
+ (sizeof(struct cxgbit_lro_cb) + \
+ sizeof(struct cxgbit_lro_pdu_cb))
+
+#define cxgbit_skb_lro_cb(skb) ((struct cxgbit_lro_cb *)skb->data)
+#define cxgbit_skb_lro_pdu_cb(skb, i) \
+ ((struct cxgbit_lro_pdu_cb *)(skb->data + sizeof(struct cxgbit_lro_cb) \
+ + (i * sizeof(struct cxgbit_lro_pdu_cb))))
+
+#define CPL_RX_ISCSI_DDP_STATUS_DDP_SHIFT 16 /* ddp'able */
+#define CPL_RX_ISCSI_DDP_STATUS_PAD_SHIFT 19 /* pad error */
+#define CPL_RX_ISCSI_DDP_STATUS_HCRC_SHIFT 20 /* hcrc error */
+#define CPL_RX_ISCSI_DDP_STATUS_DCRC_SHIFT 21 /* dcrc error */
+
+#endif /* __CXGBIT_LRO_H__ */
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_main.c b/drivers/target/iscsi/cxgbit/cxgbit_main.c
new file mode 100644
index 000000000000..2c1950df3b3e
--- /dev/null
+++ b/drivers/target/iscsi/cxgbit/cxgbit_main.c
@@ -0,0 +1,746 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016 Chelsio Communications, Inc.
+ */
+
+#define DRV_NAME "cxgbit"
+#define DRV_VERSION "1.0.0-ko"
+#define pr_fmt(fmt) DRV_NAME ": " fmt
+
+#include "cxgbit.h"
+
+#ifdef CONFIG_CHELSIO_T4_DCB
+#include <net/dcbevent.h>
+#include "cxgb4_dcb.h"
+#endif
+
+LIST_HEAD(cdev_list_head);
+/* cdev list lock */
+DEFINE_MUTEX(cdev_list_lock);
+
+void _cxgbit_free_cdev(struct kref *kref)
+{
+ struct cxgbit_device *cdev;
+
+ cdev = container_of(kref, struct cxgbit_device, kref);
+
+ cxgbi_ppm_release(cdev2ppm(cdev));
+ kfree(cdev);
+}
+
+static void cxgbit_set_mdsl(struct cxgbit_device *cdev)
+{
+ struct cxgb4_lld_info *lldi = &cdev->lldi;
+ u32 mdsl;
+
+#define CXGBIT_T5_MAX_PDU_LEN 16224
+#define CXGBIT_PDU_NONPAYLOAD_LEN 312 /* 48(BHS) + 256(AHS) + 8(Digest) */
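+	/* the MaxRecvDataSegmentLength budget is the adapter's iSCSI I/O
+	 * size minus the non-payload bytes of a PDU
+	 */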
+ if (is_t5(lldi->adapter_type)) {
+ mdsl = min_t(u32, lldi->iscsi_iolen - CXGBIT_PDU_NONPAYLOAD_LEN,
+ CXGBIT_T5_MAX_PDU_LEN - CXGBIT_PDU_NONPAYLOAD_LEN);
+ } else {
+ mdsl = lldi->iscsi_iolen - CXGBIT_PDU_NONPAYLOAD_LEN;
+ mdsl = min(mdsl, 16384U);
+ }
+
+ mdsl = round_down(mdsl, 4);
+ mdsl = min_t(u32, mdsl, 4 * PAGE_SIZE);
+ mdsl = min_t(u32, mdsl, (MAX_SKB_FRAGS - 1) * PAGE_SIZE);
+
+ cdev->mdsl = mdsl;
+}
+
+static void *cxgbit_uld_add(const struct cxgb4_lld_info *lldi)
+{
+ struct cxgbit_device *cdev;
+
+ if (is_t4(lldi->adapter_type))
+ return ERR_PTR(-ENODEV);
+
+ cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
+ if (!cdev)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&cdev->kref);
+ spin_lock_init(&cdev->np_lock);
+
+ cdev->lldi = *lldi;
+
+ cxgbit_set_mdsl(cdev);
+
+ if (cxgbit_ddp_init(cdev) < 0) {
+ kfree(cdev);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!test_bit(CDEV_DDP_ENABLE, &cdev->flags))
+ pr_info("cdev %s ddp init failed\n",
+ pci_name(lldi->pdev));
+
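+	/* ISO needs firmware 1.13.43.0 (0x10d2b00) or newer */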
+ if (lldi->fw_vers >= 0x10d2b00)
+ set_bit(CDEV_ISO_ENABLE, &cdev->flags);
+
+ spin_lock_init(&cdev->cskq.lock);
+ INIT_LIST_HEAD(&cdev->cskq.list);
+
+ mutex_lock(&cdev_list_lock);
+ list_add_tail(&cdev->list, &cdev_list_head);
+ mutex_unlock(&cdev_list_lock);
+
+ pr_info("cdev %s added for iSCSI target transport\n",
+ pci_name(lldi->pdev));
+
+ return cdev;
+}
+
+static void cxgbit_close_conn(struct cxgbit_device *cdev)
+{
+ struct cxgbit_sock *csk;
+ struct sk_buff *skb;
+ bool wakeup_thread = false;
+
+ spin_lock_bh(&cdev->cskq.lock);
+ list_for_each_entry(csk, &cdev->cskq.list, list) {
+ skb = alloc_skb(0, GFP_ATOMIC);
+ if (!skb)
+ continue;
+
+ spin_lock_bh(&csk->rxq.lock);
+ __skb_queue_tail(&csk->rxq, skb);
+ if (skb_queue_len(&csk->rxq) == 1)
+ wakeup_thread = true;
+ spin_unlock_bh(&csk->rxq.lock);
+
+ if (wakeup_thread) {
+ wake_up(&csk->waitq);
+ wakeup_thread = false;
+ }
+ }
+ spin_unlock_bh(&cdev->cskq.lock);
+}
+
+static void cxgbit_detach_cdev(struct cxgbit_device *cdev)
+{
+ bool free_cdev = false;
+
+ spin_lock_bh(&cdev->cskq.lock);
+ if (list_empty(&cdev->cskq.list))
+ free_cdev = true;
+ spin_unlock_bh(&cdev->cskq.lock);
+
+ if (free_cdev) {
+ mutex_lock(&cdev_list_lock);
+ list_del(&cdev->list);
+ mutex_unlock(&cdev_list_lock);
+
+ cxgbit_put_cdev(cdev);
+ } else {
+ cxgbit_close_conn(cdev);
+ }
+}
+
+static int cxgbit_uld_state_change(void *handle, enum cxgb4_state state)
+{
+ struct cxgbit_device *cdev = handle;
+
+ switch (state) {
+ case CXGB4_STATE_UP:
+ set_bit(CDEV_STATE_UP, &cdev->flags);
+ pr_info("cdev %s state UP.\n", pci_name(cdev->lldi.pdev));
+ break;
+ case CXGB4_STATE_START_RECOVERY:
+ clear_bit(CDEV_STATE_UP, &cdev->flags);
+ cxgbit_close_conn(cdev);
+ pr_info("cdev %s state RECOVERY.\n", pci_name(cdev->lldi.pdev));
+ break;
+ case CXGB4_STATE_DOWN:
+ pr_info("cdev %s state DOWN.\n", pci_name(cdev->lldi.pdev));
+ break;
+ case CXGB4_STATE_DETACH:
+ clear_bit(CDEV_STATE_UP, &cdev->flags);
+ pr_info("cdev %s state DETACH.\n", pci_name(cdev->lldi.pdev));
+ cxgbit_detach_cdev(cdev);
+ break;
+ default:
+ pr_info("cdev %s unknown state %d.\n",
+ pci_name(cdev->lldi.pdev), state);
+ break;
+ }
+ return 0;
+}
+
+static void
+cxgbit_process_ddpvld(struct cxgbit_sock *csk, struct cxgbit_lro_pdu_cb *pdu_cb,
+ u32 ddpvld)
+{
+ if (ddpvld & (1 << CPL_RX_ISCSI_DDP_STATUS_HCRC_SHIFT)) {
+ pr_info("tid 0x%x, status 0x%x, hcrc bad.\n", csk->tid, ddpvld);
+ pdu_cb->flags |= PDUCBF_RX_HCRC_ERR;
+ }
+
+ if (ddpvld & (1 << CPL_RX_ISCSI_DDP_STATUS_DCRC_SHIFT)) {
+ pr_info("tid 0x%x, status 0x%x, dcrc bad.\n", csk->tid, ddpvld);
+ pdu_cb->flags |= PDUCBF_RX_DCRC_ERR;
+ }
+
+ if (ddpvld & (1 << CPL_RX_ISCSI_DDP_STATUS_PAD_SHIFT))
+ pr_info("tid 0x%x, status 0x%x, pad bad.\n", csk->tid, ddpvld);
+
+ if ((ddpvld & (1 << CPL_RX_ISCSI_DDP_STATUS_DDP_SHIFT)) &&
+ (!(pdu_cb->flags & PDUCBF_RX_DATA))) {
+ pdu_cb->flags |= PDUCBF_RX_DATA_DDPD;
+ }
+}
+
+static void
+cxgbit_lro_add_packet_rsp(struct sk_buff *skb, u8 op, const __be64 *rsp)
+{
+ struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
+ struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb,
+ lro_cb->pdu_idx);
+ struct cpl_rx_iscsi_ddp *cpl = (struct cpl_rx_iscsi_ddp *)(rsp + 1);
+
+ cxgbit_process_ddpvld(lro_cb->csk, pdu_cb, be32_to_cpu(cpl->ddpvld));
+
+ pdu_cb->flags |= PDUCBF_RX_STATUS;
+ pdu_cb->ddigest = ntohl(cpl->ulp_crc);
+ pdu_cb->pdulen = ntohs(cpl->len);
+
+ if (pdu_cb->flags & PDUCBF_RX_HDR)
+ pdu_cb->complete = true;
+
+ lro_cb->pdu_totallen += pdu_cb->pdulen;
+ lro_cb->complete = true;
+ lro_cb->pdu_idx++;
+}
+
+static void
+cxgbit_copy_frags(struct sk_buff *skb, const struct pkt_gl *gl,
+ unsigned int offset)
+{
+ u8 skb_frag_idx = skb_shinfo(skb)->nr_frags;
+ u8 i;
+
+ /* usually there's just one frag */
+ __skb_fill_page_desc(skb, skb_frag_idx, gl->frags[0].page,
+ gl->frags[0].offset + offset,
+ gl->frags[0].size - offset);
+ for (i = 1; i < gl->nfrags; i++)
+ __skb_fill_page_desc(skb, skb_frag_idx + i,
+ gl->frags[i].page,
+ gl->frags[i].offset,
+ gl->frags[i].size);
+
+ skb_shinfo(skb)->nr_frags += gl->nfrags;
+
+ /* get a reference to the last page, we don't own it */
+ get_page(gl->frags[gl->nfrags - 1].page);
+}
+
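+/*
+ * Fold one ingress CPL into the connection's LRO skb: CPL_ISCSI_HDR
+ * carries a PDU header, CPL_ISCSI_DATA a payload slice, and
+ * CPL_RX_ISCSI_CMP a header plus completion status for a PDU whose
+ * payload was DDP'ed.
+ */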
+static void
+cxgbit_lro_add_packet_gl(struct sk_buff *skb, u8 op, const struct pkt_gl *gl)
+{
+ struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
+ struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb,
+ lro_cb->pdu_idx);
+ u32 len, offset;
+
+ if (op == CPL_ISCSI_HDR) {
+ struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)gl->va;
+
+ offset = sizeof(struct cpl_iscsi_hdr);
+ pdu_cb->flags |= PDUCBF_RX_HDR;
+ pdu_cb->seq = ntohl(cpl->seq);
+ len = ntohs(cpl->len);
+ pdu_cb->hdr = gl->va + offset;
+ pdu_cb->hlen = len;
+ pdu_cb->hfrag_idx = skb_shinfo(skb)->nr_frags;
+
+ if (unlikely(gl->nfrags > 1))
+ cxgbit_skcb_flags(skb) = 0;
+
+ lro_cb->complete = false;
+ } else if (op == CPL_ISCSI_DATA) {
+ struct cpl_iscsi_data *cpl = (struct cpl_iscsi_data *)gl->va;
+
+ offset = sizeof(struct cpl_iscsi_data);
+ pdu_cb->flags |= PDUCBF_RX_DATA;
+ len = ntohs(cpl->len);
+ pdu_cb->dlen = len;
+ pdu_cb->doffset = lro_cb->offset;
+ pdu_cb->nr_dfrags = gl->nfrags;
+ pdu_cb->dfrag_idx = skb_shinfo(skb)->nr_frags;
+ lro_cb->complete = false;
+ } else {
+ struct cpl_rx_iscsi_cmp *cpl;
+
+ cpl = (struct cpl_rx_iscsi_cmp *)gl->va;
+ offset = sizeof(struct cpl_rx_iscsi_cmp);
+ pdu_cb->flags |= (PDUCBF_RX_HDR | PDUCBF_RX_STATUS);
+ len = be16_to_cpu(cpl->len);
+ pdu_cb->hdr = gl->va + offset;
+ pdu_cb->hlen = len;
+ pdu_cb->hfrag_idx = skb_shinfo(skb)->nr_frags;
+ pdu_cb->ddigest = be32_to_cpu(cpl->ulp_crc);
+ pdu_cb->pdulen = ntohs(cpl->len);
+
+ if (unlikely(gl->nfrags > 1))
+ cxgbit_skcb_flags(skb) = 0;
+
+ cxgbit_process_ddpvld(lro_cb->csk, pdu_cb,
+ be32_to_cpu(cpl->ddpvld));
+
+ if (pdu_cb->flags & PDUCBF_RX_DATA_DDPD) {
+ pdu_cb->flags |= PDUCBF_RX_DDP_CMP;
+ pdu_cb->complete = true;
+ } else if (pdu_cb->flags & PDUCBF_RX_DATA) {
+ pdu_cb->complete = true;
+ }
+
+ lro_cb->pdu_totallen += pdu_cb->hlen + pdu_cb->dlen;
+ lro_cb->complete = true;
+ lro_cb->pdu_idx++;
+ }
+
+ cxgbit_copy_frags(skb, gl, offset);
+
+ pdu_cb->frags += gl->nfrags;
+ lro_cb->offset += len;
+ skb->len += len;
+ skb->data_len += len;
+ skb->truesize += len;
+}
+
+static struct sk_buff *
+cxgbit_lro_init_skb(struct cxgbit_sock *csk, u8 op, const struct pkt_gl *gl,
+ const __be64 *rsp, struct napi_struct *napi)
+{
+ struct sk_buff *skb;
+ struct cxgbit_lro_cb *lro_cb;
+
+ skb = napi_alloc_skb(napi, LRO_SKB_MAX_HEADROOM);
+
+ if (unlikely(!skb))
+ return NULL;
+
+ memset(skb->data, 0, LRO_SKB_MAX_HEADROOM);
+
+ cxgbit_skcb_flags(skb) |= SKCBF_RX_LRO;
+
+ lro_cb = cxgbit_skb_lro_cb(skb);
+
+ cxgbit_get_csk(csk);
+
+ lro_cb->csk = csk;
+
+ return skb;
+}
+
+static void cxgbit_queue_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+ bool wakeup_thread = false;
+
+ spin_lock(&csk->rxq.lock);
+ __skb_queue_tail(&csk->rxq, skb);
+ if (skb_queue_len(&csk->rxq) == 1)
+ wakeup_thread = true;
+ spin_unlock(&csk->rxq.lock);
+
+ if (wakeup_thread)
+ wake_up(&csk->waitq);
+}
+
+static void cxgbit_lro_flush(struct t4_lro_mgr *lro_mgr, struct sk_buff *skb)
+{
+ struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
+ struct cxgbit_sock *csk = lro_cb->csk;
+
+ csk->lro_skb = NULL;
+
+ __skb_unlink(skb, &lro_mgr->lroq);
+ cxgbit_queue_lro_skb(csk, skb);
+
+ cxgbit_put_csk(csk);
+
+ lro_mgr->lro_pkts++;
+ lro_mgr->lro_session_cnt--;
+}
+
+static void cxgbit_uld_lro_flush(struct t4_lro_mgr *lro_mgr)
+{
+ struct sk_buff *skb;
+
+ while ((skb = skb_peek(&lro_mgr->lroq)))
+ cxgbit_lro_flush(lro_mgr, skb);
+}
+
+static int
+cxgbit_lro_receive(struct cxgbit_sock *csk, u8 op, const __be64 *rsp,
+ const struct pkt_gl *gl, struct t4_lro_mgr *lro_mgr,
+ struct napi_struct *napi)
+{
+ struct sk_buff *skb;
+ struct cxgbit_lro_cb *lro_cb;
+
+ if (!csk) {
+ pr_err("%s: csk NULL, op 0x%x.\n", __func__, op);
+ goto out;
+ }
+
+ if (csk->lro_skb)
+ goto add_packet;
+
+start_lro:
+ if (lro_mgr->lro_session_cnt >= MAX_LRO_SESSIONS) {
+ cxgbit_uld_lro_flush(lro_mgr);
+ goto start_lro;
+ }
+
+ skb = cxgbit_lro_init_skb(csk, op, gl, rsp, napi);
+ if (unlikely(!skb))
+ goto out;
+
+ csk->lro_skb = skb;
+
+ __skb_queue_tail(&lro_mgr->lroq, skb);
+ lro_mgr->lro_session_cnt++;
+
+add_packet:
+ skb = csk->lro_skb;
+ lro_cb = cxgbit_skb_lro_cb(skb);
+
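+	/* flush if adding this packet would overflow the frag array, the
+	 * LRO length budget, or the per-skb PDU control-block slots
+	 */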
+ if ((gl && (((skb_shinfo(skb)->nr_frags + gl->nfrags) >
+ MAX_SKB_FRAGS) || (lro_cb->pdu_totallen >= LRO_FLUSH_LEN_MAX))) ||
+ (lro_cb->pdu_idx >= MAX_SKB_FRAGS)) {
+ cxgbit_lro_flush(lro_mgr, skb);
+ goto start_lro;
+ }
+
+ if (gl)
+ cxgbit_lro_add_packet_gl(skb, op, gl);
+ else
+ cxgbit_lro_add_packet_rsp(skb, op, rsp);
+
+ lro_mgr->lro_merged++;
+
+ return 0;
+
+out:
+ return -1;
+}
+
+static int
+cxgbit_uld_lro_rx_handler(void *hndl, const __be64 *rsp,
+ const struct pkt_gl *gl, struct t4_lro_mgr *lro_mgr,
+ struct napi_struct *napi)
+{
+ struct cxgbit_device *cdev = hndl;
+ struct cxgb4_lld_info *lldi = &cdev->lldi;
+ struct cpl_tx_data *rpl = NULL;
+ struct cxgbit_sock *csk = NULL;
+ unsigned int tid = 0;
+ struct sk_buff *skb;
+ unsigned int op = *(u8 *)rsp;
+ bool lro_flush = true;
+
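+	/* iSCSI PDU and FW ack CPLs may be merged into the per-connection
+	 * LRO skb; any other connection CPL flushes a pending LRO skb
+	 * first so the rx thread sees events in order
+	 */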
+ switch (op) {
+ case CPL_ISCSI_HDR:
+ case CPL_ISCSI_DATA:
+ case CPL_RX_ISCSI_CMP:
+ case CPL_RX_ISCSI_DDP:
+ case CPL_FW4_ACK:
+ lro_flush = false;
+ fallthrough;
+ case CPL_ABORT_RPL_RSS:
+ case CPL_PASS_ESTABLISH:
+ case CPL_PEER_CLOSE:
+ case CPL_CLOSE_CON_RPL:
+ case CPL_ABORT_REQ_RSS:
+ case CPL_SET_TCB_RPL:
+ case CPL_RX_DATA:
+ rpl = gl ? (struct cpl_tx_data *)gl->va :
+ (struct cpl_tx_data *)(rsp + 1);
+ tid = GET_TID(rpl);
+ csk = lookup_tid(lldi->tids, tid);
+ break;
+ default:
+ break;
+ }
+
+ if (csk && csk->lro_skb && lro_flush)
+ cxgbit_lro_flush(lro_mgr, csk->lro_skb);
+
+ if (!gl) {
+ unsigned int len;
+
+ if (op == CPL_RX_ISCSI_DDP) {
+ if (!cxgbit_lro_receive(csk, op, rsp, NULL, lro_mgr,
+ napi))
+ return 0;
+ }
+
+ len = 64 - sizeof(struct rsp_ctrl) - 8;
+ skb = napi_alloc_skb(napi, len);
+ if (!skb)
+ goto nomem;
+ __skb_put(skb, len);
+ skb_copy_to_linear_data(skb, &rsp[1], len);
+ } else {
+ if (unlikely(op != *(u8 *)gl->va)) {
+ pr_info("? FL 0x%p,RSS%#llx,FL %#llx,len %u.\n",
+ gl->va, be64_to_cpu(*rsp),
+ get_unaligned_be64(gl->va),
+ gl->tot_len);
+ return 0;
+ }
+
+ if ((op == CPL_ISCSI_HDR) || (op == CPL_ISCSI_DATA) ||
+ (op == CPL_RX_ISCSI_CMP)) {
+ if (!cxgbit_lro_receive(csk, op, rsp, gl, lro_mgr,
+ napi))
+ return 0;
+ }
+
+#define RX_PULL_LEN 128
+ skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
+ if (unlikely(!skb))
+ goto nomem;
+ }
+
+ rpl = (struct cpl_tx_data *)skb->data;
+ op = rpl->ot.opcode;
+ cxgbit_skcb_rx_opcode(skb) = op;
+
+ pr_debug("cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n",
+ cdev, op, rpl->ot.opcode_tid,
+ ntohl(rpl->ot.opcode_tid), skb);
+
+ if (op < NUM_CPL_CMDS && cxgbit_cplhandlers[op]) {
+ cxgbit_cplhandlers[op](cdev, skb);
+ } else {
+ pr_err("No handler for opcode 0x%x.\n", op);
+ __kfree_skb(skb);
+ }
+ return 0;
+nomem:
+ pr_err("%s OOM bailing out.\n", __func__);
+ return 1;
+}
+
+#ifdef CONFIG_CHELSIO_T4_DCB
+struct cxgbit_dcb_work {
+ struct dcb_app_type dcb_app;
+ struct work_struct work;
+};
+
+static void
+cxgbit_update_dcb_priority(struct cxgbit_device *cdev, u8 port_id,
+ u8 dcb_priority, u16 port_num)
+{
+ struct cxgbit_sock *csk;
+ struct sk_buff *skb;
+ u16 local_port;
+ bool wakeup_thread = false;
+
+ spin_lock_bh(&cdev->cskq.lock);
+ list_for_each_entry(csk, &cdev->cskq.list, list) {
+ if (csk->port_id != port_id)
+ continue;
+
+ if (csk->com.local_addr.ss_family == AF_INET6) {
+ struct sockaddr_in6 *sock_in6;
+
+ sock_in6 = (struct sockaddr_in6 *)&csk->com.local_addr;
+ local_port = ntohs(sock_in6->sin6_port);
+ } else {
+ struct sockaddr_in *sock_in;
+
+ sock_in = (struct sockaddr_in *)&csk->com.local_addr;
+ local_port = ntohs(sock_in->sin_port);
+ }
+
+ if (local_port != port_num)
+ continue;
+
+ if (csk->dcb_priority == dcb_priority)
+ continue;
+
+ skb = alloc_skb(0, GFP_ATOMIC);
+ if (!skb)
+ continue;
+
+ spin_lock(&csk->rxq.lock);
+ __skb_queue_tail(&csk->rxq, skb);
+ if (skb_queue_len(&csk->rxq) == 1)
+ wakeup_thread = true;
+ spin_unlock(&csk->rxq.lock);
+
+ if (wakeup_thread) {
+ wake_up(&csk->waitq);
+ wakeup_thread = false;
+ }
+ }
+ spin_unlock_bh(&cdev->cskq.lock);
+}
+
+static void cxgbit_dcb_workfn(struct work_struct *work)
+{
+ struct cxgbit_dcb_work *dcb_work;
+ struct net_device *ndev;
+ struct cxgbit_device *cdev = NULL;
+ struct dcb_app_type *iscsi_app;
+ u8 priority, port_id = 0xff;
+
+ dcb_work = container_of(work, struct cxgbit_dcb_work, work);
+ iscsi_app = &dcb_work->dcb_app;
+
+ if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_IEEE) {
+ if ((iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_STREAM) &&
+ (iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_ANY))
+ goto out;
+
+ priority = iscsi_app->app.priority;
+
+ } else if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_CEE) {
+ if (iscsi_app->app.selector != DCB_APP_IDTYPE_PORTNUM)
+ goto out;
+
+ if (!iscsi_app->app.priority)
+ goto out;
+
+ priority = ffs(iscsi_app->app.priority) - 1;
+ } else {
+ goto out;
+ }
+
+ pr_debug("priority for ifid %d is %u\n",
+ iscsi_app->ifindex, priority);
+
+ ndev = dev_get_by_index(&init_net, iscsi_app->ifindex);
+
+ if (!ndev)
+ goto out;
+
+ mutex_lock(&cdev_list_lock);
+ cdev = cxgbit_find_device(ndev, &port_id);
+
+ dev_put(ndev);
+
+ if (!cdev) {
+ mutex_unlock(&cdev_list_lock);
+ goto out;
+ }
+
+ cxgbit_update_dcb_priority(cdev, port_id, priority,
+ iscsi_app->app.protocol);
+ mutex_unlock(&cdev_list_lock);
+out:
+ kfree(dcb_work);
+}
+
+static int
+cxgbit_dcbevent_notify(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ struct cxgbit_dcb_work *dcb_work;
+ struct dcb_app_type *dcb_app = data;
+
+ dcb_work = kzalloc(sizeof(*dcb_work), GFP_ATOMIC);
+ if (!dcb_work)
+ return NOTIFY_DONE;
+
+ dcb_work->dcb_app = *dcb_app;
+ INIT_WORK(&dcb_work->work, cxgbit_dcb_workfn);
+ schedule_work(&dcb_work->work);
+ return NOTIFY_OK;
+}
+#endif
+
+static enum target_prot_op cxgbit_get_sup_prot_ops(struct iscsit_conn *conn)
+{
+ return TARGET_PROT_NORMAL;
+}
+
+static struct iscsit_transport cxgbit_transport = {
+ .name = DRV_NAME,
+ .transport_type = ISCSI_CXGBIT,
+ .rdma_shutdown = false,
+ .priv_size = sizeof(struct cxgbit_cmd),
+ .owner = THIS_MODULE,
+ .iscsit_setup_np = cxgbit_setup_np,
+ .iscsit_accept_np = cxgbit_accept_np,
+ .iscsit_free_np = cxgbit_free_np,
+ .iscsit_free_conn = cxgbit_free_conn,
+ .iscsit_get_login_rx = cxgbit_get_login_rx,
+ .iscsit_put_login_tx = cxgbit_put_login_tx,
+ .iscsit_immediate_queue = iscsit_immediate_queue,
+ .iscsit_response_queue = iscsit_response_queue,
+ .iscsit_get_dataout = iscsit_build_r2ts_for_cmd,
+ .iscsit_queue_data_in = iscsit_queue_rsp,
+ .iscsit_queue_status = iscsit_queue_rsp,
+ .iscsit_xmit_pdu = cxgbit_xmit_pdu,
+ .iscsit_get_r2t_ttt = cxgbit_get_r2t_ttt,
+ .iscsit_get_rx_pdu = cxgbit_get_rx_pdu,
+ .iscsit_validate_params = cxgbit_validate_params,
+ .iscsit_unmap_cmd = cxgbit_unmap_cmd,
+ .iscsit_aborted_task = iscsit_aborted_task,
+ .iscsit_get_sup_prot_ops = cxgbit_get_sup_prot_ops,
+};
+
+static struct cxgb4_uld_info cxgbit_uld_info = {
+ .name = DRV_NAME,
+ .nrxq = MAX_ULD_QSETS,
+ .ntxq = MAX_ULD_QSETS,
+ .rxq_size = 1024,
+ .lro = true,
+ .add = cxgbit_uld_add,
+ .state_change = cxgbit_uld_state_change,
+ .lro_rx_handler = cxgbit_uld_lro_rx_handler,
+ .lro_flush = cxgbit_uld_lro_flush,
+};
+
+#ifdef CONFIG_CHELSIO_T4_DCB
+static struct notifier_block cxgbit_dcbevent_nb = {
+ .notifier_call = cxgbit_dcbevent_notify,
+};
+#endif
+
+static int __init cxgbit_init(void)
+{
+ cxgb4_register_uld(CXGB4_ULD_ISCSIT, &cxgbit_uld_info);
+ iscsit_register_transport(&cxgbit_transport);
+
+#ifdef CONFIG_CHELSIO_T4_DCB
+ pr_info("%s dcb enabled.\n", DRV_NAME);
+ register_dcbevent_notifier(&cxgbit_dcbevent_nb);
+#endif
+ BUILD_BUG_ON(sizeof_field(struct sk_buff, cb) <
+ sizeof(union cxgbit_skb_cb));
+ return 0;
+}
+
+static void __exit cxgbit_exit(void)
+{
+ struct cxgbit_device *cdev, *tmp;
+
+#ifdef CONFIG_CHELSIO_T4_DCB
+ unregister_dcbevent_notifier(&cxgbit_dcbevent_nb);
+#endif
+ mutex_lock(&cdev_list_lock);
+ list_for_each_entry_safe(cdev, tmp, &cdev_list_head, list) {
+ list_del(&cdev->list);
+ cxgbit_put_cdev(cdev);
+ }
+ mutex_unlock(&cdev_list_lock);
+ iscsit_unregister_transport(&cxgbit_transport);
+ cxgb4_unregister_uld(CXGB4_ULD_ISCSIT);
+}
+
+module_init(cxgbit_init);
+module_exit(cxgbit_exit);
+
+MODULE_DESCRIPTION("Chelsio iSCSI target offload driver");
+MODULE_AUTHOR("Chelsio Communications");
+MODULE_VERSION(DRV_VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c
new file mode 100644
index 000000000000..3698f2eb097e
--- /dev/null
+++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c
@@ -0,0 +1,1654 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016 Chelsio Communications, Inc.
+ */
+
+#include <linux/workqueue.h>
+#include <linux/kthread.h>
+#include <linux/sched/signal.h>
+
+#include <linux/unaligned.h>
+#include <net/tcp.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+#include "cxgbit.h"
+
+struct sge_opaque_hdr {
+ void *dev;
+ dma_addr_t addr[MAX_SKB_FRAGS + 1];
+};
+
+static const u8 cxgbit_digest_len[] = {0, 4, 4, 8};
+
+#define TX_HDR_LEN (sizeof(struct sge_opaque_hdr) + \
+ sizeof(struct fw_ofld_tx_data_wr))
+
+static struct sk_buff *
+__cxgbit_alloc_skb(struct cxgbit_sock *csk, u32 len, bool iso)
+{
+ struct sk_buff *skb = NULL;
+ u8 submode = 0;
+ int errcode;
+ static const u32 hdr_len = TX_HDR_LEN + ISCSI_HDR_LEN;
+
+ if (len) {
+ skb = alloc_skb_with_frags(hdr_len, len,
+ 0, &errcode,
+ GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, TX_HDR_LEN);
+ skb_reset_transport_header(skb);
+ __skb_put(skb, ISCSI_HDR_LEN);
+ skb->data_len = len;
+ skb->len += len;
+ submode |= (csk->submode & CXGBIT_SUBMODE_DCRC);
+
+ } else {
+ u32 iso_len = iso ? sizeof(struct cpl_tx_data_iso) : 0;
+
+ skb = alloc_skb(hdr_len + iso_len, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, TX_HDR_LEN + iso_len);
+ skb_reset_transport_header(skb);
+ __skb_put(skb, ISCSI_HDR_LEN);
+ }
+
+ submode |= (csk->submode & CXGBIT_SUBMODE_HCRC);
+ cxgbit_skcb_submode(skb) = submode;
+ cxgbit_skcb_tx_extralen(skb) = cxgbit_digest_len[submode];
+ cxgbit_skcb_flags(skb) |= SKCBF_TX_NEED_HDR;
+ return skb;
+}
+
+static struct sk_buff *cxgbit_alloc_skb(struct cxgbit_sock *csk, u32 len)
+{
+ return __cxgbit_alloc_skb(csk, len, false);
+}
+
+/*
+ * cxgbit_is_ofld_imm - check whether a packet can be sent as immediate data
+ * @skb: the packet
+ *
+ * Returns true if a packet can be sent as an offload WR with immediate
+ * data. We currently use the same limit as for Ethernet packets.
+ */
+static int cxgbit_is_ofld_imm(const struct sk_buff *skb)
+{
+ int length = skb->len;
+
+ if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_NEED_HDR))
+ length += sizeof(struct fw_ofld_tx_data_wr);
+
+ if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_ISO))
+ length += sizeof(struct cpl_tx_data_iso);
+
+ return length <= MAX_IMM_OFLD_TX_DATA_WR_LEN;
+}
+
+/*
+ * cxgbit_sgl_len - calculates the size of an SGL of the given capacity
+ * @n: the number of SGL entries
+ * Calculates the number of flits needed for a scatter/gather list that
+ * can hold the given number of entries.
+ */
+static inline unsigned int cxgbit_sgl_len(unsigned int n)
+{
+ n--;
+ return (3 * n) / 2 + (n & 1) + 2;
+}
+
+/*
+ * cxgbit_calc_tx_flits_ofld - calculate # of flits for an offload packet
+ * @skb: the packet
+ *
+ * Returns the number of flits needed for the given offload packet.
+ * These packets are already fully constructed and no additional headers
+ * will be added.
+ */
+static unsigned int cxgbit_calc_tx_flits_ofld(const struct sk_buff *skb)
+{
+ unsigned int flits, cnt;
+
+ if (cxgbit_is_ofld_imm(skb))
+ return DIV_ROUND_UP(skb->len, 8);
+ flits = skb_transport_offset(skb) / 8;
+ cnt = skb_shinfo(skb)->nr_frags;
+ if (skb_tail_pointer(skb) != skb_transport_header(skb))
+ cnt++;
+ return flits + cxgbit_sgl_len(cnt);
+}
+
+#define CXGBIT_ISO_FSLICE 0x1
+#define CXGBIT_ISO_LSLICE 0x2
+static void
+cxgbit_cpl_tx_data_iso(struct sk_buff *skb, struct cxgbit_iso_info *iso_info)
+{
+ struct cpl_tx_data_iso *cpl;
+ unsigned int submode = cxgbit_skcb_submode(skb);
+ unsigned int fslice = !!(iso_info->flags & CXGBIT_ISO_FSLICE);
+ unsigned int lslice = !!(iso_info->flags & CXGBIT_ISO_LSLICE);
+
+ cpl = __skb_push(skb, sizeof(*cpl));
+
+ cpl->op_to_scsi = htonl(CPL_TX_DATA_ISO_OP_V(CPL_TX_DATA_ISO) |
+ CPL_TX_DATA_ISO_FIRST_V(fslice) |
+ CPL_TX_DATA_ISO_LAST_V(lslice) |
+ CPL_TX_DATA_ISO_CPLHDRLEN_V(0) |
+ CPL_TX_DATA_ISO_HDRCRC_V(submode & 1) |
+ CPL_TX_DATA_ISO_PLDCRC_V(((submode >> 1) & 1)) |
+ CPL_TX_DATA_ISO_IMMEDIATE_V(0) |
+ CPL_TX_DATA_ISO_SCSI_V(2));
+
+ cpl->ahs_len = 0;
+ cpl->mpdu = htons(DIV_ROUND_UP(iso_info->mpdu, 4));
+ cpl->burst_size = htonl(DIV_ROUND_UP(iso_info->burst_len, 4));
+ cpl->len = htonl(iso_info->len);
+ cpl->reserved2_seglen_offset = htonl(0);
+ cpl->datasn_offset = htonl(0);
+ cpl->buffer_offset = htonl(0);
+ cpl->reserved3 = 0;
+
+ __skb_pull(skb, sizeof(*cpl));
+}
+
+static void
+cxgbit_tx_data_wr(struct cxgbit_sock *csk, struct sk_buff *skb, u32 dlen,
+ u32 len, u32 credits, u32 compl)
+{
+ struct fw_ofld_tx_data_wr *req;
+ const struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi;
+ u32 submode = cxgbit_skcb_submode(skb);
+ u32 wr_ulp_mode = 0;
+ u32 hdr_size = sizeof(*req);
+ u32 opcode = FW_OFLD_TX_DATA_WR;
+ u32 immlen = 0;
+ u32 force = is_t5(lldi->adapter_type) ? TX_FORCE_V(!submode) :
+ T6_TX_FORCE_F;
+
+ if (cxgbit_skcb_flags(skb) & SKCBF_TX_ISO) {
+ opcode = FW_ISCSI_TX_DATA_WR;
+ immlen += sizeof(struct cpl_tx_data_iso);
+ hdr_size += sizeof(struct cpl_tx_data_iso);
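+		/* the extra ULP submode bit flags this WR as carrying a
+		 * cpl_tx_data_iso
+		 */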
+ submode |= 8;
+ }
+
+ if (cxgbit_is_ofld_imm(skb))
+ immlen += dlen;
+
+ req = __skb_push(skb, hdr_size);
+ req->op_to_immdlen = cpu_to_be32(FW_WR_OP_V(opcode) |
+ FW_WR_COMPL_V(compl) |
+ FW_WR_IMMDLEN_V(immlen));
+ req->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(csk->tid) |
+ FW_WR_LEN16_V(credits));
+ req->plen = htonl(len);
+ wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE_V(ULP_MODE_ISCSI) |
+ FW_OFLD_TX_DATA_WR_ULPSUBMODE_V(submode);
+
+ req->tunnel_to_proxy = htonl(wr_ulp_mode | force |
+ FW_OFLD_TX_DATA_WR_SHOVE_F);
+}
+
+static void cxgbit_arp_failure_skb_discard(void *handle, struct sk_buff *skb)
+{
+ kfree_skb(skb);
+}
+
+void cxgbit_push_tx_frames(struct cxgbit_sock *csk)
+{
+ struct sk_buff *skb;
+
+ while (csk->wr_cred && ((skb = skb_peek(&csk->txq)) != NULL)) {
+ u32 dlen = skb->len;
+ u32 len = skb->len;
+ u32 credits_needed;
+ u32 compl = 0;
+ u32 flowclen16 = 0;
+ u32 iso_cpl_len = 0;
+
+ if (cxgbit_skcb_flags(skb) & SKCBF_TX_ISO)
+ iso_cpl_len = sizeof(struct cpl_tx_data_iso);
+
+ if (cxgbit_is_ofld_imm(skb))
+ credits_needed = DIV_ROUND_UP(dlen + iso_cpl_len, 16);
+ else
+ credits_needed = DIV_ROUND_UP((8 *
+ cxgbit_calc_tx_flits_ofld(skb)) +
+ iso_cpl_len, 16);
+
+ if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_NEED_HDR))
+ credits_needed += DIV_ROUND_UP(
+ sizeof(struct fw_ofld_tx_data_wr), 16);
+		/*
+		 * Assumes the initial credit allocation is large enough to
+		 * cover the fw_flowc_wr plus the largest possible first
+		 * payload.
+		 */
+
+ if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags)) {
+ flowclen16 = cxgbit_send_tx_flowc_wr(csk);
+ csk->wr_cred -= flowclen16;
+ csk->wr_una_cred += flowclen16;
+ }
+
+ if (csk->wr_cred < credits_needed) {
+ pr_debug("csk 0x%p, skb %u/%u, wr %d < %u.\n",
+ csk, skb->len, skb->data_len,
+ credits_needed, csk->wr_cred);
+ break;
+ }
+ __skb_unlink(skb, &csk->txq);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
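+		/* remember the consumed credits in skb->csum so that
+		 * cxgbit_fw4_ack() can return them on completion
+		 */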
+ skb->csum = (__force __wsum)(credits_needed + flowclen16);
+ csk->wr_cred -= credits_needed;
+ csk->wr_una_cred += credits_needed;
+
+ pr_debug("csk 0x%p, skb %u/%u, wr %d, left %u, unack %u.\n",
+ csk, skb->len, skb->data_len, credits_needed,
+ csk->wr_cred, csk->wr_una_cred);
+
+ if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_NEED_HDR)) {
+ len += cxgbit_skcb_tx_extralen(skb);
+
+ if ((csk->wr_una_cred >= (csk->wr_max_cred / 2)) ||
+ (!before(csk->write_seq,
+ csk->snd_una + csk->snd_win))) {
+ compl = 1;
+ csk->wr_una_cred = 0;
+ }
+
+ cxgbit_tx_data_wr(csk, skb, dlen, len, credits_needed,
+ compl);
+ csk->snd_nxt += len;
+
+ } else if ((cxgbit_skcb_flags(skb) & SKCBF_TX_FLAG_COMPL) ||
+ (csk->wr_una_cred >= (csk->wr_max_cred / 2))) {
+ struct cpl_close_con_req *req =
+ (struct cpl_close_con_req *)skb->data;
+ req->wr.wr_hi |= htonl(FW_WR_COMPL_F);
+ csk->wr_una_cred = 0;
+ }
+
+ cxgbit_sock_enqueue_wr(csk, skb);
+ t4_set_arp_err_handler(skb, csk,
+ cxgbit_arp_failure_skb_discard);
+
+ pr_debug("csk 0x%p,%u, skb 0x%p, %u.\n",
+ csk, csk->tid, skb, len);
+
+ cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
+ }
+}
+
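+/*
+ * Release the per-connection "socket" lock: replay every CPL that was
+ * backlogged while the lock was owned, then clear lock_owner.
+ */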
+static void cxgbit_unlock_sock(struct cxgbit_sock *csk)
+{
+ struct sk_buff_head backlogq;
+ struct sk_buff *skb;
+ void (*fn)(struct cxgbit_sock *, struct sk_buff *);
+
+ skb_queue_head_init(&backlogq);
+
+ spin_lock_bh(&csk->lock);
+ while (skb_queue_len(&csk->backlogq)) {
+ skb_queue_splice_init(&csk->backlogq, &backlogq);
+ spin_unlock_bh(&csk->lock);
+
+ while ((skb = __skb_dequeue(&backlogq))) {
+ fn = cxgbit_skcb_rx_backlog_fn(skb);
+ fn(csk, skb);
+ }
+
+ spin_lock_bh(&csk->lock);
+ }
+
+ csk->lock_owner = false;
+ spin_unlock_bh(&csk->lock);
+}
+
+static int cxgbit_queue_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+ int ret = 0;
+
+ spin_lock_bh(&csk->lock);
+ csk->lock_owner = true;
+ spin_unlock_bh(&csk->lock);
+
+ if (unlikely((csk->com.state != CSK_STATE_ESTABLISHED) ||
+ signal_pending(current))) {
+ __kfree_skb(skb);
+ __skb_queue_purge(&csk->ppodq);
+ ret = -1;
+ goto unlock;
+ }
+
+ csk->write_seq += skb->len +
+ cxgbit_skcb_tx_extralen(skb);
+
+ skb_queue_splice_tail_init(&csk->ppodq, &csk->txq);
+ __skb_queue_tail(&csk->txq, skb);
+ cxgbit_push_tx_frames(csk);
+
+unlock:
+ cxgbit_unlock_sock(csk);
+ return ret;
+}
+
+static int
+cxgbit_map_skb(struct iscsit_cmd *cmd, struct sk_buff *skb, u32 data_offset,
+ u32 data_length)
+{
+ u32 i = 0, nr_frags = MAX_SKB_FRAGS;
+ u32 padding = ((-data_length) & 3);
+ struct scatterlist *sg;
+ struct page *page;
+ unsigned int page_off;
+
+ if (padding)
+ nr_frags--;
+
+ /*
+ * We know each entry in t_data_sg contains a page.
+ */
+ sg = &cmd->se_cmd.t_data_sg[data_offset / PAGE_SIZE];
+ page_off = (data_offset % PAGE_SIZE);
+
+ while (data_length && (i < nr_frags)) {
+ u32 cur_len = min_t(u32, data_length, sg->length - page_off);
+
+ page = sg_page(sg);
+
+ get_page(page);
+ skb_fill_page_desc(skb, i, page, sg->offset + page_off,
+ cur_len);
+ skb->data_len += cur_len;
+ skb->len += cur_len;
+ skb->truesize += cur_len;
+
+ data_length -= cur_len;
+ page_off = 0;
+ sg = sg_next(sg);
+ i++;
+ }
+
+ if (data_length)
+ return -1;
+
+ if (padding) {
+ page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!page)
+ return -1;
+ skb_fill_page_desc(skb, i, page, 0, padding);
+ skb->data_len += padding;
+ skb->len += padding;
+ skb->truesize += padding;
+ }
+
+ return 0;
+}
+
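+/*
+ * Send Data-In using ISO: one work request carries a single iSCSI header
+ * plus a cpl_tx_data_iso describing up to max_iso_npdu PDUs, and the
+ * adapter segments the payload into individual Data-In PDUs on the wire.
+ */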
+static int
+cxgbit_tx_datain_iso(struct cxgbit_sock *csk, struct iscsit_cmd *cmd,
+ struct iscsi_datain_req *dr)
+{
+ struct iscsit_conn *conn = csk->conn;
+ struct sk_buff *skb;
+ struct iscsi_datain datain;
+ struct cxgbit_iso_info iso_info;
+ u32 data_length = cmd->se_cmd.data_length;
+ u32 mrdsl = conn->conn_ops->MaxRecvDataSegmentLength;
+ u32 num_pdu, plen, tx_data = 0;
+ bool task_sense = !!(cmd->se_cmd.se_cmd_flags &
+ SCF_TRANSPORT_TASK_SENSE);
+ bool set_statsn = false;
+ int ret = -1;
+
+ while (data_length) {
+ num_pdu = (data_length + mrdsl - 1) / mrdsl;
+ if (num_pdu > csk->max_iso_npdu)
+ num_pdu = csk->max_iso_npdu;
+
+ plen = num_pdu * mrdsl;
+ if (plen > data_length)
+ plen = data_length;
+
+ skb = __cxgbit_alloc_skb(csk, 0, true);
+ if (unlikely(!skb))
+ return -ENOMEM;
+
+ memset(skb->data, 0, ISCSI_HDR_LEN);
+ cxgbit_skcb_flags(skb) |= SKCBF_TX_ISO;
+ cxgbit_skcb_submode(skb) |= (csk->submode &
+ CXGBIT_SUBMODE_DCRC);
+ cxgbit_skcb_tx_extralen(skb) = (num_pdu *
+ cxgbit_digest_len[cxgbit_skcb_submode(skb)]) +
+ ((num_pdu - 1) * ISCSI_HDR_LEN);
+
+ memset(&datain, 0, sizeof(struct iscsi_datain));
+ memset(&iso_info, 0, sizeof(iso_info));
+
+ if (!tx_data)
+ iso_info.flags |= CXGBIT_ISO_FSLICE;
+
+ if (!(data_length - plen)) {
+ iso_info.flags |= CXGBIT_ISO_LSLICE;
+ if (!task_sense) {
+ datain.flags = ISCSI_FLAG_DATA_STATUS;
+ iscsit_increment_maxcmdsn(cmd, conn->sess);
+ cmd->stat_sn = conn->stat_sn++;
+ set_statsn = true;
+ }
+ }
+
+ iso_info.burst_len = num_pdu * mrdsl;
+ iso_info.mpdu = mrdsl;
+ iso_info.len = ISCSI_HDR_LEN + plen;
+
+ cxgbit_cpl_tx_data_iso(skb, &iso_info);
+
+ datain.offset = tx_data;
+ datain.data_sn = cmd->data_sn - 1;
+
+ iscsit_build_datain_pdu(cmd, conn, &datain,
+ (struct iscsi_data_rsp *)skb->data,
+ set_statsn);
+
+ ret = cxgbit_map_skb(cmd, skb, tx_data, plen);
+ if (unlikely(ret)) {
+ __kfree_skb(skb);
+ goto out;
+ }
+
+ ret = cxgbit_queue_skb(csk, skb);
+ if (unlikely(ret))
+ goto out;
+
+ tx_data += plen;
+ data_length -= plen;
+
+ cmd->read_data_done += plen;
+ cmd->data_sn += num_pdu;
+ }
+
+ dr->dr_complete = DATAIN_COMPLETE_NORMAL;
+
+ return 0;
+
+out:
+ return ret;
+}
+
+static int
+cxgbit_tx_datain(struct cxgbit_sock *csk, struct iscsit_cmd *cmd,
+ const struct iscsi_datain *datain)
+{
+ struct sk_buff *skb;
+ int ret = 0;
+
+ skb = cxgbit_alloc_skb(csk, 0);
+ if (unlikely(!skb))
+ return -ENOMEM;
+
+ memcpy(skb->data, cmd->pdu, ISCSI_HDR_LEN);
+
+ if (datain->length) {
+ cxgbit_skcb_submode(skb) |= (csk->submode &
+ CXGBIT_SUBMODE_DCRC);
+ cxgbit_skcb_tx_extralen(skb) =
+ cxgbit_digest_len[cxgbit_skcb_submode(skb)];
+ }
+
+ ret = cxgbit_map_skb(cmd, skb, datain->offset, datain->length);
+ if (ret < 0) {
+ __kfree_skb(skb);
+ return ret;
+ }
+
+ return cxgbit_queue_skb(csk, skb);
+}
+
+static int
+cxgbit_xmit_datain_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
+ struct iscsi_datain_req *dr,
+ const struct iscsi_datain *datain)
+{
+ struct cxgbit_sock *csk = conn->context;
+ u32 data_length = cmd->se_cmd.data_length;
+ u32 padding = ((-data_length) & 3);
+ u32 mrdsl = conn->conn_ops->MaxRecvDataSegmentLength;
+
+ if ((data_length > mrdsl) && (!dr->recovery) &&
+ (!padding) && (!datain->offset) && csk->max_iso_npdu) {
+ atomic_long_add(data_length - datain->length,
+ &conn->sess->tx_data_octets);
+ return cxgbit_tx_datain_iso(csk, cmd, dr);
+ }
+
+ return cxgbit_tx_datain(csk, cmd, datain);
+}
+
+static int
+cxgbit_xmit_nondatain_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
+ const void *data_buf, u32 data_buf_len)
+{
+ struct cxgbit_sock *csk = conn->context;
+ struct sk_buff *skb;
+ u32 padding = ((-data_buf_len) & 3);
+
+ skb = cxgbit_alloc_skb(csk, data_buf_len + padding);
+ if (unlikely(!skb))
+ return -ENOMEM;
+
+ memcpy(skb->data, cmd->pdu, ISCSI_HDR_LEN);
+
+ if (data_buf_len) {
+ u32 pad_bytes = 0;
+
+ skb_store_bits(skb, ISCSI_HDR_LEN, data_buf, data_buf_len);
+
+ if (padding)
+ skb_store_bits(skb, ISCSI_HDR_LEN + data_buf_len,
+ &pad_bytes, padding);
+ }
+
+ cxgbit_skcb_tx_extralen(skb) = cxgbit_digest_len[
+ cxgbit_skcb_submode(skb)];
+
+ return cxgbit_queue_skb(csk, skb);
+}
+
+int
+cxgbit_xmit_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
+ struct iscsi_datain_req *dr, const void *buf, u32 buf_len)
+{
+ if (dr)
+ return cxgbit_xmit_datain_pdu(conn, cmd, dr, buf);
+ else
+ return cxgbit_xmit_nondatain_pdu(conn, cmd, buf, buf_len);
+}
+
+int cxgbit_validate_params(struct iscsit_conn *conn)
+{
+ struct cxgbit_sock *csk = conn->context;
+ struct cxgbit_device *cdev = csk->com.cdev;
+ struct iscsi_param *param;
+ u32 max_xmitdsl;
+
+ param = iscsi_find_param_from_key(MAXXMITDATASEGMENTLENGTH,
+ conn->param_list);
+ if (!param)
+ return -1;
+
+ if (kstrtou32(param->value, 0, &max_xmitdsl) < 0)
+ return -1;
+
+ if (max_xmitdsl > cdev->mdsl) {
+ if (iscsi_change_param_sprintf(
+ conn, "MaxXmitDataSegmentLength=%u", cdev->mdsl))
+ return -1;
+ }
+
+ return 0;
+}
+
+static int cxgbit_set_digest(struct cxgbit_sock *csk)
+{
+ struct iscsit_conn *conn = csk->conn;
+ struct iscsi_param *param;
+
+ param = iscsi_find_param_from_key(HEADERDIGEST, conn->param_list);
+ if (!param) {
+ pr_err("param not found key %s\n", HEADERDIGEST);
+ return -1;
+ }
+
+ if (!strcmp(param->value, CRC32C))
+ csk->submode |= CXGBIT_SUBMODE_HCRC;
+
+ param = iscsi_find_param_from_key(DATADIGEST, conn->param_list);
+ if (!param) {
+ csk->submode = 0;
+ pr_err("param not found key %s\n", DATADIGEST);
+ return -1;
+ }
+
+ if (!strcmp(param->value, CRC32C))
+ csk->submode |= CXGBIT_SUBMODE_DCRC;
+
+ if (cxgbit_setup_conn_digest(csk)) {
+ csk->submode = 0;
+ return -1;
+ }
+
+ return 0;
+}
+
+static int cxgbit_set_iso_npdu(struct cxgbit_sock *csk)
+{
+ struct iscsit_conn *conn = csk->conn;
+ struct iscsi_conn_ops *conn_ops = conn->conn_ops;
+ struct iscsi_param *param;
+ u32 mrdsl, mbl;
+ u32 max_npdu, max_iso_npdu;
+ u32 max_iso_payload;
+
+ if (conn->login->leading_connection) {
+ param = iscsi_find_param_from_key(MAXBURSTLENGTH,
+ conn->param_list);
+ if (!param) {
+ pr_err("param not found key %s\n", MAXBURSTLENGTH);
+ return -1;
+ }
+
+ if (kstrtou32(param->value, 0, &mbl) < 0)
+ return -1;
+ } else {
+ mbl = conn->sess->sess_ops->MaxBurstLength;
+ }
+
+ mrdsl = conn_ops->MaxRecvDataSegmentLength;
+ max_npdu = mbl / mrdsl;
+
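+	/* Limit the ISO payload to a whole number of TCP segments. */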
+ max_iso_payload = rounddown(CXGBIT_MAX_ISO_PAYLOAD, csk->emss);
+
+ max_iso_npdu = max_iso_payload /
+ (ISCSI_HDR_LEN + mrdsl +
+ cxgbit_digest_len[csk->submode]);
+
+ csk->max_iso_npdu = min(max_npdu, max_iso_npdu);
+
+ if (csk->max_iso_npdu <= 1)
+ csk->max_iso_npdu = 0;
+
+ return 0;
+}
+
+/*
+ * cxgbit_seq_pdu_inorder()
+ * @csk: pointer to cxgbit socket structure
+ *
+ * This function checks whether data sequences and data
+ * PDUs are being received in order.
+ *
+ * Return: -1 on error, 0 if both data sequences and data
+ * PDUs are in order, 1 if either data sequences or data
+ * PDUs are not in order.
+ */
+static int cxgbit_seq_pdu_inorder(struct cxgbit_sock *csk)
+{
+ struct iscsit_conn *conn = csk->conn;
+ struct iscsi_param *param;
+
+ if (conn->login->leading_connection) {
+ param = iscsi_find_param_from_key(DATASEQUENCEINORDER,
+ conn->param_list);
+ if (!param) {
+ pr_err("param not found key %s\n", DATASEQUENCEINORDER);
+ return -1;
+ }
+
+ if (strcmp(param->value, YES))
+ return 1;
+
+ param = iscsi_find_param_from_key(DATAPDUINORDER,
+ conn->param_list);
+ if (!param) {
+ pr_err("param not found key %s\n", DATAPDUINORDER);
+ return -1;
+ }
+
+ if (strcmp(param->value, YES))
+ return 1;
+
+ } else {
+ if (!conn->sess->sess_ops->DataSequenceInOrder)
+ return 1;
+ if (!conn->sess->sess_ops->DataPDUInOrder)
+ return 1;
+ }
+
+ return 0;
+}
+
+static int cxgbit_set_params(struct iscsit_conn *conn)
+{
+ struct cxgbit_sock *csk = conn->context;
+ struct cxgbit_device *cdev = csk->com.cdev;
+ struct cxgbi_ppm *ppm = *csk->com.cdev->lldi.iscsi_ppm;
+ struct iscsi_conn_ops *conn_ops = conn->conn_ops;
+ struct iscsi_param *param;
+ u8 erl;
+
+ if (conn_ops->MaxRecvDataSegmentLength > cdev->mdsl)
+ conn_ops->MaxRecvDataSegmentLength = cdev->mdsl;
+
+ if (cxgbit_set_digest(csk))
+ return -1;
+
+ if (conn->login->leading_connection) {
+ param = iscsi_find_param_from_key(ERRORRECOVERYLEVEL,
+ conn->param_list);
+ if (!param) {
+ pr_err("param not found key %s\n", ERRORRECOVERYLEVEL);
+ return -1;
+ }
+ if (kstrtou8(param->value, 0, &erl) < 0)
+ return -1;
+ } else {
+ erl = conn->sess->sess_ops->ErrorRecoveryLevel;
+ }
+
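+	/* ISO and DDP offloads are only set up for connections at ERL=0. */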
+ if (!erl) {
+ int ret;
+
+ ret = cxgbit_seq_pdu_inorder(csk);
+ if (ret < 0) {
+ return -1;
+ } else if (ret > 0) {
+ if (is_t5(cdev->lldi.adapter_type))
+ goto enable_ddp;
+ else
+ return 0;
+ }
+
+ if (test_bit(CDEV_ISO_ENABLE, &cdev->flags)) {
+ if (cxgbit_set_iso_npdu(csk))
+ return -1;
+ }
+
+enable_ddp:
+ if (test_bit(CDEV_DDP_ENABLE, &cdev->flags)) {
+ if (cxgbit_setup_conn_pgidx(csk,
+ ppm->tformat.pgsz_idx_dflt))
+ return -1;
+ set_bit(CSK_DDP_ENABLE, &csk->com.flags);
+ }
+ }
+
+ return 0;
+}
+
+int
+cxgbit_put_login_tx(struct iscsit_conn *conn, struct iscsi_login *login,
+ u32 length)
+{
+ struct cxgbit_sock *csk = conn->context;
+ struct sk_buff *skb;
+ u32 padding_buf = 0;
+ u8 padding = ((-length) & 3);
+
+ skb = cxgbit_alloc_skb(csk, length + padding);
+ if (!skb)
+ return -ENOMEM;
+ skb_store_bits(skb, 0, login->rsp, ISCSI_HDR_LEN);
+ skb_store_bits(skb, ISCSI_HDR_LEN, login->rsp_buf, length);
+
+ if (padding)
+ skb_store_bits(skb, ISCSI_HDR_LEN + length,
+ &padding_buf, padding);
+
+ if (login->login_complete) {
+ if (cxgbit_set_params(conn)) {
+ kfree_skb(skb);
+ return -1;
+ }
+
+ set_bit(CSK_LOGIN_DONE, &csk->com.flags);
+ }
+
+ if (cxgbit_queue_skb(csk, skb))
+ return -1;
+
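+	/*
+	 * Login exchange is still in progress; kick the login work to
+	 * receive and process the next login request PDU.
+	 */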
+ if ((!login->login_complete) && (!login->login_failed))
+ schedule_delayed_work(&conn->login_work, 0);
+
+ return 0;
+}
+
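+/*
+ * Copy the PDU data payload from @skb into the @sg scatterlist,
+ * starting @skip bytes into the scatterlist.
+ */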
+static void
+cxgbit_skb_copy_to_sg(struct sk_buff *skb, struct scatterlist *sg,
+ unsigned int nents, u32 skip)
+{
+ struct skb_seq_state st;
+ const u8 *buf;
+ unsigned int consumed = 0, buf_len;
+ struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(skb);
+
+ skb_prepare_seq_read(skb, pdu_cb->doffset,
+ pdu_cb->doffset + pdu_cb->dlen,
+ &st);
+
+ while (true) {
+ buf_len = skb_seq_read(consumed, &buf, &st);
+ if (!buf_len) {
+ skb_abort_seq_read(&st);
+ break;
+ }
+
+ consumed += sg_pcopy_from_buffer(sg, nents, (void *)buf,
+ buf_len, skip + consumed);
+ }
+}
+
+static struct iscsit_cmd *cxgbit_allocate_cmd(struct cxgbit_sock *csk)
+{
+ struct iscsit_conn *conn = csk->conn;
+ struct cxgbi_ppm *ppm = cdev2ppm(csk->com.cdev);
+ struct cxgbit_cmd *ccmd;
+ struct iscsit_cmd *cmd;
+
+ cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
+ if (!cmd) {
+ pr_err("Unable to allocate iscsit_cmd + cxgbit_cmd\n");
+ return NULL;
+ }
+
+ ccmd = iscsit_priv_cmd(cmd);
+ ccmd->ttinfo.tag = ppm->tformat.no_ddp_mask;
+ ccmd->setup_ddp = true;
+
+ return cmd;
+}
+
+static int
+cxgbit_handle_immediate_data(struct iscsit_cmd *cmd, struct iscsi_scsi_req *hdr,
+ u32 length)
+{
+ struct iscsit_conn *conn = cmd->conn;
+ struct cxgbit_sock *csk = conn->context;
+ struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
+
+ if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) {
+ pr_err("ImmediateData CRC32C DataDigest error\n");
+ if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+ pr_err("Unable to recover from"
+ " Immediate Data digest failure while"
+ " in ERL=0.\n");
+ iscsit_reject_cmd(cmd, ISCSI_REASON_DATA_DIGEST_ERROR,
+ (unsigned char *)hdr);
+ return IMMEDIATE_DATA_CANNOT_RECOVER;
+ }
+
+ iscsit_reject_cmd(cmd, ISCSI_REASON_DATA_DIGEST_ERROR,
+ (unsigned char *)hdr);
+ return IMMEDIATE_DATA_ERL1_CRC_FAILURE;
+ }
+
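+	/*
+	 * Fast path: take a page reference on the skb data fragment and
+	 * point the se_cmd scatterlist directly at it instead of copying
+	 * the payload.
+	 */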
+ if (cmd->se_cmd.se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
+ struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd);
+ struct skb_shared_info *ssi = skb_shinfo(csk->skb);
+ skb_frag_t *dfrag = &ssi->frags[pdu_cb->dfrag_idx];
+
+ sg_init_table(&ccmd->sg, 1);
+ sg_set_page(&ccmd->sg, skb_frag_page(dfrag),
+ skb_frag_size(dfrag), skb_frag_off(dfrag));
+ get_page(skb_frag_page(dfrag));
+
+ cmd->se_cmd.t_data_sg = &ccmd->sg;
+ cmd->se_cmd.t_data_nents = 1;
+
+ ccmd->release = true;
+ } else {
+ struct scatterlist *sg = &cmd->se_cmd.t_data_sg[0];
+ u32 sg_nents = max(1UL, DIV_ROUND_UP(pdu_cb->dlen, PAGE_SIZE));
+
+ cxgbit_skb_copy_to_sg(csk->skb, sg, sg_nents, 0);
+ }
+
+ cmd->write_data_done += pdu_cb->dlen;
+
+ if (cmd->write_data_done == cmd->se_cmd.data_length) {
+ spin_lock_bh(&cmd->istate_lock);
+ cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
+ cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
+ spin_unlock_bh(&cmd->istate_lock);
+ }
+
+ return IMMEDIATE_DATA_NORMAL_OPERATION;
+}
+
+static int
+cxgbit_get_immediate_data(struct iscsit_cmd *cmd, struct iscsi_scsi_req *hdr,
+ bool dump_payload)
+{
+ struct iscsit_conn *conn = cmd->conn;
+ int cmdsn_ret = 0, immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
+ /*
+ * Special case for Unsupported SAM WRITE Opcodes and ImmediateData=Yes.
+ */
+ if (dump_payload)
+ goto after_immediate_data;
+
+ immed_ret = cxgbit_handle_immediate_data(cmd, hdr,
+ cmd->first_burst_len);
+after_immediate_data:
+ if (immed_ret == IMMEDIATE_DATA_NORMAL_OPERATION) {
+ /*
+ * A PDU/CmdSN carrying Immediate Data passed
+ * DataCRC, check against ExpCmdSN/MaxCmdSN if
+ * Immediate Bit is not set.
+ */
+ cmdsn_ret = iscsit_sequence_cmd(conn, cmd,
+ (unsigned char *)hdr,
+ hdr->cmdsn);
+ if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
+ return -1;
+
+ if (cmd->sense_reason || cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
+ target_put_sess_cmd(&cmd->se_cmd);
+ return 0;
+ } else if (cmd->unsolicited_data) {
+ iscsit_set_unsolicited_dataout(cmd);
+ }
+
+ } else if (immed_ret == IMMEDIATE_DATA_ERL1_CRC_FAILURE) {
+ /*
+ * Immediate Data failed DataCRC and ERL>=1,
+ * silently drop this PDU and let the initiator
+ * plug the CmdSN gap.
+ *
+ * FIXME: Send Unsolicited NOPIN with reserved
+ * TTT here to help the initiator figure out
+ * the missing CmdSN, although they should be
+ * intelligent enough to determine the missing
+ * CmdSN and issue a retry to plug the sequence.
+ */
+ cmd->i_state = ISTATE_REMOVE;
+ iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
+ } else /* immed_ret == IMMEDIATE_DATA_CANNOT_RECOVER */
+ return -1;
+
+ return 0;
+}
+
+static int
+cxgbit_handle_scsi_cmd(struct cxgbit_sock *csk, struct iscsit_cmd *cmd)
+{
+ struct iscsit_conn *conn = csk->conn;
+ struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
+ struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)pdu_cb->hdr;
+ int rc;
+ bool dump_payload = false;
+
+ rc = iscsit_setup_scsi_cmd(conn, cmd, (unsigned char *)hdr);
+ if (rc < 0)
+ return rc;
+
+ if (pdu_cb->dlen && (pdu_cb->dlen == cmd->se_cmd.data_length) &&
+ (pdu_cb->nr_dfrags == 1))
+ cmd->se_cmd.se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
+
+ rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
+ if (rc < 0)
+ return 0;
+ else if (rc > 0)
+ dump_payload = true;
+
+ if (!pdu_cb->dlen)
+ return 0;
+
+ return cxgbit_get_immediate_data(cmd, hdr, dump_payload);
+}
+
+static int cxgbit_handle_iscsi_dataout(struct cxgbit_sock *csk)
+{
+ struct scatterlist *sg_start;
+ struct iscsit_conn *conn = csk->conn;
+ struct iscsit_cmd *cmd = NULL;
+ struct cxgbit_cmd *ccmd;
+ struct cxgbi_task_tag_info *ttinfo;
+ struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
+ struct iscsi_data *hdr = (struct iscsi_data *)pdu_cb->hdr;
+ u32 data_offset = be32_to_cpu(hdr->offset);
+ u32 data_len = ntoh24(hdr->dlength);
+ int rc, sg_nents, sg_off;
+ bool dcrc_err = false;
+
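+	/*
+	 * DDP has already placed the payload directly into the target
+	 * buffers; only the Data-Out header is processed here.
+	 */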
+ if (pdu_cb->flags & PDUCBF_RX_DDP_CMP) {
+ u32 offset = be32_to_cpu(hdr->offset);
+ u32 ddp_data_len;
+ bool success = false;
+
+ cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt, 0);
+ if (!cmd)
+ return 0;
+
+ ddp_data_len = offset - cmd->write_data_done;
+ atomic_long_add(ddp_data_len, &conn->sess->rx_data_octets);
+
+ cmd->write_data_done = offset;
+ cmd->next_burst_len = ddp_data_len;
+ cmd->data_sn = be32_to_cpu(hdr->datasn);
+
+ rc = __iscsit_check_dataout_hdr(conn, (unsigned char *)hdr,
+ cmd, data_len, &success);
+ if (rc < 0)
+ return rc;
+ else if (!success)
+ return 0;
+ } else {
+ rc = iscsit_check_dataout_hdr(conn, (unsigned char *)hdr, &cmd);
+ if (rc < 0)
+ return rc;
+ else if (!cmd)
+ return 0;
+ }
+
+ if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) {
+ pr_err("ITT: 0x%08x, Offset: %u, Length: %u,"
+ " DataSN: 0x%08x\n",
+ hdr->itt, hdr->offset, data_len,
+ hdr->datasn);
+
+ dcrc_err = true;
+ goto check_payload;
+ }
+
+ pr_debug("DataOut data_len: %u, "
+ "write_data_done: %u, data_length: %u\n",
+ data_len, cmd->write_data_done,
+ cmd->se_cmd.data_length);
+
+ if (!(pdu_cb->flags & PDUCBF_RX_DATA_DDPD)) {
+ u32 skip = data_offset % PAGE_SIZE;
+
+ sg_off = data_offset / PAGE_SIZE;
+ sg_start = &cmd->se_cmd.t_data_sg[sg_off];
+ sg_nents = max(1UL, DIV_ROUND_UP(skip + data_len, PAGE_SIZE));
+
+ cxgbit_skb_copy_to_sg(csk->skb, sg_start, sg_nents, skip);
+ }
+
+ ccmd = iscsit_priv_cmd(cmd);
+ ttinfo = &ccmd->ttinfo;
+
+ if (ccmd->release && ttinfo->sgl &&
+ (cmd->se_cmd.data_length == (cmd->write_data_done + data_len))) {
+ struct cxgbit_device *cdev = csk->com.cdev;
+ struct cxgbi_ppm *ppm = cdev2ppm(cdev);
+
+ dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl, ttinfo->nents,
+ DMA_FROM_DEVICE);
+ ttinfo->nents = 0;
+ ttinfo->sgl = NULL;
+ }
+
+check_payload:
+
+ rc = iscsit_check_dataout_payload(cmd, hdr, dcrc_err);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
+static int cxgbit_handle_nop_out(struct cxgbit_sock *csk, struct iscsit_cmd *cmd)
+{
+ struct iscsit_conn *conn = csk->conn;
+ struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
+ struct iscsi_nopout *hdr = (struct iscsi_nopout *)pdu_cb->hdr;
+ unsigned char *ping_data = NULL;
+ u32 payload_length = pdu_cb->dlen;
+ int ret;
+
+ ret = iscsit_setup_nop_out(conn, cmd, hdr);
+ if (ret < 0)
+ return 0;
+
+ if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) {
+ if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+ pr_err("Unable to recover from"
+ " NOPOUT Ping DataCRC failure while in"
+ " ERL=0.\n");
+ ret = -1;
+ goto out;
+ } else {
+ /*
+ * drop this PDU and let the
+ * initiator plug the CmdSN gap.
+ */
+ pr_info("Dropping NOPOUT"
+ " Command CmdSN: 0x%08x due to"
+ " DataCRC error.\n", hdr->cmdsn);
+ ret = 0;
+ goto out;
+ }
+ }
+
+	/*
+	 * Handle the ping data carried in the NOP-OUT payload.
+	 */
+ if (payload_length && hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
+ ping_data = kzalloc(payload_length + 1, GFP_KERNEL);
+ if (!ping_data) {
+ pr_err("Unable to allocate memory for"
+ " NOPOUT ping data.\n");
+ ret = -1;
+ goto out;
+ }
+
+ skb_copy_bits(csk->skb, pdu_cb->doffset,
+ ping_data, payload_length);
+
+ ping_data[payload_length] = '\0';
+ /*
+ * Attach ping data to struct iscsit_cmd->buf_ptr.
+ */
+ cmd->buf_ptr = ping_data;
+ cmd->buf_ptr_size = payload_length;
+
+ pr_debug("Got %u bytes of NOPOUT ping"
+ " data.\n", payload_length);
+ pr_debug("Ping Data: \"%s\"\n", ping_data);
+ }
+
+ return iscsit_process_nop_out(conn, cmd, hdr);
+out:
+ if (cmd)
+ iscsit_free_cmd(cmd, false);
+ return ret;
+}
+
+static int
+cxgbit_handle_text_cmd(struct cxgbit_sock *csk, struct iscsit_cmd *cmd)
+{
+ struct iscsit_conn *conn = csk->conn;
+ struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
+ struct iscsi_text *hdr = (struct iscsi_text *)pdu_cb->hdr;
+ u32 payload_length = pdu_cb->dlen;
+ int rc;
+ unsigned char *text_in = NULL;
+
+ rc = iscsit_setup_text_cmd(conn, cmd, hdr);
+ if (rc < 0)
+ return rc;
+
+ if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) {
+ if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+ pr_err("Unable to recover from"
+ " Text Data digest failure while in"
+ " ERL=0.\n");
+ goto reject;
+ } else {
+ /*
+ * drop this PDU and let the
+ * initiator plug the CmdSN gap.
+ */
+ pr_info("Dropping Text"
+ " Command CmdSN: 0x%08x due to"
+ " DataCRC error.\n", hdr->cmdsn);
+ return 0;
+ }
+ }
+
+ if (payload_length) {
+ text_in = kzalloc(payload_length, GFP_KERNEL);
+ if (!text_in) {
+ pr_err("Unable to allocate text_in of payload_length: %u\n",
+ payload_length);
+ return -ENOMEM;
+ }
+ skb_copy_bits(csk->skb, pdu_cb->doffset,
+ text_in, payload_length);
+
+ text_in[payload_length - 1] = '\0';
+
+ cmd->text_in_ptr = text_in;
+ }
+
+ return iscsit_process_text_cmd(conn, cmd, hdr);
+
+reject:
+ return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
+ pdu_cb->hdr);
+}
+
+static int cxgbit_target_rx_opcode(struct cxgbit_sock *csk)
+{
+ struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
+ struct iscsi_hdr *hdr = (struct iscsi_hdr *)pdu_cb->hdr;
+ struct iscsit_conn *conn = csk->conn;
+ struct iscsit_cmd *cmd = NULL;
+ u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);
+ int ret = -EINVAL;
+
+ switch (opcode) {
+ case ISCSI_OP_SCSI_CMD:
+ cmd = cxgbit_allocate_cmd(csk);
+ if (!cmd)
+ goto reject;
+
+ ret = cxgbit_handle_scsi_cmd(csk, cmd);
+ break;
+ case ISCSI_OP_SCSI_DATA_OUT:
+ ret = cxgbit_handle_iscsi_dataout(csk);
+ break;
+ case ISCSI_OP_NOOP_OUT:
+ if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
+ cmd = cxgbit_allocate_cmd(csk);
+ if (!cmd)
+ goto reject;
+ }
+
+ ret = cxgbit_handle_nop_out(csk, cmd);
+ break;
+ case ISCSI_OP_SCSI_TMFUNC:
+ cmd = cxgbit_allocate_cmd(csk);
+ if (!cmd)
+ goto reject;
+
+ ret = iscsit_handle_task_mgt_cmd(conn, cmd,
+ (unsigned char *)hdr);
+ break;
+ case ISCSI_OP_TEXT:
+ if (hdr->ttt != cpu_to_be32(0xFFFFFFFF)) {
+ cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
+ if (!cmd)
+ goto reject;
+ } else {
+ cmd = cxgbit_allocate_cmd(csk);
+ if (!cmd)
+ goto reject;
+ }
+
+ ret = cxgbit_handle_text_cmd(csk, cmd);
+ break;
+ case ISCSI_OP_LOGOUT:
+ cmd = cxgbit_allocate_cmd(csk);
+ if (!cmd)
+ goto reject;
+
+ ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
+ if (ret > 0)
+ wait_for_completion_timeout(&conn->conn_logout_comp,
+ SECONDS_FOR_LOGOUT_COMP
+ * HZ);
+ break;
+ case ISCSI_OP_SNACK:
+ ret = iscsit_handle_snack(conn, (unsigned char *)hdr);
+ break;
+ default:
+ pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
+ dump_stack();
+ break;
+ }
+
+ return ret;
+
+reject:
+ return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+ (unsigned char *)hdr);
+}
+
+static int cxgbit_rx_opcode(struct cxgbit_sock *csk)
+{
+ struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
+ struct iscsit_conn *conn = csk->conn;
+ struct iscsi_hdr *hdr = pdu_cb->hdr;
+ u8 opcode;
+
+ if (pdu_cb->flags & PDUCBF_RX_HCRC_ERR) {
+ atomic_long_inc(&conn->sess->conn_digest_errors);
+ goto transport_err;
+ }
+
+ if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)
+ goto transport_err;
+
+ opcode = hdr->opcode & ISCSI_OPCODE_MASK;
+
+ if (conn->sess->sess_ops->SessionType &&
+ ((!(opcode & ISCSI_OP_TEXT)) ||
+ (!(opcode & ISCSI_OP_LOGOUT)))) {
+ pr_err("Received illegal iSCSI Opcode: 0x%02x"
+ " while in Discovery Session, rejecting.\n", opcode);
+ iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+ (unsigned char *)hdr);
+ goto transport_err;
+ }
+
+ if (cxgbit_target_rx_opcode(csk) < 0)
+ goto transport_err;
+
+ return 0;
+
+transport_err:
+ return -1;
+}
+
+static int cxgbit_rx_login_pdu(struct cxgbit_sock *csk)
+{
+ struct iscsit_conn *conn = csk->conn;
+ struct iscsi_login *login = conn->login;
+ struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
+ struct iscsi_login_req *login_req;
+
+ login_req = (struct iscsi_login_req *)login->req;
+ memcpy(login_req, pdu_cb->hdr, sizeof(*login_req));
+
+ pr_debug("Got Login Command, Flags 0x%02x, ITT: 0x%08x,"
+ " CmdSN: 0x%08x, ExpStatSN: 0x%08x, CID: %hu, Length: %u\n",
+ login_req->flags, login_req->itt, login_req->cmdsn,
+ login_req->exp_statsn, login_req->cid, pdu_cb->dlen);
+ /*
+ * Setup the initial iscsi_login values from the leading
+ * login request PDU.
+ */
+ if (login->first_request) {
+ login_req = (struct iscsi_login_req *)login->req;
+ login->leading_connection = (!login_req->tsih) ? 1 : 0;
+ login->current_stage = ISCSI_LOGIN_CURRENT_STAGE(
+ login_req->flags);
+ login->version_min = login_req->min_version;
+ login->version_max = login_req->max_version;
+ memcpy(login->isid, login_req->isid, 6);
+ login->cmd_sn = be32_to_cpu(login_req->cmdsn);
+ login->init_task_tag = login_req->itt;
+ login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
+ login->cid = be16_to_cpu(login_req->cid);
+ login->tsih = be16_to_cpu(login_req->tsih);
+ }
+
+ if (iscsi_target_check_login_request(conn, login) < 0)
+ return -1;
+
+ memset(login->req_buf, 0, MAX_KEY_VALUE_PAIRS);
+ skb_copy_bits(csk->skb, pdu_cb->doffset, login->req_buf, pdu_cb->dlen);
+
+ return 0;
+}
+
+static int
+cxgbit_process_iscsi_pdu(struct cxgbit_sock *csk, struct sk_buff *skb, int idx)
+{
+ struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, idx);
+ int ret;
+
+ cxgbit_rx_pdu_cb(skb) = pdu_cb;
+
+ csk->skb = skb;
+
+ if (!test_bit(CSK_LOGIN_DONE, &csk->com.flags)) {
+ ret = cxgbit_rx_login_pdu(csk);
+ set_bit(CSK_LOGIN_PDU_DONE, &csk->com.flags);
+ } else {
+ ret = cxgbit_rx_opcode(csk);
+ }
+
+ return ret;
+}
+
+static void cxgbit_lro_skb_dump(struct sk_buff *skb)
+{
+ struct skb_shared_info *ssi = skb_shinfo(skb);
+ struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
+ struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, 0);
+ u8 i;
+
+ pr_info("skb 0x%p, head 0x%p, 0x%p, len %u,%u, frags %u.\n",
+ skb, skb->head, skb->data, skb->len, skb->data_len,
+ ssi->nr_frags);
+ pr_info("skb 0x%p, lro_cb, csk 0x%p, pdu %u, %u.\n",
+ skb, lro_cb->csk, lro_cb->pdu_idx, lro_cb->pdu_totallen);
+
+ for (i = 0; i < lro_cb->pdu_idx; i++, pdu_cb++)
+ pr_info("skb 0x%p, pdu %d, %u, f 0x%x, seq 0x%x, dcrc 0x%x, "
+ "frags %u.\n",
+ skb, i, pdu_cb->pdulen, pdu_cb->flags, pdu_cb->seq,
+ pdu_cb->ddigest, pdu_cb->frags);
+ for (i = 0; i < ssi->nr_frags; i++)
+ pr_info("skb 0x%p, frag %d, off %u, sz %u.\n",
+ skb, i, skb_frag_off(&ssi->frags[i]),
+ skb_frag_size(&ssi->frags[i]));
+}
+
+static void cxgbit_lro_hskb_reset(struct cxgbit_sock *csk)
+{
+ struct sk_buff *skb = csk->lro_hskb;
+ struct skb_shared_info *ssi = skb_shinfo(skb);
+ u8 i;
+
+ memset(skb->data, 0, LRO_SKB_MIN_HEADROOM);
+ for (i = 0; i < ssi->nr_frags; i++)
+ put_page(skb_frag_page(&ssi->frags[i]));
+ ssi->nr_frags = 0;
+ skb->data_len = 0;
+ skb->truesize -= skb->len;
+ skb->len = 0;
+}
+
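+/*
+ * Merge the header/data page fragments of a partially received PDU
+ * into the per-connection head skb (csk->lro_hskb) until the
+ * remainder of the PDU arrives.
+ */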
+static void
+cxgbit_lro_skb_merge(struct cxgbit_sock *csk, struct sk_buff *skb, u8 pdu_idx)
+{
+ struct sk_buff *hskb = csk->lro_hskb;
+ struct cxgbit_lro_pdu_cb *hpdu_cb = cxgbit_skb_lro_pdu_cb(hskb, 0);
+ struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, pdu_idx);
+ struct skb_shared_info *hssi = skb_shinfo(hskb);
+ struct skb_shared_info *ssi = skb_shinfo(skb);
+ unsigned int len = 0;
+
+ if (pdu_cb->flags & PDUCBF_RX_HDR) {
+ u8 hfrag_idx = hssi->nr_frags;
+
+ hpdu_cb->flags |= pdu_cb->flags;
+ hpdu_cb->seq = pdu_cb->seq;
+ hpdu_cb->hdr = pdu_cb->hdr;
+ hpdu_cb->hlen = pdu_cb->hlen;
+
+ memcpy(&hssi->frags[hfrag_idx], &ssi->frags[pdu_cb->hfrag_idx],
+ sizeof(skb_frag_t));
+
+ get_page(skb_frag_page(&hssi->frags[hfrag_idx]));
+ hssi->nr_frags++;
+ hpdu_cb->frags++;
+ hpdu_cb->hfrag_idx = hfrag_idx;
+
+ len = skb_frag_size(&hssi->frags[hfrag_idx]);
+ hskb->len += len;
+ hskb->data_len += len;
+ hskb->truesize += len;
+ }
+
+ if (pdu_cb->flags & PDUCBF_RX_DATA) {
+ u8 dfrag_idx = hssi->nr_frags, i;
+
+ hpdu_cb->flags |= pdu_cb->flags;
+ hpdu_cb->dfrag_idx = dfrag_idx;
+
+ len = 0;
+ for (i = 0; i < pdu_cb->nr_dfrags; dfrag_idx++, i++) {
+ memcpy(&hssi->frags[dfrag_idx],
+ &ssi->frags[pdu_cb->dfrag_idx + i],
+ sizeof(skb_frag_t));
+
+ get_page(skb_frag_page(&hssi->frags[dfrag_idx]));
+
+ len += skb_frag_size(&hssi->frags[dfrag_idx]);
+
+ hssi->nr_frags++;
+ hpdu_cb->frags++;
+ }
+
+ hpdu_cb->dlen = pdu_cb->dlen;
+ hpdu_cb->doffset = hpdu_cb->hlen;
+ hpdu_cb->nr_dfrags = pdu_cb->nr_dfrags;
+ hskb->len += len;
+ hskb->data_len += len;
+ hskb->truesize += len;
+ }
+
+ if (pdu_cb->flags & PDUCBF_RX_STATUS) {
+ hpdu_cb->flags |= pdu_cb->flags;
+
+ if (hpdu_cb->flags & PDUCBF_RX_DATA)
+ hpdu_cb->flags &= ~PDUCBF_RX_DATA_DDPD;
+
+ hpdu_cb->ddigest = pdu_cb->ddigest;
+ hpdu_cb->pdulen = pdu_cb->pdulen;
+ }
+}
+
+static int cxgbit_process_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+ struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
+ struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, 0);
+ u8 pdu_idx = 0, last_idx = 0;
+ int ret = 0;
+
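+	/*
+	 * The first PDU may be the continuation of a PDU that started in
+	 * the previous skb; merge it into the head skb and process the
+	 * head skb once the status for that PDU has arrived.
+	 */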
+ if (!pdu_cb->complete) {
+ cxgbit_lro_skb_merge(csk, skb, 0);
+
+ if (pdu_cb->flags & PDUCBF_RX_STATUS) {
+ struct sk_buff *hskb = csk->lro_hskb;
+
+ ret = cxgbit_process_iscsi_pdu(csk, hskb, 0);
+
+ cxgbit_lro_hskb_reset(csk);
+
+ if (ret < 0)
+ goto out;
+ }
+
+ pdu_idx = 1;
+ }
+
+ if (lro_cb->pdu_idx)
+ last_idx = lro_cb->pdu_idx - 1;
+
+ for (; pdu_idx <= last_idx; pdu_idx++) {
+ ret = cxgbit_process_iscsi_pdu(csk, skb, pdu_idx);
+ if (ret < 0)
+ goto out;
+ }
+
+ if ((!lro_cb->complete) && lro_cb->pdu_idx)
+ cxgbit_lro_skb_merge(csk, skb, lro_cb->pdu_idx);
+
+out:
+ return ret;
+}
+
+static int cxgbit_t5_rx_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+ struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
+ struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, 0);
+ int ret = -1;
+
+ if ((pdu_cb->flags & PDUCBF_RX_HDR) &&
+ (pdu_cb->seq != csk->rcv_nxt)) {
+ pr_info("csk 0x%p, tid 0x%x, seq 0x%x != 0x%x.\n",
+ csk, csk->tid, pdu_cb->seq, csk->rcv_nxt);
+ cxgbit_lro_skb_dump(skb);
+ return ret;
+ }
+
+ csk->rcv_nxt += lro_cb->pdu_totallen;
+
+ ret = cxgbit_process_lro_skb(csk, skb);
+
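+	/*
+	 * Return RX credits once at least a quarter of the receive
+	 * window has been consumed.
+	 */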
+ csk->rx_credits += lro_cb->pdu_totallen;
+
+ if (csk->rx_credits >= (csk->rcv_win / 4))
+ cxgbit_rx_data_ack(csk);
+
+ return ret;
+}
+
+static int cxgbit_rx_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+ struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
+ int ret;
+
+ ret = cxgbit_process_lro_skb(csk, skb);
+ if (ret)
+ return ret;
+
+ csk->rx_credits += lro_cb->pdu_totallen;
+ if (csk->rx_credits >= csk->rcv_win) {
+ csk->rx_credits = 0;
+ cxgbit_rx_data_ack(csk);
+ }
+
+ return 0;
+}
+
+static int cxgbit_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+ struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi;
+ int ret = -1;
+
+ if (likely(cxgbit_skcb_flags(skb) & SKCBF_RX_LRO)) {
+ if (is_t5(lldi->adapter_type))
+ ret = cxgbit_t5_rx_lro_skb(csk, skb);
+ else
+ ret = cxgbit_rx_lro_skb(csk, skb);
+ }
+
+ __kfree_skb(skb);
+ return ret;
+}
+
+static bool cxgbit_rxq_len(struct cxgbit_sock *csk, struct sk_buff_head *rxq)
+{
+ spin_lock_bh(&csk->rxq.lock);
+ if (skb_queue_len(&csk->rxq)) {
+ skb_queue_splice_init(&csk->rxq, rxq);
+ spin_unlock_bh(&csk->rxq.lock);
+ return true;
+ }
+ spin_unlock_bh(&csk->rxq.lock);
+ return false;
+}
+
+static int cxgbit_wait_rxq(struct cxgbit_sock *csk)
+{
+ struct sk_buff *skb;
+ struct sk_buff_head rxq;
+
+ skb_queue_head_init(&rxq);
+
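+	/*
+	 * Sleep until skbs are queued on the connection rxq;
+	 * cxgbit_rxq_len() splices them onto the local list so they can
+	 * be processed without holding the rxq lock.
+	 */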
+ wait_event_interruptible(csk->waitq, cxgbit_rxq_len(csk, &rxq));
+
+ if (signal_pending(current))
+ goto out;
+
+ while ((skb = __skb_dequeue(&rxq))) {
+ if (cxgbit_rx_skb(csk, skb))
+ goto out;
+ }
+
+ return 0;
+out:
+ __skb_queue_purge(&rxq);
+ return -1;
+}
+
+int cxgbit_get_login_rx(struct iscsit_conn *conn, struct iscsi_login *login)
+{
+ struct cxgbit_sock *csk = conn->context;
+ int ret = -1;
+
+ while (!test_and_clear_bit(CSK_LOGIN_PDU_DONE, &csk->com.flags)) {
+ ret = cxgbit_wait_rxq(csk);
+ if (ret) {
+ clear_bit(CSK_LOGIN_PDU_DONE, &csk->com.flags);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+void cxgbit_get_rx_pdu(struct iscsit_conn *conn)
+{
+ struct cxgbit_sock *csk = conn->context;
+
+ while (!kthread_should_stop()) {
+ iscsit_thread_check_cpumask(conn, current, 0);
+ if (cxgbit_wait_rxq(csk))
+ return;
+ }
+}
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index f73da43cdf9e..a2dde08c8a62 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1,42 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* This file contains main functions related to the iSCSI Target Core Driver.
*
- * © Copyright 2007-2011 RisingTide Systems LLC.
- *
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ * (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
******************************************************************************/
+#include <linux/crc32c.h>
#include <linux/string.h>
#include <linux/kthread.h>
-#include <linux/crypto.h>
#include <linux/completion.h>
#include <linux/module.h>
+#include <linux/vmalloc.h>
#include <linux/idr.h>
-#include <asm/unaligned.h>
-#include <scsi/scsi_device.h>
+#include <linux/delay.h>
+#include <linux/sched/signal.h>
+#include <linux/unaligned.h>
+#include <linux/inet.h>
+#include <net/ipv6.h>
+#include <scsi/scsi_proto.h>
#include <scsi/iscsi_proto.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
-#include <target/target_core_configfs.h>
-#include "iscsi_target_core.h"
+#include <target/target_core_backend.h>
+#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_parameters.h"
#include "iscsi_target_seq_pdu_list.h"
-#include "iscsi_target_tq.h"
-#include "iscsi_target_configfs.h"
#include "iscsi_target_datain_values.h"
#include "iscsi_target_erl0.h"
#include "iscsi_target_erl1.h"
@@ -47,29 +40,27 @@
#include "iscsi_target_util.h"
#include "iscsi_target.h"
#include "iscsi_target_device.h"
-#include "iscsi_target_stat.h"
+#include <target/iscsi/iscsi_target_stat.h>
#include <target/iscsi/iscsi_transport.h>
static LIST_HEAD(g_tiqn_list);
static LIST_HEAD(g_np_list);
static DEFINE_SPINLOCK(tiqn_lock);
-static DEFINE_SPINLOCK(np_lock);
+static DEFINE_MUTEX(np_lock);
static struct idr tiqn_idr;
-struct idr sess_idr;
+DEFINE_IDA(sess_ida);
struct mutex auth_id_lock;
-spinlock_t sess_idr_lock;
struct iscsit_global *iscsit_global;
-struct kmem_cache *lio_cmd_cache;
struct kmem_cache *lio_qr_cache;
struct kmem_cache *lio_dr_cache;
struct kmem_cache *lio_ooo_cache;
struct kmem_cache *lio_r2t_cache;
-static int iscsit_handle_immediate_data(struct iscsi_cmd *,
+static int iscsit_handle_immediate_data(struct iscsit_cmd *,
struct iscsi_scsi_req *, u32);
struct iscsi_tiqn *iscsit_get_tiqn_for_login(unsigned char *buf)
@@ -130,11 +121,9 @@ struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *buf)
return ERR_PTR(-EINVAL);
}
- tiqn = kzalloc(sizeof(struct iscsi_tiqn), GFP_KERNEL);
- if (!tiqn) {
- pr_err("Unable to allocate struct iscsi_tiqn\n");
+ tiqn = kzalloc(sizeof(*tiqn), GFP_KERNEL);
+ if (!tiqn)
return ERR_PTR(-ENOMEM);
- }
sprintf(tiqn->tiqn, "%s", buf);
INIT_LIST_HEAD(&tiqn->tiqn_list);
@@ -220,11 +209,6 @@ int iscsit_access_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
spin_unlock_bh(&np->np_thread_lock);
return -1;
}
- if (np->np_login_tpg) {
- pr_err("np->np_login_tpg() is not NULL!\n");
- spin_unlock_bh(&np->np_thread_lock);
- return -1;
- }
spin_unlock_bh(&np->np_thread_lock);
/*
* Determine if the portal group is accepting storage traffic.
@@ -239,26 +223,38 @@ int iscsit_access_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
/*
* Here we serialize access across the TIQN+TPG Tuple.
*/
- ret = mutex_lock_interruptible(&tpg->np_login_lock);
- if ((ret != 0) || signal_pending(current))
+ ret = down_interruptible(&tpg->np_login_sem);
+ if (ret != 0)
return -1;
- spin_lock_bh(&np->np_thread_lock);
- np->np_login_tpg = tpg;
- spin_unlock_bh(&np->np_thread_lock);
+ spin_lock_bh(&tpg->tpg_state_lock);
+ if (tpg->tpg_state != TPG_STATE_ACTIVE) {
+ spin_unlock_bh(&tpg->tpg_state_lock);
+ up(&tpg->np_login_sem);
+ return -1;
+ }
+ spin_unlock_bh(&tpg->tpg_state_lock);
return 0;
}
-int iscsit_deaccess_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
+void iscsit_login_kref_put(struct kref *kref)
+{
+ struct iscsi_tpg_np *tpg_np = container_of(kref,
+ struct iscsi_tpg_np, tpg_np_kref);
+
+ complete(&tpg_np->tpg_np_comp);
+}
+
+int iscsit_deaccess_np(struct iscsi_np *np, struct iscsi_portal_group *tpg,
+ struct iscsi_tpg_np *tpg_np)
{
struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
- spin_lock_bh(&np->np_thread_lock);
- np->np_login_tpg = NULL;
- spin_unlock_bh(&np->np_thread_lock);
+ up(&tpg->np_login_sem);
- mutex_unlock(&tpg->np_login_lock);
+ if (tpg_np)
+ kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put);
if (tiqn)
iscsit_put_tiqn_for_login(tiqn);
@@ -267,14 +263,14 @@ int iscsit_deaccess_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
}
bool iscsit_check_np_match(
- struct __kernel_sockaddr_storage *sockaddr,
+ struct sockaddr_storage *sockaddr,
struct iscsi_np *np,
int network_transport)
{
struct sockaddr_in *sock_in, *sock_in_e;
struct sockaddr_in6 *sock_in6, *sock_in6_e;
bool ip_match = false;
- u16 port;
+ u16 port, port_e;
if (sockaddr->ss_family == AF_INET6) {
sock_in6 = (struct sockaddr_in6 *)sockaddr;
@@ -286,6 +282,7 @@ bool iscsit_check_np_match(
ip_match = true;
port = ntohs(sock_in6->sin6_port);
+ port_e = ntohs(sock_in6_e->sin6_port);
} else {
sock_in = (struct sockaddr_in *)sockaddr;
sock_in_e = (struct sockaddr_in *)&np->np_sockaddr;
@@ -294,9 +291,10 @@ bool iscsit_check_np_match(
ip_match = true;
port = ntohs(sock_in->sin_port);
+ port_e = ntohs(sock_in_e->sin_port);
}
- if ((ip_match == true) && (np->np_port == port) &&
+ if (ip_match && (port_e == port) &&
(np->np_network_transport == network_transport))
return true;
@@ -304,72 +302,63 @@ bool iscsit_check_np_match(
}
static struct iscsi_np *iscsit_get_np(
- struct __kernel_sockaddr_storage *sockaddr,
+ struct sockaddr_storage *sockaddr,
int network_transport)
{
struct iscsi_np *np;
bool match;
- spin_lock_bh(&np_lock);
+ lockdep_assert_held(&np_lock);
+
list_for_each_entry(np, &g_np_list, np_list) {
- spin_lock(&np->np_thread_lock);
+ spin_lock_bh(&np->np_thread_lock);
if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
- spin_unlock(&np->np_thread_lock);
+ spin_unlock_bh(&np->np_thread_lock);
continue;
}
match = iscsit_check_np_match(sockaddr, np, network_transport);
- if (match == true) {
+ if (match) {
/*
* Increment the np_exports reference count now to
* prevent iscsit_del_np() below from being called
* while iscsi_tpg_add_network_portal() is called.
*/
np->np_exports++;
- spin_unlock(&np->np_thread_lock);
- spin_unlock_bh(&np_lock);
+ spin_unlock_bh(&np->np_thread_lock);
return np;
}
- spin_unlock(&np->np_thread_lock);
+ spin_unlock_bh(&np->np_thread_lock);
}
- spin_unlock_bh(&np_lock);
return NULL;
}
struct iscsi_np *iscsit_add_np(
- struct __kernel_sockaddr_storage *sockaddr,
- char *ip_str,
+ struct sockaddr_storage *sockaddr,
int network_transport)
{
- struct sockaddr_in *sock_in;
- struct sockaddr_in6 *sock_in6;
struct iscsi_np *np;
int ret;
+
+ mutex_lock(&np_lock);
+
/*
 * Locate the existing struct iscsi_np if already active.
*/
np = iscsit_get_np(sockaddr, network_transport);
- if (np)
+ if (np) {
+ mutex_unlock(&np_lock);
return np;
+ }
- np = kzalloc(sizeof(struct iscsi_np), GFP_KERNEL);
+ np = kzalloc(sizeof(*np), GFP_KERNEL);
if (!np) {
- pr_err("Unable to allocate memory for struct iscsi_np\n");
+ mutex_unlock(&np_lock);
return ERR_PTR(-ENOMEM);
}
np->np_flags |= NPF_IP_NETWORK;
- if (sockaddr->ss_family == AF_INET6) {
- sock_in6 = (struct sockaddr_in6 *)sockaddr;
- snprintf(np->np_ip, IPV6_ADDRESS_SPACE, "%s", ip_str);
- np->np_port = ntohs(sock_in6->sin6_port);
- } else {
- sock_in = (struct sockaddr_in *)sockaddr;
- sprintf(np->np_ip, "%s", ip_str);
- np->np_port = ntohs(sock_in->sin_port);
- }
-
np->np_network_transport = network_transport;
spin_lock_init(&np->np_thread_lock);
init_completion(&np->np_restart_comp);
@@ -378,6 +367,7 @@ struct iscsi_np *iscsit_add_np(
ret = iscsi_target_setup_login_socket(np, sockaddr);
if (ret != 0) {
kfree(np);
+ mutex_unlock(&np_lock);
return ERR_PTR(ret);
}
@@ -386,6 +376,7 @@ struct iscsi_np *iscsit_add_np(
pr_err("Unable to create kthread: iscsi_np\n");
ret = PTR_ERR(np->np_thread);
kfree(np);
+ mutex_unlock(&np_lock);
return ERR_PTR(ret);
}
/*
@@ -396,13 +387,13 @@ struct iscsi_np *iscsit_add_np(
* point because iscsi_np has not been added to g_np_list yet.
*/
np->np_exports = 1;
+ np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
- spin_lock_bh(&np_lock);
list_add_tail(&np->np_list, &g_np_list);
- spin_unlock_bh(&np_lock);
+ mutex_unlock(&np_lock);
- pr_debug("CORE[0] - Added Network Portal: %s:%hu on %s\n",
- np->np_ip, np->np_port, np->np_transport->name);
+ pr_debug("CORE[0] - Added Network Portal: %pISpc on %s\n",
+ &np->np_sockaddr, np->np_transport->name);
return np;
}
@@ -410,25 +401,16 @@ struct iscsi_np *iscsit_add_np(
int iscsit_reset_np_thread(
struct iscsi_np *np,
struct iscsi_tpg_np *tpg_np,
- struct iscsi_portal_group *tpg)
+ struct iscsi_portal_group *tpg,
+ bool shutdown)
{
spin_lock_bh(&np->np_thread_lock);
- if (tpg && tpg_np) {
- /*
- * The reset operation need only be performed when the
- * passed struct iscsi_portal_group has a login in progress
- * to one of the network portals.
- */
- if (tpg_np->tpg_np->np_login_tpg != tpg) {
- spin_unlock_bh(&np->np_thread_lock);
- return 0;
- }
- }
if (np->np_thread_state == ISCSI_NP_THREAD_INACTIVE) {
spin_unlock_bh(&np->np_thread_lock);
return 0;
}
np->np_thread_state = ISCSI_NP_THREAD_RESET;
+ atomic_inc(&np->np_reset_count);
if (np->np_thread) {
spin_unlock_bh(&np->np_thread_lock);
@@ -438,6 +420,12 @@ int iscsit_reset_np_thread(
}
spin_unlock_bh(&np->np_thread_lock);
+ if (tpg_np && shutdown) {
+ kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put);
+
+ wait_for_completion(&tpg_np->tpg_np_comp);
+ }
+
return 0;
}
@@ -452,6 +440,7 @@ int iscsit_del_np(struct iscsi_np *np)
spin_lock_bh(&np->np_thread_lock);
np->np_exports--;
if (np->np_exports) {
+ np->enabled = true;
spin_unlock_bh(&np->np_thread_lock);
return 0;
}
@@ -465,39 +454,208 @@ int iscsit_del_np(struct iscsi_np *np)
*/
send_sig(SIGINT, np->np_thread, 1);
kthread_stop(np->np_thread);
+ np->np_thread = NULL;
}
np->np_transport->iscsit_free_np(np);
- spin_lock_bh(&np_lock);
+ mutex_lock(&np_lock);
list_del(&np->np_list);
- spin_unlock_bh(&np_lock);
+ mutex_unlock(&np_lock);
- pr_debug("CORE[0] - Removed Network Portal: %s:%hu on %s\n",
- np->np_ip, np->np_port, np->np_transport->name);
+ pr_debug("CORE[0] - Removed Network Portal: %pISpc on %s\n",
+ &np->np_sockaddr, np->np_transport->name);
iscsit_put_transport(np->np_transport);
kfree(np);
return 0;
}
-static int iscsit_immediate_queue(struct iscsi_conn *, struct iscsi_cmd *, int);
-static int iscsit_response_queue(struct iscsi_conn *, struct iscsi_cmd *, int);
+static void iscsit_get_rx_pdu(struct iscsit_conn *);
+
+int iscsit_queue_rsp(struct iscsit_conn *conn, struct iscsit_cmd *cmd)
+{
+ return iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
+}
+EXPORT_SYMBOL(iscsit_queue_rsp);
-static int iscsit_queue_rsp(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+void iscsit_aborted_task(struct iscsit_conn *conn, struct iscsit_cmd *cmd)
{
- iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
+ spin_lock_bh(&conn->cmd_lock);
+ if (!list_empty(&cmd->i_conn_node))
+ list_del_init(&cmd->i_conn_node);
+ spin_unlock_bh(&conn->cmd_lock);
+
+ __iscsit_free_cmd(cmd, true);
+}
+EXPORT_SYMBOL(iscsit_aborted_task);
+
+static u32 iscsit_crc_buf(const void *buf, u32 payload_length,
+ u32 padding, const void *pad_bytes);
+static void iscsit_tx_thread_wait_for_tcp(struct iscsit_conn *);
+
+static int
+iscsit_xmit_nondatain_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
+ const void *data_buf, u32 data_buf_len)
+{
+ struct iscsi_hdr *hdr = (struct iscsi_hdr *)cmd->pdu;
+ struct kvec *iov;
+ u32 niov = 0, tx_size = ISCSI_HDR_LEN;
+ int ret;
+
+ iov = &cmd->iov_misc[0];
+ iov[niov].iov_base = cmd->pdu;
+ iov[niov++].iov_len = ISCSI_HDR_LEN;
+
+ if (conn->conn_ops->HeaderDigest) {
+ u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+ *header_digest = iscsit_crc_buf(hdr, ISCSI_HDR_LEN, 0, NULL);
+
+ iov[0].iov_len += ISCSI_CRC_LEN;
+ tx_size += ISCSI_CRC_LEN;
+ pr_debug("Attaching CRC32C HeaderDigest"
+ " to opcode 0x%x 0x%08x\n",
+ hdr->opcode, *header_digest);
+ }
+
+ if (data_buf_len) {
+ u32 padding = ((-data_buf_len) & 3);
+
+ iov[niov].iov_base = (void *)data_buf;
+ iov[niov++].iov_len = data_buf_len;
+ tx_size += data_buf_len;
+
+ if (padding != 0) {
+ iov[niov].iov_base = &cmd->pad_bytes;
+ iov[niov++].iov_len = padding;
+ tx_size += padding;
+ pr_debug("Attaching %u additional"
+ " padding bytes.\n", padding);
+ }
+
+ if (conn->conn_ops->DataDigest) {
+ cmd->data_crc = iscsit_crc_buf(data_buf, data_buf_len,
+ padding,
+ &cmd->pad_bytes);
+ iov[niov].iov_base = &cmd->data_crc;
+ iov[niov++].iov_len = ISCSI_CRC_LEN;
+ tx_size += ISCSI_CRC_LEN;
+ pr_debug("Attached DataDigest for %u"
+ " bytes opcode 0x%x, CRC 0x%08x\n",
+ data_buf_len, hdr->opcode, cmd->data_crc);
+ }
+ }
+
+ cmd->iov_misc_count = niov;
+ cmd->tx_size = tx_size;
+
+ ret = iscsit_send_tx_data(cmd, conn, 1);
+ if (ret < 0) {
+ iscsit_tx_thread_wait_for_tcp(conn);
+ return ret;
+ }
+
return 0;
}
+static int iscsit_map_iovec(struct iscsit_cmd *cmd, struct kvec *iov, int nvec,
+ u32 data_offset, u32 data_length);
+static void iscsit_unmap_iovec(struct iscsit_cmd *);
+static u32 iscsit_crc_sglist(const struct iscsit_cmd *cmd, u32 data_length,
+ u32 padding, const u8 *pad_bytes);
+static int
+iscsit_xmit_datain_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
+ const struct iscsi_datain *datain)
+{
+ struct kvec *iov;
+ u32 iov_count = 0, tx_size = 0;
+ int ret, iov_ret;
+
+ iov = &cmd->iov_data[0];
+ iov[iov_count].iov_base = cmd->pdu;
+ iov[iov_count++].iov_len = ISCSI_HDR_LEN;
+ tx_size += ISCSI_HDR_LEN;
+
+ if (conn->conn_ops->HeaderDigest) {
+ u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+ *header_digest = iscsit_crc_buf(cmd->pdu, ISCSI_HDR_LEN, 0,
+ NULL);
+ iov[0].iov_len += ISCSI_CRC_LEN;
+ tx_size += ISCSI_CRC_LEN;
+
+ pr_debug("Attaching CRC32 HeaderDigest for DataIN PDU 0x%08x\n",
+ *header_digest);
+ }
+
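+	/*
+	 * Two iovec entries are held in reserve for the pad bytes and
+	 * the trailing DataDigest.
+	 */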
+ iov_ret = iscsit_map_iovec(cmd, &cmd->iov_data[iov_count],
+ cmd->orig_iov_data_count - (iov_count + 2),
+ datain->offset, datain->length);
+ if (iov_ret < 0)
+ return -1;
+
+ iov_count += iov_ret;
+ tx_size += datain->length;
+
+ cmd->padding = ((-datain->length) & 3);
+ if (cmd->padding) {
+ iov[iov_count].iov_base = cmd->pad_bytes;
+ iov[iov_count++].iov_len = cmd->padding;
+ tx_size += cmd->padding;
+
+ pr_debug("Attaching %u padding bytes\n", cmd->padding);
+ }
+
+ if (conn->conn_ops->DataDigest) {
+ cmd->data_crc = iscsit_crc_sglist(cmd, datain->length,
+ cmd->padding, cmd->pad_bytes);
+ iov[iov_count].iov_base = &cmd->data_crc;
+ iov[iov_count++].iov_len = ISCSI_CRC_LEN;
+ tx_size += ISCSI_CRC_LEN;
+
+ pr_debug("Attached CRC32C DataDigest %d bytes, crc 0x%08x\n",
+ datain->length + cmd->padding, cmd->data_crc);
+ }
+
+ cmd->iov_data_count = iov_count;
+ cmd->tx_size = tx_size;
+
+ ret = iscsit_fe_sendpage_sg(cmd, conn);
+
+ iscsit_unmap_iovec(cmd);
+
+ if (ret < 0) {
+ iscsit_tx_thread_wait_for_tcp(conn);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int iscsit_xmit_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
+ struct iscsi_datain_req *dr, const void *buf,
+ u32 buf_len)
+{
+ if (dr)
+ return iscsit_xmit_datain_pdu(conn, cmd, buf);
+ else
+ return iscsit_xmit_nondatain_pdu(conn, cmd, buf, buf_len);
+}
+
+static enum target_prot_op iscsit_get_sup_prot_ops(struct iscsit_conn *conn)
+{
+ return TARGET_PROT_NORMAL;
+}
+
static struct iscsit_transport iscsi_target_transport = {
.name = "iSCSI/TCP",
.transport_type = ISCSI_TCP,
+ .rdma_shutdown = false,
.owner = NULL,
.iscsit_setup_np = iscsit_setup_np,
.iscsit_accept_np = iscsit_accept_np,
.iscsit_free_np = iscsit_free_np,
- .iscsit_alloc_cmd = iscsit_alloc_cmd,
.iscsit_get_login_rx = iscsit_get_login_rx,
.iscsit_put_login_tx = iscsit_put_login_tx,
.iscsit_get_dataout = iscsit_build_r2ts_for_cmd,
@@ -505,55 +663,47 @@ static struct iscsit_transport iscsi_target_transport = {
.iscsit_response_queue = iscsit_response_queue,
.iscsit_queue_data_in = iscsit_queue_rsp,
.iscsit_queue_status = iscsit_queue_rsp,
+ .iscsit_aborted_task = iscsit_aborted_task,
+ .iscsit_xmit_pdu = iscsit_xmit_pdu,
+ .iscsit_get_rx_pdu = iscsit_get_rx_pdu,
+ .iscsit_get_sup_prot_ops = iscsit_get_sup_prot_ops,
};
static int __init iscsi_target_init_module(void)
{
- int ret = 0;
+ int ret = 0, size;
pr_debug("iSCSI-Target "ISCSIT_VERSION"\n");
-
- iscsit_global = kzalloc(sizeof(struct iscsit_global), GFP_KERNEL);
- if (!iscsit_global) {
- pr_err("Unable to allocate memory for iscsit_global\n");
+ iscsit_global = kzalloc(sizeof(*iscsit_global), GFP_KERNEL);
+ if (!iscsit_global)
return -1;
- }
+
+ spin_lock_init(&iscsit_global->ts_bitmap_lock);
mutex_init(&auth_id_lock);
- spin_lock_init(&sess_idr_lock);
idr_init(&tiqn_idr);
- idr_init(&sess_idr);
- ret = iscsi_target_register_configfs();
- if (ret < 0)
+ ret = target_register_template(&iscsi_ops);
+ if (ret)
goto out;
- ret = iscsi_thread_set_init();
- if (ret < 0)
+ size = BITS_TO_LONGS(ISCSIT_BITMAP_BITS) * sizeof(long);
+ iscsit_global->ts_bitmap = vzalloc(size);
+ if (!iscsit_global->ts_bitmap)
goto configfs_out;
- if (iscsi_allocate_thread_sets(TARGET_THREAD_SET_COUNT) !=
- TARGET_THREAD_SET_COUNT) {
- pr_err("iscsi_allocate_thread_sets() returned"
- " unexpected value!\n");
- goto ts_out1;
- }
-
- lio_cmd_cache = kmem_cache_create("lio_cmd_cache",
- sizeof(struct iscsi_cmd), __alignof__(struct iscsi_cmd),
- 0, NULL);
- if (!lio_cmd_cache) {
- pr_err("Unable to kmem_cache_create() for"
- " lio_cmd_cache\n");
- goto ts_out2;
+ if (!zalloc_cpumask_var(&iscsit_global->allowed_cpumask, GFP_KERNEL)) {
+ pr_err("Unable to allocate iscsit_global->allowed_cpumask\n");
+ goto bitmap_out;
}
+ cpumask_setall(iscsit_global->allowed_cpumask);
lio_qr_cache = kmem_cache_create("lio_qr_cache",
sizeof(struct iscsi_queue_req),
__alignof__(struct iscsi_queue_req), 0, NULL);
if (!lio_qr_cache) {
- pr_err("nable to kmem_cache_create() for"
+ pr_err("Unable to kmem_cache_create() for"
" lio_qr_cache\n");
- goto cmd_out;
+ goto cpumask_out;
}
lio_dr_cache = kmem_cache_create("lio_dr_cache",
@@ -590,6 +740,7 @@ static int __init iscsi_target_init_module(void)
return ret;
r2t_out:
+ iscsit_unregister_transport(&iscsi_target_transport);
kmem_cache_destroy(lio_r2t_cache);
ooo_out:
kmem_cache_destroy(lio_ooo_cache);
@@ -597,14 +748,15 @@ dr_out:
kmem_cache_destroy(lio_dr_cache);
qr_out:
kmem_cache_destroy(lio_qr_cache);
-cmd_out:
- kmem_cache_destroy(lio_cmd_cache);
-ts_out2:
- iscsi_deallocate_thread_sets();
-ts_out1:
- iscsi_thread_set_free();
+cpumask_out:
+ free_cpumask_var(iscsit_global->allowed_cpumask);
+bitmap_out:
+ vfree(iscsit_global->ts_bitmap);
configfs_out:
- iscsi_target_deregister_configfs();
+	/* XXX: this probably wants to be its own unwind step. */
+ if (iscsit_global->discovery_tpg)
+ iscsit_tpg_disable_portal_group(iscsit_global->discovery_tpg, 1);
+ target_unregister_template(&iscsi_ops);
out:
kfree(iscsit_global);
return -ENOMEM;
@@ -612,29 +764,34 @@ out:
static void __exit iscsi_target_cleanup_module(void)
{
- iscsi_deallocate_thread_sets();
- iscsi_thread_set_free();
iscsit_release_discovery_tpg();
iscsit_unregister_transport(&iscsi_target_transport);
- kmem_cache_destroy(lio_cmd_cache);
kmem_cache_destroy(lio_qr_cache);
kmem_cache_destroy(lio_dr_cache);
kmem_cache_destroy(lio_ooo_cache);
kmem_cache_destroy(lio_r2t_cache);
- iscsi_target_deregister_configfs();
+ /*
+ * Shutdown discovery sessions and disable discovery TPG
+ */
+ if (iscsit_global->discovery_tpg)
+ iscsit_tpg_disable_portal_group(iscsit_global->discovery_tpg, 1);
+
+ target_unregister_template(&iscsi_ops);
+ free_cpumask_var(iscsit_global->allowed_cpumask);
+ vfree(iscsit_global->ts_bitmap);
kfree(iscsit_global);
}
-static int iscsit_add_reject(
- struct iscsi_conn *conn,
+int iscsit_add_reject(
+ struct iscsit_conn *conn,
u8 reason,
unsigned char *buf)
{
- struct iscsi_cmd *cmd;
+ struct iscsit_cmd *cmd;
- cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+ cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
if (!cmd)
return -1;
@@ -657,14 +814,16 @@ static int iscsit_add_reject(
return -1;
}
+EXPORT_SYMBOL(iscsit_add_reject);
static int iscsit_add_reject_from_cmd(
- struct iscsi_cmd *cmd,
+ struct iscsit_cmd *cmd,
u8 reason,
bool add_to_conn,
unsigned char *buf)
{
- struct iscsi_conn *conn;
+ struct iscsit_conn *conn;
+ const bool do_put = cmd->se_cmd.se_tfo != NULL;
if (!cmd->conn) {
pr_err("cmd->conn is NULL for ITT: 0x%08x\n",
@@ -695,49 +854,62 @@ static int iscsit_add_reject_from_cmd(
* Perform the kref_put now if se_cmd has already been setup by
 * iscsit_setup_scsi_cmd()
*/
- if (cmd->se_cmd.se_tfo != NULL) {
+ if (do_put) {
pr_debug("iscsi reject: calling target_put_sess_cmd >>>>>>\n");
- target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
+ target_put_sess_cmd(&cmd->se_cmd);
}
return -1;
}
-static int iscsit_add_reject_cmd(struct iscsi_cmd *cmd, u8 reason,
+static int iscsit_add_reject_cmd(struct iscsit_cmd *cmd, u8 reason,
unsigned char *buf)
{
return iscsit_add_reject_from_cmd(cmd, reason, true, buf);
}
-int iscsit_reject_cmd(struct iscsi_cmd *cmd, u8 reason, unsigned char *buf)
+int iscsit_reject_cmd(struct iscsit_cmd *cmd, u8 reason, unsigned char *buf)
{
return iscsit_add_reject_from_cmd(cmd, reason, false, buf);
}
+EXPORT_SYMBOL(iscsit_reject_cmd);
/*
* Map some portion of the allocated scatterlist to an iovec, suitable for
* kernel sockets to copy data in/out.
*/
-static int iscsit_map_iovec(
- struct iscsi_cmd *cmd,
- struct kvec *iov,
- u32 data_offset,
- u32 data_length)
+static int iscsit_map_iovec(struct iscsit_cmd *cmd, struct kvec *iov, int nvec,
+ u32 data_offset, u32 data_length)
{
- u32 i = 0;
+ u32 i = 0, orig_data_length = data_length;
struct scatterlist *sg;
unsigned int page_off;
/*
* We know each entry in t_data_sg contains a page.
*/
- sg = &cmd->se_cmd.t_data_sg[data_offset / PAGE_SIZE];
+ u32 ent = data_offset / PAGE_SIZE;
+
+ if (!data_length)
+ return 0;
+
+ if (ent >= cmd->se_cmd.t_data_nents) {
+ pr_err("Initial page entry out-of-bounds\n");
+ goto overflow;
+ }
+
+ sg = &cmd->se_cmd.t_data_sg[ent];
page_off = (data_offset % PAGE_SIZE);
cmd->first_data_sg = sg;
cmd->first_data_sg_off = page_off;
while (data_length) {
- u32 cur_len = min_t(u32, data_length, sg->length - page_off);
+ u32 cur_len;
+
+ if (WARN_ON_ONCE(!sg || i >= nvec))
+ goto overflow;
+
+ cur_len = min_t(u32, data_length, sg->length - page_off);
iov[i].iov_base = kmap(sg_page(sg)) + sg->offset + page_off;
iov[i].iov_len = cur_len;
@@ -751,9 +923,19 @@ static int iscsit_map_iovec(
cmd->kmapped_nents = i;
return i;
+
+overflow:
+ pr_err("offset %d + length %d overflow; %d/%d; sg-list:\n",
+ data_offset, orig_data_length, i, nvec);
+ for_each_sg(cmd->se_cmd.t_data_sg, sg,
+ cmd->se_cmd.t_data_nents, i) {
+ pr_err("[%d] off %d len %d\n",
+ i, sg->offset, sg->length);
+ }
+ return -1;
}
-static void iscsit_unmap_iovec(struct iscsi_cmd *cmd)
+static void iscsit_unmap_iovec(struct iscsit_cmd *cmd)
{
u32 i;
struct scatterlist *sg;
@@ -764,9 +946,10 @@ static void iscsit_unmap_iovec(struct iscsi_cmd *cmd)
kunmap(sg_page(&sg[i]));
}
-static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn)
+static void iscsit_ack_from_expstatsn(struct iscsit_conn *conn, u32 exp_statsn)
{
- struct iscsi_cmd *cmd;
+ LIST_HEAD(ack_list);
+ struct iscsit_cmd *cmd, *cmd_p;
conn->exp_statsn = exp_statsn;
@@ -774,53 +957,49 @@ static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn)
return;
spin_lock_bh(&conn->cmd_lock);
- list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
+ list_for_each_entry_safe(cmd, cmd_p, &conn->conn_cmd_list, i_conn_node) {
spin_lock(&cmd->istate_lock);
if ((cmd->i_state == ISTATE_SENT_STATUS) &&
iscsi_sna_lt(cmd->stat_sn, exp_statsn)) {
cmd->i_state = ISTATE_REMOVE;
spin_unlock(&cmd->istate_lock);
- iscsit_add_cmd_to_immediate_queue(cmd, conn,
- cmd->i_state);
+ list_move_tail(&cmd->i_conn_node, &ack_list);
continue;
}
spin_unlock(&cmd->istate_lock);
}
spin_unlock_bh(&conn->cmd_lock);
+
+ list_for_each_entry_safe(cmd, cmd_p, &ack_list, i_conn_node) {
+ list_del_init(&cmd->i_conn_node);
+ iscsit_free_cmd(cmd, false);
+ }
}
-static int iscsit_allocate_iovecs(struct iscsi_cmd *cmd)
+static int iscsit_allocate_iovecs(struct iscsit_cmd *cmd)
{
u32 iov_count = max(1UL, DIV_ROUND_UP(cmd->se_cmd.data_length, PAGE_SIZE));
iov_count += ISCSI_IOV_DATA_BUFFER;
-
- cmd->iov_data = kzalloc(iov_count * sizeof(struct kvec), GFP_KERNEL);
- if (!cmd->iov_data) {
- pr_err("Unable to allocate cmd->iov_data\n");
+ cmd->iov_data = kcalloc(iov_count, sizeof(*cmd->iov_data), GFP_KERNEL);
+ if (!cmd->iov_data)
return -ENOMEM;
- }
cmd->orig_iov_data_count = iov_count;
return 0;
}
-int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+int iscsit_setup_scsi_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
unsigned char *buf)
{
int data_direction, payload_length;
+ struct iscsi_ecdb_ahdr *ecdb_ahdr;
struct iscsi_scsi_req *hdr;
int iscsi_task_attr;
+ unsigned char *cdb;
int sam_task_attr;
- spin_lock_bh(&conn->sess->session_stats_lock);
- conn->sess->cmd_pdus++;
- if (conn->sess->se_sess->se_node_acl) {
- spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock);
- conn->sess->se_sess->se_node_acl->num_cmds++;
- spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock);
- }
- spin_unlock_bh(&conn->sess->session_stats_lock);
+ atomic_long_inc(&conn->sess->cmd_pdus);
hdr = (struct iscsi_scsi_req *) buf;
payload_length = ntoh24(hdr->dlength);
@@ -838,24 +1017,22 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
if (((hdr->flags & ISCSI_FLAG_CMD_READ) ||
(hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) {
/*
- * Vmware ESX v3.0 uses a modified Cisco Initiator (v3.4.2)
- * that adds support for RESERVE/RELEASE. There is a bug
- * add with this new functionality that sets R/W bits when
- * neither CDB carries any READ or WRITE datapayloads.
+ * From RFC-3720 Section 10.3.1:
+ *
+ * "Either or both of R and W MAY be 1 when either the
+ * Expected Data Transfer Length and/or Bidirectional Read
+ * Expected Data Transfer Length are 0"
+ *
+	 * For this case, go ahead and clear the unnecessary bits
+ * to avoid any confusion with ->data_direction.
*/
- if ((hdr->cdb[0] == 0x16) || (hdr->cdb[0] == 0x17)) {
- hdr->flags &= ~ISCSI_FLAG_CMD_READ;
- hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
- goto done;
- }
+ hdr->flags &= ~ISCSI_FLAG_CMD_READ;
+ hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
- pr_err("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
+ pr_warn("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
" set when Expected Data Transfer Length is 0 for"
- " CDB: 0x%02x. Bad iSCSI Initiator.\n", hdr->cdb[0]);
- return iscsit_add_reject_cmd(cmd,
- ISCSI_REASON_BOOKMARK_INVALID, buf);
+ " CDB: 0x%02x, Fixing up flags\n", hdr->cdb[0]);
}
-done:
if (!(hdr->flags & ISCSI_FLAG_CMD_READ) &&
!(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) {
@@ -920,6 +1097,27 @@ done:
ISCSI_REASON_BOOKMARK_INVALID, buf);
}
+ cdb = hdr->cdb;
+
+ if (hdr->hlength) {
+ ecdb_ahdr = (struct iscsi_ecdb_ahdr *) (hdr + 1);
+ if (ecdb_ahdr->ahstype != ISCSI_AHSTYPE_CDB) {
+ pr_err("Additional Header Segment type %d not supported!\n",
+ ecdb_ahdr->ahstype);
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_CMD_NOT_SUPPORTED, buf);
+ }
+
+ cdb = kmalloc(be16_to_cpu(ecdb_ahdr->ahslength) + 15,
+ GFP_KERNEL);
+ if (cdb == NULL)
+ return iscsit_add_reject_cmd(cmd,
+ ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
+ memcpy(cdb, hdr->cdb, ISCSI_CDB_SIZE);
+ memcpy(cdb + ISCSI_CDB_SIZE, ecdb_ahdr->ecdb,
+ be16_to_cpu(ecdb_ahdr->ahslength) - 1);
+ }
+
data_direction = (hdr->flags & ISCSI_FLAG_CMD_WRITE) ? DMA_TO_DEVICE :
(hdr->flags & ISCSI_FLAG_CMD_READ) ? DMA_FROM_DEVICE :
DMA_NONE;
@@ -931,17 +1129,17 @@ done:
*/
if ((iscsi_task_attr == ISCSI_ATTR_UNTAGGED) ||
(iscsi_task_attr == ISCSI_ATTR_SIMPLE))
- sam_task_attr = MSG_SIMPLE_TAG;
+ sam_task_attr = TCM_SIMPLE_TAG;
else if (iscsi_task_attr == ISCSI_ATTR_ORDERED)
- sam_task_attr = MSG_ORDERED_TAG;
+ sam_task_attr = TCM_ORDERED_TAG;
else if (iscsi_task_attr == ISCSI_ATTR_HEAD_OF_QUEUE)
- sam_task_attr = MSG_HEAD_TAG;
+ sam_task_attr = TCM_HEAD_TAG;
else if (iscsi_task_attr == ISCSI_ATTR_ACA)
- sam_task_attr = MSG_ACA_TAG;
+ sam_task_attr = TCM_ACA_TAG;
else {
pr_debug("Unknown iSCSI Task Attribute: 0x%02x, using"
- " MSG_SIMPLE_TAG\n", iscsi_task_attr);
- sam_task_attr = MSG_SIMPLE_TAG;
+ " TCM_SIMPLE_TAG\n", iscsi_task_attr);
+ sam_task_attr = TCM_SIMPLE_TAG;
}
cmd->iscsi_opcode = ISCSI_OP_SCSI_CMD;
@@ -954,13 +1152,9 @@ done:
cmd->cmd_flags |= ICF_NON_IMMEDIATE_UNSOLICITED_DATA;
conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
- if (hdr->flags & ISCSI_FLAG_CMD_READ) {
- spin_lock_bh(&conn->sess->ttt_lock);
- cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
- if (cmd->targ_xfer_tag == 0xFFFFFFFF)
- cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
- spin_unlock_bh(&conn->sess->ttt_lock);
- } else if (hdr->flags & ISCSI_FLAG_CMD_WRITE)
+ if (hdr->flags & ISCSI_FLAG_CMD_READ)
+ cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
+ else
cmd->targ_xfer_tag = 0xFFFFFFFF;
cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
@@ -971,9 +1165,12 @@ done:
struct iscsi_datain_req *dr;
dr = iscsit_allocate_datain_req();
- if (!dr)
+ if (!dr) {
+ if (cdb != hdr->cdb)
+ kfree(cdb);
return iscsit_add_reject_cmd(cmd,
ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
+ }
iscsit_attach_datain_req(cmd, dr);
}
@@ -981,33 +1178,43 @@ done:
/*
* Initialize struct se_cmd descriptor from target_core_mod infrastructure
*/
- transport_init_se_cmd(&cmd->se_cmd, &lio_target_fabric_configfs->tf_ops,
- conn->sess->se_sess, be32_to_cpu(hdr->data_length),
- cmd->data_direction, sam_task_attr,
- cmd->sense_buffer + 2);
+ __target_init_cmd(&cmd->se_cmd, &iscsi_ops,
+ conn->sess->se_sess, be32_to_cpu(hdr->data_length),
+ cmd->data_direction, sam_task_attr,
+ cmd->sense_buffer + 2, scsilun_to_int(&hdr->lun),
+ conn->cmd_cnt);
pr_debug("Got SCSI Command, ITT: 0x%08x, CmdSN: 0x%08x,"
" ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt,
hdr->cmdsn, be32_to_cpu(hdr->data_length), payload_length,
conn->cid);
- target_get_sess_cmd(conn->sess->se_sess, &cmd->se_cmd, true);
+ target_get_sess_cmd(&cmd->se_cmd, true);
- cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd,
- scsilun_to_int(&hdr->lun));
- if (cmd->sense_reason)
- goto attach_cmd;
+ cmd->se_cmd.tag = (__force u32)cmd->init_task_tag;
+ cmd->sense_reason = target_cmd_init_cdb(&cmd->se_cmd, cdb,
+ GFP_KERNEL);
+
+ if (cdb != hdr->cdb)
+ kfree(cdb);
- cmd->sense_reason = target_setup_cmd_from_cdb(&cmd->se_cmd, hdr->cdb);
if (cmd->sense_reason) {
if (cmd->sense_reason == TCM_OUT_OF_RESOURCES) {
return iscsit_add_reject_cmd(cmd,
- ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
+ ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
}
goto attach_cmd;
}
+ cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd);
+ if (cmd->sense_reason)
+ goto attach_cmd;
+
+ cmd->sense_reason = target_cmd_parse_cdb(&cmd->se_cmd);
+ if (cmd->sense_reason)
+ goto attach_cmd;
+
if (iscsit_build_pdu_and_seq_lists(cmd, payload_length) < 0) {
return iscsit_add_reject_cmd(cmd,
ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
@@ -1017,17 +1224,11 @@ attach_cmd:
spin_lock_bh(&conn->cmd_lock);
list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
spin_unlock_bh(&conn->cmd_lock);
- /*
- * Check if we need to delay processing because of ALUA
- * Active/NonOptimized primary access state..
- */
- core_alua_check_nonop_delay(&cmd->se_cmd);
-
return 0;
}
EXPORT_SYMBOL(iscsit_setup_scsi_cmd);
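A note on the Extended CDB hunk above: RFC 3720 defines the Extended CDB AHS with AHSLength = CDBLength - 15, that is, one reserved byte followed by the CDB bytes beyond the 16 carried in the basic header segment. The kmalloc() size of ahslength + 15 is therefore exactly the full CDB length, and the memcpy() tail of ahslength - 1 bytes is exactly the extension. A stand-alone check of that arithmetic (hypothetical user-space code, not part of the patch):

#include <assert.h>

#define ISCSI_CDB_SIZE	16	/* CDB bytes carried in the BHS */

int main(void)
{
	unsigned int cdb_len;

	/* RFC 3720: AHSLength = CDBLength - 15 for an Extended CDB AHS */
	for (cdb_len = 17; cdb_len <= 260; cdb_len++) {
		unsigned int ahslength = cdb_len - 15;
		unsigned int ext_bytes = ahslength - 1;	/* reserved byte excluded */

		assert(ahslength + 15 == cdb_len);		/* kmalloc() size above */
		assert(ISCSI_CDB_SIZE + ext_bytes == cdb_len);	/* 16-byte CDB + memcpy() tail */
	}
	return 0;
}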
-void iscsit_set_unsoliticed_dataout(struct iscsi_cmd *cmd)
+void iscsit_set_unsolicited_dataout(struct iscsit_cmd *cmd)
{
iscsit_set_dataout_sequence_values(cmd);
@@ -1035,9 +1236,9 @@ void iscsit_set_unsoliticed_dataout(struct iscsi_cmd *cmd)
iscsit_start_dataout_timer(cmd, cmd->conn);
spin_unlock_bh(&cmd->dataout_timeout_lock);
}
-EXPORT_SYMBOL(iscsit_set_unsoliticed_dataout);
+EXPORT_SYMBOL(iscsit_set_unsolicited_dataout);
-int iscsit_process_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+int iscsit_process_scsi_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
struct iscsi_scsi_req *hdr)
{
int cmdsn_ret = 0;
@@ -1057,7 +1258,7 @@ int iscsit_process_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
return -1;
else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
- target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
+ target_put_sess_cmd(&cmd->se_cmd);
return 0;
}
}
@@ -1069,11 +1270,11 @@ int iscsit_process_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
*/
if (!cmd->immediate_data) {
if (!cmd->sense_reason && cmd->unsolicited_data)
- iscsit_set_unsoliticed_dataout(cmd);
+ iscsit_set_unsolicited_dataout(cmd);
if (!cmd->sense_reason)
return 0;
- target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
+ target_put_sess_cmd(&cmd->se_cmd);
return 0;
}
@@ -1082,13 +1283,8 @@ int iscsit_process_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
* execution. These exceptions are processed in CmdSN order using
* iscsit_check_received_cmdsn() in iscsit_get_immediate_data() below.
*/
- if (cmd->sense_reason) {
- if (cmd->reject_reason)
- return 0;
-
- target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
+ if (cmd->sense_reason)
return 1;
- }
/*
* Call directly into transport_generic_new_cmd() to perform
* the backend memory allocation.
@@ -1102,20 +1298,31 @@ int iscsit_process_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
EXPORT_SYMBOL(iscsit_process_scsi_cmd);
static int
-iscsit_get_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
+iscsit_get_immediate_data(struct iscsit_cmd *cmd, struct iscsi_scsi_req *hdr,
bool dump_payload)
{
- struct iscsi_conn *conn = cmd->conn;
int cmdsn_ret = 0, immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
+ int rc;
+
/*
* Special case for Unsupported SAM WRITE Opcodes and ImmediateData=Yes.
*/
- if (dump_payload == true)
- goto after_immediate_data;
+ if (dump_payload) {
+ u32 length = min(cmd->se_cmd.data_length - cmd->write_data_done,
+ cmd->first_burst_len);
+
+ pr_debug("Dumping min(%d - %d, %d) = %d bytes of immediate data\n",
+ cmd->se_cmd.data_length, cmd->write_data_done,
+ cmd->first_burst_len, length);
+ rc = iscsit_dump_data_payload(cmd->conn, length, 1);
+ pr_debug("Finished dumping immediate data\n");
+ if (rc < 0)
+ immed_ret = IMMEDIATE_DATA_CANNOT_RECOVER;
+ } else {
+ immed_ret = iscsit_handle_immediate_data(cmd, hdr,
+ cmd->first_burst_len);
+ }
- immed_ret = iscsit_handle_immediate_data(cmd, hdr,
- cmd->first_burst_len);
-after_immediate_data:
if (immed_ret == IMMEDIATE_DATA_NORMAL_OPERATION) {
/*
* A PDU/CmdSN carrying Immediate Data passed
@@ -1124,22 +1331,15 @@ after_immediate_data:
*/
cmdsn_ret = iscsit_sequence_cmd(cmd->conn, cmd,
(unsigned char *)hdr, hdr->cmdsn);
- if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) {
+ if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
return -1;
- } else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
- target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
- return 0;
- }
- if (cmd->sense_reason) {
- int rc;
+ if (cmd->sense_reason || cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
+ target_put_sess_cmd(&cmd->se_cmd);
- rc = iscsit_dump_data_payload(cmd->conn,
- cmd->first_burst_len, 1);
- target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
- return rc;
+ return 0;
} else if (cmd->unsolicited_data)
- iscsit_set_unsoliticed_dataout(cmd);
+ iscsit_set_unsolicited_dataout(cmd);
} else if (immed_ret == IMMEDIATE_DATA_ERL1_CRC_FAILURE) {
/*
@@ -1162,7 +1362,7 @@ after_immediate_data:
}
static int
-iscsit_handle_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+iscsit_handle_scsi_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
unsigned char *buf)
{
struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
@@ -1177,7 +1377,7 @@ iscsit_handle_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
* traditional iSCSI block I/O.
*/
if (iscsit_allocate_iovecs(cmd) < 0) {
- return iscsit_add_reject_cmd(cmd,
+ return iscsit_reject_cmd(cmd,
ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
}
immed_data = cmd->immediate_data;
@@ -1194,106 +1394,58 @@ iscsit_handle_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
return iscsit_get_immediate_data(cmd, hdr, dump_payload);
}
-static u32 iscsit_do_crypto_hash_sg(
- struct hash_desc *hash,
- struct iscsi_cmd *cmd,
- u32 data_offset,
- u32 data_length,
- u32 padding,
- u8 *pad_bytes)
+static u32 iscsit_crc_sglist(const struct iscsit_cmd *cmd, u32 data_length,
+ u32 padding, const u8 *pad_bytes)
{
- u32 data_crc;
- u32 i;
- struct scatterlist *sg;
- unsigned int page_off;
-
- crypto_hash_init(hash);
+ struct scatterlist *sg = cmd->first_data_sg;
+ unsigned int page_off = cmd->first_data_sg_off;
+ u32 crc = ~0;
- sg = cmd->first_data_sg;
- page_off = cmd->first_data_sg_off;
-
- i = 0;
while (data_length) {
- u32 cur_len = min_t(u32, data_length, (sg[i].length - page_off));
+ u32 cur_len = min_t(u32, data_length, sg->length - page_off);
+ const void *virt;
- crypto_hash_update(hash, &sg[i], cur_len);
+ virt = kmap_local_page(sg_page(sg)) + sg->offset + page_off;
+ crc = crc32c(crc, virt, cur_len);
+ kunmap_local(virt);
+
+ /* iscsit_map_iovec has already checked for invalid sg pointers */
+ sg = sg_next(sg);
- data_length -= cur_len;
page_off = 0;
- i++;
+ data_length -= cur_len;
}
- if (padding) {
- struct scatterlist pad_sg;
-
- sg_init_one(&pad_sg, pad_bytes, padding);
- crypto_hash_update(hash, &pad_sg, padding);
- }
- crypto_hash_final(hash, (u8 *) &data_crc);
+ if (padding)
+ crc = crc32c(crc, pad_bytes, padding);
- return data_crc;
+ return ~crc;
}
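The iscsit_crc_sglist() rewrite above follows the standard CRC32C convention: seed with ~0, chain the running value across chunks, and invert the final result. Under that convention, walking the scatterlist in pieces yields the same digest as one pass over a flat buffer. A minimal sketch of that property, assuming the kernel's u32, min() and crc32c() helpers are available:

/* Hypothetical helper, not part of the patch: digest a linear buffer
 * in arbitrary chunks, mirroring the per-sg loop above.
 */
static u32 crc_chunked(const u8 *buf, u32 len, u32 chunk)
{
	u32 crc = ~0;

	while (len) {
		u32 cur = min(len, chunk);

		crc = crc32c(crc, buf, cur);	/* chain the running value */
		buf += cur;
		len -= cur;
	}
	return ~crc;	/* final inversion, as in iscsit_crc_sglist() */
}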
-static void iscsit_do_crypto_hash_buf(
- struct hash_desc *hash,
- const void *buf,
- u32 payload_length,
- u32 padding,
- u8 *pad_bytes,
- u8 *data_crc)
+static u32 iscsit_crc_buf(const void *buf, u32 payload_length,
+ u32 padding, const void *pad_bytes)
{
- struct scatterlist sg;
+ u32 crc = ~0;
- crypto_hash_init(hash);
+ crc = crc32c(crc, buf, payload_length);
- sg_init_one(&sg, buf, payload_length);
- crypto_hash_update(hash, &sg, payload_length);
+ if (padding)
+ crc = crc32c(crc, pad_bytes, padding);
- if (padding) {
- sg_init_one(&sg, pad_bytes, padding);
- crypto_hash_update(hash, &sg, padding);
- }
- crypto_hash_final(hash, data_crc);
+ return ~crc;
}
int
-iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf,
- struct iscsi_cmd **out_cmd)
+__iscsit_check_dataout_hdr(struct iscsit_conn *conn, void *buf,
+ struct iscsit_cmd *cmd, u32 payload_length,
+ bool *success)
{
- struct iscsi_data *hdr = (struct iscsi_data *)buf;
- struct iscsi_cmd *cmd = NULL;
+ struct iscsi_data *hdr = buf;
struct se_cmd *se_cmd;
- u32 payload_length = ntoh24(hdr->dlength);
int rc;
- if (!payload_length) {
- pr_err("DataOUT payload is ZERO, protocol error.\n");
- return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
- buf);
- }
-
/* iSCSI write */
- spin_lock_bh(&conn->sess->session_stats_lock);
- conn->sess->rx_data_octets += payload_length;
- if (conn->sess->se_sess->se_node_acl) {
- spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock);
- conn->sess->se_sess->se_node_acl->write_bytes += payload_length;
- spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock);
- }
- spin_unlock_bh(&conn->sess->session_stats_lock);
-
- if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
- pr_err("DataSegmentLength: %u is greater than"
- " MaxXmitDataSegmentLength: %u\n", payload_length,
- conn->conn_ops->MaxXmitDataSegmentLength);
- return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
- buf);
- }
-
- cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt,
- payload_length);
- if (!cmd)
- return 0;
+ atomic_long_add(payload_length, &conn->sess->rx_data_octets);
pr_debug("Got DataOut ITT: 0x%08x, TTT: 0x%08x,"
" DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n",
@@ -1310,15 +1462,15 @@ iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf,
if (cmd->data_direction != DMA_TO_DEVICE) {
pr_err("Command ITT: 0x%08x received DataOUT for a"
" NON-WRITE command.\n", cmd->init_task_tag);
- return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf);
+ return iscsit_dump_data_payload(conn, payload_length, 1);
}
se_cmd = &cmd->se_cmd;
iscsit_mod_dataout_timer(cmd);
if ((be32_to_cpu(hdr->offset) + payload_length) > cmd->se_cmd.data_length) {
- pr_err("DataOut Offset: %u, Length %u greater than"
- " iSCSI Command EDTL %u, protocol error.\n",
- hdr->offset, payload_length, cmd->se_cmd.data_length);
+ pr_err("DataOut Offset: %u, Length %u greater than iSCSI Command EDTL %u, protocol error.\n",
+ be32_to_cpu(hdr->offset), payload_length,
+ cmd->se_cmd.data_length);
return iscsit_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_INVALID, buf);
}
@@ -1347,13 +1499,11 @@ iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf,
/*
* Check if a delayed TASK_ABORTED status needs to
* be sent now if the ISCSI_FLAG_CMD_FINAL has been
- * received with the unsolicitied data out.
+ * received with the unsolicited data out.
*/
if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
iscsit_stop_dataout_timer(cmd);
- transport_check_aborted_status(se_cmd,
- (hdr->flags & ISCSI_FLAG_CMD_FINAL));
return iscsit_dump_data_payload(conn, payload_length, 1);
}
} else {
@@ -1368,18 +1518,15 @@ iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf,
* TASK_ABORTED status.
*/
if (se_cmd->transport_state & CMD_T_ABORTED) {
- if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
- if (--cmd->outstanding_r2ts < 1) {
- iscsit_stop_dataout_timer(cmd);
- transport_check_aborted_status(
- se_cmd, 1);
- }
+ if (hdr->flags & ISCSI_FLAG_CMD_FINAL &&
+ --cmd->outstanding_r2ts < 1)
+ iscsit_stop_dataout_timer(cmd);
return iscsit_dump_data_payload(conn, payload_length, 1);
}
}
/*
- * Preform DataSN, DataSequenceInOrder, DataPDUInOrder, and
+ * Perform DataSN, DataSequenceInOrder, DataPDUInOrder, and
* within-command recovery checks before receiving the payload.
*/
rc = iscsit_check_pre_dataout(cmd, buf);
@@ -1387,26 +1534,62 @@ iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf,
return 0;
else if (rc == DATAOUT_CANNOT_RECOVER)
return -1;
-
- *out_cmd = cmd;
+ *success = true;
return 0;
}
+EXPORT_SYMBOL(__iscsit_check_dataout_hdr);
+
+int
+iscsit_check_dataout_hdr(struct iscsit_conn *conn, void *buf,
+ struct iscsit_cmd **out_cmd)
+{
+ struct iscsi_data *hdr = buf;
+ struct iscsit_cmd *cmd;
+ u32 payload_length = ntoh24(hdr->dlength);
+ int rc;
+ bool success = false;
+
+ if (!payload_length) {
+ pr_warn_ratelimited("DataOUT payload is ZERO, ignoring.\n");
+ return 0;
+ }
+
+ if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
+ pr_err_ratelimited("DataSegmentLength: %u is greater than"
+ " MaxXmitDataSegmentLength: %u\n", payload_length,
+ conn->conn_ops->MaxXmitDataSegmentLength);
+ return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, buf);
+ }
+
+ cmd = iscsit_find_cmd_from_itt_or_dump(conn, hdr->itt, payload_length);
+ if (!cmd)
+ return 0;
+
+ rc = __iscsit_check_dataout_hdr(conn, buf, cmd, payload_length, &success);
+
+ if (success)
+ *out_cmd = cmd;
+
+ return rc;
+}
EXPORT_SYMBOL(iscsit_check_dataout_hdr);
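Splitting the DataOUT header check in two keeps the zero-length and MaxXmitDataSegmentLength validation plus the ITT lookup in the wrapper, while __iscsit_check_dataout_hdr() is exported, presumably so offload transports that already hold the iscsit_cmd can reuse the remaining sanity checks. A hedged sketch of such a caller (the example_* names are hypothetical):

/* Hypothetical offload-side receive path, not from this patch. */
static int example_rx_dataout(struct iscsit_conn *conn, void *hdr,
			      struct iscsit_cmd *cmd, u32 payload_length)
{
	bool success = false;
	int rc;

	rc = __iscsit_check_dataout_hdr(conn, hdr, cmd, payload_length,
					&success);
	if (rc < 0 || !success)
		return rc;	/* rejected, dumped, or in-command recovery */

	return example_receive_payload(conn, cmd, payload_length);
}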
static int
-iscsit_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+iscsit_get_dataout(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
struct iscsi_data *hdr)
{
struct kvec *iov;
u32 checksum, iov_count = 0, padding = 0, rx_got = 0, rx_size = 0;
- u32 payload_length = ntoh24(hdr->dlength);
+ u32 payload_length;
int iov_ret, data_crc_failed = 0;
+ payload_length = min_t(u32, cmd->se_cmd.data_length,
+ ntoh24(hdr->dlength));
rx_size += payload_length;
iov = &cmd->iov_data[0];
- iov_ret = iscsit_map_iovec(cmd, iov, be32_to_cpu(hdr->offset),
- payload_length);
+ iov_ret = iscsit_map_iovec(cmd, iov, cmd->orig_iov_data_count - 2,
+ be32_to_cpu(hdr->offset), payload_length);
if (iov_ret < 0)
return -1;
@@ -1426,6 +1609,7 @@ iscsit_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
rx_size += ISCSI_CRC_LEN;
}
+ WARN_ON_ONCE(iov_count > cmd->orig_iov_data_count);
rx_got = rx_data(conn, &cmd->iov_data[0], iov_count, rx_size);
iscsit_unmap_iovec(cmd);
@@ -1436,11 +1620,8 @@ iscsit_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
if (conn->conn_ops->DataDigest) {
u32 data_crc;
- data_crc = iscsit_do_crypto_hash_sg(&conn->conn_rx_hash, cmd,
- be32_to_cpu(hdr->offset),
- payload_length, padding,
- cmd->pad_bytes);
-
+ data_crc = iscsit_crc_sglist(cmd, payload_length, padding,
+ cmd->pad_bytes);
if (checksum != data_crc) {
pr_err("ITT: 0x%08x, Offset: %u, Length: %u,"
" DataSN: 0x%08x, CRC32C DataDigest 0x%08x"
@@ -1459,10 +1640,10 @@ iscsit_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
}
int
-iscsit_check_dataout_payload(struct iscsi_cmd *cmd, struct iscsi_data *hdr,
+iscsit_check_dataout_payload(struct iscsit_cmd *cmd, struct iscsi_data *hdr,
bool data_crc_failed)
{
- struct iscsi_conn *conn = cmd->conn;
+ struct iscsit_conn *conn = cmd->conn;
int rc, ooo_cmdsn;
/*
* Increment post receive data and CRC values or perform
@@ -1497,9 +1678,9 @@ iscsit_check_dataout_payload(struct iscsi_cmd *cmd, struct iscsi_data *hdr,
}
EXPORT_SYMBOL(iscsit_check_dataout_payload);
-static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
+static int iscsit_handle_data_out(struct iscsit_conn *conn, unsigned char *buf)
{
- struct iscsi_cmd *cmd;
+ struct iscsit_cmd *cmd = NULL;
struct iscsi_data *hdr = (struct iscsi_data *)buf;
int rc;
bool data_crc_failed = false;
@@ -1519,14 +1700,28 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
return iscsit_check_dataout_payload(cmd, hdr, data_crc_failed);
}
-int iscsit_setup_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+int iscsit_setup_nop_out(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
struct iscsi_nopout *hdr)
{
u32 payload_length = ntoh24(hdr->dlength);
+ if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL)) {
+ pr_err("NopOUT Flag's, Left Most Bit not set, protocol error.\n");
+ if (!cmd)
+ return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+ (unsigned char *)hdr);
+
+ return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
+ (unsigned char *)hdr);
+ }
+
if (hdr->itt == RESERVED_ITT && !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
pr_err("NOPOUT ITT is reserved, but Immediate Bit is"
" not set, protocol error.\n");
+ if (!cmd)
+ return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+ (unsigned char *)hdr);
+
return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
(unsigned char *)hdr);
}
@@ -1536,6 +1731,10 @@ int iscsit_setup_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
" greater than MaxXmitDataSegmentLength: %u, protocol"
" error.\n", payload_length,
conn->conn_ops->MaxXmitDataSegmentLength);
+ if (!cmd)
+ return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+ (unsigned char *)hdr);
+
return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
(unsigned char *)hdr);
}
@@ -1549,7 +1748,7 @@ int iscsit_setup_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
* This is not a response to a Unsolicited NopIN, which means
* it can either be a NOPOUT ping request (with a valid ITT),
* or a NOPOUT not requesting a NOPIN (with a reserved ITT).
- * Either way, make sure we allocate an struct iscsi_cmd, as both
+ * Either way, make sure we allocate a struct iscsit_cmd, as both
* can contain ping data.
*/
if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
@@ -1568,16 +1767,18 @@ int iscsit_setup_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
}
EXPORT_SYMBOL(iscsit_setup_nop_out);
-int iscsit_process_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+int iscsit_process_nop_out(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
struct iscsi_nopout *hdr)
{
- struct iscsi_cmd *cmd_p = NULL;
+ struct iscsit_cmd *cmd_p = NULL;
int cmdsn_ret = 0;
/*
* Initiator is expecting a NopIN ping reply..
*/
if (hdr->itt != RESERVED_ITT) {
- BUG_ON(!cmd);
+ if (!cmd)
+ return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+ (unsigned char *)hdr);
spin_lock_bh(&conn->cmd_lock);
list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
@@ -1620,11 +1821,15 @@ int iscsit_process_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
* Otherwise, initiator is not expecting a NOPIN is response.
* Just ignore for now.
*/
+
+ if (cmd)
+ iscsit_free_cmd(cmd, false);
+
return 0;
}
EXPORT_SYMBOL(iscsit_process_nop_out);
-static int iscsit_handle_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+static int iscsit_handle_nop_out(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
unsigned char *buf)
{
unsigned char *ping_data = NULL;
@@ -1645,8 +1850,6 @@ static int iscsit_handle_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
ping_data = kzalloc(payload_length + 1, GFP_KERNEL);
if (!ping_data) {
- pr_err("Unable to allocate memory for"
- " NOPOUT ping data.\n");
ret = -1;
goto out;
}
@@ -1669,6 +1872,7 @@ static int iscsit_handle_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
rx_size += ISCSI_CRC_LEN;
}
+ WARN_ON_ONCE(niov > ARRAY_SIZE(cmd->iov_misc));
rx_got = rx_data(conn, &cmd->iov_misc[0], niov, rx_size);
if (rx_got != rx_size) {
ret = -1;
@@ -1676,11 +1880,8 @@ static int iscsit_handle_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
}
if (conn->conn_ops->DataDigest) {
- iscsit_do_crypto_hash_buf(&conn->conn_rx_hash,
- ping_data, payload_length,
- padding, cmd->pad_bytes,
- (u8 *)&data_crc);
-
+ data_crc = iscsit_crc_buf(ping_data, payload_length,
+ padding, cmd->pad_bytes);
if (checksum != data_crc) {
pr_err("Ping data CRC32C DataDigest"
" 0x%08x does not match computed 0x%08x\n",
@@ -1711,7 +1912,7 @@ static int iscsit_handle_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
ping_data[payload_length] = '\0';
/*
- * Attach ping data to struct iscsi_cmd->buf_ptr.
+ * Attach ping data to struct iscsit_cmd->buf_ptr.
*/
cmd->buf_ptr = ping_data;
cmd->buf_ptr_size = payload_length;
@@ -1730,16 +1931,37 @@ out:
return ret;
}
+static enum tcm_tmreq_table iscsit_convert_tmf(u8 iscsi_tmf)
+{
+ switch (iscsi_tmf) {
+ case ISCSI_TM_FUNC_ABORT_TASK:
+ return TMR_ABORT_TASK;
+ case ISCSI_TM_FUNC_ABORT_TASK_SET:
+ return TMR_ABORT_TASK_SET;
+ case ISCSI_TM_FUNC_CLEAR_ACA:
+ return TMR_CLEAR_ACA;
+ case ISCSI_TM_FUNC_CLEAR_TASK_SET:
+ return TMR_CLEAR_TASK_SET;
+ case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
+ return TMR_LUN_RESET;
+ case ISCSI_TM_FUNC_TARGET_WARM_RESET:
+ return TMR_TARGET_WARM_RESET;
+ case ISCSI_TM_FUNC_TARGET_COLD_RESET:
+ return TMR_TARGET_COLD_RESET;
+ default:
+ return TMR_UNKNOWN;
+ }
+}
+
int
-iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+iscsit_handle_task_mgt_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
unsigned char *buf)
{
struct se_tmr_req *se_tmr;
struct iscsi_tmr_req *tmr_req;
struct iscsi_tm *hdr;
int out_of_order_cmdsn = 0, ret;
- bool sess_ref = false;
- u8 function;
+ u8 function, tcm_function = TMR_UNKNOWN;
hdr = (struct iscsi_tm *) buf;
hdr->flags &= ~ISCSI_FLAG_CMD_FINAL;
@@ -1770,70 +1992,41 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
hdr->refcmdsn = cpu_to_be32(ISCSI_RESERVED_TAG);
cmd->data_direction = DMA_NONE;
-
- cmd->tmr_req = kzalloc(sizeof(struct iscsi_tmr_req), GFP_KERNEL);
+ cmd->tmr_req = kzalloc(sizeof(*cmd->tmr_req), GFP_KERNEL);
if (!cmd->tmr_req) {
- pr_err("Unable to allocate memory for"
- " Task Management command!\n");
return iscsit_add_reject_cmd(cmd,
ISCSI_REASON_BOOKMARK_NO_RESOURCES,
buf);
}
+ __target_init_cmd(&cmd->se_cmd, &iscsi_ops,
+ conn->sess->se_sess, 0, DMA_NONE,
+ TCM_SIMPLE_TAG, cmd->sense_buffer + 2,
+ scsilun_to_int(&hdr->lun),
+ conn->cmd_cnt);
+
+ target_get_sess_cmd(&cmd->se_cmd, true);
+
/*
* TASK_REASSIGN for ERL=2 / connection stays inside of
* LIO-Target $FABRIC_MOD
*/
if (function != ISCSI_TM_FUNC_TASK_REASSIGN) {
-
- u8 tcm_function;
- int ret;
-
- transport_init_se_cmd(&cmd->se_cmd,
- &lio_target_fabric_configfs->tf_ops,
- conn->sess->se_sess, 0, DMA_NONE,
- MSG_SIMPLE_TAG, cmd->sense_buffer + 2);
-
- target_get_sess_cmd(conn->sess->se_sess, &cmd->se_cmd, true);
- sess_ref = true;
-
- switch (function) {
- case ISCSI_TM_FUNC_ABORT_TASK:
- tcm_function = TMR_ABORT_TASK;
- break;
- case ISCSI_TM_FUNC_ABORT_TASK_SET:
- tcm_function = TMR_ABORT_TASK_SET;
- break;
- case ISCSI_TM_FUNC_CLEAR_ACA:
- tcm_function = TMR_CLEAR_ACA;
- break;
- case ISCSI_TM_FUNC_CLEAR_TASK_SET:
- tcm_function = TMR_CLEAR_TASK_SET;
- break;
- case ISCSI_TM_FUNC_LOGICAL_UNIT_RESET:
- tcm_function = TMR_LUN_RESET;
- break;
- case ISCSI_TM_FUNC_TARGET_WARM_RESET:
- tcm_function = TMR_TARGET_WARM_RESET;
- break;
- case ISCSI_TM_FUNC_TARGET_COLD_RESET:
- tcm_function = TMR_TARGET_COLD_RESET;
- break;
- default:
+ tcm_function = iscsit_convert_tmf(function);
+ if (tcm_function == TMR_UNKNOWN) {
pr_err("Unknown iSCSI TMR Function:"
" 0x%02x\n", function);
return iscsit_add_reject_cmd(cmd,
ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
}
-
- ret = core_tmr_alloc_req(&cmd->se_cmd, cmd->tmr_req,
- tcm_function, GFP_KERNEL);
- if (ret < 0)
- return iscsit_add_reject_cmd(cmd,
+ }
+ ret = core_tmr_alloc_req(&cmd->se_cmd, cmd->tmr_req, tcm_function,
+ GFP_KERNEL);
+ if (ret < 0)
+ return iscsit_add_reject_cmd(cmd,
ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
- cmd->tmr_req->se_tmr_req = cmd->se_cmd.se_tmr_req;
- }
+ cmd->tmr_req->se_tmr_req = cmd->se_cmd.se_tmr_req;
cmd->iscsi_opcode = ISCSI_OP_SCSI_TMFUNC;
cmd->i_state = ISTATE_SEND_TASKMGTRSP;
@@ -1848,8 +2041,7 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
* Locate the struct se_lun for all TMRs not related to ERL=2 TASK_REASSIGN
*/
if (function != ISCSI_TM_FUNC_TASK_REASSIGN) {
- ret = transport_lookup_tmr_lun(&cmd->se_cmd,
- scsilun_to_int(&hdr->lun));
+ ret = transport_lookup_tmr_lun(&cmd->se_cmd);
if (ret < 0) {
se_tmr->response = ISCSI_TMF_RSP_NO_LUN;
goto attach;
@@ -1909,12 +2101,14 @@ attach:
if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
- if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP)
+ if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP) {
out_of_order_cmdsn = 1;
- else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
+ } else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
+ target_put_sess_cmd(&cmd->se_cmd);
return 0;
- else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
+ } else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) {
return -1;
+ }
}
iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
@@ -1934,19 +2128,15 @@ attach:
* For connection recovery, this is also the default action for
* TMR TASK_REASSIGN.
*/
- if (sess_ref) {
- pr_debug("Handle TMR, using sess_ref=true check\n");
- target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
- }
-
iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+ target_put_sess_cmd(&cmd->se_cmd);
return 0;
}
EXPORT_SYMBOL(iscsit_handle_task_mgt_cmd);
/* #warning FIXME: Support Text Command parameters besides SendTargets */
int
-iscsit_setup_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+iscsit_setup_text_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
struct iscsi_text *hdr)
{
u32 payload_length = ntoh24(hdr->dlength);
@@ -1959,6 +2149,13 @@ iscsit_setup_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
(unsigned char *)hdr);
}
+ if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL) ||
+ (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)) {
+ pr_err("Multi sequence text commands currently not supported\n");
+ return iscsit_reject_cmd(cmd, ISCSI_REASON_CMD_NOT_SUPPORTED,
+ (unsigned char *)hdr);
+ }
+
pr_debug("Got Text Request: ITT: 0x%08x, CmdSN: 0x%08x,"
" ExpStatSN: 0x%08x, Length: %u\n", hdr->itt, hdr->cmdsn,
hdr->exp_statsn, payload_length);
@@ -1971,41 +2168,45 @@ iscsit_setup_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
cmd->data_direction = DMA_NONE;
+ kfree(cmd->text_in_ptr);
+ cmd->text_in_ptr = NULL;
return 0;
}
EXPORT_SYMBOL(iscsit_setup_text_cmd);
int
-iscsit_process_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+iscsit_process_text_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
struct iscsi_text *hdr)
{
unsigned char *text_in = cmd->text_in_ptr, *text_ptr;
int cmdsn_ret;
if (!text_in) {
- pr_err("Unable to locate text_in buffer for sendtargets"
- " discovery\n");
- goto reject;
+ cmd->targ_xfer_tag = be32_to_cpu(hdr->ttt);
+ if (cmd->targ_xfer_tag == 0xFFFFFFFF) {
+ pr_err("Unable to locate text_in buffer for sendtargets"
+ " discovery\n");
+ goto reject;
+ }
+ goto empty_sendtargets;
}
- if (strncmp("SendTargets", text_in, 11) != 0) {
+ if (strncmp("SendTargets=", text_in, 12) != 0) {
pr_err("Received Text Data that is not"
" SendTargets, cannot continue.\n");
goto reject;
}
+ /* '=' confirmed in strncmp */
text_ptr = strchr(text_in, '=');
- if (!text_ptr) {
- pr_err("No \"=\" separator found in Text Data,"
- " cannot continue.\n");
- goto reject;
- }
- if (!strncmp("=All", text_ptr, 4)) {
- cmd->cmd_flags |= IFC_SENDTARGETS_ALL;
+ BUG_ON(!text_ptr);
+ if (!strncmp("=All", text_ptr, 5)) {
+ cmd->cmd_flags |= ICF_SENDTARGETS_ALL;
} else if (!strncmp("=iqn.", text_ptr, 5) ||
!strncmp("=eui.", text_ptr, 5)) {
- cmd->cmd_flags |= IFC_SENDTARGETS_SINGLE;
+ cmd->cmd_flags |= ICF_SENDTARGETS_SINGLE;
} else {
- pr_err("Unable to locate valid SendTargets=%s value\n", text_ptr);
+ pr_err("Unable to locate valid SendTargets%s value\n",
+ text_ptr);
goto reject;
}
@@ -2013,6 +2214,7 @@ iscsit_process_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
spin_unlock_bh(&conn->cmd_lock);
+empty_sendtargets:
iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
@@ -2033,7 +2235,7 @@ reject:
EXPORT_SYMBOL(iscsit_process_text_cmd);
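A note on the strncmp() changes above: matching "SendTargets=" with length 12 now requires the '=' up front, and matching "=All" with length 5 deliberately includes the terminating NUL, so a value such as "AllFoo" no longer slips through. The same classification in stand-alone form (hypothetical user-space code):

#include <stdio.h>
#include <string.h>

static const char *classify(const char *text_in)
{
	const char *text_ptr;

	if (strncmp("SendTargets=", text_in, 12) != 0)
		return "reject";

	text_ptr = strchr(text_in, '=');	/* '=' confirmed above */
	if (!strncmp("=All", text_ptr, 5))	/* 5 bytes: includes '\0' */
		return "ICF_SENDTARGETS_ALL";
	if (!strncmp("=iqn.", text_ptr, 5) || !strncmp("=eui.", text_ptr, 5))
		return "ICF_SENDTARGETS_SINGLE";
	return "reject";
}

int main(void)
{
	printf("%s\n", classify("SendTargets=All"));
	printf("%s\n", classify("SendTargets=iqn.2003-01.org.example:t1"));
	printf("%s\n", classify("SendTargets=AllFoo"));	/* rejected */
	return 0;
}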
static int
-iscsit_handle_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+iscsit_handle_text_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
unsigned char *buf)
{
struct iscsi_text *hdr = (struct iscsi_text *)buf;
@@ -2048,46 +2250,38 @@ iscsit_handle_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
rx_size = payload_length;
if (payload_length) {
u32 checksum = 0, data_crc = 0;
- u32 padding = 0, pad_bytes = 0;
+ u32 padding = 0;
int niov = 0, rx_got;
- struct kvec iov[3];
+ struct kvec iov[2];
- text_in = kzalloc(payload_length, GFP_KERNEL);
- if (!text_in) {
- pr_err("Unable to allocate memory for"
- " incoming text parameters\n");
+ rx_size = ALIGN(payload_length, 4);
+ text_in = kzalloc(rx_size, GFP_KERNEL);
+ if (!text_in)
goto reject;
- }
+
cmd->text_in_ptr = text_in;
- memset(iov, 0, 3 * sizeof(struct kvec));
+ memset(iov, 0, sizeof(iov));
iov[niov].iov_base = text_in;
- iov[niov++].iov_len = payload_length;
+ iov[niov++].iov_len = rx_size;
- padding = ((-payload_length) & 3);
- if (padding != 0) {
- iov[niov].iov_base = &pad_bytes;
- iov[niov++].iov_len = padding;
- rx_size += padding;
+ padding = rx_size - payload_length;
+ if (padding)
pr_debug("Receiving %u additional bytes"
" for padding.\n", padding);
- }
if (conn->conn_ops->DataDigest) {
iov[niov].iov_base = &checksum;
iov[niov++].iov_len = ISCSI_CRC_LEN;
rx_size += ISCSI_CRC_LEN;
}
+ WARN_ON_ONCE(niov > ARRAY_SIZE(iov));
rx_got = rx_data(conn, &iov[0], niov, rx_size);
if (rx_got != rx_size)
goto reject;
if (conn->conn_ops->DataDigest) {
- iscsit_do_crypto_hash_buf(&conn->conn_rx_hash,
- text_in, payload_length,
- padding, (u8 *)&pad_bytes,
- (u8 *)&data_crc);
-
+ data_crc = iscsit_crc_buf(text_in, rx_size, 0, NULL);
if (checksum != data_crc) {
pr_err("Text data CRC32C DataDigest"
" 0x%08x does not match computed"
@@ -2126,12 +2320,11 @@ reject:
cmd->text_in_ptr = NULL;
return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf);
}
-EXPORT_SYMBOL(iscsit_handle_text_cmd);
-int iscsit_logout_closesession(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+int iscsit_logout_closesession(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
{
- struct iscsi_conn *conn_p;
- struct iscsi_session *sess = conn->sess;
+ struct iscsit_conn *conn_p;
+ struct iscsit_session *sess = conn->sess;
pr_debug("Received logout request CLOSESESSION on CID: %hu"
" for SID: %u.\n", conn->cid, conn->sess->sid);
@@ -2158,10 +2351,10 @@ int iscsit_logout_closesession(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
return 0;
}
-int iscsit_logout_closeconnection(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+int iscsit_logout_closeconnection(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
{
- struct iscsi_conn *l_conn;
- struct iscsi_session *sess = conn->sess;
+ struct iscsit_conn *l_conn;
+ struct iscsit_session *sess = conn->sess;
pr_debug("Received logout request CLOSECONNECTION for CID:"
" %hu on CID: %hu.\n", cmd->logout_cid, conn->cid);
@@ -2206,9 +2399,9 @@ int iscsit_logout_closeconnection(struct iscsi_cmd *cmd, struct iscsi_conn *conn
return 0;
}
-int iscsit_logout_removeconnforrecovery(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+int iscsit_logout_removeconnforrecovery(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
{
- struct iscsi_session *sess = conn->sess;
+ struct iscsit_session *sess = conn->sess;
pr_debug("Received explicit REMOVECONNFORRECOVERY logout for"
" CID: %hu on CID: %hu.\n", cmd->logout_cid, conn->cid);
@@ -2236,7 +2429,7 @@ int iscsit_logout_removeconnforrecovery(struct iscsi_cmd *cmd, struct iscsi_conn
}
int
-iscsit_handle_logout_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+iscsit_handle_logout_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
unsigned char *buf)
{
int cmdsn_ret, logout_remove = 0;
@@ -2316,8 +2509,8 @@ iscsit_handle_logout_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
}
EXPORT_SYMBOL(iscsit_handle_logout_cmd);
-static int iscsit_handle_snack(
- struct iscsi_conn *conn,
+int iscsit_handle_snack(
+ struct iscsit_conn *conn,
unsigned char *buf)
{
struct iscsi_snack *hdr;
@@ -2369,8 +2562,9 @@ static int iscsit_handle_snack(
return 0;
}
+EXPORT_SYMBOL(iscsit_handle_snack);
-static void iscsit_rx_thread_wait_for_tcp(struct iscsi_conn *conn)
+static void iscsit_rx_thread_wait_for_tcp(struct iscsit_conn *conn)
{
if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) ||
(conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) {
@@ -2381,22 +2575,42 @@ static void iscsit_rx_thread_wait_for_tcp(struct iscsi_conn *conn)
}
static int iscsit_handle_immediate_data(
- struct iscsi_cmd *cmd,
+ struct iscsit_cmd *cmd,
struct iscsi_scsi_req *hdr,
u32 length)
{
int iov_ret, rx_got = 0, rx_size = 0;
u32 checksum, iov_count = 0, padding = 0;
- struct iscsi_conn *conn = cmd->conn;
+ struct iscsit_conn *conn = cmd->conn;
struct kvec *iov;
+ void *overflow_buf = NULL;
- iov_ret = iscsit_map_iovec(cmd, cmd->iov_data, cmd->write_data_done, length);
+ BUG_ON(cmd->write_data_done > cmd->se_cmd.data_length);
+ rx_size = min(cmd->se_cmd.data_length - cmd->write_data_done, length);
+ iov_ret = iscsit_map_iovec(cmd, cmd->iov_data,
+ cmd->orig_iov_data_count - 2,
+ cmd->write_data_done, rx_size);
if (iov_ret < 0)
return IMMEDIATE_DATA_CANNOT_RECOVER;
- rx_size = length;
iov_count = iov_ret;
iov = &cmd->iov_data[0];
+ if (rx_size < length) {
+ /*
+ * Special case: length of immediate data exceeds the data
+ * buffer size derived from the CDB.
+ */
+ overflow_buf = kmalloc(length - rx_size, GFP_KERNEL);
+ if (!overflow_buf) {
+ iscsit_unmap_iovec(cmd);
+ return IMMEDIATE_DATA_CANNOT_RECOVER;
+ }
+ cmd->overflow_buf = overflow_buf;
+ iov[iov_count].iov_base = overflow_buf;
+ iov[iov_count].iov_len = length - rx_size;
+ iov_count++;
+ rx_size = length;
+ }
padding = ((-length) & 3);
if (padding != 0) {
@@ -2411,6 +2625,7 @@ static int iscsit_handle_immediate_data(
rx_size += ISCSI_CRC_LEN;
}
+ WARN_ON_ONCE(iov_count > cmd->orig_iov_data_count);
rx_got = rx_data(conn, &cmd->iov_data[0], iov_count, rx_size);
iscsit_unmap_iovec(cmd);
@@ -2423,10 +2638,8 @@ static int iscsit_handle_immediate_data(
if (conn->conn_ops->DataDigest) {
u32 data_crc;
- data_crc = iscsit_do_crypto_hash_sg(&conn->conn_rx_hash, cmd,
- cmd->write_data_done, length, padding,
- cmd->pad_bytes);
-
+ data_crc = iscsit_crc_sglist(cmd, length, padding,
+ cmd->pad_bytes);
if (checksum != data_crc) {
pr_err("ImmediateData CRC32C DataDigest 0x%08x"
" does not match computed 0x%08x\n", checksum,
@@ -2465,15 +2678,15 @@ static int iscsit_handle_immediate_data(
return IMMEDIATE_DATA_NORMAL_OPERATION;
}
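The padding computed here with the ((-length) & 3) idiom, and with ALIGN(payload_length, 4) in the reworked text-command receive path, is the number of bytes needed to round an iSCSI data segment up to the required 4-byte boundary. Both forms agree; a quick self-contained check (hypothetical user-space code):

#include <assert.h>
#include <stdint.h>

#define ALIGN4(x)	(((x) + 3u) & ~3u)	/* ALIGN(x, 4) equivalent */

int main(void)
{
	uint32_t len;

	for (len = 0; len < 64; len++) {
		uint32_t pad = (0u - len) & 3;	/* the ((-len) & 3) idiom */

		assert(pad < 4);
		assert((len + pad) % 4 == 0);
		assert(len + pad == ALIGN4(len));
	}
	return 0;
}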
-/*
- * Called with sess->conn_lock held.
- */
/* #warning iscsi_build_conn_drop_async_message() only sends out on connections
with active network interface */
-static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
+static void iscsit_build_conn_drop_async_message(struct iscsit_conn *conn)
{
- struct iscsi_cmd *cmd;
- struct iscsi_conn *conn_p;
+ struct iscsit_cmd *cmd;
+ struct iscsit_conn *conn_p;
+ bool found = false;
+
+ lockdep_assert_held(&conn->sess->conn_lock);
/*
* Only send an Asynchronous Message on connections whose network
@@ -2482,14 +2695,15 @@ static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
list_for_each_entry(conn_p, &conn->sess->sess_conn_list, conn_list) {
if (conn_p->conn_state == TARG_CONN_STATE_LOGGED_IN) {
iscsit_inc_conn_usage_count(conn_p);
+ found = true;
break;
}
}
- if (!conn_p)
+ if (!found)
return;
- cmd = iscsit_allocate_cmd(conn_p, GFP_ATOMIC);
+ cmd = iscsit_allocate_cmd(conn_p, TASK_RUNNING);
if (!cmd) {
iscsit_dec_conn_usage_count(conn_p);
return;
@@ -2508,12 +2722,11 @@ static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
}
static int iscsit_send_conn_drop_async_message(
- struct iscsi_cmd *cmd,
- struct iscsi_conn *conn)
+ struct iscsit_cmd *cmd,
+ struct iscsit_conn *conn)
{
struct iscsi_async *hdr;
- cmd->tx_size = ISCSI_HDR_LEN;
cmd->iscsi_opcode = ISCSI_OP_ASYNC_EVENT;
hdr = (struct iscsi_async *) cmd->pdu;
@@ -2525,34 +2738,20 @@ static int iscsit_send_conn_drop_async_message(
cmd->stat_sn = conn->stat_sn++;
hdr->statsn = cpu_to_be32(cmd->stat_sn);
hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
- hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+ hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
hdr->async_event = ISCSI_ASYNC_MSG_DROPPING_CONNECTION;
hdr->param1 = cpu_to_be16(cmd->logout_cid);
hdr->param2 = cpu_to_be16(conn->sess->sess_ops->DefaultTime2Wait);
hdr->param3 = cpu_to_be16(conn->sess->sess_ops->DefaultTime2Retain);
- if (conn->conn_ops->HeaderDigest) {
- u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
-
- iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
- ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
-
- cmd->tx_size += ISCSI_CRC_LEN;
- pr_debug("Attaching CRC32C HeaderDigest to"
- " Async Message 0x%08x\n", *header_digest);
- }
-
- cmd->iov_misc[0].iov_base = cmd->pdu;
- cmd->iov_misc[0].iov_len = cmd->tx_size;
- cmd->iov_misc_count = 1;
-
pr_debug("Sending Connection Dropped Async Message StatSN:"
" 0x%08x, for CID: %hu on CID: %hu\n", cmd->stat_sn,
cmd->logout_cid, conn->cid);
- return 0;
+
+ return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
}
-static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *conn)
+static void iscsit_tx_thread_wait_for_tcp(struct iscsit_conn *conn)
{
if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) ||
(conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) {
@@ -2562,8 +2761,8 @@ static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *conn)
}
}
-static void
-iscsit_build_datain_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
+void
+iscsit_build_datain_pdu(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
struct iscsi_datain *datain, struct iscsi_data_rsp *hdr,
bool set_statsn)
{
@@ -2597,7 +2796,7 @@ iscsit_build_datain_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
hdr->statsn = cpu_to_be32(0xFFFFFFFF);
hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
- hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+ hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
hdr->datasn = cpu_to_be32(datain->data_sn);
hdr->offset = cpu_to_be32(datain->offset);
@@ -2606,15 +2805,14 @@ iscsit_build_datain_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
cmd->init_task_tag, ntohl(hdr->statsn), ntohl(hdr->datasn),
ntohl(hdr->offset), datain->length, conn->cid);
}
+EXPORT_SYMBOL(iscsit_build_datain_pdu);
-static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+static int iscsit_send_datain(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
{
struct iscsi_data_rsp *hdr = (struct iscsi_data_rsp *)&cmd->pdu[0];
struct iscsi_datain datain;
struct iscsi_datain_req *dr;
- struct kvec *iov;
- u32 iov_count = 0, tx_size = 0;
- int eodr = 0, ret, iov_ret;
+ int eodr = 0, ret;
bool set_statsn = false;
memset(&datain, 0, sizeof(struct iscsi_datain));
@@ -2635,14 +2833,7 @@ static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
return -1;
}
- spin_lock_bh(&conn->sess->session_stats_lock);
- conn->sess->tx_data_octets += datain.length;
- if (conn->sess->se_sess->se_node_acl) {
- spin_lock(&conn->sess->se_sess->se_node_acl->stats_lock);
- conn->sess->se_sess->se_node_acl->read_bytes += datain.length;
- spin_unlock(&conn->sess->se_sess->se_node_acl->stats_lock);
- }
- spin_unlock_bh(&conn->sess->session_stats_lock);
+ atomic_long_add(datain.length, &conn->sess->tx_data_octets);
/*
* Special case for successfully execution w/ both DATAIN
* and Sense Data.
@@ -2663,68 +2854,9 @@ static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
iscsit_build_datain_pdu(cmd, conn, &datain, hdr, set_statsn);
- iov = &cmd->iov_data[0];
- iov[iov_count].iov_base = cmd->pdu;
- iov[iov_count++].iov_len = ISCSI_HDR_LEN;
- tx_size += ISCSI_HDR_LEN;
-
- if (conn->conn_ops->HeaderDigest) {
- u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
-
- iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->pdu,
- ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
-
- iov[0].iov_len += ISCSI_CRC_LEN;
- tx_size += ISCSI_CRC_LEN;
-
- pr_debug("Attaching CRC32 HeaderDigest"
- " for DataIN PDU 0x%08x\n", *header_digest);
- }
-
- iov_ret = iscsit_map_iovec(cmd, &cmd->iov_data[1],
- datain.offset, datain.length);
- if (iov_ret < 0)
- return -1;
-
- iov_count += iov_ret;
- tx_size += datain.length;
-
- cmd->padding = ((-datain.length) & 3);
- if (cmd->padding) {
- iov[iov_count].iov_base = cmd->pad_bytes;
- iov[iov_count++].iov_len = cmd->padding;
- tx_size += cmd->padding;
-
- pr_debug("Attaching %u padding bytes\n",
- cmd->padding);
- }
- if (conn->conn_ops->DataDigest) {
- cmd->data_crc = iscsit_do_crypto_hash_sg(&conn->conn_tx_hash, cmd,
- datain.offset, datain.length, cmd->padding, cmd->pad_bytes);
-
- iov[iov_count].iov_base = &cmd->data_crc;
- iov[iov_count++].iov_len = ISCSI_CRC_LEN;
- tx_size += ISCSI_CRC_LEN;
-
- pr_debug("Attached CRC32C DataDigest %d bytes, crc"
- " 0x%08x\n", datain.length+cmd->padding, cmd->data_crc);
- }
-
- cmd->iov_data_count = iov_count;
- cmd->tx_size = tx_size;
-
- /* sendpage is preferred but can't insert markers */
- if (!conn->conn_ops->IFMarker)
- ret = iscsit_fe_sendpage_sg(cmd, conn);
- else
- ret = iscsit_send_tx_data(cmd, conn, 0);
-
- iscsit_unmap_iovec(cmd);
-
- if (ret < 0) {
- iscsit_tx_thread_wait_for_tcp(conn);
+ ret = conn->conn_transport->iscsit_xmit_pdu(conn, cmd, dr, &datain, 0);
+ if (ret < 0)
return ret;
- }
if (dr->dr_complete) {
eodr = (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ?
@@ -2736,12 +2868,12 @@ static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
}
int
-iscsit_build_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
+iscsit_build_logout_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
struct iscsi_logout_rsp *hdr)
{
- struct iscsi_conn *logout_conn = NULL;
+ struct iscsit_conn *logout_conn = NULL;
struct iscsi_conn_recovery *cr = NULL;
- struct iscsi_session *sess = conn->sess;
+ struct iscsit_session *sess = conn->sess;
/*
* The actual shutting down of Sessions and/or Connections
* for CLOSESESSION and CLOSECONNECTION Logout Requests
@@ -2819,7 +2951,7 @@ iscsit_build_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
iscsit_increment_maxcmdsn(cmd, conn->sess);
hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
- hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+ hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
pr_debug("Built Logout Response ITT: 0x%08x StatSN:"
" 0x%08x Response: 0x%02x CID: %hu on CID: %hu\n",
@@ -2831,40 +2963,20 @@ iscsit_build_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
EXPORT_SYMBOL(iscsit_build_logout_rsp);
static int
-iscsit_send_logout(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+iscsit_send_logout(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
{
- struct kvec *iov;
- int niov = 0, tx_size, rc;
+ int rc;
rc = iscsit_build_logout_rsp(cmd, conn,
(struct iscsi_logout_rsp *)&cmd->pdu[0]);
if (rc < 0)
return rc;
- tx_size = ISCSI_HDR_LEN;
- iov = &cmd->iov_misc[0];
- iov[niov].iov_base = cmd->pdu;
- iov[niov++].iov_len = ISCSI_HDR_LEN;
-
- if (conn->conn_ops->HeaderDigest) {
- u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
-
- iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, &cmd->pdu[0],
- ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
-
- iov[0].iov_len += ISCSI_CRC_LEN;
- tx_size += ISCSI_CRC_LEN;
- pr_debug("Attaching CRC32C HeaderDigest to"
- " Logout Response 0x%08x\n", *header_digest);
- }
- cmd->iov_misc_count = niov;
- cmd->tx_size = tx_size;
-
- return 0;
+ return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
}
void
-iscsit_build_nopin_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
+iscsit_build_nopin_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
struct iscsi_nopin *hdr, bool nopout_response)
{
hdr->opcode = ISCSI_OP_NOOP_IN;
@@ -2882,11 +2994,11 @@ iscsit_build_nopin_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
iscsit_increment_maxcmdsn(cmd, conn->sess);
hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
- hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+ hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
pr_debug("Built NOPIN %s Response ITT: 0x%08x, TTT: 0x%08x,"
" StatSN: 0x%08x, Length %u\n", (nopout_response) ?
- "Solicitied" : "Unsolicitied", cmd->init_task_tag,
+ "Solicited" : "Unsolicited", cmd->init_task_tag,
cmd->targ_xfer_tag, cmd->stat_sn, cmd->buf_ptr_size);
}
EXPORT_SYMBOL(iscsit_build_nopin_rsp);
@@ -2895,39 +3007,21 @@ EXPORT_SYMBOL(iscsit_build_nopin_rsp);
* Unsolicited NOPIN, either requesting a response or not.
*/
static int iscsit_send_unsolicited_nopin(
- struct iscsi_cmd *cmd,
- struct iscsi_conn *conn,
+ struct iscsit_cmd *cmd,
+ struct iscsit_conn *conn,
int want_response)
{
struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0];
- int tx_size = ISCSI_HDR_LEN, ret;
+ int ret;
iscsit_build_nopin_rsp(cmd, conn, hdr, false);
- if (conn->conn_ops->HeaderDigest) {
- u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
-
- iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
- ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
-
- tx_size += ISCSI_CRC_LEN;
- pr_debug("Attaching CRC32C HeaderDigest to"
- " NopIN 0x%08x\n", *header_digest);
- }
-
- cmd->iov_misc[0].iov_base = cmd->pdu;
- cmd->iov_misc[0].iov_len = tx_size;
- cmd->iov_misc_count = 1;
- cmd->tx_size = tx_size;
-
pr_debug("Sending Unsolicited NOPIN TTT: 0x%08x StatSN:"
" 0x%08x CID: %hu\n", hdr->ttt, cmd->stat_sn, conn->cid);
- ret = iscsit_send_tx_data(cmd, conn, 1);
- if (ret < 0) {
- iscsit_tx_thread_wait_for_tcp(conn);
+ ret = conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
+ if (ret < 0)
return ret;
- }
spin_lock_bh(&cmd->istate_lock);
cmd->i_state = want_response ?
@@ -2938,78 +3032,27 @@ static int iscsit_send_unsolicited_nopin(
}
static int
-iscsit_send_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+iscsit_send_nopin(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
{
struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0];
- struct kvec *iov;
- u32 padding = 0;
- int niov = 0, tx_size;
iscsit_build_nopin_rsp(cmd, conn, hdr, true);
- tx_size = ISCSI_HDR_LEN;
- iov = &cmd->iov_misc[0];
- iov[niov].iov_base = cmd->pdu;
- iov[niov++].iov_len = ISCSI_HDR_LEN;
-
- if (conn->conn_ops->HeaderDigest) {
- u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
-
- iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
- ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
-
- iov[0].iov_len += ISCSI_CRC_LEN;
- tx_size += ISCSI_CRC_LEN;
- pr_debug("Attaching CRC32C HeaderDigest"
- " to NopIn 0x%08x\n", *header_digest);
- }
-
/*
- * NOPOUT Ping Data is attached to struct iscsi_cmd->buf_ptr.
- * NOPOUT DataSegmentLength is at struct iscsi_cmd->buf_ptr_size.
+ * NOPOUT Ping Data is attached to struct iscsit_cmd->buf_ptr.
+ * NOPOUT DataSegmentLength is at struct iscsit_cmd->buf_ptr_size.
*/
- if (cmd->buf_ptr_size) {
- iov[niov].iov_base = cmd->buf_ptr;
- iov[niov++].iov_len = cmd->buf_ptr_size;
- tx_size += cmd->buf_ptr_size;
-
- pr_debug("Echoing back %u bytes of ping"
- " data.\n", cmd->buf_ptr_size);
-
- padding = ((-cmd->buf_ptr_size) & 3);
- if (padding != 0) {
- iov[niov].iov_base = &cmd->pad_bytes;
- iov[niov++].iov_len = padding;
- tx_size += padding;
- pr_debug("Attaching %u additional"
- " padding bytes.\n", padding);
- }
- if (conn->conn_ops->DataDigest) {
- iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
- cmd->buf_ptr, cmd->buf_ptr_size,
- padding, (u8 *)&cmd->pad_bytes,
- (u8 *)&cmd->data_crc);
-
- iov[niov].iov_base = &cmd->data_crc;
- iov[niov++].iov_len = ISCSI_CRC_LEN;
- tx_size += ISCSI_CRC_LEN;
- pr_debug("Attached DataDigest for %u"
- " bytes of ping data, CRC 0x%08x\n",
- cmd->buf_ptr_size, cmd->data_crc);
- }
- }
-
- cmd->iov_misc_count = niov;
- cmd->tx_size = tx_size;
+ pr_debug("Echoing back %u bytes of ping data.\n", cmd->buf_ptr_size);
- return 0;
+ return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL,
+ cmd->buf_ptr,
+ cmd->buf_ptr_size);
}
static int iscsit_send_r2t(
- struct iscsi_cmd *cmd,
- struct iscsi_conn *conn)
+ struct iscsit_cmd *cmd,
+ struct iscsit_conn *conn)
{
- int tx_size = 0;
struct iscsi_r2t *r2t;
struct iscsi_r2t_rsp *hdr;
int ret;
@@ -3025,51 +3068,30 @@ static int iscsit_send_r2t(
int_to_scsilun(cmd->se_cmd.orig_fe_lun,
(struct scsi_lun *)&hdr->lun);
hdr->itt = cmd->init_task_tag;
- spin_lock_bh(&conn->sess->ttt_lock);
- r2t->targ_xfer_tag = conn->sess->targ_xfer_tag++;
- if (r2t->targ_xfer_tag == 0xFFFFFFFF)
- r2t->targ_xfer_tag = conn->sess->targ_xfer_tag++;
- spin_unlock_bh(&conn->sess->ttt_lock);
+ if (conn->conn_transport->iscsit_get_r2t_ttt)
+ conn->conn_transport->iscsit_get_r2t_ttt(conn, cmd, r2t);
+ else
+ r2t->targ_xfer_tag = session_get_next_ttt(conn->sess);
hdr->ttt = cpu_to_be32(r2t->targ_xfer_tag);
hdr->statsn = cpu_to_be32(conn->stat_sn);
hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
- hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+ hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
hdr->r2tsn = cpu_to_be32(r2t->r2t_sn);
hdr->data_offset = cpu_to_be32(r2t->offset);
hdr->data_length = cpu_to_be32(r2t->xfer_len);
- cmd->iov_misc[0].iov_base = cmd->pdu;
- cmd->iov_misc[0].iov_len = ISCSI_HDR_LEN;
- tx_size += ISCSI_HDR_LEN;
-
- if (conn->conn_ops->HeaderDigest) {
- u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
-
- iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
- ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
-
- cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN;
- tx_size += ISCSI_CRC_LEN;
- pr_debug("Attaching CRC32 HeaderDigest for R2T"
- " PDU 0x%08x\n", *header_digest);
- }
-
pr_debug("Built %sR2T, ITT: 0x%08x, TTT: 0x%08x, StatSN:"
" 0x%08x, R2TSN: 0x%08x, Offset: %u, DDTL: %u, CID: %hu\n",
(!r2t->recovery_r2t) ? "" : "Recovery ", cmd->init_task_tag,
r2t->targ_xfer_tag, ntohl(hdr->statsn), r2t->r2t_sn,
r2t->offset, r2t->xfer_len, conn->cid);
- cmd->iov_misc_count = 1;
- cmd->tx_size = tx_size;
-
spin_lock_bh(&cmd->r2t_lock);
r2t->sent_r2t = 1;
spin_unlock_bh(&cmd->r2t_lock);
- ret = iscsit_send_tx_data(cmd, conn, 1);
+ ret = conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
if (ret < 0) {
- iscsit_tx_thread_wait_for_tcp(conn);
return ret;
}
@@ -3085,8 +3107,8 @@ static int iscsit_send_r2t(
* connection recovery.
*/
int iscsit_build_r2ts_for_cmd(
- struct iscsi_conn *conn,
- struct iscsi_cmd *cmd,
+ struct iscsit_conn *conn,
+ struct iscsit_cmd *cmd,
bool recovery)
{
int first_r2t = 1;
@@ -3126,6 +3148,12 @@ int iscsit_build_r2ts_for_cmd(
else
xfer_len = conn->sess->sess_ops->MaxBurstLength;
}
+
+ if ((s32)xfer_len < 0) {
+ cmd->cmd_flags |= ICF_SENT_LAST_R2T;
+ break;
+ }
+
cmd->r2t_offset += xfer_len;
if (cmd->r2t_offset == cmd->se_cmd.data_length)
@@ -3160,16 +3188,15 @@ int iscsit_build_r2ts_for_cmd(
return 0;
}
+EXPORT_SYMBOL(iscsit_build_r2ts_for_cmd);
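Outside of recovery, iscsit_build_r2ts_for_cmd() slices the unsolicited remainder of a write into R2Ts of at most MaxBurstLength bytes, advancing r2t_offset until the command's data length is covered; the new (s32)xfer_len guard simply stops the loop if that arithmetic ever goes negative. The slicing itself, in stand-alone form (hypothetical values, user-space code):

#include <stdio.h>

int main(void)
{
	unsigned int data_length = 100000;	/* cmd->se_cmd.data_length */
	unsigned int max_burst = 32768;		/* sess_ops->MaxBurstLength */
	unsigned int offset = 0;		/* cmd->r2t_offset */

	while (offset < data_length) {
		unsigned int xfer_len = data_length - offset;

		if (xfer_len > max_burst)
			xfer_len = max_burst;
		printf("R2T: offset=%u ddtl=%u\n", offset, xfer_len);
		offset += xfer_len;
	}
	return 0;
}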
-void iscsit_build_rsp_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
+void iscsit_build_rsp_pdu(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
bool inc_stat_sn, struct iscsi_scsi_rsp *hdr)
{
if (inc_stat_sn)
cmd->stat_sn = conn->stat_sn++;
- spin_lock_bh(&conn->sess->session_stats_lock);
- conn->sess->rsp_pdus++;
- spin_unlock_bh(&conn->sess->session_stats_lock);
+ atomic_long_inc(&conn->sess->rsp_pdus);
memset(hdr, 0, ISCSI_HDR_LEN);
hdr->opcode = ISCSI_OP_SCSI_CMD_RSP;
@@ -3188,7 +3215,7 @@ void iscsit_build_rsp_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
iscsit_increment_maxcmdsn(cmd, conn->sess);
hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
- hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+ hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
pr_debug("Built SCSI Response, ITT: 0x%08x, StatSN: 0x%08x,"
" Response: 0x%02x, SAM Status: 0x%02x, CID: %hu\n",
@@ -3197,21 +3224,15 @@ void iscsit_build_rsp_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
}
EXPORT_SYMBOL(iscsit_build_rsp_pdu);
-static int iscsit_send_response(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+static int iscsit_send_response(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
{
struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)&cmd->pdu[0];
- struct kvec *iov;
- u32 padding = 0, tx_size = 0;
- int iov_count = 0;
bool inc_stat_sn = (cmd->i_state == ISTATE_SEND_STATUS);
+ void *data_buf = NULL;
+ u32 padding = 0, data_buf_len = 0;
iscsit_build_rsp_pdu(cmd, conn, inc_stat_sn, hdr);
- iov = &cmd->iov_misc[0];
- iov[iov_count].iov_base = cmd->pdu;
- iov[iov_count++].iov_len = ISCSI_HDR_LEN;
- tx_size += ISCSI_HDR_LEN;
-
/*
* Attach SENSE DATA payload to iSCSI Response PDU
*/
@@ -3223,56 +3244,23 @@ static int iscsit_send_response(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
padding = -(cmd->se_cmd.scsi_sense_length) & 3;
hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
- iov[iov_count].iov_base = cmd->sense_buffer;
- iov[iov_count++].iov_len =
- (cmd->se_cmd.scsi_sense_length + padding);
- tx_size += cmd->se_cmd.scsi_sense_length;
+ data_buf = cmd->sense_buffer;
+ data_buf_len = cmd->se_cmd.scsi_sense_length + padding;
if (padding) {
memset(cmd->sense_buffer +
cmd->se_cmd.scsi_sense_length, 0, padding);
- tx_size += padding;
pr_debug("Adding %u bytes of padding to"
" SENSE.\n", padding);
}
- if (conn->conn_ops->DataDigest) {
- iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
- cmd->sense_buffer,
- (cmd->se_cmd.scsi_sense_length + padding),
- 0, NULL, (u8 *)&cmd->data_crc);
-
- iov[iov_count].iov_base = &cmd->data_crc;
- iov[iov_count++].iov_len = ISCSI_CRC_LEN;
- tx_size += ISCSI_CRC_LEN;
-
- pr_debug("Attaching CRC32 DataDigest for"
- " SENSE, %u bytes CRC 0x%08x\n",
- (cmd->se_cmd.scsi_sense_length + padding),
- cmd->data_crc);
- }
-
pr_debug("Attaching SENSE DATA: %u bytes to iSCSI"
" Response PDU\n",
cmd->se_cmd.scsi_sense_length);
}
- if (conn->conn_ops->HeaderDigest) {
- u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
-
- iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->pdu,
- ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
-
- iov[0].iov_len += ISCSI_CRC_LEN;
- tx_size += ISCSI_CRC_LEN;
- pr_debug("Attaching CRC32 HeaderDigest for Response"
- " PDU 0x%08x\n", *header_digest);
- }
-
- cmd->iov_misc_count = iov_count;
- cmd->tx_size = tx_size;
-
- return 0;
+ return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, data_buf,
+ data_buf_len);
}
static u8 iscsit_convert_tcm_tmr_rsp(struct se_tmr_req *se_tmr)
@@ -3293,7 +3281,7 @@ static u8 iscsit_convert_tcm_tmr_rsp(struct se_tmr_req *se_tmr)
}
void
-iscsit_build_task_mgt_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
+iscsit_build_task_mgt_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
struct iscsi_tm_rsp *hdr)
{
struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
@@ -3307,7 +3295,7 @@ iscsit_build_task_mgt_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
iscsit_increment_maxcmdsn(cmd, conn->sess);
hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
- hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+ hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
pr_debug("Built Task Management Response ITT: 0x%08x,"
" StatSN: 0x%08x, Response: 0x%02x, CID: %hu\n",
@@ -3316,86 +3304,45 @@ iscsit_build_task_mgt_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
EXPORT_SYMBOL(iscsit_build_task_mgt_rsp);
static int
-iscsit_send_task_mgt_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+iscsit_send_task_mgt_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
{
struct iscsi_tm_rsp *hdr = (struct iscsi_tm_rsp *)&cmd->pdu[0];
- u32 tx_size = 0;
iscsit_build_task_mgt_rsp(cmd, conn, hdr);
- cmd->iov_misc[0].iov_base = cmd->pdu;
- cmd->iov_misc[0].iov_len = ISCSI_HDR_LEN;
- tx_size += ISCSI_HDR_LEN;
-
- if (conn->conn_ops->HeaderDigest) {
- u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
-
- iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
- ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
-
- cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN;
- tx_size += ISCSI_CRC_LEN;
- pr_debug("Attaching CRC32 HeaderDigest for Task"
- " Mgmt Response PDU 0x%08x\n", *header_digest);
- }
-
- cmd->iov_misc_count = 1;
- cmd->tx_size = tx_size;
-
- return 0;
-}
-
-static bool iscsit_check_inaddr_any(struct iscsi_np *np)
-{
- bool ret = false;
-
- if (np->np_sockaddr.ss_family == AF_INET6) {
- const struct sockaddr_in6 sin6 = {
- .sin6_addr = IN6ADDR_ANY_INIT };
- struct sockaddr_in6 *sock_in6 =
- (struct sockaddr_in6 *)&np->np_sockaddr;
-
- if (!memcmp(sock_in6->sin6_addr.s6_addr,
- sin6.sin6_addr.s6_addr, 16))
- ret = true;
- } else {
- struct sockaddr_in * sock_in =
- (struct sockaddr_in *)&np->np_sockaddr;
-
- if (sock_in->sin_addr.s_addr == htonl(INADDR_ANY))
- ret = true;
- }
-
- return ret;
+ return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
}
#define SENDTARGETS_BUF_LIMIT 32768U
-static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
+static int
+iscsit_build_sendtargets_response(struct iscsit_cmd *cmd,
+ enum iscsit_transport_type network_transport,
+ int skip_bytes, bool *completed)
{
char *payload = NULL;
- struct iscsi_conn *conn = cmd->conn;
+ struct iscsit_conn *conn = cmd->conn;
struct iscsi_portal_group *tpg;
struct iscsi_tiqn *tiqn;
struct iscsi_tpg_np *tpg_np;
int buffer_len, end_of_buf = 0, len = 0, payload_len = 0;
+ int target_name_printed;
unsigned char buf[ISCSI_IQN_LEN+12]; /* iqn + "TargetName=" + \0 */
unsigned char *text_in = cmd->text_in_ptr, *text_ptr = NULL;
+ bool active;
- buffer_len = max(conn->conn_ops->MaxRecvDataSegmentLength,
+ buffer_len = min(conn->conn_ops->MaxRecvDataSegmentLength,
SENDTARGETS_BUF_LIMIT);
payload = kzalloc(buffer_len, GFP_KERNEL);
- if (!payload) {
- pr_err("Unable to allocate memory for sendtargets"
- " response.\n");
+ if (!payload)
return -ENOMEM;
- }
+
/*
- * Locate pointer to iqn./eui. string for IFC_SENDTARGETS_SINGLE
+ * Locate pointer to iqn./eui. string for ICF_SENDTARGETS_SINGLE
* explicit case..
*/
- if (cmd->cmd_flags & IFC_SENDTARGETS_SINGLE) {
+ if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) {
text_ptr = strchr(text_in, '=');
if (!text_ptr) {
pr_err("Unable to locate '=' string in text_in:"
@@ -3411,47 +3358,76 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
spin_lock(&tiqn_lock);
list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) {
- if ((cmd->cmd_flags & IFC_SENDTARGETS_SINGLE) &&
+ if ((cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) &&
strcmp(tiqn->tiqn, text_ptr)) {
continue;
}
- len = sprintf(buf, "TargetName=%s", tiqn->tiqn);
- len += 1;
-
- if ((len + payload_len) > buffer_len) {
- end_of_buf = 1;
- goto eob;
- }
- memcpy(payload + payload_len, buf, len);
- payload_len += len;
+ target_name_printed = 0;
spin_lock(&tiqn->tiqn_tpg_lock);
list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
- spin_lock(&tpg->tpg_state_lock);
- if ((tpg->tpg_state == TPG_STATE_FREE) ||
- (tpg->tpg_state == TPG_STATE_INACTIVE)) {
- spin_unlock(&tpg->tpg_state_lock);
+ /* If demo_mode_discovery=0 and generate_node_acls=0
+			 * (demo mode disabled) do not return
+ * TargetName+TargetAddress unless a NodeACL exists.
+ */
+
+ if ((tpg->tpg_attrib.generate_node_acls == 0) &&
+ (tpg->tpg_attrib.demo_mode_discovery == 0) &&
+ (!target_tpg_has_node_acl(&tpg->tpg_se_tpg,
+ cmd->conn->sess->sess_ops->InitiatorName))) {
continue;
}
+
+ spin_lock(&tpg->tpg_state_lock);
+ active = (tpg->tpg_state == TPG_STATE_ACTIVE);
spin_unlock(&tpg->tpg_state_lock);
+ if (!active && tpg->tpg_attrib.tpg_enabled_sendtargets)
+ continue;
+
spin_lock(&tpg->tpg_np_lock);
list_for_each_entry(tpg_np, &tpg->tpg_gnp_list,
tpg_np_list) {
struct iscsi_np *np = tpg_np->tpg_np;
- bool inaddr_any = iscsit_check_inaddr_any(np);
+ struct sockaddr_storage *sockaddr;
+
+ if (np->np_network_transport != network_transport)
+ continue;
+
+ if (!target_name_printed) {
+ len = sprintf(buf, "TargetName=%s",
+ tiqn->tiqn);
+ len += 1;
+
+ if ((len + payload_len) > buffer_len) {
+ spin_unlock(&tpg->tpg_np_lock);
+ spin_unlock(&tiqn->tiqn_tpg_lock);
+ end_of_buf = 1;
+ goto eob;
+ }
+
+ if (skip_bytes && len <= skip_bytes) {
+ skip_bytes -= len;
+ } else {
+ memcpy(payload + payload_len, buf, len);
+ payload_len += len;
+ target_name_printed = 1;
+ if (len > skip_bytes)
+ skip_bytes = 0;
+ }
+ }
+
+ if (inet_addr_is_any(&np->np_sockaddr))
+ sockaddr = &conn->local_sockaddr;
+ else
+ sockaddr = &np->np_sockaddr;
len = sprintf(buf, "TargetAddress="
- "%s%s%s:%hu,%hu",
- (np->np_sockaddr.ss_family == AF_INET6) ?
- "[" : "", (inaddr_any == false) ?
- np->np_ip : conn->local_ip,
- (np->np_sockaddr.ss_family == AF_INET6) ?
- "]" : "", (inaddr_any == false) ?
- np->np_port : conn->local_port,
- tpg->tpgt);
+ "%pISpc,%hu",
+ sockaddr,
+ tpg->tpgt);
len += 1;
if ((len + payload_len) > buffer_len) {
@@ -3460,17 +3436,26 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
end_of_buf = 1;
goto eob;
}
- memcpy(payload + payload_len, buf, len);
- payload_len += len;
+
+ if (skip_bytes && len <= skip_bytes) {
+ skip_bytes -= len;
+ } else {
+ memcpy(payload + payload_len, buf, len);
+ payload_len += len;
+ if (len > skip_bytes)
+ skip_bytes = 0;
+ }
}
spin_unlock(&tpg->tpg_np_lock);
}
spin_unlock(&tiqn->tiqn_tpg_lock);
eob:
- if (end_of_buf)
+ if (end_of_buf) {
+ *completed = false;
break;
+ }
- if (cmd->cmd_flags & IFC_SENDTARGETS_SINGLE)
+ if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE)
break;
}
spin_unlock(&tiqn_lock);
@@ -3481,17 +3466,28 @@ eob:
}
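
The skip_bytes/completed pair turns the builder into a resumable generator: each call regenerates the listing from the top but discards the first skip_bytes of it (always a whole number of previously sent entries), so the next Text Response resumes exactly where the last one filled up. The per-entry arithmetic, isolated for illustration:

	/* Append one whole entry, skipping bytes a previous response already sent. */
	static int append_entry(char *payload, int *payload_len, int buffer_len,
				const char *entry, int len, int *skip_bytes)
	{
		if (*payload_len + len > buffer_len)
			return -1;			/* caller sets end_of_buf */

		if (*skip_bytes && len <= *skip_bytes) {
			*skip_bytes -= len;		/* entry already sent */
		} else {
			memcpy(payload + *payload_len, entry, len);
			*payload_len += len;
			*skip_bytes = 0;		/* resume point reached */
		}
		return 0;
	}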
int
-iscsit_build_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
- struct iscsi_text_rsp *hdr)
+iscsit_build_text_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
+ struct iscsi_text_rsp *hdr,
+ enum iscsit_transport_type network_transport)
{
int text_length, padding;
+ bool completed = true;
- text_length = iscsit_build_sendtargets_response(cmd);
+ text_length = iscsit_build_sendtargets_response(cmd, network_transport,
+ cmd->read_data_done,
+ &completed);
if (text_length < 0)
return text_length;
+ if (completed) {
+ hdr->flags = ISCSI_FLAG_CMD_FINAL;
+ } else {
+ hdr->flags = ISCSI_FLAG_TEXT_CONTINUE;
+ cmd->read_data_done += text_length;
+ if (cmd->targ_xfer_tag == 0xFFFFFFFF)
+ cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
+ }
hdr->opcode = ISCSI_OP_TEXT_RSP;
- hdr->flags |= ISCSI_FLAG_CMD_FINAL;
padding = ((-text_length) & 3);
hton24(hdr->dlength, text_length);
hdr->itt = cmd->init_task_tag;
@@ -3500,77 +3496,44 @@ iscsit_build_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
hdr->statsn = cpu_to_be32(cmd->stat_sn);
iscsit_increment_maxcmdsn(cmd, conn->sess);
+ /*
+ * Reset maxcmdsn_inc in multi-part text payload exchanges to
+ * correctly increment MaxCmdSN for each response answering a
+	 * non-immediate text request with a valid CmdSN.
+ */
+ cmd->maxcmdsn_inc = 0;
hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
- hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+ hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
- pr_debug("Built Text Response: ITT: 0x%08x, StatSN: 0x%08x,"
- " Length: %u, CID: %hu\n", cmd->init_task_tag, cmd->stat_sn,
- text_length, conn->cid);
+ pr_debug("Built Text Response: ITT: 0x%08x, TTT: 0x%08x, StatSN: 0x%08x,"
+ " Length: %u, CID: %hu F: %d C: %d\n", cmd->init_task_tag,
+ cmd->targ_xfer_tag, cmd->stat_sn, text_length, conn->cid,
+ !!(hdr->flags & ISCSI_FLAG_CMD_FINAL),
+ !!(hdr->flags & ISCSI_FLAG_TEXT_CONTINUE));
return text_length + padding;
}
EXPORT_SYMBOL(iscsit_build_text_rsp);
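
One detail worth calling out in the builder above: padding = ((-text_length) & 3) is the branch-free idiom for the number of bytes needed to round text_length up to the 4-byte PDU alignment. A standalone check of the identity:

	#include <assert.h>

	int main(void)
	{
		for (unsigned int n = 0; n < 64; n++)
			/* ((-n) & 3) == pad bytes to the next multiple of 4 */
			assert(((0u - n) & 3) == (4 - n % 4) % 4);
		return 0;
	}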
-/*
- * FIXME: Add support for F_BIT and C_BIT when the length is longer than
- * MaxRecvDataSegmentLength.
- */
static int iscsit_send_text_rsp(
- struct iscsi_cmd *cmd,
- struct iscsi_conn *conn)
+ struct iscsit_cmd *cmd,
+ struct iscsit_conn *conn)
{
struct iscsi_text_rsp *hdr = (struct iscsi_text_rsp *)cmd->pdu;
- struct kvec *iov;
- u32 tx_size = 0;
- int text_length, iov_count = 0, rc;
-
- rc = iscsit_build_text_rsp(cmd, conn, hdr);
- if (rc < 0)
- return rc;
-
- text_length = rc;
- iov = &cmd->iov_misc[0];
- iov[iov_count].iov_base = cmd->pdu;
- iov[iov_count++].iov_len = ISCSI_HDR_LEN;
- iov[iov_count].iov_base = cmd->buf_ptr;
- iov[iov_count++].iov_len = text_length;
-
- tx_size += (ISCSI_HDR_LEN + text_length);
-
- if (conn->conn_ops->HeaderDigest) {
- u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
-
- iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
- ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
-
- iov[0].iov_len += ISCSI_CRC_LEN;
- tx_size += ISCSI_CRC_LEN;
- pr_debug("Attaching CRC32 HeaderDigest for"
- " Text Response PDU 0x%08x\n", *header_digest);
- }
-
- if (conn->conn_ops->DataDigest) {
- iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
- cmd->buf_ptr, text_length,
- 0, NULL, (u8 *)&cmd->data_crc);
-
- iov[iov_count].iov_base = &cmd->data_crc;
- iov[iov_count++].iov_len = ISCSI_CRC_LEN;
- tx_size += ISCSI_CRC_LEN;
-
- pr_debug("Attaching DataDigest for %u bytes of text"
- " data, CRC 0x%08x\n", text_length,
- cmd->data_crc);
- }
+ int text_length;
- cmd->iov_misc_count = iov_count;
- cmd->tx_size = tx_size;
+ text_length = iscsit_build_text_rsp(cmd, conn, hdr,
+ conn->conn_transport->transport_type);
+ if (text_length < 0)
+ return text_length;
- return 0;
+ return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL,
+ cmd->buf_ptr,
+ text_length);
}
void
-iscsit_build_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
+iscsit_build_reject(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
struct iscsi_reject *hdr)
{
hdr->opcode = ISCSI_OP_REJECT;
@@ -3581,79 +3544,62 @@ iscsit_build_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
cmd->stat_sn = conn->stat_sn++;
hdr->statsn = cpu_to_be32(cmd->stat_sn);
hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
- hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+ hdr->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
}
EXPORT_SYMBOL(iscsit_build_reject);
static int iscsit_send_reject(
- struct iscsi_cmd *cmd,
- struct iscsi_conn *conn)
+ struct iscsit_cmd *cmd,
+ struct iscsit_conn *conn)
{
struct iscsi_reject *hdr = (struct iscsi_reject *)&cmd->pdu[0];
- struct kvec *iov;
- u32 iov_count = 0, tx_size;
iscsit_build_reject(cmd, conn, hdr);
- iov = &cmd->iov_misc[0];
- iov[iov_count].iov_base = cmd->pdu;
- iov[iov_count++].iov_len = ISCSI_HDR_LEN;
- iov[iov_count].iov_base = cmd->buf_ptr;
- iov[iov_count++].iov_len = ISCSI_HDR_LEN;
-
- tx_size = (ISCSI_HDR_LEN + ISCSI_HDR_LEN);
-
- if (conn->conn_ops->HeaderDigest) {
- u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
-
- iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
- ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
-
- iov[0].iov_len += ISCSI_CRC_LEN;
- tx_size += ISCSI_CRC_LEN;
- pr_debug("Attaching CRC32 HeaderDigest for"
- " REJECT PDU 0x%08x\n", *header_digest);
- }
-
- if (conn->conn_ops->DataDigest) {
- iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->buf_ptr,
- ISCSI_HDR_LEN, 0, NULL, (u8 *)&cmd->data_crc);
-
- iov[iov_count].iov_base = &cmd->data_crc;
- iov[iov_count++].iov_len = ISCSI_CRC_LEN;
- tx_size += ISCSI_CRC_LEN;
- pr_debug("Attaching CRC32 DataDigest for REJECT"
- " PDU 0x%08x\n", cmd->data_crc);
- }
-
- cmd->iov_misc_count = iov_count;
- cmd->tx_size = tx_size;
-
pr_debug("Built Reject PDU StatSN: 0x%08x, Reason: 0x%02x,"
" CID: %hu\n", ntohl(hdr->statsn), hdr->reason, conn->cid);
- return 0;
+ return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL,
+ cmd->buf_ptr,
+ ISCSI_HDR_LEN);
}
-void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
+void iscsit_thread_get_cpumask(struct iscsit_conn *conn)
{
- struct iscsi_thread_set *ts = conn->thread_set;
int ord, cpu;
+ cpumask_var_t conn_allowed_cpumask;
+
/*
- * thread_id is assigned from iscsit_global->ts_bitmap from
- * within iscsi_thread_set.c:iscsi_allocate_thread_sets()
+ * bitmap_id is assigned from iscsit_global->ts_bitmap from
+ * within iscsit_start_kthreads()
*
- * Here we use thread_id to determine which CPU that this
- * iSCSI connection's iscsi_thread_set will be scheduled to
+ * Here we use bitmap_id to determine which CPU that this
+	 * Here we use bitmap_id to determine which CPU this
* execute upon.
*/
- ord = ts->thread_id % cpumask_weight(cpu_online_mask);
- for_each_online_cpu(cpu) {
- if (ord-- == 0) {
- cpumask_set_cpu(cpu, conn->conn_cpumask);
- return;
+ if (!zalloc_cpumask_var(&conn_allowed_cpumask, GFP_KERNEL)) {
+ ord = conn->bitmap_id % cpumask_weight(cpu_online_mask);
+ for_each_online_cpu(cpu) {
+ if (ord-- == 0) {
+ cpumask_set_cpu(cpu, conn->conn_cpumask);
+ return;
+ }
}
+ } else {
+ cpumask_and(conn_allowed_cpumask, iscsit_global->allowed_cpumask,
+ cpu_online_mask);
+
+ cpumask_clear(conn->conn_cpumask);
+ ord = conn->bitmap_id % cpumask_weight(conn_allowed_cpumask);
+ for_each_cpu(cpu, conn_allowed_cpumask) {
+ if (ord-- == 0) {
+ cpumask_set_cpu(cpu, conn->conn_cpumask);
+ free_cpumask_var(conn_allowed_cpumask);
+ return;
+ }
+ }
+ free_cpumask_var(conn_allowed_cpumask);
}
/*
* This should never be reached..
@@ -3662,12 +3608,37 @@ void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
cpumask_setall(conn->conn_cpumask);
}
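
Stripped of the cpumask plumbing, the placement policy is simply "connection slot modulo the number of usable CPUs, then walk to that CPU", spreading connections round-robin across whatever iscsit_global->allowed_cpumask permits. A userspace analogue (an array stands in for the cpumask walk):

	/* Pick a CPU for a connection: its bitmap slot modulo the usable count. */
	static int pick_cpu(const int *usable_cpus, int nr_usable, int bitmap_id)
	{
		int ord = bitmap_id % nr_usable;

		return usable_cpus[ord];	/* the for_each_cpu() walk above */
	}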
-static inline void iscsit_thread_check_cpumask(
- struct iscsi_conn *conn,
+static void iscsit_thread_reschedule(struct iscsit_conn *conn)
+{
+ /*
+	 * If iscsit_global->allowed_cpumask has been modified, reschedule
+	 * the iSCSI connection's RX/TX threads and update
+	 * conn->allowed_cpumask.
+ */
+ if (!cpumask_equal(iscsit_global->allowed_cpumask,
+ conn->allowed_cpumask)) {
+ iscsit_thread_get_cpumask(conn);
+ conn->conn_tx_reset_cpumask = 1;
+ conn->conn_rx_reset_cpumask = 1;
+ cpumask_copy(conn->allowed_cpumask,
+ iscsit_global->allowed_cpumask);
+ }
+}
+
+void iscsit_thread_check_cpumask(
+ struct iscsit_conn *conn,
struct task_struct *p,
int mode)
{
- char buf[128];
+ /*
+	 * The TX and RX threads may call iscsit_thread_check_cpumask()
+	 * at the same time. The RX thread might be faster and return from
+	 * iscsit_thread_reschedule() with conn_rx_reset_cpumask set to 0.
+	 * Then the TX thread sets it back to 1.
+	 * The next time the RX thread loops, it sees conn_rx_reset_cpumask
+	 * set to 1, calls set_cpus_allowed_ptr() again and sets it back to 0.
+ */
+ iscsit_thread_reschedule(conn);
+
/*
* mode == 1 signals iscsi_target_tx_thread() usage.
* mode == 0 signals iscsi_target_rx_thread() usage.
@@ -3675,24 +3646,26 @@ static inline void iscsit_thread_check_cpumask(
if (mode == 1) {
if (!conn->conn_tx_reset_cpumask)
return;
- conn->conn_tx_reset_cpumask = 0;
} else {
if (!conn->conn_rx_reset_cpumask)
return;
- conn->conn_rx_reset_cpumask = 0;
}
+
/*
* Update the CPU mask for this single kthread so that
* both TX and RX kthreads are scheduled to run on the
* same CPU.
*/
- memset(buf, 0, 128);
- cpumask_scnprintf(buf, 128, conn->conn_cpumask);
set_cpus_allowed_ptr(p, conn->conn_cpumask);
+ if (mode == 1)
+ conn->conn_tx_reset_cpumask = 0;
+ else
+ conn->conn_rx_reset_cpumask = 0;
}
+EXPORT_SYMBOL(iscsit_thread_check_cpumask);
-static int
-iscsit_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
+int
+iscsit_immediate_queue(struct iscsit_conn *conn, struct iscsit_cmd *cmd, int state)
{
int ret;
@@ -3704,7 +3677,7 @@ iscsit_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state
break;
case ISTATE_REMOVE:
spin_lock_bh(&conn->cmd_lock);
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
iscsit_free_cmd(cmd, false);
@@ -3733,13 +3706,14 @@ iscsit_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state
err:
return -1;
}
+EXPORT_SYMBOL(iscsit_immediate_queue);
static int
-iscsit_handle_immediate_queue(struct iscsi_conn *conn)
+iscsit_handle_immediate_queue(struct iscsit_conn *conn)
{
struct iscsit_transport *t = conn->conn_transport;
struct iscsi_queue_req *qr;
- struct iscsi_cmd *cmd;
+ struct iscsit_cmd *cmd;
u8 state;
int ret;
@@ -3757,8 +3731,8 @@ iscsit_handle_immediate_queue(struct iscsi_conn *conn)
return 0;
}
-static int
-iscsit_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
+int
+iscsit_response_queue(struct iscsit_conn *conn, struct iscsit_cmd *cmd, int state)
{
int ret;
@@ -3830,18 +3804,11 @@ check_rsp_state:
if (ret < 0)
goto err;
- if (iscsit_send_tx_data(cmd, conn, 1) < 0) {
- iscsit_tx_thread_wait_for_tcp(conn);
- iscsit_unmap_iovec(cmd);
- goto err;
- }
- iscsit_unmap_iovec(cmd);
-
switch (state) {
case ISTATE_SEND_LOGOUTRSP:
if (!iscsit_logout_post_handler(cmd, conn))
- goto restart;
- /* fall through */
+ return -ECONNRESET;
+ fallthrough;
case ISTATE_SEND_STATUS:
case ISTATE_SEND_ASYNCMSG:
case ISTATE_SEND_NOPIN:
@@ -3868,15 +3835,14 @@ check_rsp_state:
err:
return -1;
-restart:
- return -EAGAIN;
}
+EXPORT_SYMBOL(iscsit_response_queue);
-static int iscsit_handle_response_queue(struct iscsi_conn *conn)
+static int iscsit_handle_response_queue(struct iscsit_conn *conn)
{
struct iscsit_transport *t = conn->conn_transport;
struct iscsi_queue_req *qr;
- struct iscsi_cmd *cmd;
+ struct iscsit_cmd *cmd;
u8 state;
int ret;
@@ -3896,21 +3862,15 @@ static int iscsit_handle_response_queue(struct iscsi_conn *conn)
int iscsi_target_tx_thread(void *arg)
{
int ret = 0;
- struct iscsi_conn *conn;
- struct iscsi_thread_set *ts = arg;
+ struct iscsit_conn *conn = arg;
+ bool conn_freed = false;
+
/*
* Allow ourselves to be interrupted by SIGINT so that a
* connection recovery / failure event can be triggered externally.
*/
allow_signal(SIGINT);
-restart:
- conn = iscsi_tx_thread_pre_handler(ts);
- if (!conn)
- goto out;
-
- ret = 0;
-
while (!kthread_should_stop()) {
/*
* Ensure that both TX and RX per connection kthreads
@@ -3919,11 +3879,9 @@ restart:
iscsit_thread_check_cpumask(conn, current, 1);
wait_event_interruptible(conn->queues_wq,
- !iscsit_conn_all_queues_empty(conn) ||
- ts->status == ISCSI_THREAD_SET_RESET);
+ !iscsit_conn_all_queues_empty(conn));
- if ((ts->status == ISCSI_THREAD_SET_RESET) ||
- signal_pending(current))
+ if (signal_pending(current))
goto transport_err;
get_immediate:
@@ -3932,30 +3890,42 @@ get_immediate:
goto transport_err;
ret = iscsit_handle_response_queue(conn);
- if (ret == 1)
+ if (ret == 1) {
goto get_immediate;
- else if (ret == -EAGAIN)
- goto restart;
- else if (ret < 0)
+ } else if (ret == -ECONNRESET) {
+ conn_freed = true;
+ goto out;
+ } else if (ret < 0) {
goto transport_err;
+ }
}
transport_err:
- iscsit_take_action_for_connection_exit(conn);
- goto restart;
+ /*
+ * Avoid the normal connection failure code-path if this connection
+	 * is still within LOGIN mode, in which case the iscsi_np process
+	 * context is responsible for cleaning up the early connection failure.
+ */
+ if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN)
+ iscsit_take_action_for_connection_exit(conn, &conn_freed);
out:
+ if (!conn_freed) {
+ while (!kthread_should_stop()) {
+ msleep(100);
+ }
+ }
return 0;
}
-static int iscsi_target_rx_opcode(struct iscsi_conn *conn, unsigned char *buf)
+static int iscsi_target_rx_opcode(struct iscsit_conn *conn, unsigned char *buf)
{
struct iscsi_hdr *hdr = (struct iscsi_hdr *)buf;
- struct iscsi_cmd *cmd;
+ struct iscsit_cmd *cmd;
int ret = 0;
switch (hdr->opcode & ISCSI_OPCODE_MASK) {
case ISCSI_OP_SCSI_CMD:
- cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+ cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
if (!cmd)
goto reject;
@@ -3967,28 +3937,34 @@ static int iscsi_target_rx_opcode(struct iscsi_conn *conn, unsigned char *buf)
case ISCSI_OP_NOOP_OUT:
cmd = NULL;
if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
- cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+ cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
if (!cmd)
goto reject;
}
ret = iscsit_handle_nop_out(conn, cmd, buf);
break;
case ISCSI_OP_SCSI_TMFUNC:
- cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+ cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
if (!cmd)
goto reject;
ret = iscsit_handle_task_mgt_cmd(conn, cmd, buf);
break;
case ISCSI_OP_TEXT:
- cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
- if (!cmd)
- goto reject;
+ if (hdr->ttt != cpu_to_be32(0xFFFFFFFF)) {
+ cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
+ if (!cmd)
+ goto reject;
+ } else {
+ cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
+ if (!cmd)
+ goto reject;
+ }
ret = iscsit_handle_text_cmd(conn, cmd, buf);
break;
case ISCSI_OP_LOGOUT:
- cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+ cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
if (!cmd)
goto reject;
@@ -4007,17 +3983,9 @@ static int iscsi_target_rx_opcode(struct iscsi_conn *conn, unsigned char *buf)
" opcode while ERL=0, closing iSCSI connection.\n");
return -1;
}
- if (!conn->conn_ops->OFMarker) {
- pr_err("Unable to recover from unknown"
- " opcode while OFMarker=No, closing iSCSI"
- " connection.\n");
- return -1;
- }
- if (iscsit_recover_from_unknown_opcode(conn) < 0) {
- pr_err("Unable to recover from unknown"
- " opcode, closing iSCSI connection.\n");
- return -1;
- }
+ pr_err("Unable to recover from unknown opcode while OFMarker=No,"
+ " closing iSCSI connection.\n");
+ ret = -1;
break;
}
@@ -4026,36 +3994,28 @@ reject:
return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
}
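
The ISCSI_OP_TEXT rework above ties into the multi-part text support earlier in this patch: a Text Request carrying a real TTT is the initiator asking for the next chunk of a continued response, so it must resume the existing command instead of allocating a fresh one. Schematically:

	/* TTT in the incoming Text Request	action taken above
	 * 0xFFFFFFFF (reserved)		new exchange: iscsit_allocate_cmd()
	 * any other value			continuation: iscsit_find_cmd_from_itt()
	 *					resumes the command whose response
	 *					carried ISCSI_FLAG_TEXT_CONTINUE
	 *					(cmd->read_data_done bytes sent so far)
	 */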
-int iscsi_target_rx_thread(void *arg)
+static bool iscsi_target_check_conn_state(struct iscsit_conn *conn)
{
- int ret;
- u8 buffer[ISCSI_HDR_LEN], opcode;
- u32 checksum = 0, digest = 0;
- struct iscsi_conn *conn = NULL;
- struct iscsi_thread_set *ts = arg;
- struct kvec iov;
- /*
- * Allow ourselves to be interrupted by SIGINT so that a
- * connection recovery / failure event can be triggered externally.
- */
- allow_signal(SIGINT);
+ bool ret;
-restart:
- conn = iscsi_rx_thread_pre_handler(ts);
- if (!conn)
- goto out;
+ spin_lock_bh(&conn->state_lock);
+ ret = (conn->conn_state != TARG_CONN_STATE_LOGGED_IN);
+ spin_unlock_bh(&conn->state_lock);
- if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) {
- struct completion comp;
- int rc;
+ return ret;
+}
- init_completion(&comp);
- rc = wait_for_completion_interruptible(&comp);
- if (rc < 0)
- goto transport_err;
+static void iscsit_get_rx_pdu(struct iscsit_conn *conn)
+{
+ int ret;
+ u8 *buffer, *tmp_buf, opcode;
+ u32 checksum = 0, digest = 0;
+ struct iscsi_hdr *hdr;
+ struct kvec iov;
- goto out;
- }
+ buffer = kcalloc(ISCSI_HDR_LEN, sizeof(*buffer), GFP_KERNEL);
+ if (!buffer)
+ return;
while (!kthread_should_stop()) {
/*
@@ -4064,7 +4024,6 @@ restart:
*/
iscsit_thread_check_cpumask(conn, current, 0);
- memset(buffer, 0, ISCSI_HDR_LEN);
memset(&iov, 0, sizeof(struct kvec));
iov.iov_base = buffer;
@@ -4073,7 +4032,26 @@ restart:
ret = rx_data(conn, &iov, 1, ISCSI_HDR_LEN);
if (ret != ISCSI_HDR_LEN) {
iscsit_rx_thread_wait_for_tcp(conn);
- goto transport_err;
+ break;
+ }
+
+ hdr = (struct iscsi_hdr *) buffer;
+ if (hdr->hlength) {
+ iov.iov_len = hdr->hlength * 4;
+ tmp_buf = krealloc(buffer,
+ ISCSI_HDR_LEN + iov.iov_len,
+ GFP_KERNEL);
+ if (!tmp_buf)
+ break;
+
+ buffer = tmp_buf;
+ iov.iov_base = &buffer[ISCSI_HDR_LEN];
+
+ ret = rx_data(conn, &iov, 1, iov.iov_len);
+ if (ret != iov.iov_len) {
+ iscsit_rx_thread_wait_for_tcp(conn);
+ break;
+ }
}
if (conn->conn_ops->HeaderDigest) {
@@ -4083,13 +4061,11 @@ restart:
ret = rx_data(conn, &iov, 1, ISCSI_CRC_LEN);
if (ret != ISCSI_CRC_LEN) {
iscsit_rx_thread_wait_for_tcp(conn);
- goto transport_err;
+ break;
}
- iscsit_do_crypto_hash_buf(&conn->conn_rx_hash,
- buffer, ISCSI_HDR_LEN,
- 0, NULL, (u8 *)&checksum);
-
+ checksum = iscsit_crc_buf(buffer, ISCSI_HDR_LEN, 0,
+ NULL);
if (digest != checksum) {
pr_err("HeaderDigest CRC32C failed,"
" received 0x%08x, computed 0x%08x\n",
@@ -4099,9 +4075,7 @@ restart:
* hit default in the switch below.
*/
memset(buffer, 0xff, ISCSI_HDR_LEN);
- spin_lock_bh(&conn->sess->session_stats_lock);
- conn->sess->conn_digest_errors++;
- spin_unlock_bh(&conn->sess->session_stats_lock);
+ atomic_long_inc(&conn->sess->conn_digest_errors);
} else {
pr_debug("Got HeaderDigest CRC32C"
" 0x%08x\n", checksum);
@@ -4109,7 +4083,7 @@ restart:
}
if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)
- goto transport_err;
+ break;
opcode = buffer[0] & ISCSI_OPCODE_MASK;
@@ -4120,51 +4094,124 @@ restart:
" while in Discovery Session, rejecting.\n", opcode);
iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
buffer);
- goto transport_err;
+ break;
}
ret = iscsi_target_rx_opcode(conn, buffer);
if (ret < 0)
- goto transport_err;
+ break;
}
-transport_err:
+ kfree(buffer);
+}
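
The new AHS handling sizes the second read from the BHS itself: hdr->hlength counts the Additional Header Segment length in four-byte words, so the receive buffer is regrown to ISCSI_HDR_LEN + hlength * 4 before the opcode handlers run. The sizing, with an assumed value for illustration:

	/* BHS is a fixed 48 bytes; hlength is the AHS length in 4-byte words. */
	static unsigned int pdu_header_size(unsigned char hlength)
	{
		return 48 /* ISCSI_HDR_LEN */ + (unsigned int)hlength * 4;
	}

	/* e.g. hlength == 2 buffers 48 + 8 = 56 bytes; hlength == 0 keeps the
	 * original single 48-byte rx_data() call.
	 */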
+
+int iscsi_target_rx_thread(void *arg)
+{
+ int rc;
+ struct iscsit_conn *conn = arg;
+ bool conn_freed = false;
+
+ /*
+ * Allow ourselves to be interrupted by SIGINT so that a
+ * connection recovery / failure event can be triggered externally.
+ */
+ allow_signal(SIGINT);
+ /*
+ * Wait for iscsi_post_login_handler() to complete before allowing
+ * incoming iscsi/tcp socket I/O, and/or failing the connection.
+ */
+ rc = wait_for_completion_interruptible(&conn->rx_login_comp);
+ if (rc < 0 || iscsi_target_check_conn_state(conn))
+ goto out;
+
+ if (!conn->conn_transport->iscsit_get_rx_pdu)
+ return 0;
+
+ conn->conn_transport->iscsit_get_rx_pdu(conn);
+
if (!signal_pending(current))
atomic_set(&conn->transport_failed, 1);
- iscsit_take_action_for_connection_exit(conn);
- goto restart;
+ iscsit_take_action_for_connection_exit(conn, &conn_freed);
+
out:
+ if (!conn_freed) {
+ while (!kthread_should_stop()) {
+ msleep(100);
+ }
+ }
+
return 0;
}
-static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
+static void iscsit_release_commands_from_conn(struct iscsit_conn *conn)
{
- struct iscsi_cmd *cmd = NULL, *cmd_tmp = NULL;
- struct iscsi_session *sess = conn->sess;
+ LIST_HEAD(tmp_list);
+ struct iscsit_cmd *cmd = NULL, *cmd_tmp = NULL;
+ struct iscsit_session *sess = conn->sess;
/*
* We expect this function to only ever be called from either RX or TX
* thread context via iscsit_close_connection() once the other context
* has been reset -> returned sleeping pre-handler state.
*/
spin_lock_bh(&conn->cmd_lock);
- list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) {
+ list_splice_init(&conn->conn_cmd_list, &tmp_list);
- list_del(&cmd->i_conn_node);
- spin_unlock_bh(&conn->cmd_lock);
+ list_for_each_entry_safe(cmd, cmd_tmp, &tmp_list, i_conn_node) {
+ struct se_cmd *se_cmd = &cmd->se_cmd;
- iscsit_increment_maxcmdsn(cmd, sess);
+ if (!se_cmd->se_tfo)
+ continue;
- iscsit_free_cmd(cmd, true);
+ spin_lock_irq(&se_cmd->t_state_lock);
+ if (se_cmd->transport_state & CMD_T_ABORTED) {
+ if (!(se_cmd->transport_state & CMD_T_TAS))
+ /*
+ * LIO's abort path owns the cleanup for this,
+ * so put it back on the list and let
+ * aborted_task handle it.
+ */
+ list_move_tail(&cmd->i_conn_node,
+ &conn->conn_cmd_list);
+ } else {
+ se_cmd->transport_state |= CMD_T_FABRIC_STOP;
+ }
- spin_lock_bh(&conn->cmd_lock);
+ if (cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
+ /*
+ * We never submitted the cmd to LIO core, so we have
+ * to tell LIO to perform the completion process.
+ */
+ spin_unlock_irq(&se_cmd->t_state_lock);
+ target_complete_cmd(&cmd->se_cmd, SAM_STAT_TASK_ABORTED);
+ continue;
+ }
+ spin_unlock_irq(&se_cmd->t_state_lock);
}
spin_unlock_bh(&conn->cmd_lock);
+
+ list_for_each_entry_safe(cmd, cmd_tmp, &tmp_list, i_conn_node) {
+ list_del_init(&cmd->i_conn_node);
+
+ iscsit_increment_maxcmdsn(cmd, sess);
+ iscsit_free_cmd(cmd, true);
+
+ }
+
+ /*
+ * Wait on commands that were cleaned up via the aborted_task path.
+ * LLDs that implement iscsit_wait_conn will already have waited for
+ * commands.
+ */
+ if (!conn->conn_transport->iscsit_wait_conn) {
+ target_stop_cmd_counter(conn->cmd_cnt);
+ target_wait_for_cmds(conn->cmd_cnt);
+ }
}
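
The rework above is the classic splice-then-process pattern: detach the whole list in O(1) under the lock, decide ownership per command, and do the actual freeing with the lock dropped. Reduced to its shape (owned_by_abort_path() is a stand-in for the CMD_T_ABORTED/CMD_T_TAS logic):

	spin_lock_bh(&conn->cmd_lock);
	list_splice_init(&conn->conn_cmd_list, &tmp_list);	/* O(1) detach */
	list_for_each_entry_safe(cmd, cmd_tmp, &tmp_list, i_conn_node) {
		if (owned_by_abort_path(cmd))		/* stand-in predicate */
			list_move_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
	}
	spin_unlock_bh(&conn->cmd_lock);

	/* Whatever is still on tmp_list can now be freed without the lock. */
	list_for_each_entry_safe(cmd, cmd_tmp, &tmp_list, i_conn_node) {
		list_del_init(&cmd->i_conn_node);
		iscsit_free_cmd(cmd, true);
	}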
static void iscsit_stop_timers_for_cmds(
- struct iscsi_conn *conn)
+ struct iscsit_conn *conn)
{
- struct iscsi_cmd *cmd;
+ struct iscsit_cmd *cmd;
spin_lock_bh(&conn->cmd_lock);
list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
@@ -4175,43 +4222,70 @@ static void iscsit_stop_timers_for_cmds(
}
int iscsit_close_connection(
- struct iscsi_conn *conn)
+ struct iscsit_conn *conn)
{
int conn_logout = (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT);
- struct iscsi_session *sess = conn->sess;
+ struct iscsit_session *sess = conn->sess;
pr_debug("Closing iSCSI connection CID %hu on SID:"
" %u\n", conn->cid, sess->sid);
/*
- * Always up conn_logout_comp just in case the RX Thread is sleeping
- * and the logout response never got sent because the connection
- * failed.
+ * Always up conn_logout_comp for the traditional TCP and HW_OFFLOAD
+ * case just in case the RX Thread in iscsi_target_rx_opcode() is
+ * sleeping and the logout response never got sent because the
+ * connection failed.
+ *
+ * However for iser-target, isert_wait4logout() is using conn_logout_comp
+ * to signal logout response TX interrupt completion. Go ahead and skip
+ * this for iser since isert_rx_opcode() does not wait on logout failure,
+ * and to avoid iscsit_conn pointer dereference in iser-target code.
*/
- complete(&conn->conn_logout_comp);
+ if (!conn->conn_transport->rdma_shutdown)
+ complete(&conn->conn_logout_comp);
+
+ if (!strcmp(current->comm, ISCSI_RX_THREAD_NAME)) {
+ if (conn->tx_thread &&
+ cmpxchg(&conn->tx_thread_active, true, false)) {
+ send_sig(SIGINT, conn->tx_thread, 1);
+ kthread_stop(conn->tx_thread);
+ }
+ } else if (!strcmp(current->comm, ISCSI_TX_THREAD_NAME)) {
+ if (conn->rx_thread &&
+ cmpxchg(&conn->rx_thread_active, true, false)) {
+ send_sig(SIGINT, conn->rx_thread, 1);
+ kthread_stop(conn->rx_thread);
+ }
+ }
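+	/*
+	 * Note the cmpxchg() above: whichever context flips
+	 * *_thread_active from true to false first owns the
+	 * kthread_stop(); a racing caller reads back false and skips it.
+	 */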
- iscsi_release_thread_set(conn);
+ spin_lock(&iscsit_global->ts_bitmap_lock);
+ bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id,
+ get_order(1));
+ spin_unlock(&iscsit_global->ts_bitmap_lock);
iscsit_stop_timers_for_cmds(conn);
- iscsit_stop_nopin_response_timer(conn);
iscsit_stop_nopin_timer(conn);
- iscsit_free_queue_reqs_for_conn(conn);
+ iscsit_stop_nopin_response_timer(conn);
+
+ if (conn->conn_transport->iscsit_wait_conn)
+ conn->conn_transport->iscsit_wait_conn(conn);
/*
* During Connection recovery drop unacknowledged out of order
* commands for this connection, and prepare the other commands
- * for realligence.
+ * for reallegiance.
*
* During normal operation clear the out of order commands (but
* do not free the struct iscsi_ooo_cmdsn's) and release all
- * struct iscsi_cmds.
+ * struct iscsit_cmds.
*/
if (atomic_read(&conn->connection_recovery)) {
iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(conn);
- iscsit_prepare_cmds_for_realligance(conn);
+ iscsit_prepare_cmds_for_reallegiance(conn);
} else {
iscsit_clear_ooo_cmdsns_for_conn(conn);
iscsit_release_commands_from_conn(conn);
}
+ iscsit_free_queue_reqs_for_conn(conn);
/*
* Handle decrementing session or connection usage count if
@@ -4278,34 +4352,19 @@ int iscsit_close_connection(
*/
iscsit_check_conn_usage_count(conn);
- if (conn->conn_rx_hash.tfm)
- crypto_free_hash(conn->conn_rx_hash.tfm);
- if (conn->conn_tx_hash.tfm)
- crypto_free_hash(conn->conn_tx_hash.tfm);
-
- if (conn->conn_cpumask)
- free_cpumask_var(conn->conn_cpumask);
-
- kfree(conn->conn_ops);
- conn->conn_ops = NULL;
-
if (conn->sock)
sock_release(conn->sock);
if (conn->conn_transport->iscsit_free_conn)
conn->conn_transport->iscsit_free_conn(conn);
- iscsit_put_transport(conn->conn_transport);
-
- conn->thread_set = NULL;
-
pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
conn->conn_state = TARG_CONN_STATE_FREE;
- kfree(conn);
+ iscsit_free_conn(conn);
spin_lock_bh(&sess->conn_lock);
atomic_dec(&sess->nconn);
- pr_debug("Decremented iSCSI connection count to %hu from node:"
+ pr_debug("Decremented iSCSI connection count to %d from node:"
" %s\n", atomic_read(&sess->nconn),
sess->sess_ops->InitiatorName);
/*
@@ -4354,41 +4413,49 @@ int iscsit_close_connection(
if (!atomic_read(&sess->session_reinstatement) &&
atomic_read(&sess->session_fall_back_to_erl0)) {
spin_unlock_bh(&sess->conn_lock);
- target_put_session(sess->se_sess);
+ complete_all(&sess->session_wait_comp);
+ iscsit_close_session(sess, true);
return 0;
} else if (atomic_read(&sess->session_logout)) {
pr_debug("Moving to TARG_SESS_STATE_FREE.\n");
sess->session_state = TARG_SESS_STATE_FREE;
- spin_unlock_bh(&sess->conn_lock);
- if (atomic_read(&sess->sleep_on_sess_wait_comp))
- complete(&sess->session_wait_comp);
+ if (atomic_read(&sess->session_close)) {
+ spin_unlock_bh(&sess->conn_lock);
+ complete_all(&sess->session_wait_comp);
+ iscsit_close_session(sess, true);
+ } else {
+ spin_unlock_bh(&sess->conn_lock);
+ }
return 0;
} else {
pr_debug("Moving to TARG_SESS_STATE_FAILED.\n");
sess->session_state = TARG_SESS_STATE_FAILED;
- if (!atomic_read(&sess->session_continuation)) {
- spin_unlock_bh(&sess->conn_lock);
+ if (!atomic_read(&sess->session_continuation))
iscsit_start_time2retain_handler(sess);
- } else
- spin_unlock_bh(&sess->conn_lock);
- if (atomic_read(&sess->sleep_on_sess_wait_comp))
- complete(&sess->session_wait_comp);
+ if (atomic_read(&sess->session_close)) {
+ spin_unlock_bh(&sess->conn_lock);
+ complete_all(&sess->session_wait_comp);
+ iscsit_close_session(sess, true);
+ } else {
+ spin_unlock_bh(&sess->conn_lock);
+ }
return 0;
}
- spin_unlock_bh(&sess->conn_lock);
-
- return 0;
}
-int iscsit_close_session(struct iscsi_session *sess)
+/*
+ * If the iSCSI Session for the iSCSI Initiator Node exists,
+ * forcefully shut down the iSCSI NEXUS.
+ */
+int iscsit_close_session(struct iscsit_session *sess, bool can_sleep)
{
- struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
+ struct iscsi_portal_group *tpg = sess->tpg;
struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
if (atomic_read(&sess->nconn)) {
@@ -4404,6 +4471,9 @@ int iscsit_close_session(struct iscsi_session *sess)
iscsit_stop_time2retain_timer(sess);
spin_unlock_bh(&se_tpg->session_lock);
+ if (sess->sess_ops->ErrorRecoveryLevel == 2)
+ iscsit_free_connection_recovery_entries(sess);
+
/*
* transport_deregister_session_configfs() will clear the
 * struct se_node_acl->nacl_sess pointer now as an iscsi_np process context
@@ -4419,22 +4489,14 @@ int iscsit_close_session(struct iscsi_session *sess)
 * time2retain handler) and contain an active session usage count we
* restart the timer and exit.
*/
- if (!in_interrupt()) {
- if (iscsit_check_session_usage_count(sess) == 1)
- iscsit_stop_session(sess, 1, 1);
- } else {
- if (iscsit_check_session_usage_count(sess) == 2) {
- atomic_set(&sess->session_logout, 0);
- iscsit_start_time2retain_handler(sess);
- return 0;
- }
+ if (iscsit_check_session_usage_count(sess, can_sleep)) {
+ atomic_set(&sess->session_logout, 0);
+ iscsit_start_time2retain_handler(sess);
+ return 0;
}
transport_deregister_session(sess->se_sess);
- if (sess->sess_ops->ErrorRecoveryLevel == 2)
- iscsit_free_connection_recovery_entires(sess);
-
iscsit_free_all_ooo_cmdsns(sess);
spin_lock_bh(&se_tpg->session_lock);
@@ -4449,10 +4511,7 @@ int iscsit_close_session(struct iscsi_session *sess)
pr_debug("Decremented number of active iSCSI Sessions on"
" iSCSI TPG: %hu to %u\n", tpg->tpgt, tpg->nsessions);
- spin_lock(&sess_idr_lock);
- idr_remove(&sess_idr, sess->session_index);
- spin_unlock(&sess_idr_lock);
-
+ ida_free(&sess_ida, sess->session_index);
kfree(sess->sess_ops);
sess->sess_ops = NULL;
spin_unlock_bh(&se_tpg->session_lock);
@@ -4462,41 +4521,59 @@ int iscsit_close_session(struct iscsi_session *sess)
}
static void iscsit_logout_post_handler_closesession(
- struct iscsi_conn *conn)
+ struct iscsit_conn *conn)
{
- struct iscsi_session *sess = conn->sess;
-
- iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD);
- iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD);
+ struct iscsit_session *sess = conn->sess;
+ int sleep = 1;
+ /*
+ * Traditional iscsi/tcp will invoke this logic from TX thread
+ * context during session logout, so clear tx_thread_active and
+	 * sleep if iscsit_close_connection() has not already occurred.
+	 *
+	 * Since iser-target invokes this logic from its own workqueue,
+ * always sleep waiting for RX/TX thread shutdown to complete
+ * within iscsit_close_connection().
+ */
+ if (!conn->conn_transport->rdma_shutdown) {
+ sleep = cmpxchg(&conn->tx_thread_active, true, false);
+ if (!sleep)
+ return;
+ }
atomic_set(&conn->conn_logout_remove, 0);
complete(&conn->conn_logout_comp);
iscsit_dec_conn_usage_count(conn);
- iscsit_stop_session(sess, 1, 1);
+ atomic_set(&sess->session_close, 1);
+ iscsit_stop_session(sess, sleep, sleep);
iscsit_dec_session_usage_count(sess);
- target_put_session(sess->se_sess);
}
static void iscsit_logout_post_handler_samecid(
- struct iscsi_conn *conn)
+ struct iscsit_conn *conn)
{
- iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD);
- iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD);
+ int sleep = 1;
+
+ if (!conn->conn_transport->rdma_shutdown) {
+ sleep = cmpxchg(&conn->tx_thread_active, true, false);
+ if (!sleep)
+ return;
+ }
atomic_set(&conn->conn_logout_remove, 0);
complete(&conn->conn_logout_comp);
- iscsit_cause_connection_reinstatement(conn, 1);
+ iscsit_cause_connection_reinstatement(conn, sleep);
iscsit_dec_conn_usage_count(conn);
}
static void iscsit_logout_post_handler_diffcid(
- struct iscsi_conn *conn,
+ struct iscsit_conn *conn,
u16 cid)
{
- struct iscsi_conn *l_conn;
- struct iscsi_session *sess = conn->sess;
+ struct iscsit_conn *l_conn;
+ struct iscsit_session *sess = conn->sess;
+ bool conn_found = false;
if (!sess)
return;
@@ -4505,12 +4582,13 @@ static void iscsit_logout_post_handler_diffcid(
list_for_each_entry(l_conn, &sess->sess_conn_list, conn_list) {
if (l_conn->cid == cid) {
iscsit_inc_conn_usage_count(l_conn);
+ conn_found = true;
break;
}
}
spin_unlock_bh(&sess->conn_lock);
- if (!l_conn)
+ if (!conn_found)
return;
if (l_conn->sock)
@@ -4529,8 +4607,8 @@ static void iscsit_logout_post_handler_diffcid(
* Return of 0 causes the TX thread to restart.
*/
int iscsit_logout_post_handler(
- struct iscsi_cmd *cmd,
- struct iscsi_conn *conn)
+ struct iscsit_cmd *cmd,
+ struct iscsit_conn *conn)
{
int ret = 0;
@@ -4543,7 +4621,6 @@ int iscsit_logout_post_handler(
iscsit_logout_post_handler_closesession(conn);
break;
}
- ret = 0;
break;
case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION:
if (conn->cid == cmd->logout_cid) {
@@ -4554,7 +4631,6 @@ int iscsit_logout_post_handler(
iscsit_logout_post_handler_samecid(conn);
break;
}
- ret = 0;
} else {
switch (cmd->logout_response) {
case ISCSI_LOGOUT_SUCCESS:
@@ -4588,76 +4664,16 @@ int iscsit_logout_post_handler(
}
EXPORT_SYMBOL(iscsit_logout_post_handler);
-void iscsit_fail_session(struct iscsi_session *sess)
-{
- struct iscsi_conn *conn;
-
- spin_lock_bh(&sess->conn_lock);
- list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
- pr_debug("Moving to TARG_CONN_STATE_CLEANUP_WAIT.\n");
- conn->conn_state = TARG_CONN_STATE_CLEANUP_WAIT;
- }
- spin_unlock_bh(&sess->conn_lock);
-
- pr_debug("Moving to TARG_SESS_STATE_FAILED.\n");
- sess->session_state = TARG_SESS_STATE_FAILED;
-}
-
-int iscsit_free_session(struct iscsi_session *sess)
-{
- u16 conn_count = atomic_read(&sess->nconn);
- struct iscsi_conn *conn, *conn_tmp = NULL;
- int is_last;
-
- spin_lock_bh(&sess->conn_lock);
- atomic_set(&sess->sleep_on_sess_wait_comp, 1);
-
- list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list,
- conn_list) {
- if (conn_count == 0)
- break;
-
- if (list_is_last(&conn->conn_list, &sess->sess_conn_list)) {
- is_last = 1;
- } else {
- iscsit_inc_conn_usage_count(conn_tmp);
- is_last = 0;
- }
- iscsit_inc_conn_usage_count(conn);
-
- spin_unlock_bh(&sess->conn_lock);
- iscsit_cause_connection_reinstatement(conn, 1);
- spin_lock_bh(&sess->conn_lock);
-
- iscsit_dec_conn_usage_count(conn);
- if (is_last == 0)
- iscsit_dec_conn_usage_count(conn_tmp);
-
- conn_count--;
- }
-
- if (atomic_read(&sess->nconn)) {
- spin_unlock_bh(&sess->conn_lock);
- wait_for_completion(&sess->session_wait_comp);
- } else
- spin_unlock_bh(&sess->conn_lock);
-
- target_put_session(sess->se_sess);
- return 0;
-}
-
void iscsit_stop_session(
- struct iscsi_session *sess,
+ struct iscsit_session *sess,
int session_sleep,
int connection_sleep)
{
u16 conn_count = atomic_read(&sess->nconn);
- struct iscsi_conn *conn, *conn_tmp = NULL;
+ struct iscsit_conn *conn, *conn_tmp = NULL;
int is_last;
spin_lock_bh(&sess->conn_lock);
- if (session_sleep)
- atomic_set(&sess->sleep_on_sess_wait_comp, 1);
if (connection_sleep) {
list_for_each_entry_safe(conn, conn_tmp, &sess->sess_conn_list,
@@ -4696,9 +4712,10 @@ void iscsit_stop_session(
int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
{
- struct iscsi_session *sess;
+ struct iscsit_session *sess;
struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
struct se_session *se_sess, *se_sess_tmp;
+ LIST_HEAD(free_list);
int session_count = 0;
spin_lock_bh(&se_tpg->session_lock);
@@ -4709,25 +4726,34 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list,
sess_list) {
- sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ sess = (struct iscsit_session *)se_sess->fabric_sess_ptr;
spin_lock(&sess->conn_lock);
if (atomic_read(&sess->session_fall_back_to_erl0) ||
atomic_read(&sess->session_logout) ||
+ atomic_read(&sess->session_close) ||
(sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
spin_unlock(&sess->conn_lock);
continue;
}
+ iscsit_inc_session_usage_count(sess);
atomic_set(&sess->session_reinstatement, 1);
+ atomic_set(&sess->session_fall_back_to_erl0, 1);
+ atomic_set(&sess->session_close, 1);
spin_unlock(&sess->conn_lock);
- spin_unlock_bh(&se_tpg->session_lock);
- iscsit_free_session(sess);
- spin_lock_bh(&se_tpg->session_lock);
+ list_move_tail(&se_sess->sess_list, &free_list);
+ }
+ spin_unlock_bh(&se_tpg->session_lock);
+
+ list_for_each_entry_safe(se_sess, se_sess_tmp, &free_list, sess_list) {
+ sess = (struct iscsit_session *)se_sess->fabric_sess_ptr;
+ list_del_init(&se_sess->sess_list);
+ iscsit_stop_session(sess, 1, 1);
+ iscsit_dec_session_usage_count(sess);
session_count++;
}
- spin_unlock_bh(&se_tpg->session_lock);
pr_debug("Released %d iSCSI Session(s) from Target Portal"
" Group: %hu\n", session_count, tpg->tpgt);
diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h
index 2c437cb8ca00..f4addae2aae4 100644
--- a/drivers/target/iscsi/iscsi_target.h
+++ b/drivers/target/iscsi/iscsi_target.h
@@ -1,49 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ISCSI_TARGET_H
#define ISCSI_TARGET_H
+#include <linux/types.h>
+#include <linux/spinlock.h>
+
+struct iscsit_cmd;
+struct iscsit_conn;
+struct iscsi_np;
+struct iscsi_portal_group;
+struct iscsit_session;
+struct iscsi_tpg_np;
+struct kref;
+struct sockaddr_storage;
+
extern struct iscsi_tiqn *iscsit_get_tiqn_for_login(unsigned char *);
-extern struct iscsi_tiqn *iscsit_get_tiqn(unsigned char *, int);
extern void iscsit_put_tiqn_for_login(struct iscsi_tiqn *);
extern struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *);
extern void iscsit_del_tiqn(struct iscsi_tiqn *);
extern int iscsit_access_np(struct iscsi_np *, struct iscsi_portal_group *);
-extern int iscsit_deaccess_np(struct iscsi_np *, struct iscsi_portal_group *);
-extern bool iscsit_check_np_match(struct __kernel_sockaddr_storage *,
+extern void iscsit_login_kref_put(struct kref *);
+extern int iscsit_deaccess_np(struct iscsi_np *, struct iscsi_portal_group *,
+ struct iscsi_tpg_np *);
+extern bool iscsit_check_np_match(struct sockaddr_storage *,
struct iscsi_np *, int);
-extern struct iscsi_np *iscsit_add_np(struct __kernel_sockaddr_storage *,
- char *, int);
+extern struct iscsi_np *iscsit_add_np(struct sockaddr_storage *,
+ int);
extern int iscsit_reset_np_thread(struct iscsi_np *, struct iscsi_tpg_np *,
- struct iscsi_portal_group *);
+ struct iscsi_portal_group *, bool);
extern int iscsit_del_np(struct iscsi_np *);
-extern int iscsit_reject_cmd(struct iscsi_cmd *cmd, u8, unsigned char *);
-extern void iscsit_set_unsoliticed_dataout(struct iscsi_cmd *);
-extern int iscsit_logout_closesession(struct iscsi_cmd *, struct iscsi_conn *);
-extern int iscsit_logout_closeconnection(struct iscsi_cmd *, struct iscsi_conn *);
-extern int iscsit_logout_removeconnforrecovery(struct iscsi_cmd *, struct iscsi_conn *);
-extern int iscsit_send_async_msg(struct iscsi_conn *, u16, u8, u8);
-extern int iscsit_build_r2ts_for_cmd(struct iscsi_conn *, struct iscsi_cmd *, bool recovery);
-extern void iscsit_thread_get_cpumask(struct iscsi_conn *);
+extern int iscsit_reject_cmd(struct iscsit_cmd *cmd, u8, unsigned char *);
+extern void iscsit_set_unsolicited_dataout(struct iscsit_cmd *);
+extern int iscsit_logout_closesession(struct iscsit_cmd *, struct iscsit_conn *);
+extern int iscsit_logout_closeconnection(struct iscsit_cmd *, struct iscsit_conn *);
+extern int iscsit_logout_removeconnforrecovery(struct iscsit_cmd *, struct iscsit_conn *);
+extern int iscsit_build_r2ts_for_cmd(struct iscsit_conn *, struct iscsit_cmd *, bool recovery);
+extern void iscsit_thread_get_cpumask(struct iscsit_conn *);
extern int iscsi_target_tx_thread(void *);
extern int iscsi_target_rx_thread(void *);
-extern int iscsit_close_connection(struct iscsi_conn *);
-extern int iscsit_close_session(struct iscsi_session *);
-extern void iscsit_fail_session(struct iscsi_session *);
-extern int iscsit_free_session(struct iscsi_session *);
-extern void iscsit_stop_session(struct iscsi_session *, int, int);
+extern int iscsit_close_connection(struct iscsit_conn *);
+extern int iscsit_close_session(struct iscsit_session *, bool can_sleep);
+extern void iscsit_stop_session(struct iscsit_session *, int, int);
extern int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *, int);
extern struct iscsit_global *iscsit_global;
-extern struct target_fabric_configfs *lio_target_fabric_configfs;
+extern const struct target_core_fabric_ops iscsi_ops;
extern struct kmem_cache *lio_dr_cache;
extern struct kmem_cache *lio_ooo_cache;
-extern struct kmem_cache *lio_cmd_cache;
extern struct kmem_cache *lio_qr_cache;
extern struct kmem_cache *lio_r2t_cache;
-extern struct idr sess_idr;
+extern struct ida sess_ida;
extern struct mutex auth_id_lock;
-extern spinlock_t sess_idr_lock;
-
#endif /*** ISCSI_TARGET_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index cee17543278c..c8a248bd11be 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -1,68 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* This file houses the main functions for the iSCSI CHAP support
*
- * © Copyright 2007-2011 RisingTide Systems LLC.
- *
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ * (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
******************************************************************************/
+#include <crypto/hash.h>
#include <linux/kernel.h>
#include <linux/string.h>
-#include <linux/crypto.h>
#include <linux/err.h>
+#include <linux/random.h>
#include <linux/scatterlist.h>
-
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_nego.h"
#include "iscsi_target_auth.h"
-static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len)
-{
- int j = DIV_ROUND_UP(len, 2), rc;
-
- rc = hex2bin(dst, src, j);
- if (rc < 0)
- pr_debug("CHAP string contains non hex digit symbols\n");
-
- dst[j] = '\0';
- return j;
-}
-
-static void chap_binaryhex_to_asciihex(char *dst, char *src, int src_len)
+static char *chap_get_digest_name(const int digest_type)
{
- int i;
-
- for (i = 0; i < src_len; i++) {
- sprintf(&dst[i*2], "%02x", (int) src[i] & 0xff);
+ switch (digest_type) {
+ case CHAP_DIGEST_MD5:
+ return "md5";
+ case CHAP_DIGEST_SHA1:
+ return "sha1";
+ case CHAP_DIGEST_SHA256:
+ return "sha256";
+ case CHAP_DIGEST_SHA3_256:
+ return "sha3-256";
+ default:
+ return NULL;
}
}
-static void chap_gen_challenge(
- struct iscsi_conn *conn,
+static int chap_gen_challenge(
+ struct iscsit_conn *conn,
int caller,
char *c_str,
unsigned int *c_len)
{
- unsigned char challenge_asciihex[CHAP_CHALLENGE_LENGTH * 2 + 1];
+ int ret;
+ unsigned char *challenge_asciihex;
struct iscsi_chap *chap = conn->auth_protocol;
- memset(challenge_asciihex, 0, CHAP_CHALLENGE_LENGTH * 2 + 1);
+ challenge_asciihex = kzalloc(chap->challenge_len * 2 + 1, GFP_KERNEL);
+ if (!challenge_asciihex)
+ return -ENOMEM;
- get_random_bytes(chap->challenge, CHAP_CHALLENGE_LENGTH);
- chap_binaryhex_to_asciihex(challenge_asciihex, chap->challenge,
- CHAP_CHALLENGE_LENGTH);
+ memset(chap->challenge, 0, MAX_CHAP_CHALLENGE_LEN);
+
+ ret = get_random_bytes_wait(chap->challenge, chap->challenge_len);
+ if (unlikely(ret))
+ goto out;
+
+ bin2hex(challenge_asciihex, chap->challenge,
+ chap->challenge_len);
/*
* Set CHAP_C, and copy the generated challenge into c_str.
*/
@@ -71,16 +64,84 @@ static void chap_gen_challenge(
pr_debug("[%s] Sending CHAP_C=0x%s\n\n", (caller) ? "server" : "client",
challenge_asciihex);
+
+out:
+ kfree(challenge_asciihex);
+ return ret;
}
+static int chap_test_algorithm(const char *name)
+{
+ struct crypto_shash *tfm;
+
+ tfm = crypto_alloc_shash(name, 0, 0);
+ if (IS_ERR(tfm))
+ return -1;
+
+ crypto_free_shash(tfm);
+ return 0;
+}
+
+static int chap_check_algorithm(const char *a_str)
+{
+ char *tmp, *orig, *token, *digest_name;
+ long digest_type;
+ int r = CHAP_DIGEST_UNKNOWN;
+
+ tmp = kstrdup(a_str, GFP_KERNEL);
+ if (!tmp) {
+ pr_err("Memory allocation failed for CHAP_A temporary buffer\n");
+ return CHAP_DIGEST_UNKNOWN;
+ }
+ orig = tmp;
+
+ token = strsep(&tmp, "=");
+ if (!token)
+ goto out;
+
+ if (strcmp(token, "CHAP_A")) {
+ pr_err("Unable to locate CHAP_A key\n");
+ goto out;
+ }
+ while (token) {
+ token = strsep(&tmp, ",");
+ if (!token)
+ goto out;
+
+ if (kstrtol(token, 10, &digest_type))
+ continue;
+
+ digest_name = chap_get_digest_name(digest_type);
+ if (!digest_name)
+ continue;
+
+ pr_debug("Selected %s Algorithm\n", digest_name);
+ if (chap_test_algorithm(digest_name) < 0) {
+ pr_err("failed to allocate %s algo\n", digest_name);
+ } else {
+ r = digest_type;
+ goto out;
+ }
+ }
+out:
+ kfree(orig);
+ return r;
+}
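
The net effect is first-match-wins over the initiator's preference list, constrained to digests the running kernel can actually instantiate. Assuming the usual constants (5 = md5, 6 = sha1, 7 = sha256, 8 = sha3-256), behavior looks like:

	/* chap_check_algorithm("CHAP_A=5")      -> 5, if the md5 shash loads
	 * chap_check_algorithm("CHAP_A=7,6,5")  -> 7; falls back to 6 or 5 only
	 *                                          when crypto_alloc_shash() fails
	 * chap_check_algorithm("CHAP_A=4")      -> CHAP_DIGEST_UNKNOWN (no mapping
	 *                                          in chap_get_digest_name())
	 */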
+
+static void chap_close(struct iscsit_conn *conn)
+{
+ kfree(conn->auth_protocol);
+ conn->auth_protocol = NULL;
+}
static struct iscsi_chap *chap_server_open(
- struct iscsi_conn *conn,
+ struct iscsit_conn *conn,
struct iscsi_node_auth *auth,
const char *a_str,
char *aic_str,
unsigned int *aic_len)
{
+ int digest_type;
struct iscsi_chap *chap;
if (!(auth->naf_flags & NAF_USERID_SET) ||
@@ -95,82 +156,147 @@ static struct iscsi_chap *chap_server_open(
return NULL;
chap = conn->auth_protocol;
- /*
- * We only support MD5 MDA presently.
- */
- if (strncmp(a_str, "CHAP_A=5", 8)) {
- pr_err("CHAP_A is not MD5.\n");
+ digest_type = chap_check_algorithm(a_str);
+ switch (digest_type) {
+ case CHAP_DIGEST_MD5:
+ chap->digest_size = MD5_SIGNATURE_SIZE;
+ break;
+ case CHAP_DIGEST_SHA1:
+ chap->digest_size = SHA1_SIGNATURE_SIZE;
+ break;
+ case CHAP_DIGEST_SHA256:
+ chap->digest_size = SHA256_SIGNATURE_SIZE;
+ break;
+ case CHAP_DIGEST_SHA3_256:
+ chap->digest_size = SHA3_256_SIGNATURE_SIZE;
+ break;
+ case CHAP_DIGEST_UNKNOWN:
+ default:
+ pr_err("Unsupported CHAP_A value\n");
+ chap_close(conn);
return NULL;
}
- pr_debug("[server] Got CHAP_A=5\n");
- /*
- * Send back CHAP_A set to MD5.
- */
- *aic_len = sprintf(aic_str, "CHAP_A=5");
+
+ chap->digest_name = chap_get_digest_name(digest_type);
+
+ /* Tie the challenge length to the digest size */
+ chap->challenge_len = chap->digest_size;
+
+ pr_debug("[server] Got CHAP_A=%d\n", digest_type);
+ *aic_len = sprintf(aic_str, "CHAP_A=%d", digest_type);
*aic_len += 1;
- chap->digest_type = CHAP_DIGEST_MD5;
- pr_debug("[server] Sending CHAP_A=%d\n", chap->digest_type);
+ pr_debug("[server] Sending CHAP_A=%d\n", digest_type);
+
/*
* Set Identifier.
*/
- chap->id = ISCSI_TPG_C(conn)->tpg_chap_id++;
+ chap->id = conn->tpg->tpg_chap_id++;
*aic_len += sprintf(aic_str + *aic_len, "CHAP_I=%d", chap->id);
*aic_len += 1;
pr_debug("[server] Sending CHAP_I=%d\n", chap->id);
/*
* Generate Challenge.
*/
- chap_gen_challenge(conn, 1, aic_str, aic_len);
+ if (chap_gen_challenge(conn, 1, aic_str, aic_len) < 0) {
+ chap_close(conn);
+ return NULL;
+ }
return chap;
}
-static void chap_close(struct iscsi_conn *conn)
+static const char base64_lookup_table[] =
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
+
+static int chap_base64_decode(u8 *dst, const char *src, size_t len)
{
- kfree(conn->auth_protocol);
- conn->auth_protocol = NULL;
+ int i, bits = 0, ac = 0;
+ const char *p;
+ u8 *cp = dst;
+
+ for (i = 0; i < len; i++) {
+ if (src[i] == '=')
+ return cp - dst;
+
+ p = strchr(base64_lookup_table, src[i]);
+ if (p == NULL || src[i] == 0)
+ return -2;
+
+ ac <<= 6;
+ ac += (p - base64_lookup_table);
+ bits += 6;
+ if (bits >= 8) {
+ *cp++ = (ac >> (bits - 8)) & 0xff;
+ ac &= ~(BIT(16) - BIT(bits - 8));
+ bits -= 8;
+ }
+ }
+ if (ac)
+ return -1;
+
+ return cp - dst;
}
-static int chap_server_compute_md5(
- struct iscsi_conn *conn,
+static int chap_server_compute_hash(
+ struct iscsit_conn *conn,
struct iscsi_node_auth *auth,
char *nr_in_ptr,
char *nr_out_ptr,
unsigned int *nr_out_len)
{
- char *endptr;
unsigned long id;
unsigned char id_as_uchar;
- unsigned char digest[MD5_SIGNATURE_SIZE];
- unsigned char type, response[MD5_SIGNATURE_SIZE * 2 + 2];
- unsigned char identifier[10], *challenge = NULL;
- unsigned char *challenge_binhex = NULL;
- unsigned char client_digest[MD5_SIGNATURE_SIZE];
- unsigned char server_digest[MD5_SIGNATURE_SIZE];
+ unsigned char type;
+ unsigned char identifier[10], *initiatorchg = NULL;
+ unsigned char *initiatorchg_binhex = NULL;
+ unsigned char *digest = NULL;
+ unsigned char *response = NULL;
+ unsigned char *client_digest = NULL;
+ unsigned char *server_digest = NULL;
unsigned char chap_n[MAX_CHAP_N_SIZE], chap_r[MAX_RESPONSE_LENGTH];
+ size_t compare_len;
struct iscsi_chap *chap = conn->auth_protocol;
- struct crypto_hash *tfm;
- struct hash_desc desc;
- struct scatterlist sg;
- int auth_ret = -1, ret, challenge_len;
+ struct crypto_shash *tfm = NULL;
+ struct shash_desc *desc = NULL;
+ int auth_ret = -1, ret, initiatorchg_len;
+
+ digest = kzalloc(chap->digest_size, GFP_KERNEL);
+ if (!digest) {
+ pr_err("Unable to allocate the digest buffer\n");
+ goto out;
+ }
+
+ response = kzalloc(chap->digest_size * 2 + 2, GFP_KERNEL);
+ if (!response) {
+ pr_err("Unable to allocate the response buffer\n");
+ goto out;
+ }
+
+ client_digest = kzalloc(chap->digest_size, GFP_KERNEL);
+ if (!client_digest) {
+ pr_err("Unable to allocate the client_digest buffer\n");
+ goto out;
+ }
+
+ server_digest = kzalloc(chap->digest_size, GFP_KERNEL);
+ if (!server_digest) {
+ pr_err("Unable to allocate the server_digest buffer\n");
+ goto out;
+ }
memset(identifier, 0, 10);
memset(chap_n, 0, MAX_CHAP_N_SIZE);
memset(chap_r, 0, MAX_RESPONSE_LENGTH);
- memset(digest, 0, MD5_SIGNATURE_SIZE);
- memset(response, 0, MD5_SIGNATURE_SIZE * 2 + 2);
- memset(client_digest, 0, MD5_SIGNATURE_SIZE);
- memset(server_digest, 0, MD5_SIGNATURE_SIZE);
- challenge = kzalloc(CHAP_CHALLENGE_STR_LEN, GFP_KERNEL);
- if (!challenge) {
+ initiatorchg = kzalloc(CHAP_CHALLENGE_STR_LEN, GFP_KERNEL);
+ if (!initiatorchg) {
pr_err("Unable to allocate challenge buffer\n");
goto out;
}
- challenge_binhex = kzalloc(CHAP_CHALLENGE_STR_LEN, GFP_KERNEL);
- if (!challenge_binhex) {
- pr_err("Unable to allocate challenge_binhex buffer\n");
+ initiatorchg_binhex = kzalloc(CHAP_CHALLENGE_STR_LEN, GFP_KERNEL);
+ if (!initiatorchg_binhex) {
+ pr_err("Unable to allocate initiatorchg_binhex buffer\n");
goto out;
}
/*
@@ -186,7 +312,9 @@ static int chap_server_compute_md5(
goto out;
}
- if (memcmp(chap_n, auth->userid, strlen(auth->userid)) != 0) {
+ /* Include the terminating NULL in the compare */
+ compare_len = strlen(auth->userid) + 1;
+ if (strncmp(chap_n, auth->userid, compare_len) != 0) {
pr_err("CHAP_N values do not match!\n");
goto out;
}
@@ -199,91 +327,115 @@ static int chap_server_compute_md5(
pr_err("Could not find CHAP_R.\n");
goto out;
}
- if (type != HEX) {
- pr_err("Could not find CHAP_R.\n");
+
+ switch (type) {
+ case HEX:
+ if (strlen(chap_r) != chap->digest_size * 2) {
+ pr_err("Malformed CHAP_R\n");
+ goto out;
+ }
+ if (hex2bin(client_digest, chap_r, chap->digest_size) < 0) {
+ pr_err("Malformed CHAP_R: invalid HEX\n");
+ goto out;
+ }
+ break;
+ case BASE64:
+ if (chap_base64_decode(client_digest, chap_r, strlen(chap_r)) !=
+ chap->digest_size) {
+ pr_err("Malformed CHAP_R: invalid BASE64\n");
+ goto out;
+ }
+ break;
+ default:
+ pr_err("Could not find CHAP_R\n");
goto out;
}
pr_debug("[server] Got CHAP_R=%s\n", chap_r);
- chap_string_to_hex(client_digest, chap_r, strlen(chap_r));
- tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
+ tfm = crypto_alloc_shash(chap->digest_name, 0, 0);
if (IS_ERR(tfm)) {
- pr_err("Unable to allocate struct crypto_hash\n");
+ tfm = NULL;
+ pr_err("Unable to allocate struct crypto_shash\n");
goto out;
}
- desc.tfm = tfm;
- desc.flags = 0;
- ret = crypto_hash_init(&desc);
- if (ret < 0) {
- pr_err("crypto_hash_init() failed\n");
- crypto_free_hash(tfm);
+ desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
+ if (!desc) {
+ pr_err("Unable to allocate struct shash_desc\n");
goto out;
}
- sg_init_one(&sg, &chap->id, 1);
- ret = crypto_hash_update(&desc, &sg, 1);
+ desc->tfm = tfm;
+
+ ret = crypto_shash_init(desc);
if (ret < 0) {
- pr_err("crypto_hash_update() failed for id\n");
- crypto_free_hash(tfm);
+ pr_err("crypto_shash_init() failed\n");
goto out;
}
- sg_init_one(&sg, &auth->password, strlen(auth->password));
- ret = crypto_hash_update(&desc, &sg, strlen(auth->password));
+ ret = crypto_shash_update(desc, &chap->id, 1);
if (ret < 0) {
- pr_err("crypto_hash_update() failed for password\n");
- crypto_free_hash(tfm);
+ pr_err("crypto_shash_update() failed for id\n");
goto out;
}
- sg_init_one(&sg, chap->challenge, CHAP_CHALLENGE_LENGTH);
- ret = crypto_hash_update(&desc, &sg, CHAP_CHALLENGE_LENGTH);
+ ret = crypto_shash_update(desc, (char *)&auth->password,
+ strlen(auth->password));
if (ret < 0) {
- pr_err("crypto_hash_update() failed for challenge\n");
- crypto_free_hash(tfm);
+ pr_err("crypto_shash_update() failed for password\n");
goto out;
}
- ret = crypto_hash_final(&desc, server_digest);
+ ret = crypto_shash_finup(desc, chap->challenge,
+ chap->challenge_len, server_digest);
if (ret < 0) {
- pr_err("crypto_hash_final() failed for server digest\n");
- crypto_free_hash(tfm);
+ pr_err("crypto_shash_finup() failed for challenge\n");
goto out;
}
- crypto_free_hash(tfm);
- chap_binaryhex_to_asciihex(response, server_digest, MD5_SIGNATURE_SIZE);
- pr_debug("[server] MD5 Server Digest: %s\n", response);
+ bin2hex(response, server_digest, chap->digest_size);
+ pr_debug("[server] %s Server Digest: %s\n",
+ chap->digest_name, response);
- if (memcmp(server_digest, client_digest, MD5_SIGNATURE_SIZE) != 0) {
- pr_debug("[server] MD5 Digests do not match!\n\n");
+ if (memcmp(server_digest, client_digest, chap->digest_size) != 0) {
+ pr_debug("[server] %s Digests do not match!\n\n",
+ chap->digest_name);
goto out;
} else
- pr_debug("[server] MD5 Digests match, CHAP connetication"
- " successful.\n\n");
+ pr_debug("[server] %s Digests match, CHAP connection"
+ " successful.\n\n", chap->digest_name);
/*
* One way authentication has succeeded, return now if mutual
* authentication is not enabled.
*/
if (!auth->authenticate_target) {
- kfree(challenge);
- kfree(challenge_binhex);
- return 0;
+ auth_ret = 0;
+ goto out;
}
/*
* Get CHAP_I.
*/
- if (extract_param(nr_in_ptr, "CHAP_I", 10, identifier, &type) < 0) {
+ ret = extract_param(nr_in_ptr, "CHAP_I", 10, identifier, &type);
+ if (ret == -ENOENT) {
+ pr_debug("Could not find CHAP_I. Initiator uses One way authentication.\n");
+ auth_ret = 0;
+ goto out;
+ }
+ if (ret < 0) {
pr_err("Could not find CHAP_I.\n");
goto out;
}
if (type == HEX)
- id = simple_strtoul(&identifier[2], &endptr, 0);
+ ret = kstrtoul(&identifier[2], 0, &id);
else
- id = simple_strtoul(identifier, &endptr, 0);
+ ret = kstrtoul(identifier, 0, &id);
+
+ if (ret < 0) {
+ pr_err("kstrtoul() failed for CHAP identifier: %d\n", ret);
+ goto out;
+ }
if (id > 255) {
pr_err("chap identifier: %lu greater than 255\n", id);
goto out;
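
The kstrtoul() conversion in the hunk above adds error reporting that simple_strtoul() lacked: the old interface silently ignored trailing garbage and could not signal failure. A minimal sketch of the checked pattern (variable names are illustrative, not taken from the patch):

	/* Sketch: checked numeric parse with kstrtoul(). */
	unsigned long id;
	int ret;

	ret = kstrtoul(identifier, 0, &id);	/* base 0 auto-detects 0x/0 prefixes */
	if (ret < 0)
		return ret;			/* -EINVAL or -ERANGE */
	if (id > 255)
		return -EINVAL;			/* CHAP_I is a single octet */
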
@@ -296,77 +448,98 @@ static int chap_server_compute_md5(
* Get CHAP_C.
*/
if (extract_param(nr_in_ptr, "CHAP_C", CHAP_CHALLENGE_STR_LEN,
- challenge, &type) < 0) {
+ initiatorchg, &type) < 0) {
pr_err("Could not find CHAP_C.\n");
goto out;
}
- if (type != HEX) {
+ switch (type) {
+ case HEX:
+ initiatorchg_len = DIV_ROUND_UP(strlen(initiatorchg), 2);
+ if (!initiatorchg_len) {
+ pr_err("Unable to convert incoming challenge\n");
+ goto out;
+ }
+ if (initiatorchg_len > 1024) {
+ pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n");
+ goto out;
+ }
+
+ if (hex2bin(initiatorchg_binhex, initiatorchg,
+ initiatorchg_len) < 0) {
+ pr_err("Malformed CHAP_C: invalid HEX\n");
+ goto out;
+ }
+ break;
+ case BASE64:
+ initiatorchg_len = chap_base64_decode(initiatorchg_binhex,
+ initiatorchg,
+ strlen(initiatorchg));
+ if (initiatorchg_len < 0) {
+ pr_err("Malformed CHAP_C: invalid BASE64\n");
+ goto out;
+ }
+ if (!initiatorchg_len) {
+ pr_err("Unable to convert incoming challenge\n");
+ goto out;
+ }
+ if (initiatorchg_len > 1024) {
+ pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n");
+ goto out;
+ }
+ break;
+ default:
pr_err("Could not find CHAP_C.\n");
goto out;
}
- pr_debug("[server] Got CHAP_C=%s\n", challenge);
- challenge_len = chap_string_to_hex(challenge_binhex, challenge,
- strlen(challenge));
- if (!challenge_len) {
- pr_err("Unable to convert incoming challenge\n");
+
+ pr_debug("[server] Got CHAP_C=%s\n", initiatorchg);
+ /*
+ * During mutual authentication, the CHAP_C generated by the
+ * initiator must not match the original CHAP_C generated by
+ * the target.
+ */
+ if (initiatorchg_len == chap->challenge_len &&
+ !memcmp(initiatorchg_binhex, chap->challenge,
+ initiatorchg_len)) {
+ pr_err("initiator CHAP_C matches target CHAP_C, failing"
+ " login attempt\n");
goto out;
}
/*
* Generate CHAP_N and CHAP_R for mutual authentication.
*/
- tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(tfm)) {
- pr_err("Unable to allocate struct crypto_hash\n");
- goto out;
- }
- desc.tfm = tfm;
- desc.flags = 0;
-
- ret = crypto_hash_init(&desc);
+ ret = crypto_shash_init(desc);
if (ret < 0) {
- pr_err("crypto_hash_init() failed\n");
- crypto_free_hash(tfm);
+ pr_err("crypto_shash_init() failed\n");
goto out;
}
/* To handle both endiannesses */
id_as_uchar = id;
- sg_init_one(&sg, &id_as_uchar, 1);
- ret = crypto_hash_update(&desc, &sg, 1);
+ ret = crypto_shash_update(desc, &id_as_uchar, 1);
if (ret < 0) {
- pr_err("crypto_hash_update() failed for id\n");
- crypto_free_hash(tfm);
+ pr_err("crypto_shash_update() failed for id\n");
goto out;
}
- sg_init_one(&sg, auth->password_mutual,
- strlen(auth->password_mutual));
- ret = crypto_hash_update(&desc, &sg, strlen(auth->password_mutual));
+ ret = crypto_shash_update(desc, auth->password_mutual,
+ strlen(auth->password_mutual));
if (ret < 0) {
- pr_err("crypto_hash_update() failed for"
+ pr_err("crypto_shash_update() failed for"
" password_mutual\n");
- crypto_free_hash(tfm);
goto out;
}
/*
* Convert received challenge to binary hex.
*/
- sg_init_one(&sg, challenge_binhex, challenge_len);
- ret = crypto_hash_update(&desc, &sg, challenge_len);
+ ret = crypto_shash_finup(desc, initiatorchg_binhex, initiatorchg_len,
+ digest);
if (ret < 0) {
- pr_err("crypto_hash_update() failed for ma challenge\n");
- crypto_free_hash(tfm);
+ pr_err("crypto_shash_finup() failed for ma challenge\n");
goto out;
}
- ret = crypto_hash_final(&desc, digest);
- if (ret < 0) {
- pr_err("crypto_hash_final() failed for ma digest\n");
- crypto_free_hash(tfm);
- goto out;
- }
- crypto_free_hash(tfm);
/*
* Generate CHAP_N and CHAP_R.
*/
@@ -376,42 +549,27 @@ static int chap_server_compute_md5(
/*
 * Convert response from binary to ascii hex.
*/
- chap_binaryhex_to_asciihex(response, digest, MD5_SIGNATURE_SIZE);
+ bin2hex(response, digest, chap->digest_size);
*nr_out_len += sprintf(nr_out_ptr + *nr_out_len, "CHAP_R=0x%s",
response);
*nr_out_len += 1;
pr_debug("[server] Sending CHAP_R=0x%s\n", response);
auth_ret = 0;
out:
- kfree(challenge);
- kfree(challenge_binhex);
+ kfree_sensitive(desc);
+ if (tfm)
+ crypto_free_shash(tfm);
+ kfree(initiatorchg);
+ kfree(initiatorchg_binhex);
+ kfree(digest);
+ kfree(response);
+ kfree(server_digest);
+ kfree(client_digest);
return auth_ret;
}
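
The rewrite above moves from the removed crypto_hash/scatterlist interface to the synchronous crypto_shash API: data is passed as plain buffers, and the request descriptor is heap-allocated with a per-algorithm size. A self-contained sketch of the pattern (needs <crypto/hash.h>; function and variable names here are illustrative, not from the patch):

	static int example_shash_digest(const char *alg, const u8 *data,
					unsigned int len, u8 *out)
	{
		struct crypto_shash *tfm;
		struct shash_desc *desc;
		int ret;

		tfm = crypto_alloc_shash(alg, 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		/* Descriptor size is per-algorithm, hence the runtime sizing. */
		desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm),
			       GFP_KERNEL);
		if (!desc) {
			crypto_free_shash(tfm);
			return -ENOMEM;
		}
		desc->tfm = tfm;

		ret = crypto_shash_init(desc);
		if (!ret)
			ret = crypto_shash_finup(desc, data, len, out);

		kfree_sensitive(desc);		/* descriptor holds hash state */
		crypto_free_shash(tfm);
		return ret;
	}

chap_server_compute_hash() keeps separate update() calls because the digest covers three concatenated inputs (id, secret, challenge); a single contiguous buffer could use crypto_shash_digest() instead.
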
-static int chap_got_response(
- struct iscsi_conn *conn,
- struct iscsi_node_auth *auth,
- char *nr_in_ptr,
- char *nr_out_ptr,
- unsigned int *nr_out_len)
-{
- struct iscsi_chap *chap = conn->auth_protocol;
-
- switch (chap->digest_type) {
- case CHAP_DIGEST_MD5:
- if (chap_server_compute_md5(conn, auth, nr_in_ptr,
- nr_out_ptr, nr_out_len) < 0)
- return -1;
- return 0;
- default:
- pr_err("Unknown CHAP digest type %d!\n",
- chap->digest_type);
- return -1;
- }
-}
-
u32 chap_main_loop(
- struct iscsi_conn *conn,
+ struct iscsit_conn *conn,
struct iscsi_node_auth *auth,
char *in_text,
char *out_text,
@@ -428,7 +586,7 @@ u32 chap_main_loop(
return 0;
} else if (chap->chap_state == CHAP_STAGE_SERVER_AIC) {
convert_null_to_semi(in_text, *in_len);
- if (chap_got_response(conn, auth, in_text, out_text,
+ if (chap_server_compute_hash(conn, auth, in_text, out_text,
out_len) < 0) {
chap_close(conn);
return 2;
diff --git a/drivers/target/iscsi/iscsi_target_auth.h b/drivers/target/iscsi/iscsi_target_auth.h
index 2f463c09626d..ceb9b7754770 100644
--- a/drivers/target/iscsi/iscsi_target_auth.h
+++ b/drivers/target/iscsi/iscsi_target_auth.h
@@ -1,15 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ISCSI_CHAP_H_
#define _ISCSI_CHAP_H_
+#include <linux/types.h>
+
+#define CHAP_DIGEST_UNKNOWN 0
#define CHAP_DIGEST_MD5 5
-#define CHAP_DIGEST_SHA 6
+#define CHAP_DIGEST_SHA1 6
+#define CHAP_DIGEST_SHA256 7
+#define CHAP_DIGEST_SHA3_256 8
-#define CHAP_CHALLENGE_LENGTH 16
+#define MAX_CHAP_CHALLENGE_LEN 32
#define CHAP_CHALLENGE_STR_LEN 4096
-#define MAX_RESPONSE_LENGTH 64 /* sufficient for MD5 */
+#define MAX_RESPONSE_LENGTH 128 /* sufficient for SHA3 256 */
#define MAX_CHAP_N_SIZE 512
#define MD5_SIGNATURE_SIZE 16 /* 16 bytes in a MD5 message digest */
+#define SHA1_SIGNATURE_SIZE 20 /* 20 bytes in a SHA1 message digest */
+#define SHA256_SIGNATURE_SIZE 32 /* 32 bytes in a SHA256 message digest */
+#define SHA3_256_SIGNATURE_SIZE 32 /* 32 bytes in a SHA3 256 message digest */
#define CHAP_STAGE_CLIENT_A 1
#define CHAP_STAGE_SERVER_AIC 2
@@ -17,13 +26,18 @@
#define CHAP_STAGE_CLIENT_NRIC 4
#define CHAP_STAGE_SERVER_NR 5
-extern u32 chap_main_loop(struct iscsi_conn *, struct iscsi_node_auth *, char *, char *,
+struct iscsi_node_auth;
+struct iscsit_conn;
+
+extern u32 chap_main_loop(struct iscsit_conn *, struct iscsi_node_auth *, char *, char *,
int *, int *);
struct iscsi_chap {
- unsigned char digest_type;
unsigned char id;
- unsigned char challenge[CHAP_CHALLENGE_LENGTH];
+ unsigned char challenge[MAX_CHAP_CHALLENGE_LEN];
+ unsigned int challenge_len;
+ unsigned char *digest_name;
+ unsigned int digest_size;
unsigned int authenticate_target;
unsigned int chap_state;
} ____cacheline_aligned;
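
With digest_type gone, struct iscsi_chap now carries the crypto API algorithm name and digest size selected while negotiating CHAP_A. A hypothetical sketch of that mapping — the real selection helper lives elsewhere in iscsi_target_auth.c and is not part of this hunk; only the CHAP_DIGEST_* constants and signature sizes come from the header above, and the returned strings are the kernel crypto algorithm names:

	static const char *chap_example_digest_name(int chap_a, unsigned int *size)
	{
		switch (chap_a) {
		case CHAP_DIGEST_MD5:
			*size = MD5_SIGNATURE_SIZE;
			return "md5";
		case CHAP_DIGEST_SHA1:
			*size = SHA1_SIGNATURE_SIZE;
			return "sha1";
		case CHAP_DIGEST_SHA256:
			*size = SHA256_SIGNATURE_SIZE;
			return "sha256";
		case CHAP_DIGEST_SHA3_256:
			*size = SHA3_256_SIGNATURE_SIZE;
			return "sha3-256";
		default:
			return NULL;	/* CHAP_DIGEST_UNKNOWN et al. */
		}
	}
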
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index bbfd28893164..efe8cdb20060 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -1,36 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* This file contains the configfs implementation for iSCSI Target mode
* from the LIO-Target Project.
*
- * © Copyright 2007-2011 RisingTide Systems LLC.
- *
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ * (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
****************************************************************************/
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/export.h>
#include <linux/inet.h>
+#include <linux/module.h>
+#include <net/ipv6.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
-#include <target/target_core_fabric_configfs.h>
-#include <target/target_core_configfs.h>
-#include <target/configfs_macros.h>
#include <target/iscsi/iscsi_transport.h>
-
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_parameters.h"
#include "iscsi_target_device.h"
#include "iscsi_target_erl0.h"
@@ -38,128 +26,46 @@
#include "iscsi_target_tpg.h"
#include "iscsi_target_util.h"
#include "iscsi_target.h"
-#include "iscsi_target_stat.h"
-#include "iscsi_target_configfs.h"
-
-struct target_fabric_configfs *lio_target_fabric_configfs;
+#include <target/iscsi/iscsi_target_stat.h>
-struct lio_target_configfs_attribute {
- struct configfs_attribute attr;
- ssize_t (*show)(void *, char *);
- ssize_t (*store)(void *, const char *, size_t);
-};
/* Start items for lio_target_portal_cit */
-static ssize_t lio_target_np_show_sctp(
- struct se_tpg_np *se_tpg_np,
- char *page)
+static inline struct iscsi_tpg_np *to_iscsi_tpg_np(struct config_item *item)
{
- struct iscsi_tpg_np *tpg_np = container_of(se_tpg_np,
- struct iscsi_tpg_np, se_tpg_np);
- struct iscsi_tpg_np *tpg_np_sctp;
- ssize_t rb;
-
- tpg_np_sctp = iscsit_tpg_locate_child_np(tpg_np, ISCSI_SCTP_TCP);
- if (tpg_np_sctp)
- rb = sprintf(page, "1\n");
- else
- rb = sprintf(page, "0\n");
-
- return rb;
+ return container_of(to_tpg_np(item), struct iscsi_tpg_np, se_tpg_np);
}
-static ssize_t lio_target_np_store_sctp(
- struct se_tpg_np *se_tpg_np,
- const char *page,
- size_t count)
+static ssize_t lio_target_np_driver_show(struct config_item *item, char *page,
+ enum iscsit_transport_type type)
{
- struct iscsi_np *np;
- struct iscsi_portal_group *tpg;
- struct iscsi_tpg_np *tpg_np = container_of(se_tpg_np,
- struct iscsi_tpg_np, se_tpg_np);
- struct iscsi_tpg_np *tpg_np_sctp = NULL;
- u32 op;
- int ret;
-
- ret = kstrtou32(page, 0, &op);
- if (ret)
- return ret;
- if ((op != 1) && (op != 0)) {
- pr_err("Illegal value for tpg_enable: %u\n", op);
- return -EINVAL;
- }
- np = tpg_np->tpg_np;
- if (!np) {
- pr_err("Unable to locate struct iscsi_np from"
- " struct iscsi_tpg_np\n");
- return -EINVAL;
- }
-
- tpg = tpg_np->tpg;
- if (iscsit_get_tpg(tpg) < 0)
- return -EINVAL;
-
- if (op) {
- /*
- * Use existing np->np_sockaddr for SCTP network portal reference
- */
- tpg_np_sctp = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr,
- np->np_ip, tpg_np, ISCSI_SCTP_TCP);
- if (!tpg_np_sctp || IS_ERR(tpg_np_sctp))
- goto out;
- } else {
- tpg_np_sctp = iscsit_tpg_locate_child_np(tpg_np, ISCSI_SCTP_TCP);
- if (!tpg_np_sctp)
- goto out;
-
- ret = iscsit_tpg_del_network_portal(tpg, tpg_np_sctp);
- if (ret < 0)
- goto out;
- }
-
- iscsit_put_tpg(tpg);
- return count;
-out:
- iscsit_put_tpg(tpg);
- return -EINVAL;
-}
-
-TF_NP_BASE_ATTR(lio_target, sctp, S_IRUGO | S_IWUSR);
-
-static ssize_t lio_target_np_show_iser(
- struct se_tpg_np *se_tpg_np,
- char *page)
-{
- struct iscsi_tpg_np *tpg_np = container_of(se_tpg_np,
- struct iscsi_tpg_np, se_tpg_np);
- struct iscsi_tpg_np *tpg_np_iser;
+ struct iscsi_tpg_np *tpg_np = to_iscsi_tpg_np(item);
+ struct iscsi_tpg_np *tpg_np_new;
ssize_t rb;
- tpg_np_iser = iscsit_tpg_locate_child_np(tpg_np, ISCSI_INFINIBAND);
- if (tpg_np_iser)
- rb = sprintf(page, "1\n");
+ tpg_np_new = iscsit_tpg_locate_child_np(tpg_np, type);
+ if (tpg_np_new)
+ rb = sysfs_emit(page, "1\n");
else
- rb = sprintf(page, "0\n");
+ rb = sysfs_emit(page, "0\n");
return rb;
}
-static ssize_t lio_target_np_store_iser(
- struct se_tpg_np *se_tpg_np,
- const char *page,
- size_t count)
+static ssize_t lio_target_np_driver_store(struct config_item *item,
+ const char *page, size_t count, enum iscsit_transport_type type,
+ const char *mod_name)
{
+ struct iscsi_tpg_np *tpg_np = to_iscsi_tpg_np(item);
struct iscsi_np *np;
struct iscsi_portal_group *tpg;
- struct iscsi_tpg_np *tpg_np = container_of(se_tpg_np,
- struct iscsi_tpg_np, se_tpg_np);
- struct iscsi_tpg_np *tpg_np_iser = NULL;
- char *endptr;
+ struct iscsi_tpg_np *tpg_np_new = NULL;
u32 op;
- int rc = 0;
+ int rc;
- op = simple_strtoul(page, &endptr, 0);
+ rc = kstrtou32(page, 0, &op);
+ if (rc)
+ return rc;
if ((op != 1) && (op != 0)) {
pr_err("Illegal value for tpg_enable: %u\n", op);
return -EINVAL;
@@ -176,22 +82,25 @@ static ssize_t lio_target_np_store_iser(
return -EINVAL;
if (op) {
- rc = request_module("ib_isert");
- if (rc != 0) {
- pr_warn("Unable to request_module for ib_isert\n");
- rc = 0;
+ if (strlen(mod_name)) {
+ rc = request_module(mod_name);
+ if (rc != 0) {
+ pr_warn("Unable to request_module for %s\n",
+ mod_name);
+ rc = 0;
+ }
}
- tpg_np_iser = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr,
- np->np_ip, tpg_np, ISCSI_INFINIBAND);
- if (IS_ERR(tpg_np_iser)) {
- rc = PTR_ERR(tpg_np_iser);
+ tpg_np_new = iscsit_tpg_add_network_portal(tpg,
+ &np->np_sockaddr, tpg_np, type);
+ if (IS_ERR(tpg_np_new)) {
+ rc = PTR_ERR(tpg_np_new);
goto out;
}
} else {
- tpg_np_iser = iscsit_tpg_locate_child_np(tpg_np, ISCSI_INFINIBAND);
- if (tpg_np_iser) {
- rc = iscsit_tpg_del_network_portal(tpg, tpg_np_iser);
+ tpg_np_new = iscsit_tpg_locate_child_np(tpg_np, type);
+ if (tpg_np_new) {
+ rc = iscsit_tpg_del_network_portal(tpg, tpg_np_new);
if (rc < 0)
goto out;
}
@@ -204,11 +113,35 @@ out:
return rc;
}
-TF_NP_BASE_ATTR(lio_target, iser, S_IRUGO | S_IWUSR);
+static ssize_t lio_target_np_iser_show(struct config_item *item, char *page)
+{
+ return lio_target_np_driver_show(item, page, ISCSI_INFINIBAND);
+}
+
+static ssize_t lio_target_np_iser_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ return lio_target_np_driver_store(item, page, count,
+ ISCSI_INFINIBAND, "ib_isert");
+}
+CONFIGFS_ATTR(lio_target_np_, iser);
+
+static ssize_t lio_target_np_cxgbit_show(struct config_item *item, char *page)
+{
+ return lio_target_np_driver_show(item, page, ISCSI_CXGBIT);
+}
+
+static ssize_t lio_target_np_cxgbit_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ return lio_target_np_driver_store(item, page, count,
+ ISCSI_CXGBIT, "cxgbit");
+}
+CONFIGFS_ATTR(lio_target_np_, cxgbit);
static struct configfs_attribute *lio_target_portal_attrs[] = {
- &lio_target_np_sctp.attr,
- &lio_target_np_iser.attr,
+ &lio_target_np_attr_iser,
+ &lio_target_np_attr_cxgbit,
NULL,
};
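
The attribute table above now references objects named <prefix>attr_<name> because CONFIGFS_ATTR() generates the struct configfs_attribute itself, deriving the configfs name and the show/store callbacks from the macro arguments. Paraphrased expansion for one attribute (see <linux/configfs.h> for the authoritative definition):

	/* Roughly what CONFIGFS_ATTR(lio_target_np_, iser) expands to. */
	static struct configfs_attribute lio_target_np_attr_iser = {
		.ca_name	= "iser",
		.ca_mode	= S_IRUGO | S_IWUSR,
		.ca_owner	= THIS_MODULE,
		.show		= lio_target_np_iser_show,
		.store		= lio_target_np_iser_store,
	};
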
@@ -226,36 +159,30 @@ static struct se_tpg_np *lio_target_call_addnptotpg(
struct iscsi_portal_group *tpg;
struct iscsi_tpg_np *tpg_np;
char *str, *str2, *ip_str, *port_str;
- struct __kernel_sockaddr_storage sockaddr;
- struct sockaddr_in *sock_in;
- struct sockaddr_in6 *sock_in6;
- unsigned long port;
+ struct sockaddr_storage sockaddr = { };
int ret;
- char buf[MAX_PORTAL_LEN + 1];
+ char buf[MAX_PORTAL_LEN + 1] = { };
if (strlen(name) > MAX_PORTAL_LEN) {
pr_err("strlen(name): %d exceeds MAX_PORTAL_LEN: %d\n",
(int)strlen(name), MAX_PORTAL_LEN);
return ERR_PTR(-EOVERFLOW);
}
- memset(buf, 0, MAX_PORTAL_LEN + 1);
snprintf(buf, MAX_PORTAL_LEN + 1, "%s", name);
- memset(&sockaddr, 0, sizeof(struct __kernel_sockaddr_storage));
-
str = strstr(buf, "[");
if (str) {
- const char *end;
-
str2 = strstr(str, "]");
if (!str2) {
pr_err("Unable to locate trailing \"]\""
" in IPv6 iSCSI network portal address\n");
return ERR_PTR(-EINVAL);
}
- str++; /* Skip over leading "[" */
- *str2 = '\0'; /* Terminate the IPv6 address */
- str2++; /* Skip over the "]" */
+
+ ip_str = str + 1; /* Skip over leading "[" */
+ *str2 = '\0'; /* Terminate the unbracketed IPv6 address */
+ str2++; /* Skip over the \0 */
+
port_str = strstr(str2, ":");
if (!port_str) {
pr_err("Unable to locate \":port\""
@@ -264,23 +191,8 @@ static struct se_tpg_np *lio_target_call_addnptotpg(
}
*port_str = '\0'; /* Terminate string for IP */
port_str++; /* Skip over ":" */
-
- ret = strict_strtoul(port_str, 0, &port);
- if (ret < 0) {
- pr_err("strict_strtoul() failed for port_str: %d\n", ret);
- return ERR_PTR(ret);
- }
- sock_in6 = (struct sockaddr_in6 *)&sockaddr;
- sock_in6->sin6_family = AF_INET6;
- sock_in6->sin6_port = htons((unsigned short)port);
- ret = in6_pton(str, IPV6_ADDRESS_SPACE,
- (void *)&sock_in6->sin6_addr.in6_u, -1, &end);
- if (ret <= 0) {
- pr_err("in6_pton returned: %d\n", ret);
- return ERR_PTR(-EINVAL);
- }
} else {
- str = ip_str = &buf[0];
+ ip_str = &buf[0];
port_str = strstr(ip_str, ":");
if (!port_str) {
pr_err("Unable to locate \":port\""
@@ -289,18 +201,16 @@ static struct se_tpg_np *lio_target_call_addnptotpg(
}
*port_str = '\0'; /* Terminate string for IP */
port_str++; /* Skip over ":" */
+ }
- ret = strict_strtoul(port_str, 0, &port);
- if (ret < 0) {
- pr_err("strict_strtoul() failed for port_str: %d\n", ret);
- return ERR_PTR(ret);
- }
- sock_in = (struct sockaddr_in *)&sockaddr;
- sock_in->sin_family = AF_INET;
- sock_in->sin_port = htons((unsigned short)port);
- sock_in->sin_addr.s_addr = in_aton(ip_str);
+ ret = inet_pton_with_scope(&init_net, AF_UNSPEC, ip_str,
+ port_str, &sockaddr);
+ if (ret) {
+ pr_err("malformed ip/port passed: %s\n", name);
+ return ERR_PTR(ret);
}
- tpg = container_of(se_tpg, struct iscsi_portal_group, tpg_se_tpg);
+
+ tpg = to_iscsi_tpg(se_tpg);
ret = iscsit_get_tpg(tpg);
if (ret < 0)
return ERR_PTR(-EINVAL);
@@ -322,7 +232,7 @@ static struct se_tpg_np *lio_target_call_addnptotpg(
* sys/kernel/config/iscsi/$IQN/$TPG/np/$IP:$PORT/
*
*/
- tpg_np = iscsit_tpg_add_network_portal(tpg, &sockaddr, str, NULL,
+ tpg_np = iscsit_tpg_add_network_portal(tpg, &sockaddr, NULL,
ISCSI_TCP);
if (IS_ERR(tpg_np)) {
iscsit_put_tpg(tpg);
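
The hunks above replace the hand-rolled IPv4/IPv6 parsing and the explicit sockaddr_in/sockaddr_in6 setup with inet_pton_with_scope(): with AF_UNSPEC it detects the family, converts the address, and fills in the port in one call, so the caller only needs to split the "addr:port" / "[addr]:port" forms. Illustrative usage (addresses and ports are examples only):

	struct sockaddr_storage ss = { };
	int rc;

	rc = inet_pton_with_scope(&init_net, AF_UNSPEC, "10.0.0.1", "3260", &ss);
	/* IPv6 works unchanged, including a scope id for link-local addresses: */
	rc = inet_pton_with_scope(&init_net, AF_UNSPEC, "fe80::1%eth0", "3260", &ss);
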
@@ -350,8 +260,8 @@ static void lio_target_call_delnpfromtpg(
se_tpg = &tpg->tpg_se_tpg;
pr_debug("LIO_Target_ConfigFS: DEREGISTER -> %s TPGT: %hu"
- " PORTAL: %s:%hu\n", config_item_name(&se_tpg->se_tpg_wwn->wwn_group.cg_item),
- tpg->tpgt, tpg_np->tpg_np->np_ip, tpg_np->tpg_np->np_port);
+ " PORTAL: %pISpc\n", config_item_name(&se_tpg->se_tpg_wwn->wwn_group.cg_item),
+ tpg->tpgt, &tpg_np->tpg_np->np_sockaddr);
ret = iscsit_tpg_del_network_portal(tpg, tpg_np);
if (ret < 0)
@@ -366,24 +276,20 @@ out:
/* Start items for lio_target_nacl_attrib_cit */
-#define DEF_NACL_ATTRIB(name) \
-static ssize_t iscsi_nacl_attrib_show_##name( \
- struct se_node_acl *se_nacl, \
- char *page) \
+#define ISCSI_NACL_ATTR(name) \
+static ssize_t iscsi_nacl_attrib_##name##_show(struct config_item *item,\
+ char *page) \
{ \
- struct iscsi_node_acl *nacl = container_of(se_nacl, struct iscsi_node_acl, \
- se_node_acl); \
- \
- return sprintf(page, "%u\n", ISCSI_NODE_ATTRIB(nacl)->name); \
+ struct se_node_acl *se_nacl = attrib_to_nacl(item); \
+ struct iscsi_node_acl *nacl = to_iscsi_nacl(se_nacl); \
+ return sysfs_emit(page, "%u\n", nacl->node_attrib.name); \
} \
\
-static ssize_t iscsi_nacl_attrib_store_##name( \
- struct se_node_acl *se_nacl, \
- const char *page, \
- size_t count) \
+static ssize_t iscsi_nacl_attrib_##name##_store(struct config_item *item,\
+ const char *page, size_t count) \
{ \
- struct iscsi_node_acl *nacl = container_of(se_nacl, struct iscsi_node_acl, \
- se_node_acl); \
+ struct se_node_acl *se_nacl = attrib_to_nacl(item); \
+ struct iscsi_node_acl *nacl = to_iscsi_nacl(se_nacl); \
u32 val; \
int ret; \
\
@@ -395,59 +301,59 @@ static ssize_t iscsi_nacl_attrib_store_##name( \
return ret; \
\
return count; \
+} \
+ \
+CONFIGFS_ATTR(iscsi_nacl_attrib_, name)
+
+ISCSI_NACL_ATTR(dataout_timeout);
+ISCSI_NACL_ATTR(dataout_timeout_retries);
+ISCSI_NACL_ATTR(default_erl);
+ISCSI_NACL_ATTR(nopin_timeout);
+ISCSI_NACL_ATTR(nopin_response_timeout);
+ISCSI_NACL_ATTR(random_datain_pdu_offsets);
+ISCSI_NACL_ATTR(random_datain_seq_offsets);
+ISCSI_NACL_ATTR(random_r2t_offsets);
+
+static ssize_t iscsi_nacl_attrib_authentication_show(struct config_item *item,
+ char *page)
+{
+ struct se_node_acl *se_nacl = attrib_to_nacl(item);
+ struct iscsi_node_acl *nacl = to_iscsi_nacl(se_nacl);
+
+ return sysfs_emit(page, "%d\n", nacl->node_attrib.authentication);
}
-#define NACL_ATTR(_name, _mode) TF_NACL_ATTRIB_ATTR(iscsi, _name, _mode);
-/*
- * Define iscsi_node_attrib_s_dataout_timeout
- */
-DEF_NACL_ATTRIB(dataout_timeout);
-NACL_ATTR(dataout_timeout, S_IRUGO | S_IWUSR);
-/*
- * Define iscsi_node_attrib_s_dataout_timeout_retries
- */
-DEF_NACL_ATTRIB(dataout_timeout_retries);
-NACL_ATTR(dataout_timeout_retries, S_IRUGO | S_IWUSR);
-/*
- * Define iscsi_node_attrib_s_default_erl
- */
-DEF_NACL_ATTRIB(default_erl);
-NACL_ATTR(default_erl, S_IRUGO | S_IWUSR);
-/*
- * Define iscsi_node_attrib_s_nopin_timeout
- */
-DEF_NACL_ATTRIB(nopin_timeout);
-NACL_ATTR(nopin_timeout, S_IRUGO | S_IWUSR);
-/*
- * Define iscsi_node_attrib_s_nopin_response_timeout
- */
-DEF_NACL_ATTRIB(nopin_response_timeout);
-NACL_ATTR(nopin_response_timeout, S_IRUGO | S_IWUSR);
-/*
- * Define iscsi_node_attrib_s_random_datain_pdu_offsets
- */
-DEF_NACL_ATTRIB(random_datain_pdu_offsets);
-NACL_ATTR(random_datain_pdu_offsets, S_IRUGO | S_IWUSR);
-/*
- * Define iscsi_node_attrib_s_random_datain_seq_offsets
- */
-DEF_NACL_ATTRIB(random_datain_seq_offsets);
-NACL_ATTR(random_datain_seq_offsets, S_IRUGO | S_IWUSR);
-/*
- * Define iscsi_node_attrib_s_random_r2t_offsets
- */
-DEF_NACL_ATTRIB(random_r2t_offsets);
-NACL_ATTR(random_r2t_offsets, S_IRUGO | S_IWUSR);
+static ssize_t iscsi_nacl_attrib_authentication_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_node_acl *se_nacl = attrib_to_nacl(item);
+ struct iscsi_node_acl *nacl = to_iscsi_nacl(se_nacl);
+ s32 val;
+ int ret;
+
+ ret = kstrtos32(page, 0, &val);
+ if (ret)
+ return ret;
+ if (val != 0 && val != 1 && val != NA_AUTHENTICATION_INHERITED)
+ return -EINVAL;
+
+ nacl->node_attrib.authentication = val;
+
+ return count;
+}
+
+CONFIGFS_ATTR(iscsi_nacl_attrib_, authentication);
static struct configfs_attribute *lio_target_nacl_attrib_attrs[] = {
- &iscsi_nacl_attrib_dataout_timeout.attr,
- &iscsi_nacl_attrib_dataout_timeout_retries.attr,
- &iscsi_nacl_attrib_default_erl.attr,
- &iscsi_nacl_attrib_nopin_timeout.attr,
- &iscsi_nacl_attrib_nopin_response_timeout.attr,
- &iscsi_nacl_attrib_random_datain_pdu_offsets.attr,
- &iscsi_nacl_attrib_random_datain_seq_offsets.attr,
- &iscsi_nacl_attrib_random_r2t_offsets.attr,
+ &iscsi_nacl_attrib_attr_dataout_timeout,
+ &iscsi_nacl_attrib_attr_dataout_timeout_retries,
+ &iscsi_nacl_attrib_attr_default_erl,
+ &iscsi_nacl_attrib_attr_nopin_timeout,
+ &iscsi_nacl_attrib_attr_nopin_response_timeout,
+ &iscsi_nacl_attrib_attr_random_datain_pdu_offsets,
+ &iscsi_nacl_attrib_attr_random_datain_seq_offsets,
+ &iscsi_nacl_attrib_attr_random_r2t_offsets,
+ &iscsi_nacl_attrib_attr_authentication,
NULL,
};
@@ -456,7 +362,7 @@ static struct configfs_attribute *lio_target_nacl_attrib_attrs[] = {
/* Start items for lio_target_nacl_auth_cit */
#define __DEF_NACL_AUTH_STR(prefix, name, flags) \
-static ssize_t __iscsi_##prefix##_show_##name( \
+static ssize_t __iscsi_##prefix##_##name##_show( \
struct iscsi_node_acl *nacl, \
char *page) \
{ \
@@ -467,7 +373,7 @@ static ssize_t __iscsi_##prefix##_show_##name( \
return snprintf(page, PAGE_SIZE, "%s\n", auth->name); \
} \
\
-static ssize_t __iscsi_##prefix##_store_##name( \
+static ssize_t __iscsi_##prefix##_##name##_store( \
struct iscsi_node_acl *nacl, \
const char *page, \
size_t count) \
@@ -476,7 +382,8 @@ static ssize_t __iscsi_##prefix##_store_##name( \
\
if (!capable(CAP_SYS_ADMIN)) \
return -EPERM; \
- \
+ if (count >= sizeof(auth->name)) \
+ return -EINVAL; \
snprintf(auth->name, sizeof(auth->name), "%s", page); \
if (!strncmp("NULL", auth->name, 4)) \
auth->naf_flags &= ~flags; \
@@ -492,8 +399,34 @@ static ssize_t __iscsi_##prefix##_store_##name( \
return count; \
}
+#define DEF_NACL_AUTH_STR(name, flags) \
+ __DEF_NACL_AUTH_STR(nacl_auth, name, flags) \
+static ssize_t iscsi_nacl_auth_##name##_show(struct config_item *item, \
+ char *page) \
+{ \
+ struct se_node_acl *nacl = auth_to_nacl(item); \
+ return __iscsi_nacl_auth_##name##_show(to_iscsi_nacl(nacl), page); \
+} \
+static ssize_t iscsi_nacl_auth_##name##_store(struct config_item *item, \
+ const char *page, size_t count) \
+{ \
+ struct se_node_acl *nacl = auth_to_nacl(item); \
+ return __iscsi_nacl_auth_##name##_store(to_iscsi_nacl(nacl), \
+ page, count); \
+} \
+ \
+CONFIGFS_ATTR(iscsi_nacl_auth_, name)
+
+/*
+ * One-way authentication userid
+ */
+DEF_NACL_AUTH_STR(userid, NAF_USERID_SET);
+DEF_NACL_AUTH_STR(password, NAF_PASSWORD_SET);
+DEF_NACL_AUTH_STR(userid_mutual, NAF_USERID_IN_SET);
+DEF_NACL_AUTH_STR(password_mutual, NAF_PASSWORD_IN_SET);
+
#define __DEF_NACL_AUTH_INT(prefix, name) \
-static ssize_t __iscsi_##prefix##_show_##name( \
+static ssize_t __iscsi_##prefix##_##name##_show( \
struct iscsi_node_acl *nacl, \
char *page) \
{ \
@@ -505,69 +438,25 @@ static ssize_t __iscsi_##prefix##_show_##name( \
return snprintf(page, PAGE_SIZE, "%d\n", auth->name); \
}
-#define DEF_NACL_AUTH_STR(name, flags) \
- __DEF_NACL_AUTH_STR(nacl_auth, name, flags) \
-static ssize_t iscsi_nacl_auth_show_##name( \
- struct se_node_acl *nacl, \
- char *page) \
-{ \
- return __iscsi_nacl_auth_show_##name(container_of(nacl, \
- struct iscsi_node_acl, se_node_acl), page); \
-} \
-static ssize_t iscsi_nacl_auth_store_##name( \
- struct se_node_acl *nacl, \
- const char *page, \
- size_t count) \
-{ \
- return __iscsi_nacl_auth_store_##name(container_of(nacl, \
- struct iscsi_node_acl, se_node_acl), page, count); \
-}
-
#define DEF_NACL_AUTH_INT(name) \
__DEF_NACL_AUTH_INT(nacl_auth, name) \
-static ssize_t iscsi_nacl_auth_show_##name( \
- struct se_node_acl *nacl, \
- char *page) \
+static ssize_t iscsi_nacl_auth_##name##_show(struct config_item *item, \
+ char *page) \
{ \
- return __iscsi_nacl_auth_show_##name(container_of(nacl, \
- struct iscsi_node_acl, se_node_acl), page); \
-}
-
-#define AUTH_ATTR(_name, _mode) TF_NACL_AUTH_ATTR(iscsi, _name, _mode);
-#define AUTH_ATTR_RO(_name) TF_NACL_AUTH_ATTR_RO(iscsi, _name);
+ struct se_node_acl *nacl = auth_to_nacl(item); \
+ return __iscsi_nacl_auth_##name##_show(to_iscsi_nacl(nacl), page); \
+} \
+ \
+CONFIGFS_ATTR_RO(iscsi_nacl_auth_, name)
-/*
- * One-way authentication userid
- */
-DEF_NACL_AUTH_STR(userid, NAF_USERID_SET);
-AUTH_ATTR(userid, S_IRUGO | S_IWUSR);
-/*
- * One-way authentication password
- */
-DEF_NACL_AUTH_STR(password, NAF_PASSWORD_SET);
-AUTH_ATTR(password, S_IRUGO | S_IWUSR);
-/*
- * Enforce mutual authentication
- */
DEF_NACL_AUTH_INT(authenticate_target);
-AUTH_ATTR_RO(authenticate_target);
-/*
- * Mutual authentication userid
- */
-DEF_NACL_AUTH_STR(userid_mutual, NAF_USERID_IN_SET);
-AUTH_ATTR(userid_mutual, S_IRUGO | S_IWUSR);
-/*
- * Mutual authentication password
- */
-DEF_NACL_AUTH_STR(password_mutual, NAF_PASSWORD_IN_SET);
-AUTH_ATTR(password_mutual, S_IRUGO | S_IWUSR);
static struct configfs_attribute *lio_target_nacl_auth_attrs[] = {
- &iscsi_nacl_auth_userid.attr,
- &iscsi_nacl_auth_password.attr,
- &iscsi_nacl_auth_authenticate_target.attr,
- &iscsi_nacl_auth_userid_mutual.attr,
- &iscsi_nacl_auth_password_mutual.attr,
+ &iscsi_nacl_auth_attr_userid,
+ &iscsi_nacl_auth_attr_password,
+ &iscsi_nacl_auth_attr_authenticate_target,
+ &iscsi_nacl_auth_attr_userid_mutual,
+ &iscsi_nacl_auth_attr_password_mutual,
NULL,
};
@@ -575,12 +464,12 @@ static struct configfs_attribute *lio_target_nacl_auth_attrs[] = {
/* Start items for lio_target_nacl_param_cit */
-#define DEF_NACL_PARAM(name) \
-static ssize_t iscsi_nacl_param_show_##name( \
- struct se_node_acl *se_nacl, \
- char *page) \
+#define ISCSI_NACL_PARAM(name) \
+static ssize_t iscsi_nacl_param_##name##_show(struct config_item *item, \
+ char *page) \
{ \
- struct iscsi_session *sess; \
+ struct se_node_acl *se_nacl = param_to_nacl(item); \
+ struct iscsit_session *sess; \
struct se_session *se_sess; \
ssize_t rb; \
\
@@ -597,55 +486,34 @@ static ssize_t iscsi_nacl_param_show_##name( \
spin_unlock_bh(&se_nacl->nacl_sess_lock); \
\
return rb; \
-}
-
-#define NACL_PARAM_ATTR(_name) TF_NACL_PARAM_ATTR_RO(iscsi, _name);
-
-DEF_NACL_PARAM(MaxConnections);
-NACL_PARAM_ATTR(MaxConnections);
-
-DEF_NACL_PARAM(InitialR2T);
-NACL_PARAM_ATTR(InitialR2T);
-
-DEF_NACL_PARAM(ImmediateData);
-NACL_PARAM_ATTR(ImmediateData);
-
-DEF_NACL_PARAM(MaxBurstLength);
-NACL_PARAM_ATTR(MaxBurstLength);
-
-DEF_NACL_PARAM(FirstBurstLength);
-NACL_PARAM_ATTR(FirstBurstLength);
-
-DEF_NACL_PARAM(DefaultTime2Wait);
-NACL_PARAM_ATTR(DefaultTime2Wait);
-
-DEF_NACL_PARAM(DefaultTime2Retain);
-NACL_PARAM_ATTR(DefaultTime2Retain);
-
-DEF_NACL_PARAM(MaxOutstandingR2T);
-NACL_PARAM_ATTR(MaxOutstandingR2T);
-
-DEF_NACL_PARAM(DataPDUInOrder);
-NACL_PARAM_ATTR(DataPDUInOrder);
-
-DEF_NACL_PARAM(DataSequenceInOrder);
-NACL_PARAM_ATTR(DataSequenceInOrder);
-
-DEF_NACL_PARAM(ErrorRecoveryLevel);
-NACL_PARAM_ATTR(ErrorRecoveryLevel);
+} \
+ \
+CONFIGFS_ATTR_RO(iscsi_nacl_param_, name)
+
+ISCSI_NACL_PARAM(MaxConnections);
+ISCSI_NACL_PARAM(InitialR2T);
+ISCSI_NACL_PARAM(ImmediateData);
+ISCSI_NACL_PARAM(MaxBurstLength);
+ISCSI_NACL_PARAM(FirstBurstLength);
+ISCSI_NACL_PARAM(DefaultTime2Wait);
+ISCSI_NACL_PARAM(DefaultTime2Retain);
+ISCSI_NACL_PARAM(MaxOutstandingR2T);
+ISCSI_NACL_PARAM(DataPDUInOrder);
+ISCSI_NACL_PARAM(DataSequenceInOrder);
+ISCSI_NACL_PARAM(ErrorRecoveryLevel);
static struct configfs_attribute *lio_target_nacl_param_attrs[] = {
- &iscsi_nacl_param_MaxConnections.attr,
- &iscsi_nacl_param_InitialR2T.attr,
- &iscsi_nacl_param_ImmediateData.attr,
- &iscsi_nacl_param_MaxBurstLength.attr,
- &iscsi_nacl_param_FirstBurstLength.attr,
- &iscsi_nacl_param_DefaultTime2Wait.attr,
- &iscsi_nacl_param_DefaultTime2Retain.attr,
- &iscsi_nacl_param_MaxOutstandingR2T.attr,
- &iscsi_nacl_param_DataPDUInOrder.attr,
- &iscsi_nacl_param_DataSequenceInOrder.attr,
- &iscsi_nacl_param_ErrorRecoveryLevel.attr,
+ &iscsi_nacl_param_attr_MaxConnections,
+ &iscsi_nacl_param_attr_InitialR2T,
+ &iscsi_nacl_param_attr_ImmediateData,
+ &iscsi_nacl_param_attr_MaxBurstLength,
+ &iscsi_nacl_param_attr_FirstBurstLength,
+ &iscsi_nacl_param_attr_DefaultTime2Wait,
+ &iscsi_nacl_param_attr_DefaultTime2Retain,
+ &iscsi_nacl_param_attr_MaxOutstandingR2T,
+ &iscsi_nacl_param_attr_DataPDUInOrder,
+ &iscsi_nacl_param_attr_DataSequenceInOrder,
+ &iscsi_nacl_param_attr_ErrorRecoveryLevel,
NULL,
};
@@ -653,118 +521,114 @@ static struct configfs_attribute *lio_target_nacl_param_attrs[] = {
/* Start items for lio_target_acl_cit */
-static ssize_t lio_target_nacl_show_info(
- struct se_node_acl *se_nacl,
- char *page)
+static ssize_t lio_target_nacl_info_show(struct config_item *item, char *page)
{
- struct iscsi_session *sess;
- struct iscsi_conn *conn;
+ struct se_node_acl *se_nacl = acl_to_nacl(item);
+ struct iscsit_session *sess;
+ struct iscsit_conn *conn;
struct se_session *se_sess;
ssize_t rb = 0;
+ u32 max_cmd_sn;
spin_lock_bh(&se_nacl->nacl_sess_lock);
se_sess = se_nacl->nacl_sess;
if (!se_sess) {
- rb += sprintf(page+rb, "No active iSCSI Session for Initiator"
+ rb += sysfs_emit_at(page, rb, "No active iSCSI Session for Initiator"
" Endpoint: %s\n", se_nacl->initiatorname);
} else {
sess = se_sess->fabric_sess_ptr;
- if (sess->sess_ops->InitiatorName)
- rb += sprintf(page+rb, "InitiatorName: %s\n",
- sess->sess_ops->InitiatorName);
- if (sess->sess_ops->InitiatorAlias)
- rb += sprintf(page+rb, "InitiatorAlias: %s\n",
- sess->sess_ops->InitiatorAlias);
-
- rb += sprintf(page+rb, "LIO Session ID: %u "
- "ISID: 0x%02x %02x %02x %02x %02x %02x "
- "TSIH: %hu ", sess->sid,
- sess->isid[0], sess->isid[1], sess->isid[2],
- sess->isid[3], sess->isid[4], sess->isid[5],
- sess->tsih);
- rb += sprintf(page+rb, "SessionType: %s\n",
+ rb += sysfs_emit_at(page, rb, "InitiatorName: %s\n",
+ sess->sess_ops->InitiatorName);
+ rb += sysfs_emit_at(page, rb, "InitiatorAlias: %s\n",
+ sess->sess_ops->InitiatorAlias);
+
+ rb += sysfs_emit_at(page, rb,
+ "LIO Session ID: %u ISID: 0x%6ph TSIH: %hu ",
+ sess->sid, sess->isid, sess->tsih);
+ rb += sysfs_emit_at(page, rb, "SessionType: %s\n",
(sess->sess_ops->SessionType) ?
"Discovery" : "Normal");
- rb += sprintf(page+rb, "Session State: ");
+ rb += sysfs_emit_at(page, rb, "Session State: ");
switch (sess->session_state) {
case TARG_SESS_STATE_FREE:
- rb += sprintf(page+rb, "TARG_SESS_FREE\n");
+ rb += sysfs_emit_at(page, rb, "TARG_SESS_FREE\n");
break;
case TARG_SESS_STATE_ACTIVE:
- rb += sprintf(page+rb, "TARG_SESS_STATE_ACTIVE\n");
+ rb += sysfs_emit_at(page, rb, "TARG_SESS_STATE_ACTIVE\n");
break;
case TARG_SESS_STATE_LOGGED_IN:
- rb += sprintf(page+rb, "TARG_SESS_STATE_LOGGED_IN\n");
+ rb += sysfs_emit_at(page, rb, "TARG_SESS_STATE_LOGGED_IN\n");
break;
case TARG_SESS_STATE_FAILED:
- rb += sprintf(page+rb, "TARG_SESS_STATE_FAILED\n");
+ rb += sysfs_emit_at(page, rb, "TARG_SESS_STATE_FAILED\n");
break;
case TARG_SESS_STATE_IN_CONTINUE:
- rb += sprintf(page+rb, "TARG_SESS_STATE_IN_CONTINUE\n");
+ rb += sysfs_emit_at(page, rb, "TARG_SESS_STATE_IN_CONTINUE\n");
break;
default:
- rb += sprintf(page+rb, "ERROR: Unknown Session"
+ rb += sysfs_emit_at(page, rb, "ERROR: Unknown Session"
" State!\n");
break;
}
- rb += sprintf(page+rb, "---------------------[iSCSI Session"
+ rb += sysfs_emit_at(page, rb, "---------------------[iSCSI Session"
" Values]-----------------------\n");
- rb += sprintf(page+rb, " CmdSN/WR : CmdSN/WC : ExpCmdSN"
+ rb += sysfs_emit_at(page, rb, " CmdSN/WR : CmdSN/WC : ExpCmdSN"
" : MaxCmdSN : ITT : TTT\n");
- rb += sprintf(page+rb, " 0x%08x 0x%08x 0x%08x 0x%08x"
+ max_cmd_sn = (u32) atomic_read(&sess->max_cmd_sn);
+ rb += sysfs_emit_at(page, rb, " 0x%08x 0x%08x 0x%08x 0x%08x"
" 0x%08x 0x%08x\n",
sess->cmdsn_window,
- (sess->max_cmd_sn - sess->exp_cmd_sn) + 1,
- sess->exp_cmd_sn, sess->max_cmd_sn,
+ (max_cmd_sn - sess->exp_cmd_sn) + 1,
+ sess->exp_cmd_sn, max_cmd_sn,
sess->init_task_tag, sess->targ_xfer_tag);
- rb += sprintf(page+rb, "----------------------[iSCSI"
+ rb += sysfs_emit_at(page, rb, "----------------------[iSCSI"
" Connections]-------------------------\n");
spin_lock(&sess->conn_lock);
list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
- rb += sprintf(page+rb, "CID: %hu Connection"
+ rb += sysfs_emit_at(page, rb, "CID: %hu Connection"
" State: ", conn->cid);
switch (conn->conn_state) {
case TARG_CONN_STATE_FREE:
- rb += sprintf(page+rb,
+ rb += sysfs_emit_at(page, rb,
"TARG_CONN_STATE_FREE\n");
break;
case TARG_CONN_STATE_XPT_UP:
- rb += sprintf(page+rb,
+ rb += sysfs_emit_at(page, rb,
"TARG_CONN_STATE_XPT_UP\n");
break;
case TARG_CONN_STATE_IN_LOGIN:
- rb += sprintf(page+rb,
+ rb += sysfs_emit_at(page, rb,
"TARG_CONN_STATE_IN_LOGIN\n");
break;
case TARG_CONN_STATE_LOGGED_IN:
- rb += sprintf(page+rb,
+ rb += sysfs_emit_at(page, rb,
"TARG_CONN_STATE_LOGGED_IN\n");
break;
case TARG_CONN_STATE_IN_LOGOUT:
- rb += sprintf(page+rb,
+ rb += sysfs_emit_at(page, rb,
"TARG_CONN_STATE_IN_LOGOUT\n");
break;
case TARG_CONN_STATE_LOGOUT_REQUESTED:
- rb += sprintf(page+rb,
+ rb += sysfs_emit_at(page, rb,
"TARG_CONN_STATE_LOGOUT_REQUESTED\n");
break;
case TARG_CONN_STATE_CLEANUP_WAIT:
- rb += sprintf(page+rb,
+ rb += sysfs_emit_at(page, rb,
"TARG_CONN_STATE_CLEANUP_WAIT\n");
break;
default:
- rb += sprintf(page+rb,
+ rb += sysfs_emit_at(page, rb,
"ERROR: Unknown Connection State!\n");
break;
}
- rb += sprintf(page+rb, " Address %s %s", conn->login_ip,
+ rb += sysfs_emit_at(page, rb, " Address %pISc %s", &conn->login_sockaddr,
(conn->network_transport == ISCSI_TCP) ?
"TCP" : "SCTP");
- rb += sprintf(page+rb, " StatSN: 0x%08x\n",
+ rb += sysfs_emit_at(page, rb, " StatSN: 0x%08x\n",
conn->stat_sn);
}
spin_unlock(&sess->conn_lock);
@@ -774,23 +638,18 @@ static ssize_t lio_target_nacl_show_info(
return rb;
}
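
The sprintf(page+rb, ...) accumulation above becomes sysfs_emit_at(page, rb, ...), which enforces the PAGE_SIZE bound on configfs/sysfs show buffers instead of trusting every format string. The pattern, in sketch form:

	/* sysfs_emit_at(): a PAGE_SIZE-bounded sprintf for show handlers;
	 * the second argument is the running offset into the page. */
	ssize_t rb = 0;

	rb += sysfs_emit_at(page, rb, "CID: %hu Connection State: ", conn->cid);
	rb += sysfs_emit_at(page, rb, "TARG_CONN_STATE_LOGGED_IN\n");
	return rb;	/* bytes written; output is truncated at PAGE_SIZE */
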
-TF_NACL_BASE_ATTR_RO(lio_target, info);
-
-static ssize_t lio_target_nacl_show_cmdsn_depth(
- struct se_node_acl *se_nacl,
- char *page)
+static ssize_t lio_target_nacl_cmdsn_depth_show(struct config_item *item,
+ char *page)
{
- return sprintf(page, "%u\n", se_nacl->queue_depth);
+ return sysfs_emit(page, "%u\n", acl_to_nacl(item)->queue_depth);
}
-static ssize_t lio_target_nacl_store_cmdsn_depth(
- struct se_node_acl *se_nacl,
- const char *page,
- size_t count)
+static ssize_t lio_target_nacl_cmdsn_depth_store(struct config_item *item,
+ const char *page, size_t count)
{
+ struct se_node_acl *se_nacl = acl_to_nacl(item);
struct se_portal_group *se_tpg = se_nacl->se_tpg;
- struct iscsi_portal_group *tpg = container_of(se_tpg,
- struct iscsi_portal_group, tpg_se_tpg);
+ struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg);
struct config_item *acl_ci, *tpg_ci, *wwn_ci;
u32 cmdsn_depth = 0;
int ret;
@@ -806,7 +665,7 @@ static ssize_t lio_target_nacl_store_cmdsn_depth(
}
acl_ci = &se_nacl->acl_group.cg_item;
if (!acl_ci) {
- pr_err("Unable to locatel acl_ci\n");
+ pr_err("Unable to locate acl_ci\n");
return -EINVAL;
}
tpg_ci = &acl_ci->ci_parent->ci_group->cg_item;
@@ -822,13 +681,10 @@ static ssize_t lio_target_nacl_store_cmdsn_depth(
if (iscsit_get_tpg(tpg) < 0)
return -EINVAL;
- /*
- * iscsit_tpg_set_initiator_node_queue_depth() assumes force=1
- */
- ret = iscsit_tpg_set_initiator_node_queue_depth(tpg,
- config_item_name(acl_ci), cmdsn_depth, 1);
- pr_debug("LIO_Target_ConfigFS: %s/%s Set CmdSN Window: %u for"
+ ret = core_tpg_set_initiator_node_queue_depth(se_nacl, cmdsn_depth);
+
+ pr_debug("LIO_Target_ConfigFS: %s/%s Set CmdSN Window: %u for "
"InitiatorName: %s\n", config_item_name(wwn_ci),
config_item_name(tpg_ci), cmdsn_depth,
config_item_name(acl_ci));
@@ -837,20 +693,15 @@ static ssize_t lio_target_nacl_store_cmdsn_depth(
return (!ret) ? count : (ssize_t)ret;
}
-TF_NACL_BASE_ATTR(lio_target, cmdsn_depth, S_IRUGO | S_IWUSR);
-
-static ssize_t lio_target_nacl_show_tag(
- struct se_node_acl *se_nacl,
- char *page)
+static ssize_t lio_target_nacl_tag_show(struct config_item *item, char *page)
{
- return snprintf(page, PAGE_SIZE, "%s", se_nacl->acl_tag);
+ return snprintf(page, PAGE_SIZE, "%s", acl_to_nacl(item)->acl_tag);
}
-static ssize_t lio_target_nacl_store_tag(
- struct se_node_acl *se_nacl,
- const char *page,
- size_t count)
+static ssize_t lio_target_nacl_tag_store(struct config_item *item,
+ const char *page, size_t count)
{
+ struct se_node_acl *se_nacl = acl_to_nacl(item);
int ret;
ret = core_tpg_set_initiator_node_tag(se_nacl->se_tpg, se_nacl, page);
@@ -860,96 +711,27 @@ static ssize_t lio_target_nacl_store_tag(
return count;
}
-TF_NACL_BASE_ATTR(lio_target, tag, S_IRUGO | S_IWUSR);
+CONFIGFS_ATTR_RO(lio_target_nacl_, info);
+CONFIGFS_ATTR(lio_target_nacl_, cmdsn_depth);
+CONFIGFS_ATTR(lio_target_nacl_, tag);
static struct configfs_attribute *lio_target_initiator_attrs[] = {
- &lio_target_nacl_info.attr,
- &lio_target_nacl_cmdsn_depth.attr,
- &lio_target_nacl_tag.attr,
+ &lio_target_nacl_attr_info,
+ &lio_target_nacl_attr_cmdsn_depth,
+ &lio_target_nacl_attr_tag,
NULL,
};
-static struct se_node_acl *lio_tpg_alloc_fabric_acl(
- struct se_portal_group *se_tpg)
-{
- struct iscsi_node_acl *acl;
-
- acl = kzalloc(sizeof(struct iscsi_node_acl), GFP_KERNEL);
- if (!acl) {
- pr_err("Unable to allocate memory for struct iscsi_node_acl\n");
- return NULL;
- }
-
- return &acl->se_node_acl;
-}
-
-static struct se_node_acl *lio_target_make_nodeacl(
- struct se_portal_group *se_tpg,
- struct config_group *group,
- const char *name)
+static int lio_target_init_nodeacl(struct se_node_acl *se_nacl,
+ const char *name)
{
- struct config_group *stats_cg;
- struct iscsi_node_acl *acl;
- struct se_node_acl *se_nacl_new, *se_nacl;
- struct iscsi_portal_group *tpg = container_of(se_tpg,
- struct iscsi_portal_group, tpg_se_tpg);
- u32 cmdsn_depth;
-
- se_nacl_new = lio_tpg_alloc_fabric_acl(se_tpg);
- if (!se_nacl_new)
- return ERR_PTR(-ENOMEM);
-
- cmdsn_depth = ISCSI_TPG_ATTRIB(tpg)->default_cmdsn_depth;
- /*
- * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
- * when converting a NdoeACL from demo mode -> explict
- */
- se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
- name, cmdsn_depth);
- if (IS_ERR(se_nacl))
- return se_nacl;
-
- acl = container_of(se_nacl, struct iscsi_node_acl, se_node_acl);
- stats_cg = &se_nacl->acl_fabric_stat_group;
-
- stats_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
- GFP_KERNEL);
- if (!stats_cg->default_groups) {
- pr_err("Unable to allocate memory for"
- " stats_cg->default_groups\n");
- core_tpg_del_initiator_node_acl(se_tpg, se_nacl, 1);
- kfree(acl);
- return ERR_PTR(-ENOMEM);
- }
+ struct iscsi_node_acl *acl = to_iscsi_nacl(se_nacl);
- stats_cg->default_groups[0] = &NODE_STAT_GRPS(acl)->iscsi_sess_stats_group;
- stats_cg->default_groups[1] = NULL;
- config_group_init_type_name(&NODE_STAT_GRPS(acl)->iscsi_sess_stats_group,
+ config_group_init_type_name(&acl->node_stat_grps.iscsi_sess_stats_group,
"iscsi_sess_stats", &iscsi_stat_sess_cit);
-
- return se_nacl;
-}
-
-static void lio_target_drop_nodeacl(
- struct se_node_acl *se_nacl)
-{
- struct se_portal_group *se_tpg = se_nacl->se_tpg;
- struct iscsi_node_acl *acl = container_of(se_nacl,
- struct iscsi_node_acl, se_node_acl);
- struct config_item *df_item;
- struct config_group *stats_cg;
- int i;
-
- stats_cg = &acl->se_node_acl.acl_fabric_stat_group;
- for (i = 0; stats_cg->default_groups[i]; i++) {
- df_item = &stats_cg->default_groups[i]->cg_item;
- stats_cg->default_groups[i] = NULL;
- config_item_put(df_item);
- }
- kfree(stats_cg->default_groups);
-
- core_tpg_del_initiator_node_acl(se_tpg, se_nacl, 1);
- kfree(acl);
+ configfs_add_default_group(&acl->node_stat_grps.iscsi_sess_stats_group,
+ &se_nacl->acl_fabric_stat_group);
+ return 0;
}
/* End items for lio_target_acl_cit */
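
lio_target_init_nodeacl() above adopts the list-based default-group API: configfs_add_default_group() links the child group onto its parent, replacing the kmalloc'd NULL-terminated default_groups array and the manual config_item_put() loop that the removed lio_target_make_nodeacl()/lio_target_drop_nodeacl() pair carried. A sketch of the two-step registration, using the same types as the code above:

	/* Initialize the child group, then link it under its parent;
	 * configfs handles teardown when the parent is removed. */
	struct config_group *parent = &se_nacl->acl_fabric_stat_group;
	struct config_group *child = &acl->node_stat_grps.iscsi_sess_stats_group;

	config_group_init_type_name(child, "iscsi_sess_stats",
				    &iscsi_stat_sess_cit);
	configfs_add_default_group(child, parent);
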
@@ -958,29 +740,26 @@ static void lio_target_drop_nodeacl(
#define DEF_TPG_ATTRIB(name) \
\
-static ssize_t iscsi_tpg_attrib_show_##name( \
- struct se_portal_group *se_tpg, \
- char *page) \
+static ssize_t iscsi_tpg_attrib_##name##_show(struct config_item *item, \
+ char *page) \
{ \
- struct iscsi_portal_group *tpg = container_of(se_tpg, \
- struct iscsi_portal_group, tpg_se_tpg); \
+ struct se_portal_group *se_tpg = attrib_to_tpg(item); \
+ struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg); \
ssize_t rb; \
\
if (iscsit_get_tpg(tpg) < 0) \
return -EINVAL; \
\
- rb = sprintf(page, "%u\n", ISCSI_TPG_ATTRIB(tpg)->name); \
+ rb = sysfs_emit(page, "%u\n", tpg->tpg_attrib.name); \
iscsit_put_tpg(tpg); \
return rb; \
} \
\
-static ssize_t iscsi_tpg_attrib_store_##name( \
- struct se_portal_group *se_tpg, \
- const char *page, \
- size_t count) \
+static ssize_t iscsi_tpg_attrib_##name##_store(struct config_item *item,\
+ const char *page, size_t count) \
{ \
- struct iscsi_portal_group *tpg = container_of(se_tpg, \
- struct iscsi_portal_group, tpg_se_tpg); \
+ struct se_portal_group *se_tpg = attrib_to_tpg(item); \
+ struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg); \
u32 val; \
int ret; \
\
@@ -999,60 +778,37 @@ static ssize_t iscsi_tpg_attrib_store_##name( \
out: \
iscsit_put_tpg(tpg); \
return ret; \
-}
-
-#define TPG_ATTR(_name, _mode) TF_TPG_ATTRIB_ATTR(iscsi, _name, _mode);
+} \
+CONFIGFS_ATTR(iscsi_tpg_attrib_, name)
-/*
- * Define iscsi_tpg_attrib_s_authentication
- */
DEF_TPG_ATTRIB(authentication);
-TPG_ATTR(authentication, S_IRUGO | S_IWUSR);
-/*
- * Define iscsi_tpg_attrib_s_login_timeout
- */
DEF_TPG_ATTRIB(login_timeout);
-TPG_ATTR(login_timeout, S_IRUGO | S_IWUSR);
-/*
- * Define iscsi_tpg_attrib_s_netif_timeout
- */
-DEF_TPG_ATTRIB(netif_timeout);
-TPG_ATTR(netif_timeout, S_IRUGO | S_IWUSR);
-/*
- * Define iscsi_tpg_attrib_s_generate_node_acls
- */
DEF_TPG_ATTRIB(generate_node_acls);
-TPG_ATTR(generate_node_acls, S_IRUGO | S_IWUSR);
-/*
- * Define iscsi_tpg_attrib_s_default_cmdsn_depth
- */
DEF_TPG_ATTRIB(default_cmdsn_depth);
-TPG_ATTR(default_cmdsn_depth, S_IRUGO | S_IWUSR);
-/*
- Define iscsi_tpg_attrib_s_cache_dynamic_acls
- */
DEF_TPG_ATTRIB(cache_dynamic_acls);
-TPG_ATTR(cache_dynamic_acls, S_IRUGO | S_IWUSR);
-/*
- * Define iscsi_tpg_attrib_s_demo_mode_write_protect
- */
DEF_TPG_ATTRIB(demo_mode_write_protect);
-TPG_ATTR(demo_mode_write_protect, S_IRUGO | S_IWUSR);
-/*
- * Define iscsi_tpg_attrib_s_prod_mode_write_protect
- */
DEF_TPG_ATTRIB(prod_mode_write_protect);
-TPG_ATTR(prod_mode_write_protect, S_IRUGO | S_IWUSR);
+DEF_TPG_ATTRIB(demo_mode_discovery);
+DEF_TPG_ATTRIB(default_erl);
+DEF_TPG_ATTRIB(t10_pi);
+DEF_TPG_ATTRIB(fabric_prot_type);
+DEF_TPG_ATTRIB(tpg_enabled_sendtargets);
+DEF_TPG_ATTRIB(login_keys_workaround);
static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
- &iscsi_tpg_attrib_authentication.attr,
- &iscsi_tpg_attrib_login_timeout.attr,
- &iscsi_tpg_attrib_netif_timeout.attr,
- &iscsi_tpg_attrib_generate_node_acls.attr,
- &iscsi_tpg_attrib_default_cmdsn_depth.attr,
- &iscsi_tpg_attrib_cache_dynamic_acls.attr,
- &iscsi_tpg_attrib_demo_mode_write_protect.attr,
- &iscsi_tpg_attrib_prod_mode_write_protect.attr,
+ &iscsi_tpg_attrib_attr_authentication,
+ &iscsi_tpg_attrib_attr_login_timeout,
+ &iscsi_tpg_attrib_attr_generate_node_acls,
+ &iscsi_tpg_attrib_attr_default_cmdsn_depth,
+ &iscsi_tpg_attrib_attr_cache_dynamic_acls,
+ &iscsi_tpg_attrib_attr_demo_mode_write_protect,
+ &iscsi_tpg_attrib_attr_prod_mode_write_protect,
+ &iscsi_tpg_attrib_attr_demo_mode_discovery,
+ &iscsi_tpg_attrib_attr_default_erl,
+ &iscsi_tpg_attrib_attr_t10_pi,
+ &iscsi_tpg_attrib_attr_fabric_prot_type,
+ &iscsi_tpg_attrib_attr_tpg_enabled_sendtargets,
+ &iscsi_tpg_attrib_attr_login_keys_workaround,
NULL,
};
@@ -1061,12 +817,10 @@ static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
/* Start items for lio_target_tpg_auth_cit */
#define __DEF_TPG_AUTH_STR(prefix, name, flags) \
-static ssize_t __iscsi_##prefix##_show_##name( \
- struct se_portal_group *se_tpg, \
- char *page) \
+static ssize_t __iscsi_##prefix##_##name##_show(struct se_portal_group *se_tpg, \
+ char *page) \
{ \
- struct iscsi_portal_group *tpg = container_of(se_tpg, \
- struct iscsi_portal_group, tpg_se_tpg); \
+ struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg); \
struct iscsi_node_auth *auth = &tpg->tpg_demo_auth; \
\
if (!capable(CAP_SYS_ADMIN)) \
@@ -1075,13 +829,10 @@ static ssize_t __iscsi_##prefix##_show_##name( \
return snprintf(page, PAGE_SIZE, "%s\n", auth->name); \
} \
\
-static ssize_t __iscsi_##prefix##_store_##name( \
- struct se_portal_group *se_tpg, \
- const char *page, \
- size_t count) \
+static ssize_t __iscsi_##prefix##_##name##_store(struct se_portal_group *se_tpg,\
+ const char *page, size_t count) \
{ \
- struct iscsi_portal_group *tpg = container_of(se_tpg, \
- struct iscsi_portal_group, tpg_se_tpg); \
+ struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg); \
struct iscsi_node_auth *auth = &tpg->tpg_demo_auth; \
\
if (!capable(CAP_SYS_ADMIN)) \
@@ -1102,13 +853,33 @@ static ssize_t __iscsi_##prefix##_store_##name( \
return count; \
}
+#define DEF_TPG_AUTH_STR(name, flags) \
+ __DEF_TPG_AUTH_STR(tpg_auth, name, flags) \
+static ssize_t iscsi_tpg_auth_##name##_show(struct config_item *item, \
+ char *page) \
+{ \
+ return __iscsi_tpg_auth_##name##_show(auth_to_tpg(item), page); \
+} \
+ \
+static ssize_t iscsi_tpg_auth_##name##_store(struct config_item *item, \
+ const char *page, size_t count) \
+{ \
+ return __iscsi_tpg_auth_##name##_store(auth_to_tpg(item), page, count); \
+} \
+ \
+CONFIGFS_ATTR(iscsi_tpg_auth_, name);
+
+
+DEF_TPG_AUTH_STR(userid, NAF_USERID_SET);
+DEF_TPG_AUTH_STR(password, NAF_PASSWORD_SET);
+DEF_TPG_AUTH_STR(userid_mutual, NAF_USERID_IN_SET);
+DEF_TPG_AUTH_STR(password_mutual, NAF_PASSWORD_IN_SET);
+
#define __DEF_TPG_AUTH_INT(prefix, name) \
-static ssize_t __iscsi_##prefix##_show_##name( \
- struct se_portal_group *se_tpg, \
- char *page) \
+static ssize_t __iscsi_##prefix##_##name##_show(struct se_portal_group *se_tpg, \
+ char *page) \
{ \
- struct iscsi_portal_group *tpg = container_of(se_tpg, \
- struct iscsi_portal_group, tpg_se_tpg); \
+ struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg); \
struct iscsi_node_auth *auth = &tpg->tpg_demo_auth; \
\
if (!capable(CAP_SYS_ADMIN)) \
@@ -1117,67 +888,23 @@ static ssize_t __iscsi_##prefix##_show_##name( \
return snprintf(page, PAGE_SIZE, "%d\n", auth->name); \
}
-#define DEF_TPG_AUTH_STR(name, flags) \
- __DEF_TPG_AUTH_STR(tpg_auth, name, flags) \
-static ssize_t iscsi_tpg_auth_show_##name( \
- struct se_portal_group *se_tpg, \
- char *page) \
-{ \
- return __iscsi_tpg_auth_show_##name(se_tpg, page); \
-} \
- \
-static ssize_t iscsi_tpg_auth_store_##name( \
- struct se_portal_group *se_tpg, \
- const char *page, \
- size_t count) \
-{ \
- return __iscsi_tpg_auth_store_##name(se_tpg, page, count); \
-}
-
#define DEF_TPG_AUTH_INT(name) \
__DEF_TPG_AUTH_INT(tpg_auth, name) \
-static ssize_t iscsi_tpg_auth_show_##name( \
- struct se_portal_group *se_tpg, \
- char *page) \
+static ssize_t iscsi_tpg_auth_##name##_show(struct config_item *item, \
+ char *page) \
{ \
- return __iscsi_tpg_auth_show_##name(se_tpg, page); \
-}
-
-#define TPG_AUTH_ATTR(_name, _mode) TF_TPG_AUTH_ATTR(iscsi, _name, _mode);
-#define TPG_AUTH_ATTR_RO(_name) TF_TPG_AUTH_ATTR_RO(iscsi, _name);
+ return __iscsi_tpg_auth_##name##_show(auth_to_tpg(item), page); \
+} \
+CONFIGFS_ATTR_RO(iscsi_tpg_auth_, name);
-/*
- * * One-way authentication userid
- * */
-DEF_TPG_AUTH_STR(userid, NAF_USERID_SET);
-TPG_AUTH_ATTR(userid, S_IRUGO | S_IWUSR);
-/*
- * * One-way authentication password
- * */
-DEF_TPG_AUTH_STR(password, NAF_PASSWORD_SET);
-TPG_AUTH_ATTR(password, S_IRUGO | S_IWUSR);
-/*
- * * Enforce mutual authentication
- * */
DEF_TPG_AUTH_INT(authenticate_target);
-TPG_AUTH_ATTR_RO(authenticate_target);
-/*
- * * Mutual authentication userid
- * */
-DEF_TPG_AUTH_STR(userid_mutual, NAF_USERID_IN_SET);
-TPG_AUTH_ATTR(userid_mutual, S_IRUGO | S_IWUSR);
-/*
- * * Mutual authentication password
- * */
-DEF_TPG_AUTH_STR(password_mutual, NAF_PASSWORD_IN_SET);
-TPG_AUTH_ATTR(password_mutual, S_IRUGO | S_IWUSR);
static struct configfs_attribute *lio_target_tpg_auth_attrs[] = {
- &iscsi_tpg_auth_userid.attr,
- &iscsi_tpg_auth_password.attr,
- &iscsi_tpg_auth_authenticate_target.attr,
- &iscsi_tpg_auth_userid_mutual.attr,
- &iscsi_tpg_auth_password_mutual.attr,
+ &iscsi_tpg_auth_attr_userid,
+ &iscsi_tpg_auth_attr_password,
+ &iscsi_tpg_auth_attr_authenticate_target,
+ &iscsi_tpg_auth_attr_userid_mutual,
+ &iscsi_tpg_auth_attr_password_mutual,
NULL,
};
@@ -1186,12 +913,11 @@ static struct configfs_attribute *lio_target_tpg_auth_attrs[] = {
/* Start items for lio_target_tpg_param_cit */
#define DEF_TPG_PARAM(name) \
-static ssize_t iscsi_tpg_param_show_##name( \
- struct se_portal_group *se_tpg, \
- char *page) \
+static ssize_t iscsi_tpg_param_##name##_show(struct config_item *item, \
+ char *page) \
{ \
- struct iscsi_portal_group *tpg = container_of(se_tpg, \
- struct iscsi_portal_group, tpg_se_tpg); \
+ struct se_portal_group *se_tpg = param_to_tpg(item); \
+ struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg); \
struct iscsi_param *param; \
ssize_t rb; \
\
@@ -1209,13 +935,11 @@ static ssize_t iscsi_tpg_param_show_##name( \
iscsit_put_tpg(tpg); \
return rb; \
} \
-static ssize_t iscsi_tpg_param_store_##name( \
- struct se_portal_group *se_tpg, \
- const char *page, \
- size_t count) \
+static ssize_t iscsi_tpg_param_##name##_store(struct config_item *item, \
+ const char *page, size_t count) \
{ \
- struct iscsi_portal_group *tpg = container_of(se_tpg, \
- struct iscsi_portal_group, tpg_se_tpg); \
+ struct se_portal_group *se_tpg = param_to_tpg(item); \
+ struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg); \
char *buf; \
int ret, len; \
\
@@ -1241,96 +965,54 @@ static ssize_t iscsi_tpg_param_store_##name( \
out: \
kfree(buf); \
iscsit_put_tpg(tpg); \
- return -EINVAL; \
-}
-
-#define TPG_PARAM_ATTR(_name, _mode) TF_TPG_PARAM_ATTR(iscsi, _name, _mode);
+ return -EINVAL; \
+} \
+CONFIGFS_ATTR(iscsi_tpg_param_, name)
DEF_TPG_PARAM(AuthMethod);
-TPG_PARAM_ATTR(AuthMethod, S_IRUGO | S_IWUSR);
-
DEF_TPG_PARAM(HeaderDigest);
-TPG_PARAM_ATTR(HeaderDigest, S_IRUGO | S_IWUSR);
-
DEF_TPG_PARAM(DataDigest);
-TPG_PARAM_ATTR(DataDigest, S_IRUGO | S_IWUSR);
-
DEF_TPG_PARAM(MaxConnections);
-TPG_PARAM_ATTR(MaxConnections, S_IRUGO | S_IWUSR);
-
DEF_TPG_PARAM(TargetAlias);
-TPG_PARAM_ATTR(TargetAlias, S_IRUGO | S_IWUSR);
-
DEF_TPG_PARAM(InitialR2T);
-TPG_PARAM_ATTR(InitialR2T, S_IRUGO | S_IWUSR);
-
DEF_TPG_PARAM(ImmediateData);
-TPG_PARAM_ATTR(ImmediateData, S_IRUGO | S_IWUSR);
-
DEF_TPG_PARAM(MaxRecvDataSegmentLength);
-TPG_PARAM_ATTR(MaxRecvDataSegmentLength, S_IRUGO | S_IWUSR);
-
DEF_TPG_PARAM(MaxXmitDataSegmentLength);
-TPG_PARAM_ATTR(MaxXmitDataSegmentLength, S_IRUGO | S_IWUSR);
-
DEF_TPG_PARAM(MaxBurstLength);
-TPG_PARAM_ATTR(MaxBurstLength, S_IRUGO | S_IWUSR);
-
DEF_TPG_PARAM(FirstBurstLength);
-TPG_PARAM_ATTR(FirstBurstLength, S_IRUGO | S_IWUSR);
-
DEF_TPG_PARAM(DefaultTime2Wait);
-TPG_PARAM_ATTR(DefaultTime2Wait, S_IRUGO | S_IWUSR);
-
DEF_TPG_PARAM(DefaultTime2Retain);
-TPG_PARAM_ATTR(DefaultTime2Retain, S_IRUGO | S_IWUSR);
-
DEF_TPG_PARAM(MaxOutstandingR2T);
-TPG_PARAM_ATTR(MaxOutstandingR2T, S_IRUGO | S_IWUSR);
-
DEF_TPG_PARAM(DataPDUInOrder);
-TPG_PARAM_ATTR(DataPDUInOrder, S_IRUGO | S_IWUSR);
-
DEF_TPG_PARAM(DataSequenceInOrder);
-TPG_PARAM_ATTR(DataSequenceInOrder, S_IRUGO | S_IWUSR);
-
DEF_TPG_PARAM(ErrorRecoveryLevel);
-TPG_PARAM_ATTR(ErrorRecoveryLevel, S_IRUGO | S_IWUSR);
-
DEF_TPG_PARAM(IFMarker);
-TPG_PARAM_ATTR(IFMarker, S_IRUGO | S_IWUSR);
-
DEF_TPG_PARAM(OFMarker);
-TPG_PARAM_ATTR(OFMarker, S_IRUGO | S_IWUSR);
-
DEF_TPG_PARAM(IFMarkInt);
-TPG_PARAM_ATTR(IFMarkInt, S_IRUGO | S_IWUSR);
-
DEF_TPG_PARAM(OFMarkInt);
-TPG_PARAM_ATTR(OFMarkInt, S_IRUGO | S_IWUSR);
static struct configfs_attribute *lio_target_tpg_param_attrs[] = {
- &iscsi_tpg_param_AuthMethod.attr,
- &iscsi_tpg_param_HeaderDigest.attr,
- &iscsi_tpg_param_DataDigest.attr,
- &iscsi_tpg_param_MaxConnections.attr,
- &iscsi_tpg_param_TargetAlias.attr,
- &iscsi_tpg_param_InitialR2T.attr,
- &iscsi_tpg_param_ImmediateData.attr,
- &iscsi_tpg_param_MaxRecvDataSegmentLength.attr,
- &iscsi_tpg_param_MaxXmitDataSegmentLength.attr,
- &iscsi_tpg_param_MaxBurstLength.attr,
- &iscsi_tpg_param_FirstBurstLength.attr,
- &iscsi_tpg_param_DefaultTime2Wait.attr,
- &iscsi_tpg_param_DefaultTime2Retain.attr,
- &iscsi_tpg_param_MaxOutstandingR2T.attr,
- &iscsi_tpg_param_DataPDUInOrder.attr,
- &iscsi_tpg_param_DataSequenceInOrder.attr,
- &iscsi_tpg_param_ErrorRecoveryLevel.attr,
- &iscsi_tpg_param_IFMarker.attr,
- &iscsi_tpg_param_OFMarker.attr,
- &iscsi_tpg_param_IFMarkInt.attr,
- &iscsi_tpg_param_OFMarkInt.attr,
+ &iscsi_tpg_param_attr_AuthMethod,
+ &iscsi_tpg_param_attr_HeaderDigest,
+ &iscsi_tpg_param_attr_DataDigest,
+ &iscsi_tpg_param_attr_MaxConnections,
+ &iscsi_tpg_param_attr_TargetAlias,
+ &iscsi_tpg_param_attr_InitialR2T,
+ &iscsi_tpg_param_attr_ImmediateData,
+ &iscsi_tpg_param_attr_MaxRecvDataSegmentLength,
+ &iscsi_tpg_param_attr_MaxXmitDataSegmentLength,
+ &iscsi_tpg_param_attr_MaxBurstLength,
+ &iscsi_tpg_param_attr_FirstBurstLength,
+ &iscsi_tpg_param_attr_DefaultTime2Wait,
+ &iscsi_tpg_param_attr_DefaultTime2Retain,
+ &iscsi_tpg_param_attr_MaxOutstandingR2T,
+ &iscsi_tpg_param_attr_DataPDUInOrder,
+ &iscsi_tpg_param_attr_DataSequenceInOrder,
+ &iscsi_tpg_param_attr_ErrorRecoveryLevel,
+ &iscsi_tpg_param_attr_IFMarker,
+ &iscsi_tpg_param_attr_OFMarker,
+ &iscsi_tpg_param_attr_IFMarkInt,
+ &iscsi_tpg_param_attr_OFMarkInt,
NULL,
};
@@ -1338,68 +1020,16 @@ static struct configfs_attribute *lio_target_tpg_param_attrs[] = {
/* Start items for lio_target_tpg_cit */
-static ssize_t lio_target_tpg_show_enable(
- struct se_portal_group *se_tpg,
- char *page)
-{
- struct iscsi_portal_group *tpg = container_of(se_tpg,
- struct iscsi_portal_group, tpg_se_tpg);
- ssize_t len;
-
- spin_lock(&tpg->tpg_state_lock);
- len = sprintf(page, "%d\n",
- (tpg->tpg_state == TPG_STATE_ACTIVE) ? 1 : 0);
- spin_unlock(&tpg->tpg_state_lock);
-
- return len;
-}
-
-static ssize_t lio_target_tpg_store_enable(
- struct se_portal_group *se_tpg,
- const char *page,
- size_t count)
+static ssize_t lio_target_tpg_dynamic_sessions_show(struct config_item *item,
+ char *page)
{
- struct iscsi_portal_group *tpg = container_of(se_tpg,
- struct iscsi_portal_group, tpg_se_tpg);
- u32 op;
- int ret;
-
- ret = kstrtou32(page, 0, &op);
- if (ret)
- return ret;
- if ((op != 1) && (op != 0)) {
- pr_err("Illegal value for tpg_enable: %u\n", op);
- return -EINVAL;
- }
-
- ret = iscsit_get_tpg(tpg);
- if (ret < 0)
- return -EINVAL;
-
- if (op) {
- ret = iscsit_tpg_enable_portal_group(tpg);
- if (ret < 0)
- goto out;
- } else {
- /*
- * iscsit_tpg_disable_portal_group() assumes force=1
- */
- ret = iscsit_tpg_disable_portal_group(tpg, 1);
- if (ret < 0)
- goto out;
- }
-
- iscsit_put_tpg(tpg);
- return count;
-out:
- iscsit_put_tpg(tpg);
- return -EINVAL;
+ return target_show_dynamic_sessions(to_tpg(item), page);
}
-TF_TPG_BASE_ATTR(lio_target, enable, S_IRUGO | S_IWUSR);
+CONFIGFS_ATTR_RO(lio_target_tpg_, dynamic_sessions);
static struct configfs_attribute *lio_target_tpg_attrs[] = {
- &lio_target_tpg_enable.attr,
+ &lio_target_tpg_attr_dynamic_sessions,
NULL,
};
@@ -1407,10 +1037,8 @@ static struct configfs_attribute *lio_target_tpg_attrs[] = {
/* Start items for lio_target_tiqn_cit */
-static struct se_portal_group *lio_target_tiqn_addtpg(
- struct se_wwn *wwn,
- struct config_group *group,
- const char *name)
+static struct se_portal_group *lio_target_tiqn_addtpg(struct se_wwn *wwn,
+ const char *name)
{
struct iscsi_portal_group *tpg;
struct iscsi_tiqn *tiqn;
@@ -1438,12 +1066,9 @@ static struct se_portal_group *lio_target_tiqn_addtpg(
if (!tpg)
return NULL;
- ret = core_tpg_register(
- &lio_target_fabric_configfs->tf_ops,
- wwn, &tpg->tpg_se_tpg, tpg,
- TRANSPORT_TPG_TYPE_NORMAL);
+ ret = core_tpg_register(wwn, &tpg->tpg_se_tpg, SCSI_PROTOCOL_ISCSI);
if (ret < 0)
- return NULL;
+ goto free_out;
ret = iscsit_tpg_add_portal_group(tiqn, tpg);
if (ret != 0)
@@ -1455,16 +1080,47 @@ static struct se_portal_group *lio_target_tiqn_addtpg(
return &tpg->tpg_se_tpg;
out:
core_tpg_deregister(&tpg->tpg_se_tpg);
+free_out:
kfree(tpg);
return NULL;
}
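/*
 * Note the new free_out label: previously a core_tpg_register() failure
 * returned NULL and leaked the just-allocated tpg; the error path now
 * frees it before bailing out.
 */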
+static int lio_target_tiqn_enabletpg(struct se_portal_group *se_tpg,
+ bool enable)
+{
+ struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg);
+ int ret;
+
+ ret = iscsit_get_tpg(tpg);
+ if (ret < 0)
+ return -EINVAL;
+
+ if (enable) {
+ ret = iscsit_tpg_enable_portal_group(tpg);
+ if (ret < 0)
+ goto out;
+ } else {
+ /*
+ * iscsit_tpg_disable_portal_group() assumes force=1
+ */
+ ret = iscsit_tpg_disable_portal_group(tpg, 1);
+ if (ret < 0)
+ goto out;
+ }
+
+ iscsit_put_tpg(tpg);
+ return 0;
+out:
+ iscsit_put_tpg(tpg);
+ return -EINVAL;
+}
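/*
 * With the handwritten "enable" attribute gone, the target core owns the
 * generic tpg/enable file and dispatches to ->fabric_enable_tpg(), i.e.
 * lio_target_tiqn_enabletpg() above. The userspace flow should be
 * unchanged (path shown for illustration):
 *
 *	echo 1 > /sys/kernel/config/target/iscsi/<iqn>/tpgt_1/enable
 */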
+
static void lio_target_tiqn_deltpg(struct se_portal_group *se_tpg)
{
struct iscsi_portal_group *tpg;
struct iscsi_tiqn *tiqn;
- tpg = container_of(se_tpg, struct iscsi_portal_group, tpg_se_tpg);
+ tpg = to_iscsi_tpg(se_tpg);
tiqn = tpg->tpg_tiqn;
/*
* iscsit_tpg_del_portal_group() assumes force=1
@@ -1475,19 +1131,54 @@ static void lio_target_tiqn_deltpg(struct se_portal_group *se_tpg)
/* End items for lio_target_tiqn_cit */
-/* Start LIO-Target TIQN struct contig_item lio_target_cit */
+/* Start LIO-Target TIQN struct config_item lio_target_cit */
-static ssize_t lio_target_wwn_show_attr_lio_version(
- struct target_fabric_configfs *tf,
- char *page)
+static ssize_t lio_target_wwn_lio_version_show(struct config_item *item,
+ char *page)
{
- return sprintf(page, "RisingTide Systems Linux-iSCSI Target "ISCSIT_VERSION"\n");
+ return sysfs_emit(page, "Datera Inc. iSCSI Target %s\n", ISCSIT_VERSION);
}
-TF_WWN_ATTR_RO(lio_target, lio_version);
+CONFIGFS_ATTR_RO(lio_target_wwn_, lio_version);
+
+static ssize_t lio_target_wwn_cpus_allowed_list_show(
+ struct config_item *item, char *page)
+{
+ return sysfs_emit(page, "%*pbl\n",
+ cpumask_pr_args(iscsit_global->allowed_cpumask));
+}
+
+static ssize_t lio_target_wwn_cpus_allowed_list_store(
+ struct config_item *item, const char *page, size_t count)
+{
+ int ret = -ENOMEM;
+ char *orig;
+ cpumask_var_t new_allowed_cpumask;
+
+ if (!zalloc_cpumask_var(&new_allowed_cpumask, GFP_KERNEL))
+ goto out;
+
+ orig = kstrdup(page, GFP_KERNEL);
+ if (!orig)
+ goto out_free_cpumask;
+
+ ret = cpulist_parse(orig, new_allowed_cpumask);
+ if (!ret)
+ cpumask_copy(iscsit_global->allowed_cpumask,
+ new_allowed_cpumask);
+
+ kfree(orig);
+out_free_cpumask:
+ free_cpumask_var(new_allowed_cpumask);
+out:
+ return ret ? ret : count;
+}
+
+CONFIGFS_ATTR(lio_target_wwn_, cpus_allowed_list);
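/*
 * cpulist_parse() accepts the usual cpulist syntax, e.g. "0-3,8" or
 * "0,2,4-7". It returns 0 on success and a negative errno on a malformed
 * list, so the "return ret ? ret : count" tail above propagates parse
 * errors and otherwise consumes the whole write.
 */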
static struct configfs_attribute *lio_target_wwn_attrs[] = {
- &lio_target_wwn_lio_version.attr,
+ &lio_target_wwn_attr_lio_version,
+ &lio_target_wwn_attr_cpus_allowed_list,
NULL,
};
@@ -1496,64 +1187,52 @@ static struct se_wwn *lio_target_call_coreaddtiqn(
struct config_group *group,
const char *name)
{
- struct config_group *stats_cg;
struct iscsi_tiqn *tiqn;
tiqn = iscsit_add_tiqn((unsigned char *)name);
if (IS_ERR(tiqn))
return ERR_CAST(tiqn);
- /*
- * Setup struct iscsi_wwn_stat_grps for se_wwn->fabric_stat_group.
- */
- stats_cg = &tiqn->tiqn_wwn.fabric_stat_group;
-
- stats_cg->default_groups = kmalloc(sizeof(struct config_group *) * 6,
- GFP_KERNEL);
- if (!stats_cg->default_groups) {
- pr_err("Unable to allocate memory for"
- " stats_cg->default_groups\n");
- iscsit_del_tiqn(tiqn);
- return ERR_PTR(-ENOMEM);
- }
- stats_cg->default_groups[0] = &WWN_STAT_GRPS(tiqn)->iscsi_instance_group;
- stats_cg->default_groups[1] = &WWN_STAT_GRPS(tiqn)->iscsi_sess_err_group;
- stats_cg->default_groups[2] = &WWN_STAT_GRPS(tiqn)->iscsi_tgt_attr_group;
- stats_cg->default_groups[3] = &WWN_STAT_GRPS(tiqn)->iscsi_login_stats_group;
- stats_cg->default_groups[4] = &WWN_STAT_GRPS(tiqn)->iscsi_logout_stats_group;
- stats_cg->default_groups[5] = NULL;
- config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_instance_group,
+ pr_debug("LIO_Target_ConfigFS: REGISTER -> %s\n", tiqn->tiqn);
+ pr_debug("LIO_Target_ConfigFS: REGISTER -> Allocated Node:"
+ " %s\n", name);
+ return &tiqn->tiqn_wwn;
+}
+
+static void lio_target_add_wwn_groups(struct se_wwn *wwn)
+{
+ struct iscsi_tiqn *tiqn = container_of(wwn, struct iscsi_tiqn, tiqn_wwn);
+
+ config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_instance_group,
"iscsi_instance", &iscsi_stat_instance_cit);
- config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_sess_err_group,
+ configfs_add_default_group(&tiqn->tiqn_stat_grps.iscsi_instance_group,
+ &tiqn->tiqn_wwn.fabric_stat_group);
+
+ config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_sess_err_group,
"iscsi_sess_err", &iscsi_stat_sess_err_cit);
- config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_tgt_attr_group,
+ configfs_add_default_group(&tiqn->tiqn_stat_grps.iscsi_sess_err_group,
+ &tiqn->tiqn_wwn.fabric_stat_group);
+
+ config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_tgt_attr_group,
"iscsi_tgt_attr", &iscsi_stat_tgt_attr_cit);
- config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_login_stats_group,
+ configfs_add_default_group(&tiqn->tiqn_stat_grps.iscsi_tgt_attr_group,
+ &tiqn->tiqn_wwn.fabric_stat_group);
+
+ config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_login_stats_group,
"iscsi_login_stats", &iscsi_stat_login_cit);
- config_group_init_type_name(&WWN_STAT_GRPS(tiqn)->iscsi_logout_stats_group,
- "iscsi_logout_stats", &iscsi_stat_logout_cit);
+ configfs_add_default_group(&tiqn->tiqn_stat_grps.iscsi_login_stats_group,
+ &tiqn->tiqn_wwn.fabric_stat_group);
- pr_debug("LIO_Target_ConfigFS: REGISTER -> %s\n", tiqn->tiqn);
- pr_debug("LIO_Target_ConfigFS: REGISTER -> Allocated Node:"
- " %s\n", name);
- return &tiqn->tiqn_wwn;
+ config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_logout_stats_group,
+ "iscsi_logout_stats", &iscsi_stat_logout_cit);
+ configfs_add_default_group(&tiqn->tiqn_stat_grps.iscsi_logout_stats_group,
+ &tiqn->tiqn_wwn.fabric_stat_group);
}
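/*
 * configfs_add_default_group() registers each stat group as a default
 * child of the wwn's fabric_stat_group:
 *
 *	void configfs_add_default_group(struct config_group *new_group,
 *					struct config_group *group);
 *
 * Because default groups added this way are torn down when the parent is
 * unregistered, the manual default_groups[] allocation above and the
 * config_item_put() loop removed from lio_target_call_coredeltiqn()
 * below are no longer needed.
 */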
static void lio_target_call_coredeltiqn(
struct se_wwn *wwn)
{
struct iscsi_tiqn *tiqn = container_of(wwn, struct iscsi_tiqn, tiqn_wwn);
- struct config_item *df_item;
- struct config_group *stats_cg;
- int i;
-
- stats_cg = &tiqn->tiqn_wwn.fabric_stat_group;
- for (i = 0; stats_cg->default_groups[i]; i++) {
- df_item = &stats_cg->default_groups[i]->cg_item;
- stats_cg->default_groups[i] = NULL;
- config_item_put(df_item);
- }
- kfree(stats_cg->default_groups);
pr_debug("LIO_Target_ConfigFS: DEREGISTER -> %s\n",
tiqn->tiqn);
@@ -1566,77 +1245,47 @@ static void lio_target_call_coredeltiqn(
#define DEF_DISC_AUTH_STR(name, flags) \
__DEF_NACL_AUTH_STR(disc, name, flags) \
-static ssize_t iscsi_disc_show_##name( \
- struct target_fabric_configfs *tf, \
- char *page) \
+static ssize_t iscsi_disc_##name##_show(struct config_item *item, char *page) \
{ \
- return __iscsi_disc_show_##name(&iscsit_global->discovery_acl, \
+ return __iscsi_disc_##name##_show(&iscsit_global->discovery_acl,\
page); \
} \
-static ssize_t iscsi_disc_store_##name( \
- struct target_fabric_configfs *tf, \
- const char *page, \
- size_t count) \
+static ssize_t iscsi_disc_##name##_store(struct config_item *item, \
+ const char *page, size_t count) \
{ \
- return __iscsi_disc_store_##name(&iscsit_global->discovery_acl, \
+ return __iscsi_disc_##name##_store(&iscsit_global->discovery_acl, \
page, count); \
-}
+ \
+} \
+CONFIGFS_ATTR(iscsi_disc_, name)
+
+DEF_DISC_AUTH_STR(userid, NAF_USERID_SET);
+DEF_DISC_AUTH_STR(password, NAF_PASSWORD_SET);
+DEF_DISC_AUTH_STR(userid_mutual, NAF_USERID_IN_SET);
+DEF_DISC_AUTH_STR(password_mutual, NAF_PASSWORD_IN_SET);
#define DEF_DISC_AUTH_INT(name) \
__DEF_NACL_AUTH_INT(disc, name) \
-static ssize_t iscsi_disc_show_##name( \
- struct target_fabric_configfs *tf, \
- char *page) \
+static ssize_t iscsi_disc_##name##_show(struct config_item *item, char *page) \
{ \
- return __iscsi_disc_show_##name(&iscsit_global->discovery_acl, \
+ return __iscsi_disc_##name##_show(&iscsit_global->discovery_acl, \
page); \
-}
-
-#define DISC_AUTH_ATTR(_name, _mode) TF_DISC_ATTR(iscsi, _name, _mode)
-#define DISC_AUTH_ATTR_RO(_name) TF_DISC_ATTR_RO(iscsi, _name)
+} \
+CONFIGFS_ATTR_RO(iscsi_disc_, name)
-/*
- * One-way authentication userid
- */
-DEF_DISC_AUTH_STR(userid, NAF_USERID_SET);
-DISC_AUTH_ATTR(userid, S_IRUGO | S_IWUSR);
-/*
- * One-way authentication password
- */
-DEF_DISC_AUTH_STR(password, NAF_PASSWORD_SET);
-DISC_AUTH_ATTR(password, S_IRUGO | S_IWUSR);
-/*
- * Enforce mutual authentication
- */
DEF_DISC_AUTH_INT(authenticate_target);
-DISC_AUTH_ATTR_RO(authenticate_target);
-/*
- * Mutual authentication userid
- */
-DEF_DISC_AUTH_STR(userid_mutual, NAF_USERID_IN_SET);
-DISC_AUTH_ATTR(userid_mutual, S_IRUGO | S_IWUSR);
-/*
- * Mutual authentication password
- */
-DEF_DISC_AUTH_STR(password_mutual, NAF_PASSWORD_IN_SET);
-DISC_AUTH_ATTR(password_mutual, S_IRUGO | S_IWUSR);
-/*
- * enforce_discovery_auth
- */
-static ssize_t iscsi_disc_show_enforce_discovery_auth(
- struct target_fabric_configfs *tf,
- char *page)
+
+static ssize_t iscsi_disc_enforce_discovery_auth_show(struct config_item *item,
+ char *page)
{
struct iscsi_node_auth *discovery_auth = &iscsit_global->discovery_acl.node_auth;
- return sprintf(page, "%d\n", discovery_auth->enforce_discovery_auth);
+ return sysfs_emit(page, "%d\n", discovery_auth->enforce_discovery_auth);
}
-static ssize_t iscsi_disc_store_enforce_discovery_auth(
- struct target_fabric_configfs *tf,
- const char *page,
- size_t count)
+static ssize_t iscsi_disc_enforce_discovery_auth_store(struct config_item *item,
+ const char *page, size_t count)
{
struct iscsi_param *param;
struct iscsi_portal_group *discovery_tpg = iscsit_global->discovery_tpg;
@@ -1691,15 +1340,15 @@ static ssize_t iscsi_disc_store_enforce_discovery_auth(
return count;
}
-DISC_AUTH_ATTR(enforce_discovery_auth, S_IRUGO | S_IWUSR);
+CONFIGFS_ATTR(iscsi_disc_, enforce_discovery_auth);
static struct configfs_attribute *lio_target_discovery_auth_attrs[] = {
- &iscsi_disc_userid.attr,
- &iscsi_disc_password.attr,
- &iscsi_disc_authenticate_target.attr,
- &iscsi_disc_userid_mutual.attr,
- &iscsi_disc_password_mutual.attr,
- &iscsi_disc_enforce_discovery_auth.attr,
+ &iscsi_disc_attr_userid,
+ &iscsi_disc_attr_password,
+ &iscsi_disc_attr_authenticate_target,
+ &iscsi_disc_attr_userid_mutual,
+ &iscsi_disc_attr_password_mutual,
+ &iscsi_disc_attr_enforce_discovery_auth,
NULL,
};
@@ -1707,29 +1356,16 @@ static struct configfs_attribute *lio_target_discovery_auth_attrs[] = {
/* Start functions for target_core_fabric_ops */
-static char *iscsi_get_fabric_name(void)
-{
- return "iSCSI";
-}
-
-static u32 iscsi_get_task_tag(struct se_cmd *se_cmd)
-{
- struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
-
- /* only used for printks or comparison with ->ref_task_tag */
- return (__force u32)cmd->init_task_tag;
-}
-
static int iscsi_get_cmd_state(struct se_cmd *se_cmd)
{
- struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+ struct iscsit_cmd *cmd = container_of(se_cmd, struct iscsit_cmd, se_cmd);
return cmd->i_state;
}
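/*
 * The iscsi_cmd/iscsi_conn/iscsi_session -> iscsit_cmd/iscsit_conn/
 * iscsit_session renames threaded through the hunks below came later in
 * this series, presumably so the target's type names stop colliding with
 * the initiator-side structures declared via scsi/libiscsi.h.
 */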
static u32 lio_sess_get_index(struct se_session *se_sess)
{
- struct iscsi_session *sess = se_sess->fabric_sess_ptr;
+ struct iscsit_session *sess = se_sess->fabric_sess_ptr;
return sess->session_index;
}
@@ -1739,29 +1375,26 @@ static u32 lio_sess_get_initiator_sid(
unsigned char *buf,
u32 size)
{
- struct iscsi_session *sess = se_sess->fabric_sess_ptr;
+ struct iscsit_session *sess = se_sess->fabric_sess_ptr;
/*
* iSCSI Initiator Session Identifier from RFC-3720.
*/
- return snprintf(buf, size, "%02x%02x%02x%02x%02x%02x",
- sess->isid[0], sess->isid[1], sess->isid[2],
- sess->isid[3], sess->isid[4], sess->isid[5]);
+ return snprintf(buf, size, "%6phN", sess->isid);
}
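/*
 * "%6phN" is the printk hex-dump extension: it emits exactly six bytes as
 * bare hex digits with no separators, so the ISID still renders as the
 * same twelve characters the open-coded snprintf() produced, e.g.
 * isid = {0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc} -> "123456789abc".
 */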
static int lio_queue_data_in(struct se_cmd *se_cmd)
{
- struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+ struct iscsit_cmd *cmd = container_of(se_cmd, struct iscsit_cmd, se_cmd);
+ struct iscsit_conn *conn = cmd->conn;
cmd->i_state = ISTATE_SEND_DATAIN;
- cmd->conn->conn_transport->iscsit_queue_data_in(cmd->conn, cmd);
-
- return 0;
+ return conn->conn_transport->iscsit_queue_data_in(conn, cmd);
}
static int lio_write_pending(struct se_cmd *se_cmd)
{
- struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
- struct iscsi_conn *conn = cmd->conn;
+ struct iscsit_cmd *cmd = container_of(se_cmd, struct iscsit_cmd, se_cmd);
+ struct iscsit_conn *conn = cmd->conn;
if (!cmd->immediate_data && !cmd->unsolicited_data)
return conn->conn_transport->iscsit_get_dataout(conn, cmd, false);
@@ -1769,273 +1402,195 @@ static int lio_write_pending(struct se_cmd *se_cmd)
return 0;
}
-static int lio_write_pending_status(struct se_cmd *se_cmd)
-{
- struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
- int ret;
-
- spin_lock_bh(&cmd->istate_lock);
- ret = !(cmd->cmd_flags & ICF_GOT_LAST_DATAOUT);
- spin_unlock_bh(&cmd->istate_lock);
-
- return ret;
-}
-
static int lio_queue_status(struct se_cmd *se_cmd)
{
- struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+ struct iscsit_cmd *cmd = container_of(se_cmd, struct iscsit_cmd, se_cmd);
+ struct iscsit_conn *conn = cmd->conn;
cmd->i_state = ISTATE_SEND_STATUS;
- cmd->conn->conn_transport->iscsit_queue_status(cmd->conn, cmd);
- return 0;
+ if (cmd->se_cmd.scsi_status || cmd->sense_reason) {
+ return iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+ }
+ return conn->conn_transport->iscsit_queue_status(conn, cmd);
}
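/*
 * lio_queue_status() now special-cases responses that carry a SCSI status
 * or sense data: those go onto the connection's internal response queue
 * for the TX thread rather than to the transport's iscsit_queue_status(),
 * and the transport's return value is propagated instead of dropped.
 */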
static void lio_queue_tm_rsp(struct se_cmd *se_cmd)
{
- struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+ struct iscsit_cmd *cmd = container_of(se_cmd, struct iscsit_cmd, se_cmd);
cmd->i_state = ISTATE_SEND_TASKMGTRSP;
iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
}
-static char *lio_tpg_get_endpoint_wwn(struct se_portal_group *se_tpg)
+static void lio_aborted_task(struct se_cmd *se_cmd)
{
- struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
+ struct iscsit_cmd *cmd = container_of(se_cmd, struct iscsit_cmd, se_cmd);
- return &tpg->tpg_tiqn->tiqn[0];
+ cmd->conn->conn_transport->iscsit_aborted_task(cmd->conn, cmd);
}
-static u16 lio_tpg_get_tag(struct se_portal_group *se_tpg)
+static char *lio_tpg_get_endpoint_wwn(struct se_portal_group *se_tpg)
{
- struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
+ return to_iscsi_tpg(se_tpg)->tpg_tiqn->tiqn;
+}
- return tpg->tpgt;
+static u16 lio_tpg_get_tag(struct se_portal_group *se_tpg)
+{
+ return to_iscsi_tpg(se_tpg)->tpgt;
}
static u32 lio_tpg_get_default_depth(struct se_portal_group *se_tpg)
{
- struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
-
- return ISCSI_TPG_ATTRIB(tpg)->default_cmdsn_depth;
+ return to_iscsi_tpg(se_tpg)->tpg_attrib.default_cmdsn_depth;
}
static int lio_tpg_check_demo_mode(struct se_portal_group *se_tpg)
{
- struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
-
- return ISCSI_TPG_ATTRIB(tpg)->generate_node_acls;
+ return to_iscsi_tpg(se_tpg)->tpg_attrib.generate_node_acls;
}
static int lio_tpg_check_demo_mode_cache(struct se_portal_group *se_tpg)
{
- struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
-
- return ISCSI_TPG_ATTRIB(tpg)->cache_dynamic_acls;
+ return to_iscsi_tpg(se_tpg)->tpg_attrib.cache_dynamic_acls;
}
static int lio_tpg_check_demo_mode_write_protect(
struct se_portal_group *se_tpg)
{
- struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
-
- return ISCSI_TPG_ATTRIB(tpg)->demo_mode_write_protect;
+ return to_iscsi_tpg(se_tpg)->tpg_attrib.demo_mode_write_protect;
}
static int lio_tpg_check_prod_mode_write_protect(
struct se_portal_group *se_tpg)
{
- struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
-
- return ISCSI_TPG_ATTRIB(tpg)->prod_mode_write_protect;
+ return to_iscsi_tpg(se_tpg)->tpg_attrib.prod_mode_write_protect;
}
-static void lio_tpg_release_fabric_acl(
- struct se_portal_group *se_tpg,
- struct se_node_acl *se_acl)
+static int lio_tpg_check_prot_fabric_only(
+ struct se_portal_group *se_tpg)
{
- struct iscsi_node_acl *acl = container_of(se_acl,
- struct iscsi_node_acl, se_node_acl);
- kfree(acl);
+ /*
+ * Only report fabric_prot_type if t10_pi has also been enabled
+ * for incoming ib_isert sessions.
+ */
+ if (!to_iscsi_tpg(se_tpg)->tpg_attrib.t10_pi)
+ return 0;
+ return to_iscsi_tpg(se_tpg)->tpg_attrib.fabric_prot_type;
}
/*
- * Called with spin_lock_bh(struct se_portal_group->session_lock) held..
- *
- * Also, this function calls iscsit_inc_session_usage_count() on the
- * struct iscsi_session in question.
+ * This function calls iscsit_inc_session_usage_count() on the
+ * struct iscsit_session in question.
*/
-static int lio_tpg_shutdown_session(struct se_session *se_sess)
+static void lio_tpg_close_session(struct se_session *se_sess)
{
- struct iscsi_session *sess = se_sess->fabric_sess_ptr;
+ struct iscsit_session *sess = se_sess->fabric_sess_ptr;
+ struct se_portal_group *se_tpg = &sess->tpg->tpg_se_tpg;
+ spin_lock_bh(&se_tpg->session_lock);
spin_lock(&sess->conn_lock);
if (atomic_read(&sess->session_fall_back_to_erl0) ||
atomic_read(&sess->session_logout) ||
+ atomic_read(&sess->session_close) ||
(sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
spin_unlock(&sess->conn_lock);
- return 0;
+ spin_unlock_bh(&se_tpg->session_lock);
+ return;
}
+ iscsit_inc_session_usage_count(sess);
atomic_set(&sess->session_reinstatement, 1);
+ atomic_set(&sess->session_fall_back_to_erl0, 1);
+ atomic_set(&sess->session_close, 1);
spin_unlock(&sess->conn_lock);
iscsit_stop_time2retain_timer(sess);
- iscsit_stop_session(sess, 1, 1);
+ spin_unlock_bh(&se_tpg->session_lock);
- return 1;
-}
-
-/*
- * Calls iscsit_dec_session_usage_count() as inverse of
- * lio_tpg_shutdown_session()
- */
-static void lio_tpg_close_session(struct se_session *se_sess)
-{
- struct iscsi_session *sess = se_sess->fabric_sess_ptr;
- /*
- * If the iSCSI Session for the iSCSI Initiator Node exists,
- * forcefully shutdown the iSCSI NEXUS.
- */
- iscsit_close_session(sess);
+ iscsit_stop_session(sess, 1, 1);
+ iscsit_dec_session_usage_count(sess);
}
static u32 lio_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
- struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
-
- return tpg->tpg_tiqn->tiqn_index;
+ return to_iscsi_tpg(se_tpg)->tpg_tiqn->tiqn_index;
}
static void lio_set_default_node_attributes(struct se_node_acl *se_acl)
{
- struct iscsi_node_acl *acl = container_of(se_acl, struct iscsi_node_acl,
- se_node_acl);
+ struct iscsi_node_acl *acl = to_iscsi_nacl(se_acl);
+ struct se_portal_group *se_tpg = se_acl->se_tpg;
+ struct iscsi_portal_group *tpg = to_iscsi_tpg(se_tpg);
- ISCSI_NODE_ATTRIB(acl)->nacl = acl;
- iscsit_set_default_node_attribues(acl);
+ acl->node_attrib.nacl = acl;
+ iscsit_set_default_node_attribues(acl, tpg);
}
static int lio_check_stop_free(struct se_cmd *se_cmd)
{
- return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
+ return target_put_sess_cmd(se_cmd);
}
static void lio_release_cmd(struct se_cmd *se_cmd)
{
- struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+ struct iscsit_cmd *cmd = container_of(se_cmd, struct iscsit_cmd, se_cmd);
pr_debug("Entering lio_release_cmd for se_cmd: %p\n", se_cmd);
- cmd->release_cmd(cmd);
+ iscsit_release_cmd(cmd);
}
-/* End functions for target_core_fabric_ops */
-
-int iscsi_target_register_configfs(void)
-{
- struct target_fabric_configfs *fabric;
- int ret;
-
- lio_target_fabric_configfs = NULL;
- fabric = target_fabric_configfs_init(THIS_MODULE, "iscsi");
- if (IS_ERR(fabric)) {
- pr_err("target_fabric_configfs_init() for"
- " LIO-Target failed!\n");
- return PTR_ERR(fabric);
- }
- /*
- * Setup the fabric API of function pointers used by target_core_mod..
- */
- fabric->tf_ops.get_fabric_name = &iscsi_get_fabric_name;
- fabric->tf_ops.get_fabric_proto_ident = &iscsi_get_fabric_proto_ident;
- fabric->tf_ops.tpg_get_wwn = &lio_tpg_get_endpoint_wwn;
- fabric->tf_ops.tpg_get_tag = &lio_tpg_get_tag;
- fabric->tf_ops.tpg_get_default_depth = &lio_tpg_get_default_depth;
- fabric->tf_ops.tpg_get_pr_transport_id = &iscsi_get_pr_transport_id;
- fabric->tf_ops.tpg_get_pr_transport_id_len =
- &iscsi_get_pr_transport_id_len;
- fabric->tf_ops.tpg_parse_pr_out_transport_id =
- &iscsi_parse_pr_out_transport_id;
- fabric->tf_ops.tpg_check_demo_mode = &lio_tpg_check_demo_mode;
- fabric->tf_ops.tpg_check_demo_mode_cache =
- &lio_tpg_check_demo_mode_cache;
- fabric->tf_ops.tpg_check_demo_mode_write_protect =
- &lio_tpg_check_demo_mode_write_protect;
- fabric->tf_ops.tpg_check_prod_mode_write_protect =
- &lio_tpg_check_prod_mode_write_protect;
- fabric->tf_ops.tpg_alloc_fabric_acl = &lio_tpg_alloc_fabric_acl;
- fabric->tf_ops.tpg_release_fabric_acl = &lio_tpg_release_fabric_acl;
- fabric->tf_ops.tpg_get_inst_index = &lio_tpg_get_inst_index;
- fabric->tf_ops.check_stop_free = &lio_check_stop_free,
- fabric->tf_ops.release_cmd = &lio_release_cmd;
- fabric->tf_ops.shutdown_session = &lio_tpg_shutdown_session;
- fabric->tf_ops.close_session = &lio_tpg_close_session;
- fabric->tf_ops.sess_get_index = &lio_sess_get_index;
- fabric->tf_ops.sess_get_initiator_sid = &lio_sess_get_initiator_sid;
- fabric->tf_ops.write_pending = &lio_write_pending;
- fabric->tf_ops.write_pending_status = &lio_write_pending_status;
- fabric->tf_ops.set_default_node_attributes =
- &lio_set_default_node_attributes;
- fabric->tf_ops.get_task_tag = &iscsi_get_task_tag;
- fabric->tf_ops.get_cmd_state = &iscsi_get_cmd_state;
- fabric->tf_ops.queue_data_in = &lio_queue_data_in;
- fabric->tf_ops.queue_status = &lio_queue_status;
- fabric->tf_ops.queue_tm_rsp = &lio_queue_tm_rsp;
- /*
- * Setup function pointers for generic logic in target_core_fabric_configfs.c
- */
- fabric->tf_ops.fabric_make_wwn = &lio_target_call_coreaddtiqn;
- fabric->tf_ops.fabric_drop_wwn = &lio_target_call_coredeltiqn;
- fabric->tf_ops.fabric_make_tpg = &lio_target_tiqn_addtpg;
- fabric->tf_ops.fabric_drop_tpg = &lio_target_tiqn_deltpg;
- fabric->tf_ops.fabric_post_link = NULL;
- fabric->tf_ops.fabric_pre_unlink = NULL;
- fabric->tf_ops.fabric_make_np = &lio_target_call_addnptotpg;
- fabric->tf_ops.fabric_drop_np = &lio_target_call_delnpfromtpg;
- fabric->tf_ops.fabric_make_nodeacl = &lio_target_make_nodeacl;
- fabric->tf_ops.fabric_drop_nodeacl = &lio_target_drop_nodeacl;
- /*
- * Setup default attribute lists for various fabric->tf_cit_tmpl
- * struct config_item_type's
- */
- TF_CIT_TMPL(fabric)->tfc_discovery_cit.ct_attrs = lio_target_discovery_auth_attrs;
- TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = lio_target_wwn_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = lio_target_tpg_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = lio_target_tpg_attrib_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_auth_cit.ct_attrs = lio_target_tpg_auth_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = lio_target_tpg_param_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = lio_target_portal_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = lio_target_initiator_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = lio_target_nacl_attrib_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = lio_target_nacl_auth_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = lio_target_nacl_param_attrs;
-
- ret = target_fabric_configfs_register(fabric);
- if (ret < 0) {
- pr_err("target_fabric_configfs_register() for"
- " LIO-Target failed!\n");
- target_fabric_configfs_free(fabric);
- return ret;
- }
-
- lio_target_fabric_configfs = fabric;
- pr_debug("LIO_TARGET[0] - Set fabric ->"
- " lio_target_fabric_configfs\n");
- return 0;
-}
-
-
-void iscsi_target_deregister_configfs(void)
-{
- if (!lio_target_fabric_configfs)
- return;
- /*
- * Shutdown discovery sessions and disable discovery TPG
- */
- if (iscsit_global->discovery_tpg)
- iscsit_tpg_disable_portal_group(iscsit_global->discovery_tpg, 1);
-
- target_fabric_configfs_deregister(lio_target_fabric_configfs);
- lio_target_fabric_configfs = NULL;
- pr_debug("LIO_TARGET[0] - Cleared"
- " lio_target_fabric_configfs\n");
-}
+const struct target_core_fabric_ops iscsi_ops = {
+ .module = THIS_MODULE,
+ .fabric_alias = "iscsi",
+ .fabric_name = "iSCSI",
+ .node_acl_size = sizeof(struct iscsi_node_acl),
+ .tpg_get_wwn = lio_tpg_get_endpoint_wwn,
+ .tpg_get_tag = lio_tpg_get_tag,
+ .tpg_get_default_depth = lio_tpg_get_default_depth,
+ .tpg_check_demo_mode = lio_tpg_check_demo_mode,
+ .tpg_check_demo_mode_cache = lio_tpg_check_demo_mode_cache,
+ .tpg_check_demo_mode_write_protect =
+ lio_tpg_check_demo_mode_write_protect,
+ .tpg_check_prod_mode_write_protect =
+ lio_tpg_check_prod_mode_write_protect,
+ .tpg_check_prot_fabric_only = &lio_tpg_check_prot_fabric_only,
+ .tpg_get_inst_index = lio_tpg_get_inst_index,
+ .check_stop_free = lio_check_stop_free,
+ .release_cmd = lio_release_cmd,
+ .close_session = lio_tpg_close_session,
+ .sess_get_index = lio_sess_get_index,
+ .sess_get_initiator_sid = lio_sess_get_initiator_sid,
+ .write_pending = lio_write_pending,
+ .set_default_node_attributes = lio_set_default_node_attributes,
+ .get_cmd_state = iscsi_get_cmd_state,
+ .queue_data_in = lio_queue_data_in,
+ .queue_status = lio_queue_status,
+ .queue_tm_rsp = lio_queue_tm_rsp,
+ .aborted_task = lio_aborted_task,
+ .fabric_make_wwn = lio_target_call_coreaddtiqn,
+ .fabric_drop_wwn = lio_target_call_coredeltiqn,
+ .add_wwn_groups = lio_target_add_wwn_groups,
+ .fabric_make_tpg = lio_target_tiqn_addtpg,
+ .fabric_enable_tpg = lio_target_tiqn_enabletpg,
+ .fabric_drop_tpg = lio_target_tiqn_deltpg,
+ .fabric_make_np = lio_target_call_addnptotpg,
+ .fabric_drop_np = lio_target_call_delnpfromtpg,
+ .fabric_init_nodeacl = lio_target_init_nodeacl,
+
+ .tfc_discovery_attrs = lio_target_discovery_auth_attrs,
+ .tfc_wwn_attrs = lio_target_wwn_attrs,
+ .tfc_tpg_base_attrs = lio_target_tpg_attrs,
+ .tfc_tpg_attrib_attrs = lio_target_tpg_attrib_attrs,
+ .tfc_tpg_auth_attrs = lio_target_tpg_auth_attrs,
+ .tfc_tpg_param_attrs = lio_target_tpg_param_attrs,
+ .tfc_tpg_np_base_attrs = lio_target_portal_attrs,
+ .tfc_tpg_nacl_base_attrs = lio_target_initiator_attrs,
+ .tfc_tpg_nacl_attrib_attrs = lio_target_nacl_attrib_attrs,
+ .tfc_tpg_nacl_auth_attrs = lio_target_nacl_auth_attrs,
+ .tfc_tpg_nacl_param_attrs = lio_target_nacl_param_attrs,
+
+ .write_pending_must_be_called = 1,
+
+ .default_submit_type = TARGET_DIRECT_SUBMIT,
+ .direct_submit_supp = 1,
+};
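/*
 * With iscsi_ops a static const target_core_fabric_ops, the removed
 * iscsi_target_register_configfs()/iscsi_target_deregister_configfs()
 * pair collapses into the generic template helpers, so module init/exit
 * presumably reduces to little more than:
 *
 *	ret = target_register_template(&iscsi_ops);
 *	...
 *	target_unregister_template(&iscsi_ops);
 */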
diff --git a/drivers/target/iscsi/iscsi_target_configfs.h b/drivers/target/iscsi/iscsi_target_configfs.h
deleted file mode 100644
index 8cd5a63c4edc..000000000000
--- a/drivers/target/iscsi/iscsi_target_configfs.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef ISCSI_TARGET_CONFIGFS_H
-#define ISCSI_TARGET_CONFIGFS_H
-
-extern int iscsi_target_register_configfs(void);
-extern void iscsi_target_deregister_configfs(void);
-
-#endif /* ISCSI_TARGET_CONFIGFS_H */
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
deleted file mode 100644
index 4f77a78edef9..000000000000
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ /dev/null
@@ -1,876 +0,0 @@
-#ifndef ISCSI_TARGET_CORE_H
-#define ISCSI_TARGET_CORE_H
-
-#include <linux/in.h>
-#include <linux/configfs.h>
-#include <net/sock.h>
-#include <net/tcp.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/iscsi_proto.h>
-#include <target/target_core_base.h>
-
-#define ISCSIT_VERSION "v4.1.0-rc2"
-#define ISCSI_MAX_DATASN_MISSING_COUNT 16
-#define ISCSI_TX_THREAD_TCP_TIMEOUT 2
-#define ISCSI_RX_THREAD_TCP_TIMEOUT 2
-#define SECONDS_FOR_ASYNC_LOGOUT 10
-#define SECONDS_FOR_ASYNC_TEXT 10
-#define SECONDS_FOR_LOGOUT_COMP 15
-#define WHITE_SPACE " \t\v\f\n\r"
-
-/* struct iscsi_node_attrib sanity values */
-#define NA_DATAOUT_TIMEOUT 3
-#define NA_DATAOUT_TIMEOUT_MAX 60
-#define NA_DATAOUT_TIMEOUT_MIX 2
-#define NA_DATAOUT_TIMEOUT_RETRIES 5
-#define NA_DATAOUT_TIMEOUT_RETRIES_MAX 15
-#define NA_DATAOUT_TIMEOUT_RETRIES_MIN 1
-#define NA_NOPIN_TIMEOUT 15
-#define NA_NOPIN_TIMEOUT_MAX 60
-#define NA_NOPIN_TIMEOUT_MIN 3
-#define NA_NOPIN_RESPONSE_TIMEOUT 30
-#define NA_NOPIN_RESPONSE_TIMEOUT_MAX 60
-#define NA_NOPIN_RESPONSE_TIMEOUT_MIN 3
-#define NA_RANDOM_DATAIN_PDU_OFFSETS 0
-#define NA_RANDOM_DATAIN_SEQ_OFFSETS 0
-#define NA_RANDOM_R2T_OFFSETS 0
-#define NA_DEFAULT_ERL 0
-#define NA_DEFAULT_ERL_MAX 2
-#define NA_DEFAULT_ERL_MIN 0
-
-/* struct iscsi_tpg_attrib sanity values */
-#define TA_AUTHENTICATION 1
-#define TA_LOGIN_TIMEOUT 15
-#define TA_LOGIN_TIMEOUT_MAX 30
-#define TA_LOGIN_TIMEOUT_MIN 5
-#define TA_NETIF_TIMEOUT 2
-#define TA_NETIF_TIMEOUT_MAX 15
-#define TA_NETIF_TIMEOUT_MIN 2
-#define TA_GENERATE_NODE_ACLS 0
-#define TA_DEFAULT_CMDSN_DEPTH 16
-#define TA_DEFAULT_CMDSN_DEPTH_MAX 512
-#define TA_DEFAULT_CMDSN_DEPTH_MIN 1
-#define TA_CACHE_DYNAMIC_ACLS 0
-/* Enabled by default in demo mode (generic_node_acls=1) */
-#define TA_DEMO_MODE_WRITE_PROTECT 1
-/* Disabled by default in production mode w/ explicit ACLs */
-#define TA_PROD_MODE_WRITE_PROTECT 0
-#define TA_CACHE_CORE_NPS 0
-
-
-#define ISCSI_IOV_DATA_BUFFER 5
-
-enum iscsit_transport_type {
- ISCSI_TCP = 0,
- ISCSI_SCTP_TCP = 1,
- ISCSI_SCTP_UDP = 2,
- ISCSI_IWARP_TCP = 3,
- ISCSI_IWARP_SCTP = 4,
- ISCSI_INFINIBAND = 5,
-};
-
-/* RFC-3720 7.1.4 Standard Connection State Diagram for a Target */
-enum target_conn_state_table {
- TARG_CONN_STATE_FREE = 0x1,
- TARG_CONN_STATE_XPT_UP = 0x3,
- TARG_CONN_STATE_IN_LOGIN = 0x4,
- TARG_CONN_STATE_LOGGED_IN = 0x5,
- TARG_CONN_STATE_IN_LOGOUT = 0x6,
- TARG_CONN_STATE_LOGOUT_REQUESTED = 0x7,
- TARG_CONN_STATE_CLEANUP_WAIT = 0x8,
-};
-
-/* RFC-3720 7.3.2 Session State Diagram for a Target */
-enum target_sess_state_table {
- TARG_SESS_STATE_FREE = 0x1,
- TARG_SESS_STATE_ACTIVE = 0x2,
- TARG_SESS_STATE_LOGGED_IN = 0x3,
- TARG_SESS_STATE_FAILED = 0x4,
- TARG_SESS_STATE_IN_CONTINUE = 0x5,
-};
-
-/* struct iscsi_data_count->type */
-enum data_count_type {
- ISCSI_RX_DATA = 1,
- ISCSI_TX_DATA = 2,
-};
-
-/* struct iscsi_datain_req->dr_complete */
-enum datain_req_comp_table {
- DATAIN_COMPLETE_NORMAL = 1,
- DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY = 2,
- DATAIN_COMPLETE_CONNECTION_RECOVERY = 3,
-};
-
-/* struct iscsi_datain_req->recovery */
-enum datain_req_rec_table {
- DATAIN_WITHIN_COMMAND_RECOVERY = 1,
- DATAIN_CONNECTION_RECOVERY = 2,
-};
-
-/* struct iscsi_portal_group->state */
-enum tpg_state_table {
- TPG_STATE_FREE = 0,
- TPG_STATE_ACTIVE = 1,
- TPG_STATE_INACTIVE = 2,
- TPG_STATE_COLD_RESET = 3,
-};
-
-/* struct iscsi_tiqn->tiqn_state */
-enum tiqn_state_table {
- TIQN_STATE_ACTIVE = 1,
- TIQN_STATE_SHUTDOWN = 2,
-};
-
-/* struct iscsi_cmd->cmd_flags */
-enum cmd_flags_table {
- ICF_GOT_LAST_DATAOUT = 0x00000001,
- ICF_GOT_DATACK_SNACK = 0x00000002,
- ICF_NON_IMMEDIATE_UNSOLICITED_DATA = 0x00000004,
- ICF_SENT_LAST_R2T = 0x00000008,
- ICF_WITHIN_COMMAND_RECOVERY = 0x00000010,
- ICF_CONTIG_MEMORY = 0x00000020,
- ICF_ATTACHED_TO_RQUEUE = 0x00000040,
- ICF_OOO_CMDSN = 0x00000080,
- IFC_SENDTARGETS_ALL = 0x00000100,
- IFC_SENDTARGETS_SINGLE = 0x00000200,
-};
-
-/* struct iscsi_cmd->i_state */
-enum cmd_i_state_table {
- ISTATE_NO_STATE = 0,
- ISTATE_NEW_CMD = 1,
- ISTATE_DEFERRED_CMD = 2,
- ISTATE_UNSOLICITED_DATA = 3,
- ISTATE_RECEIVE_DATAOUT = 4,
- ISTATE_RECEIVE_DATAOUT_RECOVERY = 5,
- ISTATE_RECEIVED_LAST_DATAOUT = 6,
- ISTATE_WITHIN_DATAOUT_RECOVERY = 7,
- ISTATE_IN_CONNECTION_RECOVERY = 8,
- ISTATE_RECEIVED_TASKMGT = 9,
- ISTATE_SEND_ASYNCMSG = 10,
- ISTATE_SENT_ASYNCMSG = 11,
- ISTATE_SEND_DATAIN = 12,
- ISTATE_SEND_LAST_DATAIN = 13,
- ISTATE_SENT_LAST_DATAIN = 14,
- ISTATE_SEND_LOGOUTRSP = 15,
- ISTATE_SENT_LOGOUTRSP = 16,
- ISTATE_SEND_NOPIN = 17,
- ISTATE_SENT_NOPIN = 18,
- ISTATE_SEND_REJECT = 19,
- ISTATE_SENT_REJECT = 20,
- ISTATE_SEND_R2T = 21,
- ISTATE_SENT_R2T = 22,
- ISTATE_SEND_R2T_RECOVERY = 23,
- ISTATE_SENT_R2T_RECOVERY = 24,
- ISTATE_SEND_LAST_R2T = 25,
- ISTATE_SENT_LAST_R2T = 26,
- ISTATE_SEND_LAST_R2T_RECOVERY = 27,
- ISTATE_SENT_LAST_R2T_RECOVERY = 28,
- ISTATE_SEND_STATUS = 29,
- ISTATE_SEND_STATUS_BROKEN_PC = 30,
- ISTATE_SENT_STATUS = 31,
- ISTATE_SEND_STATUS_RECOVERY = 32,
- ISTATE_SENT_STATUS_RECOVERY = 33,
- ISTATE_SEND_TASKMGTRSP = 34,
- ISTATE_SENT_TASKMGTRSP = 35,
- ISTATE_SEND_TEXTRSP = 36,
- ISTATE_SENT_TEXTRSP = 37,
- ISTATE_SEND_NOPIN_WANT_RESPONSE = 38,
- ISTATE_SENT_NOPIN_WANT_RESPONSE = 39,
- ISTATE_SEND_NOPIN_NO_RESPONSE = 40,
- ISTATE_REMOVE = 41,
- ISTATE_FREE = 42,
-};
-
-/* Used for iscsi_recover_cmdsn() return values */
-enum recover_cmdsn_ret_table {
- CMDSN_ERROR_CANNOT_RECOVER = -1,
- CMDSN_NORMAL_OPERATION = 0,
- CMDSN_LOWER_THAN_EXP = 1,
- CMDSN_HIGHER_THAN_EXP = 2,
-};
-
-/* Used for iscsi_handle_immediate_data() return values */
-enum immedate_data_ret_table {
- IMMEDIATE_DATA_CANNOT_RECOVER = -1,
- IMMEDIATE_DATA_NORMAL_OPERATION = 0,
- IMMEDIATE_DATA_ERL1_CRC_FAILURE = 1,
-};
-
-/* Used for iscsi_decide_dataout_action() return values */
-enum dataout_action_ret_table {
- DATAOUT_CANNOT_RECOVER = -1,
- DATAOUT_NORMAL = 0,
- DATAOUT_SEND_R2T = 1,
- DATAOUT_SEND_TO_TRANSPORT = 2,
- DATAOUT_WITHIN_COMMAND_RECOVERY = 3,
-};
-
-/* Used for struct iscsi_node_auth->naf_flags */
-enum naf_flags_table {
- NAF_USERID_SET = 0x01,
- NAF_PASSWORD_SET = 0x02,
- NAF_USERID_IN_SET = 0x04,
- NAF_PASSWORD_IN_SET = 0x08,
-};
-
-/* Used by various struct timer_list to manage iSCSI specific state */
-enum iscsi_timer_flags_table {
- ISCSI_TF_RUNNING = 0x01,
- ISCSI_TF_STOP = 0x02,
- ISCSI_TF_EXPIRED = 0x04,
-};
-
-/* Used for struct iscsi_np->np_flags */
-enum np_flags_table {
- NPF_IP_NETWORK = 0x00,
-};
-
-/* Used for struct iscsi_np->np_thread_state */
-enum np_thread_state_table {
- ISCSI_NP_THREAD_ACTIVE = 1,
- ISCSI_NP_THREAD_INACTIVE = 2,
- ISCSI_NP_THREAD_RESET = 3,
- ISCSI_NP_THREAD_SHUTDOWN = 4,
- ISCSI_NP_THREAD_EXIT = 5,
-};
-
-struct iscsi_conn_ops {
- u8 HeaderDigest; /* [0,1] == [None,CRC32C] */
- u8 DataDigest; /* [0,1] == [None,CRC32C] */
- u32 MaxRecvDataSegmentLength; /* [512..2**24-1] */
- u32 MaxXmitDataSegmentLength; /* [512..2**24-1] */
- u8 OFMarker; /* [0,1] == [No,Yes] */
- u8 IFMarker; /* [0,1] == [No,Yes] */
- u32 OFMarkInt; /* [1..65535] */
- u32 IFMarkInt; /* [1..65535] */
- /*
- * iSER specific connection parameters
- */
- u32 InitiatorRecvDataSegmentLength; /* [512..2**24-1] */
- u32 TargetRecvDataSegmentLength; /* [512..2**24-1] */
-};
-
-struct iscsi_sess_ops {
- char InitiatorName[224];
- char InitiatorAlias[256];
- char TargetName[224];
- char TargetAlias[256];
- char TargetAddress[256];
- u16 TargetPortalGroupTag; /* [0..65535] */
- u16 MaxConnections; /* [1..65535] */
- u8 InitialR2T; /* [0,1] == [No,Yes] */
- u8 ImmediateData; /* [0,1] == [No,Yes] */
- u32 MaxBurstLength; /* [512..2**24-1] */
- u32 FirstBurstLength; /* [512..2**24-1] */
- u16 DefaultTime2Wait; /* [0..3600] */
- u16 DefaultTime2Retain; /* [0..3600] */
- u16 MaxOutstandingR2T; /* [1..65535] */
- u8 DataPDUInOrder; /* [0,1] == [No,Yes] */
- u8 DataSequenceInOrder; /* [0,1] == [No,Yes] */
- u8 ErrorRecoveryLevel; /* [0..2] */
- u8 SessionType; /* [0,1] == [Normal,Discovery]*/
- /*
- * iSER specific session parameters
- */
- u8 RDMAExtensions; /* [0,1] == [No,Yes] */
-};
-
-struct iscsi_queue_req {
- int state;
- struct iscsi_cmd *cmd;
- struct list_head qr_list;
-};
-
-struct iscsi_data_count {
- int data_length;
- int sync_and_steering;
- enum data_count_type type;
- u32 iov_count;
- u32 ss_iov_count;
- u32 ss_marker_count;
- struct kvec *iov;
-};
-
-struct iscsi_param_list {
- bool iser;
- struct list_head param_list;
- struct list_head extra_response_list;
-};
-
-struct iscsi_datain_req {
- enum datain_req_comp_table dr_complete;
- int generate_recovery_values;
- enum datain_req_rec_table recovery;
- u32 begrun;
- u32 runlength;
- u32 data_length;
- u32 data_offset;
- u32 data_sn;
- u32 next_burst_len;
- u32 read_data_done;
- u32 seq_send_order;
- struct list_head cmd_datain_node;
-} ____cacheline_aligned;
-
-struct iscsi_ooo_cmdsn {
- u16 cid;
- u32 batch_count;
- u32 cmdsn;
- u32 exp_cmdsn;
- struct iscsi_cmd *cmd;
- struct list_head ooo_list;
-} ____cacheline_aligned;
-
-struct iscsi_datain {
- u8 flags;
- u32 data_sn;
- u32 length;
- u32 offset;
-} ____cacheline_aligned;
-
-struct iscsi_r2t {
- int seq_complete;
- int recovery_r2t;
- int sent_r2t;
- u32 r2t_sn;
- u32 offset;
- u32 targ_xfer_tag;
- u32 xfer_len;
- struct list_head r2t_list;
-} ____cacheline_aligned;
-
-struct iscsi_cmd {
- enum iscsi_timer_flags_table dataout_timer_flags;
- /* DataOUT timeout retries */
- u8 dataout_timeout_retries;
- /* Within command recovery count */
- u8 error_recovery_count;
- /* iSCSI dependent state for out of order CmdSNs */
- enum cmd_i_state_table deferred_i_state;
- /* iSCSI dependent state */
- enum cmd_i_state_table i_state;
- /* Command is an immediate command (ISCSI_OP_IMMEDIATE set) */
- u8 immediate_cmd;
- /* Immediate data present */
- u8 immediate_data;
- /* iSCSI Opcode */
- u8 iscsi_opcode;
- /* iSCSI Response Code */
- u8 iscsi_response;
- /* Logout reason when iscsi_opcode == ISCSI_INIT_LOGOUT_CMND */
- u8 logout_reason;
- /* Logout response code when iscsi_opcode == ISCSI_INIT_LOGOUT_CMND */
- u8 logout_response;
- /* MaxCmdSN has been incremented */
- u8 maxcmdsn_inc;
- /* Immediate Unsolicited Dataout */
- u8 unsolicited_data;
- /* Reject reason code */
- u8 reject_reason;
- /* CID contained in logout PDU when opcode == ISCSI_INIT_LOGOUT_CMND */
- u16 logout_cid;
- /* Command flags */
- enum cmd_flags_table cmd_flags;
- /* Initiator Task Tag assigned from Initiator */
- itt_t init_task_tag;
- /* Target Transfer Tag assigned from Target */
- u32 targ_xfer_tag;
- /* CmdSN assigned from Initiator */
- u32 cmd_sn;
- /* ExpStatSN assigned from Initiator */
- u32 exp_stat_sn;
- /* StatSN assigned to this ITT */
- u32 stat_sn;
- /* DataSN Counter */
- u32 data_sn;
- /* R2TSN Counter */
- u32 r2t_sn;
- /* Last DataSN acknowledged via DataAck SNACK */
- u32 acked_data_sn;
- /* Used for echoing NOPOUT ping data */
- u32 buf_ptr_size;
- /* Used to store DataDigest */
- u32 data_crc;
- /* Counter for MaxOutstandingR2T */
- u32 outstanding_r2ts;
- /* Next R2T Offset when DataSequenceInOrder=Yes */
- u32 r2t_offset;
- /* Iovec current and orig count for iscsi_cmd->iov_data */
- u32 iov_data_count;
- u32 orig_iov_data_count;
- /* Number of miscellaneous iovecs used for IP stack calls */
- u32 iov_misc_count;
- /* Number of struct iscsi_pdu in struct iscsi_cmd->pdu_list */
- u32 pdu_count;
- /* Next struct iscsi_pdu to send in struct iscsi_cmd->pdu_list */
- u32 pdu_send_order;
- /* Current struct iscsi_pdu in struct iscsi_cmd->pdu_list */
- u32 pdu_start;
- /* Next struct iscsi_seq to send in struct iscsi_cmd->seq_list */
- u32 seq_send_order;
- /* Number of struct iscsi_seq in struct iscsi_cmd->seq_list */
- u32 seq_count;
- /* Current struct iscsi_seq in struct iscsi_cmd->seq_list */
- u32 seq_no;
- /* Lowest offset in current DataOUT sequence */
- u32 seq_start_offset;
- /* Highest offset in current DataOUT sequence */
- u32 seq_end_offset;
- /* Total size in bytes received so far of READ data */
- u32 read_data_done;
- /* Total size in bytes received so far of WRITE data */
- u32 write_data_done;
- /* Counter for FirstBurstLength key */
- u32 first_burst_len;
- /* Counter for MaxBurstLength key */
- u32 next_burst_len;
- /* Transfer size used for IP stack calls */
- u32 tx_size;
- /* Buffer used for various purposes */
- void *buf_ptr;
- /* Used by SendTargets=[iqn.,eui.] discovery */
- void *text_in_ptr;
- /* See include/linux/dma-mapping.h */
- enum dma_data_direction data_direction;
- /* iSCSI PDU Header + CRC */
- unsigned char pdu[ISCSI_HDR_LEN + ISCSI_CRC_LEN];
- /* Number of times struct iscsi_cmd is present in immediate queue */
- atomic_t immed_queue_count;
- atomic_t response_queue_count;
- spinlock_t datain_lock;
- spinlock_t dataout_timeout_lock;
- /* spinlock for protecting struct iscsi_cmd->i_state */
- spinlock_t istate_lock;
- /* spinlock for adding within command recovery entries */
- spinlock_t error_lock;
- /* spinlock for adding R2Ts */
- spinlock_t r2t_lock;
- /* DataIN List */
- struct list_head datain_list;
- /* R2T List */
- struct list_head cmd_r2t_list;
- /* Timer for DataOUT */
- struct timer_list dataout_timer;
- /* Iovecs for SCSI data payload RX/TX w/ kernel level sockets */
- struct kvec *iov_data;
- /* Iovecs for miscellaneous purposes */
-#define ISCSI_MISC_IOVECS 5
- struct kvec iov_misc[ISCSI_MISC_IOVECS];
- /* Array of struct iscsi_pdu used for DataPDUInOrder=No */
- struct iscsi_pdu *pdu_list;
- /* Current struct iscsi_pdu used for DataPDUInOrder=No */
- struct iscsi_pdu *pdu_ptr;
- /* Array of struct iscsi_seq used for DataSequenceInOrder=No */
- struct iscsi_seq *seq_list;
- /* Current struct iscsi_seq used for DataSequenceInOrder=No */
- struct iscsi_seq *seq_ptr;
- /* TMR Request when iscsi_opcode == ISCSI_OP_SCSI_TMFUNC */
- struct iscsi_tmr_req *tmr_req;
- /* Connection this command is allegiant to */
- struct iscsi_conn *conn;
- /* Pointer to connection recovery entry */
- struct iscsi_conn_recovery *cr;
- /* Session the command is part of, used for connection recovery */
- struct iscsi_session *sess;
- /* list_head for connection list */
- struct list_head i_conn_node;
- /* The TCM I/O descriptor that is accessed via container_of() */
- struct se_cmd se_cmd;
- /* Sense buffer that will be mapped into outgoing status */
-#define ISCSI_SENSE_BUFFER_LEN (TRANSPORT_SENSE_BUFFER + 2)
- unsigned char sense_buffer[ISCSI_SENSE_BUFFER_LEN];
-
- u32 padding;
- u8 pad_bytes[4];
-
- struct scatterlist *first_data_sg;
- u32 first_data_sg_off;
- u32 kmapped_nents;
- sense_reason_t sense_reason;
- void (*release_cmd)(struct iscsi_cmd *);
-} ____cacheline_aligned;
-
-struct iscsi_tmr_req {
- bool task_reassign:1;
- u32 exp_data_sn;
- struct iscsi_cmd *ref_cmd;
- struct iscsi_conn_recovery *conn_recovery;
- struct se_tmr_req *se_tmr_req;
-};
-
-struct iscsi_conn {
- wait_queue_head_t queues_wq;
- /* Authentication Successful for this connection */
- u8 auth_complete;
- /* State connection is currently in */
- u8 conn_state;
- u8 conn_logout_reason;
- u8 network_transport;
- enum iscsi_timer_flags_table nopin_timer_flags;
- enum iscsi_timer_flags_table nopin_response_timer_flags;
- /* Used to know what thread encountered a transport failure */
- u8 which_thread;
- /* connection id assigned by the Initiator */
- u16 cid;
- /* Remote TCP Port */
- u16 login_port;
- u16 local_port;
- int net_size;
- int login_family;
- u32 auth_id;
- u32 conn_flags;
- /* Used for iscsi_tx_login_rsp() */
- itt_t login_itt;
- u32 exp_statsn;
- /* Per connection status sequence number */
- u32 stat_sn;
- /* IFMarkInt's Current Value */
- u32 if_marker;
- /* OFMarkInt's Current Value */
- u32 of_marker;
- /* Used for calculating OFMarker offset to next PDU */
- u32 of_marker_offset;
-#define IPV6_ADDRESS_SPACE 48
- unsigned char login_ip[IPV6_ADDRESS_SPACE];
- unsigned char local_ip[IPV6_ADDRESS_SPACE];
- int conn_usage_count;
- int conn_waiting_on_uc;
- atomic_t check_immediate_queue;
- atomic_t conn_logout_remove;
- atomic_t connection_exit;
- atomic_t connection_recovery;
- atomic_t connection_reinstatement;
- atomic_t connection_wait_rcfr;
- atomic_t sleep_on_conn_wait_comp;
- atomic_t transport_failed;
- struct completion conn_post_wait_comp;
- struct completion conn_wait_comp;
- struct completion conn_wait_rcfr_comp;
- struct completion conn_waiting_on_uc_comp;
- struct completion conn_logout_comp;
- struct completion tx_half_close_comp;
- struct completion rx_half_close_comp;
- /* socket used by this connection */
- struct socket *sock;
- struct timer_list nopin_timer;
- struct timer_list nopin_response_timer;
- struct timer_list transport_timer;
- /* Spinlock used for add/deleting cmd's from conn_cmd_list */
- spinlock_t cmd_lock;
- spinlock_t conn_usage_lock;
- spinlock_t immed_queue_lock;
- spinlock_t nopin_timer_lock;
- spinlock_t response_queue_lock;
- spinlock_t state_lock;
- /* libcrypto RX and TX contexts for crc32c */
- struct hash_desc conn_rx_hash;
- struct hash_desc conn_tx_hash;
- /* Used for scheduling TX and RX connection kthreads */
- cpumask_var_t conn_cpumask;
- unsigned int conn_rx_reset_cpumask:1;
- unsigned int conn_tx_reset_cpumask:1;
- /* list_head of struct iscsi_cmd for this connection */
- struct list_head conn_cmd_list;
- struct list_head immed_queue_list;
- struct list_head response_queue_list;
- struct iscsi_conn_ops *conn_ops;
- struct iscsi_login *conn_login;
- struct iscsit_transport *conn_transport;
- struct iscsi_param_list *param_list;
- /* Used for per connection auth state machine */
- void *auth_protocol;
- void *context;
- struct iscsi_login_thread_s *login_thread;
- struct iscsi_portal_group *tpg;
- /* Pointer to parent session */
- struct iscsi_session *sess;
- /* Pointer to thread_set in use for this conn's threads */
- struct iscsi_thread_set *thread_set;
- /* list_head for session connection list */
- struct list_head conn_list;
-} ____cacheline_aligned;
-
-struct iscsi_conn_recovery {
- u16 cid;
- u32 cmd_count;
- u32 maxrecvdatasegmentlength;
- u32 maxxmitdatasegmentlength;
- int ready_for_reallegiance;
- struct list_head conn_recovery_cmd_list;
- spinlock_t conn_recovery_cmd_lock;
- struct timer_list time2retain_timer;
- struct iscsi_session *sess;
- struct list_head cr_list;
-} ____cacheline_aligned;
-
-struct iscsi_session {
- u8 initiator_vendor;
- u8 isid[6];
- enum iscsi_timer_flags_table time2retain_timer_flags;
- u8 version_active;
- u16 cid_called;
- u16 conn_recovery_count;
- u16 tsih;
- /* state session is currently in */
- u32 session_state;
- /* session wide counter: initiator assigned task tag */
- itt_t init_task_tag;
- /* session wide counter: target assigned task tag */
- u32 targ_xfer_tag;
- u32 cmdsn_window;
-
- /* protects cmdsn values */
- struct mutex cmdsn_mutex;
- /* session wide counter: expected command sequence number */
- u32 exp_cmd_sn;
- /* session wide counter: maximum allowed command sequence number */
- u32 max_cmd_sn;
- struct list_head sess_ooo_cmdsn_list;
-
- /* LIO specific session ID */
- u32 sid;
- char auth_type[8];
- /* unique within the target */
- int session_index;
- /* Used for session reference counting */
- int session_usage_count;
- int session_waiting_on_uc;
- u32 cmd_pdus;
- u32 rsp_pdus;
- u64 tx_data_octets;
- u64 rx_data_octets;
- u32 conn_digest_errors;
- u32 conn_timeout_errors;
- u64 creation_time;
- spinlock_t session_stats_lock;
- /* Number of active connections */
- atomic_t nconn;
- atomic_t session_continuation;
- atomic_t session_fall_back_to_erl0;
- atomic_t session_logout;
- atomic_t session_reinstatement;
- atomic_t session_stop_active;
- atomic_t sleep_on_sess_wait_comp;
- /* connection list */
- struct list_head sess_conn_list;
- struct list_head cr_active_list;
- struct list_head cr_inactive_list;
- spinlock_t conn_lock;
- spinlock_t cr_a_lock;
- spinlock_t cr_i_lock;
- spinlock_t session_usage_lock;
- spinlock_t ttt_lock;
- struct completion async_msg_comp;
- struct completion reinstatement_comp;
- struct completion session_wait_comp;
- struct completion session_waiting_on_uc_comp;
- struct timer_list time2retain_timer;
- struct iscsi_sess_ops *sess_ops;
- struct se_session *se_sess;
- struct iscsi_portal_group *tpg;
-} ____cacheline_aligned;
-
-struct iscsi_login {
- u8 auth_complete;
- u8 checked_for_existing;
- u8 current_stage;
- u8 leading_connection;
- u8 first_request;
- u8 version_min;
- u8 version_max;
- u8 login_complete;
- u8 login_failed;
- char isid[6];
- u32 cmd_sn;
- itt_t init_task_tag;
- u32 initial_exp_statsn;
- u32 rsp_length;
- u16 cid;
- u16 tsih;
- char req[ISCSI_HDR_LEN];
- char rsp[ISCSI_HDR_LEN];
- char *req_buf;
- char *rsp_buf;
- struct iscsi_conn *conn;
-} ____cacheline_aligned;
-
-struct iscsi_node_attrib {
- u32 dataout_timeout;
- u32 dataout_timeout_retries;
- u32 default_erl;
- u32 nopin_timeout;
- u32 nopin_response_timeout;
- u32 random_datain_pdu_offsets;
- u32 random_datain_seq_offsets;
- u32 random_r2t_offsets;
- u32 tmr_cold_reset;
- u32 tmr_warm_reset;
- struct iscsi_node_acl *nacl;
-};
-
-struct se_dev_entry_s;
-
-struct iscsi_node_auth {
- enum naf_flags_table naf_flags;
- int authenticate_target;
- /* Used for iscsit_global->discovery_auth,
- * set to zero (auth disabled) by default */
- int enforce_discovery_auth;
-#define MAX_USER_LEN 256
-#define MAX_PASS_LEN 256
- char userid[MAX_USER_LEN];
- char password[MAX_PASS_LEN];
- char userid_mutual[MAX_USER_LEN];
- char password_mutual[MAX_PASS_LEN];
-};
-
-#include "iscsi_target_stat.h"
-
-struct iscsi_node_stat_grps {
- struct config_group iscsi_sess_stats_group;
- struct config_group iscsi_conn_stats_group;
-};
-
-struct iscsi_node_acl {
- struct iscsi_node_attrib node_attrib;
- struct iscsi_node_auth node_auth;
- struct iscsi_node_stat_grps node_stat_grps;
- struct se_node_acl se_node_acl;
-};
-
-#define NODE_STAT_GRPS(nacl) (&(nacl)->node_stat_grps)
-
-#define ISCSI_NODE_ATTRIB(t) (&(t)->node_attrib)
-#define ISCSI_NODE_AUTH(t) (&(t)->node_auth)
-
-struct iscsi_tpg_attrib {
- u32 authentication;
- u32 login_timeout;
- u32 netif_timeout;
- u32 generate_node_acls;
- u32 cache_dynamic_acls;
- u32 default_cmdsn_depth;
- u32 demo_mode_write_protect;
- u32 prod_mode_write_protect;
- struct iscsi_portal_group *tpg;
-};
-
-struct iscsi_np {
- int np_network_transport;
- int np_ip_proto;
- int np_sock_type;
- enum np_thread_state_table np_thread_state;
- enum iscsi_timer_flags_table np_login_timer_flags;
- u32 np_exports;
- enum np_flags_table np_flags;
- unsigned char np_ip[IPV6_ADDRESS_SPACE];
- u16 np_port;
- spinlock_t np_thread_lock;
- struct completion np_restart_comp;
- struct socket *np_socket;
- struct __kernel_sockaddr_storage np_sockaddr;
- struct task_struct *np_thread;
- struct timer_list np_login_timer;
- struct iscsi_portal_group *np_login_tpg;
- void *np_context;
- struct iscsit_transport *np_transport;
- struct list_head np_list;
-} ____cacheline_aligned;
-
-struct iscsi_tpg_np {
- struct iscsi_np *tpg_np;
- struct iscsi_portal_group *tpg;
- struct iscsi_tpg_np *tpg_np_parent;
- struct list_head tpg_np_list;
- struct list_head tpg_np_child_list;
- struct list_head tpg_np_parent_list;
- struct se_tpg_np se_tpg_np;
- spinlock_t tpg_np_parent_lock;
-};
-
-struct iscsi_portal_group {
- unsigned char tpg_chap_id;
- /* TPG State */
- enum tpg_state_table tpg_state;
- /* Target Portal Group Tag */
- u16 tpgt;
- /* Id assigned to target sessions */
- u16 ntsih;
- /* Number of active sessions */
- u32 nsessions;
- /* Number of Network Portals available for this TPG */
- u32 num_tpg_nps;
- /* Per TPG LIO specific session ID. */
- u32 sid;
- /* Spinlock for adding/removing Network Portals */
- spinlock_t tpg_np_lock;
- spinlock_t tpg_state_lock;
- struct se_portal_group tpg_se_tpg;
- struct mutex tpg_access_lock;
- struct mutex np_login_lock;
- struct iscsi_tpg_attrib tpg_attrib;
- struct iscsi_node_auth tpg_demo_auth;
- /* Pointer to default list of iSCSI parameters for TPG */
- struct iscsi_param_list *param_list;
- struct iscsi_tiqn *tpg_tiqn;
- struct list_head tpg_gnp_list;
- struct list_head tpg_list;
-} ____cacheline_aligned;
-
-#define ISCSI_TPG_C(c) ((struct iscsi_portal_group *)(c)->tpg)
-#define ISCSI_TPG_LUN(c, l) ((iscsi_tpg_list_t *)(c)->tpg->tpg_lun_list_t[l])
-#define ISCSI_TPG_S(s) ((struct iscsi_portal_group *)(s)->tpg)
-#define ISCSI_TPG_ATTRIB(t) (&(t)->tpg_attrib)
-#define SE_TPG(tpg) (&(tpg)->tpg_se_tpg)
-
-struct iscsi_wwn_stat_grps {
- struct config_group iscsi_stat_group;
- struct config_group iscsi_instance_group;
- struct config_group iscsi_sess_err_group;
- struct config_group iscsi_tgt_attr_group;
- struct config_group iscsi_login_stats_group;
- struct config_group iscsi_logout_stats_group;
-};
-
-struct iscsi_tiqn {
-#define ISCSI_IQN_LEN 224
- unsigned char tiqn[ISCSI_IQN_LEN];
- enum tiqn_state_table tiqn_state;
- int tiqn_access_count;
- u32 tiqn_active_tpgs;
- u32 tiqn_ntpgs;
- u32 tiqn_num_tpg_nps;
- u32 tiqn_nsessions;
- struct list_head tiqn_list;
- struct list_head tiqn_tpg_list;
- spinlock_t tiqn_state_lock;
- spinlock_t tiqn_tpg_lock;
- struct se_wwn tiqn_wwn;
- struct iscsi_wwn_stat_grps tiqn_stat_grps;
- int tiqn_index;
- struct iscsi_sess_err_stats sess_err_stats;
- struct iscsi_login_stats login_stats;
- struct iscsi_logout_stats logout_stats;
-} ____cacheline_aligned;
-
-#define WWN_STAT_GRPS(tiqn) (&(tiqn)->tiqn_stat_grps)
-
-struct iscsit_global {
- /* In core shutdown */
- u32 in_shutdown;
- u32 active_ts;
- /* Unique identifier used for the authentication daemon */
- u32 auth_id;
- u32 inactive_ts;
- /* Thread Set bitmap count */
- int ts_bitmap_count;
- /* Thread Set bitmap pointer */
- unsigned long *ts_bitmap;
- /* Used for iSCSI discovery session authentication */
- struct iscsi_node_acl discovery_acl;
- struct iscsi_portal_group *discovery_tpg;
-};
-
-#endif /* ISCSI_TARGET_CORE_H */
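
The ISCSI_TPG_C()/ISCSI_TPG_S() cast macros deleted above get no direct replacement; later hunks in this series dereference typed members instead (for example sess->tpg in iscsi_target_erl0.c further down). A minimal before/after sketch of that pattern, where the struct names match the definitions above and everything else is illustrative:

#include <target/iscsi/iscsi_target_core.h>	/* current, typed definitions */

/* Before (sketch): the member was reached through a cast macro, so the
 * compiler could not check the conversion. */
#define DEMO_TPG_S(s)	((struct iscsi_portal_group *)(s)->tpg)

/* After: sess->tpg is declared with its real type, so a plain member
 * access is fully type-checked and the macro layer disappears. */
static struct iscsi_portal_group *demo_session_tpg(struct iscsit_session *sess)
{
	return sess->tpg;
}
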
diff --git a/drivers/target/iscsi/iscsi_target_datain_values.c b/drivers/target/iscsi/iscsi_target_datain_values.c
index 848fee768948..2d44781be3c6 100644
--- a/drivers/target/iscsi/iscsi_target_datain_values.c
+++ b/drivers/target/iscsi/iscsi_target_datain_values.c
@@ -1,26 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* This file contains the iSCSI Target DataIN value generation functions.
*
- * © Copyright 2007-2011 RisingTide Systems LLC.
- *
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ * (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
******************************************************************************/
+#include <linux/slab.h>
#include <scsi/iscsi_proto.h>
-
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_seq_pdu_list.h"
#include "iscsi_target_erl1.h"
#include "iscsi_target_util.h"
@@ -42,14 +32,14 @@ struct iscsi_datain_req *iscsit_allocate_datain_req(void)
return dr;
}
-void iscsit_attach_datain_req(struct iscsi_cmd *cmd, struct iscsi_datain_req *dr)
+void iscsit_attach_datain_req(struct iscsit_cmd *cmd, struct iscsi_datain_req *dr)
{
spin_lock(&cmd->datain_lock);
list_add_tail(&dr->cmd_datain_node, &cmd->datain_list);
spin_unlock(&cmd->datain_lock);
}
-void iscsit_free_datain_req(struct iscsi_cmd *cmd, struct iscsi_datain_req *dr)
+void iscsit_free_datain_req(struct iscsit_cmd *cmd, struct iscsi_datain_req *dr)
{
spin_lock(&cmd->datain_lock);
list_del(&dr->cmd_datain_node);
@@ -58,7 +48,7 @@ void iscsit_free_datain_req(struct iscsi_cmd *cmd, struct iscsi_datain_req *dr)
kmem_cache_free(lio_dr_cache, dr);
}
-void iscsit_free_all_datain_reqs(struct iscsi_cmd *cmd)
+void iscsit_free_all_datain_reqs(struct iscsit_cmd *cmd)
{
struct iscsi_datain_req *dr, *dr_tmp;
@@ -70,7 +60,7 @@ void iscsit_free_all_datain_reqs(struct iscsi_cmd *cmd)
spin_unlock(&cmd->datain_lock);
}
-struct iscsi_datain_req *iscsit_get_datain_req(struct iscsi_cmd *cmd)
+struct iscsi_datain_req *iscsit_get_datain_req(struct iscsit_cmd *cmd)
{
if (list_empty(&cmd->datain_list)) {
pr_err("cmd->datain_list is empty for ITT:"
@@ -86,11 +76,11 @@ struct iscsi_datain_req *iscsit_get_datain_req(struct iscsi_cmd *cmd)
* For Normal and Recovery DataSequenceInOrder=Yes and DataPDUInOrder=Yes.
*/
static struct iscsi_datain_req *iscsit_set_datain_values_yes_and_yes(
- struct iscsi_cmd *cmd,
+ struct iscsit_cmd *cmd,
struct iscsi_datain *datain)
{
u32 next_burst_len, read_data_done, read_data_left;
- struct iscsi_conn *conn = cmd->conn;
+ struct iscsit_conn *conn = cmd->conn;
struct iscsi_datain_req *dr;
dr = iscsit_get_datain_req(cmd);
@@ -184,11 +174,11 @@ static struct iscsi_datain_req *iscsit_set_datain_values_yes_and_yes(
* For Normal and Recovery DataSequenceInOrder=No and DataPDUInOrder=Yes.
*/
static struct iscsi_datain_req *iscsit_set_datain_values_no_and_yes(
- struct iscsi_cmd *cmd,
+ struct iscsit_cmd *cmd,
struct iscsi_datain *datain)
{
u32 offset, read_data_done, read_data_left, seq_send_order;
- struct iscsi_conn *conn = cmd->conn;
+ struct iscsit_conn *conn = cmd->conn;
struct iscsi_datain_req *dr;
struct iscsi_seq *seq;
@@ -305,11 +295,11 @@ static struct iscsi_datain_req *iscsit_set_datain_values_no_and_yes(
* For Normal and Recovery DataSequenceInOrder=Yes and DataPDUInOrder=No.
*/
static struct iscsi_datain_req *iscsit_set_datain_values_yes_and_no(
- struct iscsi_cmd *cmd,
+ struct iscsit_cmd *cmd,
struct iscsi_datain *datain)
{
u32 next_burst_len, read_data_done, read_data_left;
- struct iscsi_conn *conn = cmd->conn;
+ struct iscsit_conn *conn = cmd->conn;
struct iscsi_datain_req *dr;
struct iscsi_pdu *pdu;
@@ -404,11 +394,11 @@ static struct iscsi_datain_req *iscsit_set_datain_values_yes_and_no(
* For Normal and Recovery DataSequenceInOrder=No and DataPDUInOrder=No.
*/
static struct iscsi_datain_req *iscsit_set_datain_values_no_and_no(
- struct iscsi_cmd *cmd,
+ struct iscsit_cmd *cmd,
struct iscsi_datain *datain)
{
u32 read_data_done, read_data_left, seq_send_order;
- struct iscsi_conn *conn = cmd->conn;
+ struct iscsit_conn *conn = cmd->conn;
struct iscsi_datain_req *dr;
struct iscsi_pdu *pdu;
struct iscsi_seq *seq = NULL;
@@ -506,10 +496,10 @@ static struct iscsi_datain_req *iscsit_set_datain_values_no_and_no(
}
struct iscsi_datain_req *iscsit_get_datain_values(
- struct iscsi_cmd *cmd,
+ struct iscsit_cmd *cmd,
struct iscsi_datain *datain)
{
- struct iscsi_conn *conn = cmd->conn;
+ struct iscsit_conn *conn = cmd->conn;
if (conn->sess->sess_ops->DataSequenceInOrder &&
conn->sess->sess_ops->DataPDUInOrder)
@@ -526,3 +516,4 @@ struct iscsi_datain_req *iscsit_get_datain_values(
return NULL;
}
+EXPORT_SYMBOL(iscsit_get_datain_values);
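
EXPORT_SYMBOL() makes iscsit_get_datain_values() resolvable from separately built modules, such as an iSCSI offload transport. A hedged sketch of the consumer side; the wrapper function and its error handling are hypothetical, only the exported call itself is from the patch:

#include <linux/errno.h>
#include <linux/string.h>
#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_datain_values.h"	/* prototype, path as in this dir */

/* Hypothetical module code: the symbol resolves against iscsi_target_mod
 * when this module is loaded. */
static int example_next_datain(struct iscsit_cmd *cmd)
{
	struct iscsi_datain datain;
	struct iscsi_datain_req *dr;

	memset(&datain, 0, sizeof(datain));
	dr = iscsit_get_datain_values(cmd, &datain);
	if (!dr)
		return -EINVAL;

	/* datain now describes the next Data-In PDU to build. */
	return 0;
}
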
diff --git a/drivers/target/iscsi/iscsi_target_datain_values.h b/drivers/target/iscsi/iscsi_target_datain_values.h
index 646429ac5a02..b28df886d828 100644
--- a/drivers/target/iscsi/iscsi_target_datain_values.h
+++ b/drivers/target/iscsi/iscsi_target_datain_values.h
@@ -1,12 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ISCSI_TARGET_DATAIN_VALUES_H
#define ISCSI_TARGET_DATAIN_VALUES_H
+struct iscsit_cmd;
+struct iscsi_datain;
+
extern struct iscsi_datain_req *iscsit_allocate_datain_req(void);
-extern void iscsit_attach_datain_req(struct iscsi_cmd *, struct iscsi_datain_req *);
-extern void iscsit_free_datain_req(struct iscsi_cmd *, struct iscsi_datain_req *);
-extern void iscsit_free_all_datain_reqs(struct iscsi_cmd *);
-extern struct iscsi_datain_req *iscsit_get_datain_req(struct iscsi_cmd *);
-extern struct iscsi_datain_req *iscsit_get_datain_values(struct iscsi_cmd *,
+extern void iscsit_attach_datain_req(struct iscsit_cmd *, struct iscsi_datain_req *);
+extern void iscsit_free_datain_req(struct iscsit_cmd *, struct iscsi_datain_req *);
+extern void iscsit_free_all_datain_reqs(struct iscsit_cmd *);
+extern struct iscsi_datain_req *iscsit_get_datain_req(struct iscsit_cmd *);
+extern struct iscsi_datain_req *iscsit_get_datain_values(struct iscsit_cmd *,
struct iscsi_datain *);
#endif /*** ISCSI_TARGET_DATAIN_VALUES_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_device.c b/drivers/target/iscsi/iscsi_target_device.c
index 1b74033510a0..b565ce3b2677 100644
--- a/drivers/target/iscsi/iscsi_target_device.c
+++ b/drivers/target/iscsi/iscsi_target_device.c
@@ -1,34 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* This file contains the iSCSI Virtual Device and Disk Transport
* agnostic related functions.
*
- © Copyright 2007-2011 RisingTide Systems LLC.
- *
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ * (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
******************************************************************************/
-#include <scsi/scsi_device.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_device.h"
#include "iscsi_target_tpg.h"
#include "iscsi_target_util.h"
-void iscsit_determine_maxcmdsn(struct iscsi_session *sess)
+void iscsit_determine_maxcmdsn(struct iscsit_session *sess)
{
struct se_node_acl *se_nacl;
@@ -50,23 +39,19 @@ void iscsit_determine_maxcmdsn(struct iscsi_session *sess)
* core_set_queue_depth_for_node().
*/
sess->cmdsn_window = se_nacl->queue_depth;
- sess->max_cmd_sn = (sess->max_cmd_sn + se_nacl->queue_depth) - 1;
+ atomic_add(se_nacl->queue_depth - 1, &sess->max_cmd_sn);
}
-void iscsit_increment_maxcmdsn(struct iscsi_cmd *cmd, struct iscsi_session *sess)
+void iscsit_increment_maxcmdsn(struct iscsit_cmd *cmd, struct iscsit_session *sess)
{
+ u32 max_cmd_sn;
+
if (cmd->immediate_cmd || cmd->maxcmdsn_inc)
return;
cmd->maxcmdsn_inc = 1;
- if (!mutex_trylock(&sess->cmdsn_mutex)) {
- sess->max_cmd_sn += 1;
- pr_debug("Updated MaxCmdSN to 0x%08x\n", sess->max_cmd_sn);
- return;
- }
- sess->max_cmd_sn += 1;
- pr_debug("Updated MaxCmdSN to 0x%08x\n", sess->max_cmd_sn);
- mutex_unlock(&sess->cmdsn_mutex);
+ max_cmd_sn = atomic_inc_return(&sess->max_cmd_sn);
+ pr_debug("Updated MaxCmdSN to 0x%08x\n", max_cmd_sn);
}
EXPORT_SYMBOL(iscsit_increment_maxcmdsn);
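
The interesting change in this file: sess->max_cmd_sn turns from a u32 guarded (only opportunistically, via mutex_trylock()) by sess->cmdsn_mutex into an atomic_t, so the increment and the snapshot for the debug print collapse into a single lock-free atomic_inc_return(). The pattern in isolation, assuming only the kernel atomic API:

#include <linux/atomic.h>
#include <linux/types.h>

static atomic_t demo_max_cmd_sn = ATOMIC_INIT(0);

/* One atomic op replaces "lock; counter++; read counter; unlock"; the
 * return value is a post-increment snapshot, so a racing bump can never
 * produce a torn or repeated value in the caller's debug print. */
static u32 demo_increment_maxcmdsn(void)
{
	return atomic_inc_return(&demo_max_cmd_sn);
}

/* Mirrors iscsit_determine_maxcmdsn(): widen the window by depth - 1. */
static void demo_widen_window(int queue_depth)
{
	atomic_add(queue_depth - 1, &demo_max_cmd_sn);
}
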
diff --git a/drivers/target/iscsi/iscsi_target_device.h b/drivers/target/iscsi/iscsi_target_device.h
index a0e2df9e8090..366340120558 100644
--- a/drivers/target/iscsi/iscsi_target_device.h
+++ b/drivers/target/iscsi/iscsi_target_device.h
@@ -1,7 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ISCSI_TARGET_DEVICE_H
#define ISCSI_TARGET_DEVICE_H
-extern void iscsit_determine_maxcmdsn(struct iscsi_session *);
-extern void iscsit_increment_maxcmdsn(struct iscsi_cmd *, struct iscsi_session *);
+struct iscsit_cmd;
+struct iscsit_session;
+
+extern void iscsit_determine_maxcmdsn(struct iscsit_session *);
+extern void iscsit_increment_maxcmdsn(struct iscsit_cmd *, struct iscsit_session *);
#endif /* ISCSI_TARGET_DEVICE_H */
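
The header now forward-declares the two structures instead of relying on include order. Prototypes only take pointers, and pointers to incomplete types are legal, so the full definitions (and their transitive includes) stay out of every file that merely calls these functions. A reduced sketch of the idiom:

/* demo.h (sketch): a pointer to an incomplete type is legal in a
 * prototype, so no heavyweight #include is needed here. */
#ifndef DEMO_H
#define DEMO_H

struct iscsit_session;			/* forward declaration */

void demo_touch_session(struct iscsit_session *sess);

#endif /* DEMO_H */

/* demo.c: only the implementation needs the full definition. */
#include <target/iscsi/iscsi_target_core.h>
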
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index 08bd87833321..24db6b07493e 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -1,31 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/******************************************************************************
* This file contains error recovery level zero functions used by
* the iSCSI Target driver.
*
- * © Copyright 2007-2011 RisingTide Systems LLC.
- *
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ * (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
******************************************************************************/
+#include <linux/sched/signal.h>
+
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_seq_pdu_list.h"
-#include "iscsi_target_tq.h"
#include "iscsi_target_erl0.h"
#include "iscsi_target_erl1.h"
#include "iscsi_target_erl2.h"
@@ -33,24 +24,22 @@
#include "iscsi_target.h"
/*
- * Used to set values in struct iscsi_cmd that iscsit_dataout_check_sequence()
+ * Used to set values in struct iscsit_cmd that iscsit_dataout_check_sequence()
* checks against to determine a PDU's Offset+Length is within the current
* DataOUT Sequence. Used for DataSequenceInOrder=Yes only.
*/
void iscsit_set_dataout_sequence_values(
- struct iscsi_cmd *cmd)
+ struct iscsit_cmd *cmd)
{
- struct iscsi_conn *conn = cmd->conn;
+ struct iscsit_conn *conn = cmd->conn;
/*
* Still set seq_start_offset and seq_end_offset for Unsolicited
* DataOUT, even if DataSequenceInOrder=No.
*/
if (cmd->unsolicited_data) {
cmd->seq_start_offset = cmd->write_data_done;
- cmd->seq_end_offset = (cmd->write_data_done +
- ((cmd->se_cmd.data_length >
- conn->sess->sess_ops->FirstBurstLength) ?
- conn->sess->sess_ops->FirstBurstLength : cmd->se_cmd.data_length));
+ cmd->seq_end_offset = min(cmd->se_cmd.data_length,
+ conn->sess->sess_ops->FirstBurstLength);
return;
}
@@ -74,10 +63,10 @@ void iscsit_set_dataout_sequence_values(
}
static int iscsit_dataout_within_command_recovery_check(
- struct iscsi_cmd *cmd,
+ struct iscsit_cmd *cmd,
unsigned char *buf)
{
- struct iscsi_conn *conn = cmd->conn;
+ struct iscsit_conn *conn = cmd->conn;
struct iscsi_data *hdr = (struct iscsi_data *) buf;
u32 payload_length = ntoh24(hdr->dlength);
@@ -140,11 +129,11 @@ dump:
}
static int iscsit_dataout_check_unsolicited_sequence(
- struct iscsi_cmd *cmd,
+ struct iscsit_cmd *cmd,
unsigned char *buf)
{
u32 first_burst_len;
- struct iscsi_conn *conn = cmd->conn;
+ struct iscsit_conn *conn = cmd->conn;
struct iscsi_data *hdr = (struct iscsi_data *) buf;
u32 payload_length = ntoh24(hdr->dlength);
@@ -215,11 +204,11 @@ out:
}
static int iscsit_dataout_check_sequence(
- struct iscsi_cmd *cmd,
+ struct iscsit_cmd *cmd,
unsigned char *buf)
{
u32 next_burst_len;
- struct iscsi_conn *conn = cmd->conn;
+ struct iscsit_conn *conn = cmd->conn;
struct iscsi_seq *seq = NULL;
struct iscsi_data *hdr = (struct iscsi_data *) buf;
u32 payload_length = ntoh24(hdr->dlength);
@@ -344,12 +333,11 @@ out:
}
static int iscsit_dataout_check_datasn(
- struct iscsi_cmd *cmd,
+ struct iscsit_cmd *cmd,
unsigned char *buf)
{
- int dump = 0, recovery = 0;
u32 data_sn = 0;
- struct iscsi_conn *conn = cmd->conn;
+ struct iscsit_conn *conn = cmd->conn;
struct iscsi_data *hdr = (struct iscsi_data *) buf;
u32 payload_length = ntoh24(hdr->dlength);
@@ -372,13 +360,11 @@ static int iscsit_dataout_check_datasn(
pr_err("Command ITT: 0x%08x, received DataSN: 0x%08x"
" higher than expected 0x%08x.\n", cmd->init_task_tag,
be32_to_cpu(hdr->datasn), data_sn);
- recovery = 1;
goto recover;
} else if (be32_to_cpu(hdr->datasn) < data_sn) {
pr_err("Command ITT: 0x%08x, received DataSN: 0x%08x"
" lower than expected 0x%08x, discarding payload.\n",
cmd->init_task_tag, be32_to_cpu(hdr->datasn), data_sn);
- dump = 1;
goto dump;
}
@@ -394,22 +380,21 @@ dump:
if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
return DATAOUT_CANNOT_RECOVER;
- return (recovery || dump) ? DATAOUT_WITHIN_COMMAND_RECOVERY :
- DATAOUT_NORMAL;
+ return DATAOUT_WITHIN_COMMAND_RECOVERY;
}
static int iscsit_dataout_pre_datapduinorder_yes(
- struct iscsi_cmd *cmd,
+ struct iscsit_cmd *cmd,
unsigned char *buf)
{
int dump = 0, recovery = 0;
- struct iscsi_conn *conn = cmd->conn;
+ struct iscsit_conn *conn = cmd->conn;
struct iscsi_data *hdr = (struct iscsi_data *) buf;
u32 payload_length = ntoh24(hdr->dlength);
/*
* For DataSequenceInOrder=Yes: If the offset is greater than the global
- * DataPDUInOrder=Yes offset counter in struct iscsi_cmd a protcol error has
+ * DataPDUInOrder=Yes offset counter in struct iscsit_cmd a protocol error has
* occurred and fail the connection.
*
* For DataSequenceInOrder=No: If the offset is greater than the per
@@ -461,7 +446,7 @@ dump:
}
static int iscsit_dataout_pre_datapduinorder_no(
- struct iscsi_cmd *cmd,
+ struct iscsit_cmd *cmd,
unsigned char *buf)
{
struct iscsi_pdu *pdu;
@@ -492,7 +477,7 @@ static int iscsit_dataout_pre_datapduinorder_no(
return DATAOUT_NORMAL;
}
-static int iscsit_dataout_update_r2t(struct iscsi_cmd *cmd, u32 offset, u32 length)
+static int iscsit_dataout_update_r2t(struct iscsit_cmd *cmd, u32 offset, u32 length)
{
struct iscsi_r2t *r2t;
@@ -512,7 +497,7 @@ static int iscsit_dataout_update_r2t(struct iscsi_cmd *cmd, u32 offset, u32 leng
}
static int iscsit_dataout_update_datapduinorder_no(
- struct iscsi_cmd *cmd,
+ struct iscsit_cmd *cmd,
u32 data_sn,
int f_bit)
{
@@ -545,11 +530,11 @@ static int iscsit_dataout_update_datapduinorder_no(
}
static int iscsit_dataout_post_crc_passed(
- struct iscsi_cmd *cmd,
+ struct iscsit_cmd *cmd,
unsigned char *buf)
{
int ret, send_r2t = 0;
- struct iscsi_conn *conn = cmd->conn;
+ struct iscsit_conn *conn = cmd->conn;
struct iscsi_seq *seq = NULL;
struct iscsi_data *hdr = (struct iscsi_data *) buf;
u32 payload_length = ntoh24(hdr->dlength);
@@ -656,10 +641,10 @@ static int iscsit_dataout_post_crc_passed(
}
static int iscsit_dataout_post_crc_failed(
- struct iscsi_cmd *cmd,
+ struct iscsit_cmd *cmd,
unsigned char *buf)
{
- struct iscsi_conn *conn = cmd->conn;
+ struct iscsit_conn *conn = cmd->conn;
struct iscsi_pdu *pdu;
struct iscsi_data *hdr = (struct iscsi_data *) buf;
u32 payload_length = ntoh24(hdr->dlength);
@@ -694,11 +679,11 @@ recover:
* and CRC computed.
*/
int iscsit_check_pre_dataout(
- struct iscsi_cmd *cmd,
+ struct iscsit_cmd *cmd,
unsigned char *buf)
{
int ret;
- struct iscsi_conn *conn = cmd->conn;
+ struct iscsit_conn *conn = cmd->conn;
ret = iscsit_dataout_within_command_recovery_check(cmd, buf);
if ((ret == DATAOUT_WITHIN_COMMAND_RECOVERY) ||
@@ -732,11 +717,11 @@ int iscsit_check_pre_dataout(
* and CRC computed.
*/
int iscsit_check_post_dataout(
- struct iscsi_cmd *cmd,
+ struct iscsit_cmd *cmd,
unsigned char *buf,
u8 data_crc_failed)
{
- struct iscsi_conn *conn = cmd->conn;
+ struct iscsit_conn *conn = cmd->conn;
cmd->dataout_timeout_retries = 0;
@@ -756,10 +741,11 @@ int iscsit_check_post_dataout(
}
}
-static void iscsit_handle_time2retain_timeout(unsigned long data)
+void iscsit_handle_time2retain_timeout(struct timer_list *t)
{
- struct iscsi_session *sess = (struct iscsi_session *) data;
- struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
+ struct iscsit_session *sess = timer_container_of(sess, t,
+ time2retain_timer);
+ struct iscsi_portal_group *tpg = sess->tpg;
struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
spin_lock_bh(&se_tpg->session_lock);
@@ -777,35 +763,22 @@ static void iscsit_handle_time2retain_timeout(unsigned long data)
pr_err("Time2Retain timer expired for SID: %u, cleaning up"
" iSCSI session.\n", sess->sid);
- {
- struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
-
- if (tiqn) {
- spin_lock(&tiqn->sess_err_stats.lock);
- strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
- (void *)sess->sess_ops->InitiatorName);
- tiqn->sess_err_stats.last_sess_failure_type =
- ISCSI_SESS_ERR_CXN_TIMEOUT;
- tiqn->sess_err_stats.cxn_timeout_errors++;
- sess->conn_timeout_errors++;
- spin_unlock(&tiqn->sess_err_stats.lock);
- }
- }
+ iscsit_fill_cxn_timeout_err_stats(sess);
spin_unlock_bh(&se_tpg->session_lock);
- target_put_session(sess->se_sess);
+ iscsit_close_session(sess, false);
}
-void iscsit_start_time2retain_handler(struct iscsi_session *sess)
+void iscsit_start_time2retain_handler(struct iscsit_session *sess)
{
int tpg_active;
/*
* Only start Time2Retain timer when the associated TPG is still in
* an ACTIVE (eg: not disabled or shutdown) state.
*/
- spin_lock(&ISCSI_TPG_S(sess)->tpg_state_lock);
- tpg_active = (ISCSI_TPG_S(sess)->tpg_state == TPG_STATE_ACTIVE);
- spin_unlock(&ISCSI_TPG_S(sess)->tpg_state_lock);
+ spin_lock(&sess->tpg->tpg_state_lock);
+ tpg_active = (sess->tpg->tpg_state == TPG_STATE_ACTIVE);
+ spin_unlock(&sess->tpg->tpg_state_lock);
if (!tpg_active)
return;
@@ -816,24 +789,19 @@ void iscsit_start_time2retain_handler(struct iscsi_session *sess)
pr_debug("Starting Time2Retain timer for %u seconds on"
" SID: %u\n", sess->sess_ops->DefaultTime2Retain, sess->sid);
- init_timer(&sess->time2retain_timer);
- sess->time2retain_timer.expires =
- (get_jiffies_64() + sess->sess_ops->DefaultTime2Retain * HZ);
- sess->time2retain_timer.data = (unsigned long)sess;
- sess->time2retain_timer.function = iscsit_handle_time2retain_timeout;
sess->time2retain_timer_flags &= ~ISCSI_TF_STOP;
sess->time2retain_timer_flags |= ISCSI_TF_RUNNING;
- add_timer(&sess->time2retain_timer);
+ mod_timer(&sess->time2retain_timer,
+ jiffies + sess->sess_ops->DefaultTime2Retain * HZ);
}
-/*
- * Called with spin_lock_bh(&struct se_portal_group->session_lock) held
- */
-int iscsit_stop_time2retain_timer(struct iscsi_session *sess)
+int iscsit_stop_time2retain_timer(struct iscsit_session *sess)
{
- struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
+ struct iscsi_portal_group *tpg = sess->tpg;
struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
+ lockdep_assert_held(&se_tpg->session_lock);
+
if (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)
return -1;
@@ -843,7 +811,7 @@ int iscsit_stop_time2retain_timer(struct iscsi_session *sess)
sess->time2retain_timer_flags |= ISCSI_TF_STOP;
spin_unlock(&se_tpg->session_lock);
- del_timer_sync(&sess->time2retain_timer);
+ timer_delete_sync(&sess->time2retain_timer);
spin_lock(&se_tpg->session_lock);
sess->time2retain_timer_flags &= ~ISCSI_TF_RUNNING;
@@ -852,7 +820,7 @@ int iscsit_stop_time2retain_timer(struct iscsi_session *sess)
return 0;
}
-void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *conn)
+void iscsit_connection_reinstatement_rcfr(struct iscsit_conn *conn)
{
spin_lock_bh(&conn->state_lock);
if (atomic_read(&conn->connection_exit)) {
@@ -866,14 +834,17 @@ void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *conn)
}
spin_unlock_bh(&conn->state_lock);
- iscsi_thread_set_force_reinstatement(conn);
+ if (conn->tx_thread && conn->tx_thread_active)
+ send_sig(SIGINT, conn->tx_thread, 1);
+ if (conn->rx_thread && conn->rx_thread_active)
+ send_sig(SIGINT, conn->rx_thread, 1);
sleep:
wait_for_completion(&conn->conn_wait_rcfr_comp);
complete(&conn->conn_post_wait_comp);
}
-void iscsit_cause_connection_reinstatement(struct iscsi_conn *conn, int sleep)
+void iscsit_cause_connection_reinstatement(struct iscsit_conn *conn, int sleep)
{
spin_lock_bh(&conn->state_lock);
if (atomic_read(&conn->connection_exit)) {
@@ -891,10 +862,10 @@ void iscsit_cause_connection_reinstatement(struct iscsi_conn *conn, int sleep)
return;
}
- if (iscsi_thread_set_force_reinstatement(conn) < 0) {
- spin_unlock_bh(&conn->state_lock);
- return;
- }
+ if (conn->tx_thread && conn->tx_thread_active)
+ send_sig(SIGINT, conn->tx_thread, 1);
+ if (conn->rx_thread && conn->rx_thread_active)
+ send_sig(SIGINT, conn->rx_thread, 1);
atomic_set(&conn->connection_reinstatement, 1);
if (!sleep) {
@@ -910,7 +881,7 @@ void iscsit_cause_connection_reinstatement(struct iscsi_conn *conn, int sleep)
}
EXPORT_SYMBOL(iscsit_cause_connection_reinstatement);
-void iscsit_fall_back_to_erl0(struct iscsi_session *sess)
+void iscsit_fall_back_to_erl0(struct iscsit_session *sess)
{
pr_debug("Falling back to ErrorRecoveryLevel=0 for SID:"
" %u\n", sess->sid);
@@ -918,9 +889,9 @@ void iscsit_fall_back_to_erl0(struct iscsi_session *sess)
atomic_set(&sess->session_fall_back_to_erl0, 1);
}
-static void iscsit_handle_connection_cleanup(struct iscsi_conn *conn)
+static void iscsit_handle_connection_cleanup(struct iscsit_conn *conn)
{
- struct iscsi_session *sess = conn->sess;
+ struct iscsit_session *sess = conn->sess;
if ((sess->sess_ops->ErrorRecoveryLevel == 2) &&
!atomic_read(&sess->session_reinstatement) &&
@@ -934,8 +905,10 @@ static void iscsit_handle_connection_cleanup(struct iscsi_conn *conn)
}
}
-void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
+void iscsit_take_action_for_connection_exit(struct iscsit_conn *conn, bool *conn_freed)
{
+ *conn_freed = false;
+
spin_lock_bh(&conn->state_lock);
if (atomic_read(&conn->connection_exit)) {
spin_unlock_bh(&conn->state_lock);
@@ -946,6 +919,7 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
spin_unlock_bh(&conn->state_lock);
iscsit_close_connection(conn);
+ *conn_freed = true;
return;
}
@@ -959,57 +933,5 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
spin_unlock_bh(&conn->state_lock);
iscsit_handle_connection_cleanup(conn);
-}
-
-/*
- * This is the simple function that makes the magic of
- * sync and steering happen in the follow paradoxical order:
- *
- * 0) Receive conn->of_marker (bytes left until next OFMarker)
- * bytes into an offload buffer. When we pass the exact number
- * of bytes in conn->of_marker, iscsit_dump_data_payload() and hence
- * rx_data() will automatically receive the identical u32 marker
- * values and store it in conn->of_marker_offset;
- * 1) Now conn->of_marker_offset will contain the offset to the start
- * of the next iSCSI PDU. Dump these remaining bytes into another
- * offload buffer.
- * 2) We are done!
- * Next byte in the TCP stream will contain the next iSCSI PDU!
- * Cool Huh?!
- */
-int iscsit_recover_from_unknown_opcode(struct iscsi_conn *conn)
-{
- /*
- * Make sure the remaining bytes to next maker is a sane value.
- */
- if (conn->of_marker > (conn->conn_ops->OFMarkInt * 4)) {
- pr_err("Remaining bytes to OFMarker: %u exceeds"
- " OFMarkInt bytes: %u.\n", conn->of_marker,
- conn->conn_ops->OFMarkInt * 4);
- return -1;
- }
-
- pr_debug("Advancing %u bytes in TCP stream to get to the"
- " next OFMarker.\n", conn->of_marker);
-
- if (iscsit_dump_data_payload(conn, conn->of_marker, 0) < 0)
- return -1;
-
- /*
- * Make sure the offset marker we retrived is a valid value.
- */
- if (conn->of_marker_offset > (ISCSI_HDR_LEN + (ISCSI_CRC_LEN * 2) +
- conn->conn_ops->MaxRecvDataSegmentLength)) {
- pr_err("OfMarker offset value: %u exceeds limit.\n",
- conn->of_marker_offset);
- return -1;
- }
-
- pr_debug("Discarding %u bytes of TCP stream to get to the"
- " next iSCSI Opcode.\n", conn->of_marker_offset);
-
- if (iscsit_dump_data_payload(conn, conn->of_marker_offset, 0) < 0)
- return -1;
-
- return 0;
+ *conn_freed = true;
}
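
With the old thread-set layer gone (the iscsi_target_tq.h include and the iscsi_thread_set_force_reinstatement() calls are removed above), reinstatement pokes the per-connection rx/tx kthreads directly: SIGINT knocks them out of blocking socket I/O so they can re-check connection state. A sketch of both sides of that handshake; all demo_ names are hypothetical:

#include <linux/kthread.h>
#include <linux/sched/signal.h>

/* Receive-thread side: kthreads ignore signals unless explicitly allowed. */
static int demo_rx_thread(void *arg)
{
	allow_signal(SIGINT);

	while (!kthread_should_stop()) {
		/* blocking sock_recvmsg() elided */
		if (signal_pending(current)) {
			flush_signals(current);
			break;		/* reinstatement requested */
		}
	}
	return 0;
}

/* Requesting side: mirrors the conn->rx_thread/tx_thread_active checks
 * in the hunks above. */
static void demo_kick(struct task_struct *task, bool thread_active)
{
	if (task && thread_active)
		send_sig(SIGINT, task, 1);
}
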
diff --git a/drivers/target/iscsi/iscsi_target_erl0.h b/drivers/target/iscsi/iscsi_target_erl0.h
index 21acc9a06376..2a877d13977d 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.h
+++ b/drivers/target/iscsi/iscsi_target_erl0.h
@@ -1,15 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ISCSI_TARGET_ERL0_H
#define ISCSI_TARGET_ERL0_H
-extern void iscsit_set_dataout_sequence_values(struct iscsi_cmd *);
-extern int iscsit_check_pre_dataout(struct iscsi_cmd *, unsigned char *);
-extern int iscsit_check_post_dataout(struct iscsi_cmd *, unsigned char *, u8);
-extern void iscsit_start_time2retain_handler(struct iscsi_session *);
-extern int iscsit_stop_time2retain_timer(struct iscsi_session *);
-extern void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *);
-extern void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int);
-extern void iscsit_fall_back_to_erl0(struct iscsi_session *);
-extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *);
-extern int iscsit_recover_from_unknown_opcode(struct iscsi_conn *);
+#include <linux/types.h>
+
+struct iscsit_cmd;
+struct iscsit_conn;
+struct iscsit_session;
+
+extern void iscsit_set_dataout_sequence_values(struct iscsit_cmd *);
+extern int iscsit_check_pre_dataout(struct iscsit_cmd *, unsigned char *);
+extern int iscsit_check_post_dataout(struct iscsit_cmd *, unsigned char *, u8);
+extern void iscsit_start_time2retain_handler(struct iscsit_session *);
+extern void iscsit_handle_time2retain_timeout(struct timer_list *t);
+extern int iscsit_stop_time2retain_timer(struct iscsit_session *);
+extern void iscsit_connection_reinstatement_rcfr(struct iscsit_conn *);
+extern void iscsit_cause_connection_reinstatement(struct iscsit_conn *, int);
+extern void iscsit_fall_back_to_erl0(struct iscsit_session *);
+extern void iscsit_take_action_for_connection_exit(struct iscsit_conn *, bool *);
#endif /*** ISCSI_TARGET_ERL0_H ***/
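
The new bool * out-parameter on iscsit_take_action_for_connection_exit() lets the callee tell the caller whether it actually freed the connection, so no caller dereferences conn after a path that destroyed it. The shape of that contract as a sketch; the demo_ helpers are hypothetical stand-ins:

#include <linux/types.h>

struct iscsit_conn;

extern void iscsit_dec_conn_usage_count(struct iscsit_conn *); /* real helper */
static bool demo_in_logout(struct iscsit_conn *conn);          /* hypothetical */
static void demo_close_connection(struct iscsit_conn *conn);   /* hypothetical */

/* Callee: set *conn_freed before any early return, and to true on every
 * path that tears the connection down. */
static void demo_connection_exit(struct iscsit_conn *conn, bool *conn_freed)
{
	*conn_freed = false;

	if (demo_in_logout(conn)) {
		demo_close_connection(conn);	/* conn invalid from here on */
		*conn_freed = true;
	}
}

/* Caller: only touch conn again on the path where it survived. */
static void demo_caller(struct iscsit_conn *conn)
{
	bool conn_freed;

	demo_connection_exit(conn, &conn_freed);
	if (!conn_freed)
		iscsit_dec_conn_usage_count(conn);
}
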
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index 586c268679a4..d8ca06e697d6 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -1,30 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* This file contains error recovery level one used by the iSCSI Target driver.
*
- * © Copyright 2007-2011 RisingTide Systems LLC.
- *
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ * (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
******************************************************************************/
#include <linux/list.h>
+#include <linux/slab.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_seq_pdu_list.h"
#include "iscsi_target_datain_values.h"
#include "iscsi_target_device.h"
@@ -35,7 +26,7 @@
#include "iscsi_target_erl2.h"
#include "iscsi_target.h"
-#define OFFLOAD_BUF_SIZE 32768
+#define OFFLOAD_BUF_SIZE 32768U
/*
* Used to dump excess datain payload for certain error recovery
@@ -45,19 +36,25 @@
* to be dumped.
*/
int iscsit_dump_data_payload(
- struct iscsi_conn *conn,
+ struct iscsit_conn *conn,
u32 buf_len,
int dump_padding_digest)
{
- char *buf, pad_bytes[4];
+ char *buf;
int ret = DATAOUT_WITHIN_COMMAND_RECOVERY, rx_got;
- u32 length, padding, offset = 0, size;
+ u32 length, offset = 0, size;
struct kvec iov;
if (conn->sess->sess_ops->RDMAExtensions)
return 0;
- length = (buf_len > OFFLOAD_BUF_SIZE) ? OFFLOAD_BUF_SIZE : buf_len;
+ if (dump_padding_digest) {
+ buf_len = ALIGN(buf_len, 4);
+ if (conn->conn_ops->DataDigest)
+ buf_len += ISCSI_CRC_LEN;
+ }
+
+ length = min(buf_len, OFFLOAD_BUF_SIZE);
buf = kzalloc(length, GFP_ATOMIC);
if (!buf) {
@@ -68,8 +65,7 @@ int iscsit_dump_data_payload(
memset(&iov, 0, sizeof(struct kvec));
while (offset < buf_len) {
- size = ((offset + length) > buf_len) ?
- (buf_len - offset) : length;
+ size = min(buf_len - offset, length);
iov.iov_len = size;
iov.iov_base = buf;
@@ -77,41 +73,12 @@ int iscsit_dump_data_payload(
rx_got = rx_data(conn, &iov, 1, size);
if (rx_got != size) {
ret = DATAOUT_CANNOT_RECOVER;
- goto out;
+ break;
}
offset += size;
}
- if (!dump_padding_digest)
- goto out;
-
- padding = ((-buf_len) & 3);
- if (padding != 0) {
- iov.iov_len = padding;
- iov.iov_base = pad_bytes;
-
- rx_got = rx_data(conn, &iov, 1, padding);
- if (rx_got != padding) {
- ret = DATAOUT_CANNOT_RECOVER;
- goto out;
- }
- }
-
- if (conn->conn_ops->DataDigest) {
- u32 data_crc;
-
- iov.iov_len = ISCSI_CRC_LEN;
- iov.iov_base = &data_crc;
-
- rx_got = rx_data(conn, &iov, 1, ISCSI_CRC_LEN);
- if (rx_got != ISCSI_CRC_LEN) {
- ret = DATAOUT_CANNOT_RECOVER;
- goto out;
- }
- }
-
-out:
kfree(buf);
return ret;
}
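
The rewrite above folds the formerly separate pad-byte and DataDigest receive tails into buf_len before the drain loop starts: ALIGN(buf_len, 4) rounds the payload up to the iSCSI 4-byte word boundary, ISCSI_CRC_LEN (4 bytes) accounts for the digest, and min() sizes both the bounce buffer and every read. The arithmetic isolated into a sketch:

#include <linux/kernel.h>	/* ALIGN() */
#include <linux/minmax.h>
#include <linux/types.h>

#define DEMO_BOUNCE_SIZE	32768U

/* Total bytes on the wire for a buf_len-byte payload being discarded. */
static u32 demo_wire_len(u32 buf_len, bool data_digest)
{
	buf_len = ALIGN(buf_len, 4);	/* e.g. 10 -> 12, pad included */
	if (data_digest)
		buf_len += 4;		/* ISCSI_CRC_LEN */
	return buf_len;
}

/* Drain-loop shape: never read past buf_len, never more than one buffer. */
static u32 demo_next_chunk(u32 buf_len, u32 offset)
{
	return min(buf_len - offset, DEMO_BOUNCE_SIZE);
}
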
@@ -120,7 +87,7 @@ out:
* Used for retransmitting R2Ts from a R2T SNACK request.
*/
static int iscsit_send_recovery_r2t_for_snack(
- struct iscsi_cmd *cmd,
+ struct iscsit_cmd *cmd,
struct iscsi_r2t *r2t)
{
/*
@@ -142,7 +109,7 @@ static int iscsit_send_recovery_r2t_for_snack(
}
static int iscsit_handle_r2t_snack(
- struct iscsi_cmd *cmd,
+ struct iscsit_cmd *cmd,
unsigned char *buf,
u32 begrun,
u32 runlength)
@@ -162,8 +129,7 @@ static int iscsit_handle_r2t_snack(
" protocol error.\n", cmd->init_task_tag, begrun,
(begrun + runlength), cmd->acked_data_sn);
- return iscsit_reject_cmd(cmd,
- ISCSI_REASON_PROTOCOL_ERROR, buf);
+ return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf);
}
if (runlength) {
@@ -201,13 +167,13 @@ static int iscsit_handle_r2t_snack(
* FIXME: How is this handled for a RData SNACK?
*/
int iscsit_create_recovery_datain_values_datasequenceinorder_yes(
- struct iscsi_cmd *cmd,
+ struct iscsit_cmd *cmd,
struct iscsi_datain_req *dr)
{
u32 data_sn = 0, data_sn_count = 0;
u32 pdu_start = 0, seq_no = 0;
u32 begrun = dr->begrun;
- struct iscsi_conn *conn = cmd->conn;
+ struct iscsit_conn *conn = cmd->conn;
while (begrun > data_sn++) {
data_sn_count++;
@@ -247,18 +213,18 @@ int iscsit_create_recovery_datain_values_datasequenceinorder_yes(
* FIXME: How is this handled for a RData SNACK?
*/
int iscsit_create_recovery_datain_values_datasequenceinorder_no(
- struct iscsi_cmd *cmd,
+ struct iscsit_cmd *cmd,
struct iscsi_datain_req *dr)
{
int found_seq = 0, i;
u32 data_sn, read_data_done = 0, seq_send_order = 0;
u32 begrun = dr->begrun;
u32 runlength = dr->runlength;
- struct iscsi_conn *conn = cmd->conn;
+ struct iscsit_conn *conn = cmd->conn;
struct iscsi_seq *first_seq = NULL, *seq = NULL;
if (!cmd->seq_list) {
- pr_err("struct iscsi_cmd->seq_list is NULL!\n");
+ pr_err("struct iscsit_cmd->seq_list is NULL!\n");
return -1;
}
@@ -405,12 +371,12 @@ done:
}
static int iscsit_handle_recovery_datain(
- struct iscsi_cmd *cmd,
+ struct iscsit_cmd *cmd,
unsigned char *buf,
u32 begrun,
u32 runlength)
{
- struct iscsi_conn *conn = cmd->conn;
+ struct iscsit_conn *conn = cmd->conn;
struct iscsi_datain_req *dr;
struct se_cmd *se_cmd = &cmd->se_cmd;
@@ -466,14 +432,14 @@ static int iscsit_handle_recovery_datain(
}
int iscsit_handle_recovery_datain_or_r2t(
- struct iscsi_conn *conn,
+ struct iscsit_conn *conn,
unsigned char *buf,
itt_t init_task_tag,
u32 targ_xfer_tag,
u32 begrun,
u32 runlength)
{
- struct iscsi_cmd *cmd;
+ struct iscsit_cmd *cmd;
cmd = iscsit_find_cmd_from_itt(conn, init_task_tag);
if (!cmd)
@@ -499,17 +465,19 @@ int iscsit_handle_recovery_datain_or_r2t(
/* #warning FIXME: Status SNACK needs to be dependent on OPCODE!!! */
int iscsit_handle_status_snack(
- struct iscsi_conn *conn,
+ struct iscsit_conn *conn,
itt_t init_task_tag,
u32 targ_xfer_tag,
u32 begrun,
u32 runlength)
{
- struct iscsi_cmd *cmd = NULL;
+ struct iscsit_cmd *cmd = NULL;
u32 last_statsn;
int found_cmd;
- if (conn->exp_statsn > begrun) {
+ if (!begrun) {
+ begrun = conn->exp_statsn;
+ } else if (conn->exp_statsn > begrun) {
pr_err("Got Status SNACK Begrun: 0x%08x, RunLength:"
" 0x%08x but already got ExpStatSN: 0x%08x on CID:"
" %hu.\n", begrun, runlength, conn->exp_statsn,
@@ -561,12 +529,12 @@ int iscsit_handle_status_snack(
}
int iscsit_handle_data_ack(
- struct iscsi_conn *conn,
+ struct iscsit_conn *conn,
u32 targ_xfer_tag,
u32 begrun,
u32 runlength)
{
- struct iscsi_cmd *cmd = NULL;
+ struct iscsit_cmd *cmd = NULL;
cmd = iscsit_find_cmd_from_ttt(conn, targ_xfer_tag);
if (!cmd) {
@@ -597,7 +565,7 @@ int iscsit_handle_data_ack(
}
static int iscsit_send_recovery_r2t(
- struct iscsi_cmd *cmd,
+ struct iscsit_cmd *cmd,
u32 offset,
u32 xfer_len)
{
@@ -611,12 +579,12 @@ static int iscsit_send_recovery_r2t(
}
int iscsit_dataout_datapduinorder_no_fbit(
- struct iscsi_cmd *cmd,
+ struct iscsit_cmd *cmd,
struct iscsi_pdu *pdu)
{
int i, send_recovery_r2t = 0, recovery = 0;
- u32 length = 0, offset = 0, pdu_count = 0, xfer_len = 0;
- struct iscsi_conn *conn = cmd->conn;
+ u32 length = 0, offset = 0, pdu_count = 0;
+ struct iscsit_conn *conn = cmd->conn;
struct iscsi_pdu *first_pdu = NULL;
/*
@@ -628,8 +596,7 @@ int iscsit_dataout_datapduinorder_no_fbit(
if (cmd->pdu_list[i].seq_no == pdu->seq_no) {
if (!first_pdu)
first_pdu = &cmd->pdu_list[i];
- xfer_len += cmd->pdu_list[i].length;
- pdu_count++;
+ pdu_count++;
} else if (pdu_count)
break;
}
@@ -687,14 +654,14 @@ int iscsit_dataout_datapduinorder_no_fbit(
}
static int iscsit_recalculate_dataout_values(
- struct iscsi_cmd *cmd,
+ struct iscsit_cmd *cmd,
u32 pdu_offset,
u32 pdu_length,
u32 *r2t_offset,
u32 *r2t_length)
{
int i;
- struct iscsi_conn *conn = cmd->conn;
+ struct iscsit_conn *conn = cmd->conn;
struct iscsi_pdu *pdu = NULL;
if (conn->sess->sess_ops->DataSequenceInOrder) {
@@ -764,7 +731,7 @@ static int iscsit_recalculate_dataout_values(
}
int iscsit_recover_dataout_sequence(
- struct iscsi_cmd *cmd,
+ struct iscsit_cmd *cmd,
u32 pdu_offset,
u32 pdu_length)
{
@@ -798,14 +765,14 @@ static struct iscsi_ooo_cmdsn *iscsit_allocate_ooo_cmdsn(void)
return ooo_cmdsn;
}
-/*
- * Called with sess->cmdsn_mutex held.
- */
static int iscsit_attach_ooo_cmdsn(
- struct iscsi_session *sess,
+ struct iscsit_session *sess,
struct iscsi_ooo_cmdsn *ooo_cmdsn)
{
struct iscsi_ooo_cmdsn *ooo_tail, *ooo_tmp;
+
+ lockdep_assert_held(&sess->cmdsn_mutex);
+
/*
* We attach the struct iscsi_ooo_cmdsn entry to the out of order
* list in increasing CmdSN order.
@@ -847,20 +814,20 @@ static int iscsit_attach_ooo_cmdsn(
/*
* Removes an struct iscsi_ooo_cmdsn from a session's list,
- * called with struct iscsi_session->cmdsn_mutex held.
+ * called with struct iscsit_session->cmdsn_mutex held.
*/
void iscsit_remove_ooo_cmdsn(
- struct iscsi_session *sess,
+ struct iscsit_session *sess,
struct iscsi_ooo_cmdsn *ooo_cmdsn)
{
list_del(&ooo_cmdsn->ooo_list);
kmem_cache_free(lio_ooo_cache, ooo_cmdsn);
}
-void iscsit_clear_ooo_cmdsns_for_conn(struct iscsi_conn *conn)
+void iscsit_clear_ooo_cmdsns_for_conn(struct iscsit_conn *conn)
{
struct iscsi_ooo_cmdsn *ooo_cmdsn;
- struct iscsi_session *sess = conn->sess;
+ struct iscsit_session *sess = conn->sess;
mutex_lock(&sess->cmdsn_mutex);
list_for_each_entry(ooo_cmdsn, &sess->sess_ooo_cmdsn_list, ooo_list) {
@@ -872,15 +839,14 @@ void iscsit_clear_ooo_cmdsns_for_conn(struct iscsi_conn *conn)
mutex_unlock(&sess->cmdsn_mutex);
}
-/*
- * Called with sess->cmdsn_mutex held.
- */
-int iscsit_execute_ooo_cmdsns(struct iscsi_session *sess)
+int iscsit_execute_ooo_cmdsns(struct iscsit_session *sess)
{
int ooo_count = 0;
- struct iscsi_cmd *cmd = NULL;
+ struct iscsit_cmd *cmd = NULL;
struct iscsi_ooo_cmdsn *ooo_cmdsn, *ooo_cmdsn_tmp;
+ lockdep_assert_held(&sess->cmdsn_mutex);
+
list_for_each_entry_safe(ooo_cmdsn, ooo_cmdsn_tmp,
&sess->sess_ooo_cmdsn_list, ooo_list) {
if (ooo_cmdsn->cmdsn != sess->exp_cmd_sn)
@@ -904,8 +870,6 @@ int iscsit_execute_ooo_cmdsns(struct iscsi_session *sess)
if (iscsit_execute_cmd(cmd, 1) < 0)
return -1;
-
- continue;
}
return ooo_count;
@@ -919,10 +883,10 @@ int iscsit_execute_ooo_cmdsns(struct iscsi_session *sess)
* 2. With no locks held directly from iscsi_handle_XXX_pdu() functions
* for immediate commands.
*/
-int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
+int iscsit_execute_cmd(struct iscsit_cmd *cmd, int ooo)
{
struct se_cmd *se_cmd = &cmd->se_cmd;
- struct iscsi_conn *conn = cmd->conn;
+ struct iscsit_conn *conn = cmd->conn;
int lr = 0;
spin_lock_bh(&cmd->istate_lock);
@@ -944,20 +908,8 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
return 0;
}
spin_unlock_bh(&cmd->istate_lock);
- /*
- * Determine if delayed TASK_ABORTED status for WRITEs
- * should be sent now if no unsolicited data out
- * payloads are expected, or if the delayed status
- * should be sent after unsolicited data out with
- * ISCSI_FLAG_CMD_FINAL set in iscsi_handle_data_out()
- */
- if (transport_check_aborted_status(se_cmd,
- (cmd->unsolicited_data == 0)) != 0)
+ if (cmd->se_cmd.transport_state & CMD_T_ABORTED)
return 0;
- /*
- * Otherwise send CHECK_CONDITION and sense for
- * exception
- */
return transport_send_check_condition_and_sense(se_cmd,
cmd->sense_reason, 0);
}
@@ -975,13 +927,7 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
if (!(cmd->cmd_flags &
ICF_NON_IMMEDIATE_UNSOLICITED_DATA)) {
- /*
- * Send the delayed TASK_ABORTED status for
- * WRITEs if no more unsolicitied data is
- * expected.
- */
- if (transport_check_aborted_status(se_cmd, 1)
- != 0)
+ if (cmd->se_cmd.transport_state & CMD_T_ABORTED)
return 0;
iscsit_set_dataout_sequence_values(cmd);
@@ -996,16 +942,12 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
if ((cmd->data_direction == DMA_TO_DEVICE) &&
!(cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA)) {
- /*
- * Send the delayed TASK_ABORTED status for WRITEs if
- * no more nsolicitied data is expected.
- */
- if (transport_check_aborted_status(se_cmd, 1) != 0)
+ if (cmd->se_cmd.transport_state & CMD_T_ABORTED)
return 0;
- iscsit_set_unsoliticed_dataout(cmd);
+ iscsit_set_unsolicited_dataout(cmd);
}
- return transport_handle_cdb_direct(&cmd->se_cmd);
+ return target_submit(&cmd->se_cmd);
case ISCSI_OP_NOOP_OUT:
case ISCSI_OP_TEXT:
@@ -1051,7 +993,7 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
return 0;
}
-void iscsit_free_all_ooo_cmdsns(struct iscsi_session *sess)
+void iscsit_free_all_ooo_cmdsns(struct iscsit_session *sess)
{
struct iscsi_ooo_cmdsn *ooo_cmdsn, *ooo_cmdsn_tmp;
@@ -1066,8 +1008,8 @@ void iscsit_free_all_ooo_cmdsns(struct iscsi_session *sess)
}
int iscsit_handle_ooo_cmdsn(
- struct iscsi_session *sess,
- struct iscsi_cmd *cmd,
+ struct iscsit_session *sess,
+ struct iscsit_cmd *cmd,
u32 cmdsn)
{
int batch = 0;
@@ -1106,11 +1048,11 @@ int iscsit_handle_ooo_cmdsn(
}
static int iscsit_set_dataout_timeout_values(
- struct iscsi_cmd *cmd,
+ struct iscsit_cmd *cmd,
u32 *offset,
u32 *length)
{
- struct iscsi_conn *conn = cmd->conn;
+ struct iscsit_conn *conn = cmd->conn;
struct iscsi_r2t *r2t;
if (cmd->unsolicited_data) {
@@ -1148,13 +1090,13 @@ static int iscsit_set_dataout_timeout_values(
/*
* NOTE: Called from interrupt (timer) context.
*/
-static void iscsit_handle_dataout_timeout(unsigned long data)
+void iscsit_handle_dataout_timeout(struct timer_list *t)
{
u32 pdu_length = 0, pdu_offset = 0;
u32 r2t_length = 0, r2t_offset = 0;
- struct iscsi_cmd *cmd = (struct iscsi_cmd *) data;
- struct iscsi_conn *conn = cmd->conn;
- struct iscsi_session *sess = NULL;
+ struct iscsit_cmd *cmd = timer_container_of(cmd, t, dataout_timer);
+ struct iscsit_conn *conn = cmd->conn;
+ struct iscsit_session *sess = NULL;
struct iscsi_node_attrib *na;
iscsit_inc_conn_usage_count(conn);
@@ -1170,15 +1112,21 @@ static void iscsit_handle_dataout_timeout(unsigned long data)
na = iscsit_tpg_get_node_attrib(sess);
if (!sess->sess_ops->ErrorRecoveryLevel) {
- pr_debug("Unable to recover from DataOut timeout while"
- " in ERL=0.\n");
+ pr_err("Unable to recover from DataOut timeout while"
+ " in ERL=0, closing iSCSI connection for I_T Nexus"
+ " %s,i,0x%6phN,%s,t,0x%02x\n",
+ sess->sess_ops->InitiatorName, sess->isid,
+ sess->tpg->tpg_tiqn->tiqn, (u32)sess->tpg->tpgt);
goto failure;
}
if (++cmd->dataout_timeout_retries == na->dataout_timeout_retries) {
- pr_debug("Command ITT: 0x%08x exceeded max retries"
- " for DataOUT timeout %u, closing iSCSI connection.\n",
- cmd->init_task_tag, na->dataout_timeout_retries);
+ pr_err("Command ITT: 0x%08x exceeded max retries"
+ " for DataOUT timeout %u, closing iSCSI connection for"
+ " I_T Nexus %s,i,0x%6phN,%s,t,0x%02x\n",
+ cmd->init_task_tag, na->dataout_timeout_retries,
+ sess->sess_ops->InitiatorName, sess->isid,
+ sess->tpg->tpg_tiqn->tiqn, (u32)sess->tpg->tpgt);
goto failure;
}
@@ -1225,14 +1173,15 @@ static void iscsit_handle_dataout_timeout(unsigned long data)
failure:
spin_unlock_bh(&cmd->dataout_timeout_lock);
+ iscsit_fill_cxn_timeout_err_stats(sess);
iscsit_cause_connection_reinstatement(conn, 0);
iscsit_dec_conn_usage_count(conn);
}
-void iscsit_mod_dataout_timer(struct iscsi_cmd *cmd)
+void iscsit_mod_dataout_timer(struct iscsit_cmd *cmd)
{
- struct iscsi_conn *conn = cmd->conn;
- struct iscsi_session *sess = conn->sess;
+ struct iscsit_conn *conn = cmd->conn;
+ struct iscsit_session *sess = conn->sess;
struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
spin_lock_bh(&cmd->dataout_timeout_lock);
@@ -1248,32 +1197,27 @@ void iscsit_mod_dataout_timer(struct iscsi_cmd *cmd)
spin_unlock_bh(&cmd->dataout_timeout_lock);
}
-/*
- * Called with cmd->dataout_timeout_lock held.
- */
void iscsit_start_dataout_timer(
- struct iscsi_cmd *cmd,
- struct iscsi_conn *conn)
+ struct iscsit_cmd *cmd,
+ struct iscsit_conn *conn)
{
- struct iscsi_session *sess = conn->sess;
+ struct iscsit_session *sess = conn->sess;
struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
+ lockdep_assert_held(&cmd->dataout_timeout_lock);
+
if (cmd->dataout_timer_flags & ISCSI_TF_RUNNING)
return;
pr_debug("Starting DataOUT timer for ITT: 0x%08x on"
" CID: %hu.\n", cmd->init_task_tag, conn->cid);
- init_timer(&cmd->dataout_timer);
- cmd->dataout_timer.expires = (get_jiffies_64() + na->dataout_timeout * HZ);
- cmd->dataout_timer.data = (unsigned long)cmd;
- cmd->dataout_timer.function = iscsit_handle_dataout_timeout;
cmd->dataout_timer_flags &= ~ISCSI_TF_STOP;
cmd->dataout_timer_flags |= ISCSI_TF_RUNNING;
- add_timer(&cmd->dataout_timer);
+ mod_timer(&cmd->dataout_timer, jiffies + na->dataout_timeout * HZ);
}
-void iscsit_stop_dataout_timer(struct iscsi_cmd *cmd)
+void iscsit_stop_dataout_timer(struct iscsit_cmd *cmd)
{
spin_lock_bh(&cmd->dataout_timeout_lock);
if (!(cmd->dataout_timer_flags & ISCSI_TF_RUNNING)) {
@@ -1283,7 +1227,7 @@ void iscsit_stop_dataout_timer(struct iscsi_cmd *cmd)
cmd->dataout_timer_flags |= ISCSI_TF_STOP;
spin_unlock_bh(&cmd->dataout_timeout_lock);
- del_timer_sync(&cmd->dataout_timer);
+ timer_delete_sync(&cmd->dataout_timer);
spin_lock_bh(&cmd->dataout_timeout_lock);
cmd->dataout_timer_flags &= ~ISCSI_TF_RUNNING;
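
The DataOUT timer hunks here (and the Time2Retain hunks in iscsi_target_erl0.c earlier) follow the kernel-wide timer conversion: the per-arm init_timer()/.data/.function/add_timer() sequence becomes a one-time timer_setup() at object initialization (presumably done where the object is allocated, outside these hunks), mod_timer() to arm or re-arm, timer_container_of() in the callback to recover the owning object, and timer_delete_sync() to stop and wait. The full lifecycle as a sketch on a hypothetical struct:

#include <linux/jiffies.h>
#include <linux/timer.h>

struct demo_cmd {			/* hypothetical stand-in */
	struct timer_list dataout_timer;
};

static void demo_dataout_timeout(struct timer_list *t)
{
	/* Replaces the old cast of timer->data: recover the container. */
	struct demo_cmd *cmd = timer_container_of(cmd, t, dataout_timer);

	(void)cmd;			/* expiry handling elided */
}

static void demo_cmd_init(struct demo_cmd *cmd)
{
	timer_setup(&cmd->dataout_timer, demo_dataout_timeout, 0);
}

static void demo_arm(struct demo_cmd *cmd, unsigned int timeout_secs)
{
	mod_timer(&cmd->dataout_timer, jiffies + timeout_secs * HZ);
}

static void demo_stop(struct demo_cmd *cmd)
{
	timer_delete_sync(&cmd->dataout_timer); /* waits out a running callback */
}
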
diff --git a/drivers/target/iscsi/iscsi_target_erl1.h b/drivers/target/iscsi/iscsi_target_erl1.h
index 2a3ebf118a34..12472eefe559 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.h
+++ b/drivers/target/iscsi/iscsi_target_erl1.h
@@ -1,26 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ISCSI_TARGET_ERL1_H
#define ISCSI_TARGET_ERL1_H
-extern int iscsit_dump_data_payload(struct iscsi_conn *, u32, int);
+#include <linux/types.h>
+#include <scsi/iscsi_proto.h> /* itt_t */
+
+struct iscsit_cmd;
+struct iscsit_conn;
+struct iscsi_datain_req;
+struct iscsi_ooo_cmdsn;
+struct iscsi_pdu;
+struct iscsit_session;
+
+extern int iscsit_dump_data_payload(struct iscsit_conn *, u32, int);
extern int iscsit_create_recovery_datain_values_datasequenceinorder_yes(
- struct iscsi_cmd *, struct iscsi_datain_req *);
+ struct iscsit_cmd *, struct iscsi_datain_req *);
extern int iscsit_create_recovery_datain_values_datasequenceinorder_no(
- struct iscsi_cmd *, struct iscsi_datain_req *);
-extern int iscsit_handle_recovery_datain_or_r2t(struct iscsi_conn *, unsigned char *,
+ struct iscsit_cmd *, struct iscsi_datain_req *);
+extern int iscsit_handle_recovery_datain_or_r2t(struct iscsit_conn *, unsigned char *,
itt_t, u32, u32, u32);
-extern int iscsit_handle_status_snack(struct iscsi_conn *, itt_t, u32,
+extern int iscsit_handle_status_snack(struct iscsit_conn *, itt_t, u32,
u32, u32);
-extern int iscsit_handle_data_ack(struct iscsi_conn *, u32, u32, u32);
-extern int iscsit_dataout_datapduinorder_no_fbit(struct iscsi_cmd *, struct iscsi_pdu *);
-extern int iscsit_recover_dataout_sequence(struct iscsi_cmd *, u32, u32);
-extern void iscsit_clear_ooo_cmdsns_for_conn(struct iscsi_conn *);
-extern void iscsit_free_all_ooo_cmdsns(struct iscsi_session *);
-extern int iscsit_execute_ooo_cmdsns(struct iscsi_session *);
-extern int iscsit_execute_cmd(struct iscsi_cmd *, int);
-extern int iscsit_handle_ooo_cmdsn(struct iscsi_session *, struct iscsi_cmd *, u32);
-extern void iscsit_remove_ooo_cmdsn(struct iscsi_session *, struct iscsi_ooo_cmdsn *);
-extern void iscsit_mod_dataout_timer(struct iscsi_cmd *);
-extern void iscsit_start_dataout_timer(struct iscsi_cmd *, struct iscsi_conn *);
-extern void iscsit_stop_dataout_timer(struct iscsi_cmd *);
+extern int iscsit_handle_data_ack(struct iscsit_conn *, u32, u32, u32);
+extern int iscsit_dataout_datapduinorder_no_fbit(struct iscsit_cmd *, struct iscsi_pdu *);
+extern int iscsit_recover_dataout_sequence(struct iscsit_cmd *, u32, u32);
+extern void iscsit_clear_ooo_cmdsns_for_conn(struct iscsit_conn *);
+extern void iscsit_free_all_ooo_cmdsns(struct iscsit_session *);
+extern int iscsit_execute_ooo_cmdsns(struct iscsit_session *);
+extern int iscsit_execute_cmd(struct iscsit_cmd *, int);
+extern int iscsit_handle_ooo_cmdsn(struct iscsit_session *, struct iscsit_cmd *, u32);
+extern void iscsit_remove_ooo_cmdsn(struct iscsit_session *, struct iscsi_ooo_cmdsn *);
+extern void iscsit_handle_dataout_timeout(struct timer_list *t);
+extern void iscsit_mod_dataout_timer(struct iscsit_cmd *);
+extern void iscsit_start_dataout_timer(struct iscsit_cmd *, struct iscsit_conn *);
+extern void iscsit_stop_dataout_timer(struct iscsit_cmd *);
#endif /* ISCSI_TARGET_ERL1_H */
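
Locking contracts in iscsi_target_erl1.c that used to live only in comments ("Called with sess->cmdsn_mutex held") become lockdep_assert_held() calls, turning a documentation promise into a runtime splat on lockdep-enabled kernels while compiling away otherwise. The idiom:

#include <linux/lockdep.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_cmdsn_mutex);

/* Must only be entered with demo_cmdsn_mutex held; lockdep verifies it. */
static void demo_locked_helper(void)
{
	lockdep_assert_held(&demo_cmdsn_mutex);
	/* ... ordered-list manipulation protected by the mutex ... */
}

static void demo_caller(void)
{
	mutex_lock(&demo_cmdsn_mutex);
	demo_locked_helper();
	mutex_unlock(&demo_cmdsn_mutex);
}
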
diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c
index 45a5afd5ea13..56d78af7cec7 100644
--- a/drivers/target/iscsi/iscsi_target_erl2.c
+++ b/drivers/target/iscsi/iscsi_target_erl2.c
@@ -1,29 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* This file contains error recovery level two functions used by
* the iSCSI Target driver.
*
- * © Copyright 2007-2011 RisingTide Systems LLC.
- *
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ * (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
******************************************************************************/
+#include <linux/slab.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_datain_values.h"
#include "iscsi_target_util.h"
#include "iscsi_target_erl0.h"
@@ -34,56 +25,8 @@
/*
* FIXME: Does RData SNACK apply here as well?
*/
-void iscsit_create_conn_recovery_datain_values(
- struct iscsi_cmd *cmd,
- __be32 exp_data_sn)
-{
- u32 data_sn = 0;
- struct iscsi_conn *conn = cmd->conn;
-
- cmd->next_burst_len = 0;
- cmd->read_data_done = 0;
-
- while (be32_to_cpu(exp_data_sn) > data_sn) {
- if ((cmd->next_burst_len +
- conn->conn_ops->MaxRecvDataSegmentLength) <
- conn->sess->sess_ops->MaxBurstLength) {
- cmd->read_data_done +=
- conn->conn_ops->MaxRecvDataSegmentLength;
- cmd->next_burst_len +=
- conn->conn_ops->MaxRecvDataSegmentLength;
- } else {
- cmd->read_data_done +=
- (conn->sess->sess_ops->MaxBurstLength -
- cmd->next_burst_len);
- cmd->next_burst_len = 0;
- }
- data_sn++;
- }
-}
-
-void iscsit_create_conn_recovery_dataout_values(
- struct iscsi_cmd *cmd)
-{
- u32 write_data_done = 0;
- struct iscsi_conn *conn = cmd->conn;
-
- cmd->data_sn = 0;
- cmd->next_burst_len = 0;
-
- while (cmd->write_data_done > write_data_done) {
- if ((write_data_done + conn->sess->sess_ops->MaxBurstLength) <=
- cmd->write_data_done)
- write_data_done += conn->sess->sess_ops->MaxBurstLength;
- else
- break;
- }
-
- cmd->write_data_done = write_data_done;
-}
-
static int iscsit_attach_active_connection_recovery_entry(
- struct iscsi_session *sess,
+ struct iscsit_session *sess,
struct iscsi_conn_recovery *cr)
{
spin_lock(&sess->cr_a_lock);
@@ -94,7 +37,7 @@ static int iscsit_attach_active_connection_recovery_entry(
}
static int iscsit_attach_inactive_connection_recovery_entry(
- struct iscsi_session *sess,
+ struct iscsit_session *sess,
struct iscsi_conn_recovery *cr)
{
spin_lock(&sess->cr_i_lock);
@@ -109,7 +52,7 @@ static int iscsit_attach_inactive_connection_recovery_entry(
}
struct iscsi_conn_recovery *iscsit_get_inactive_connection_recovery_entry(
- struct iscsi_session *sess,
+ struct iscsit_session *sess,
u16 cid)
{
struct iscsi_conn_recovery *cr;
@@ -126,9 +69,9 @@ struct iscsi_conn_recovery *iscsit_get_inactive_connection_recovery_entry(
return NULL;
}
-void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
+void iscsit_free_connection_recovery_entries(struct iscsit_session *sess)
{
- struct iscsi_cmd *cmd, *cmd_tmp;
+ struct iscsit_cmd *cmd, *cmd_tmp;
struct iscsi_conn_recovery *cr, *cr_tmp;
spin_lock(&sess->cr_a_lock);
@@ -140,7 +83,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
list_for_each_entry_safe(cmd, cmd_tmp,
&cr->conn_recovery_cmd_list, i_conn_node) {
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
cmd->conn = NULL;
spin_unlock(&cr->conn_recovery_cmd_lock);
iscsit_free_cmd(cmd, true);
@@ -162,7 +105,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
list_for_each_entry_safe(cmd, cmd_tmp,
&cr->conn_recovery_cmd_list, i_conn_node) {
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
cmd->conn = NULL;
spin_unlock(&cr->conn_recovery_cmd_lock);
iscsit_free_cmd(cmd, true);
@@ -178,7 +121,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
int iscsit_remove_active_connection_recovery_entry(
struct iscsi_conn_recovery *cr,
- struct iscsi_session *sess)
+ struct iscsit_session *sess)
{
spin_lock(&sess->cr_a_lock);
list_del(&cr->cr_list);
@@ -195,7 +138,7 @@ int iscsit_remove_active_connection_recovery_entry(
static void iscsit_remove_inactive_connection_recovery_entry(
struct iscsi_conn_recovery *cr,
- struct iscsi_session *sess)
+ struct iscsit_session *sess)
{
spin_lock(&sess->cr_i_lock);
list_del(&cr->cr_list);
@@ -206,8 +149,8 @@ static void iscsit_remove_inactive_connection_recovery_entry(
* Called with cr->conn_recovery_cmd_lock help.
*/
int iscsit_remove_cmd_from_connection_recovery(
- struct iscsi_cmd *cmd,
- struct iscsi_session *sess)
+ struct iscsit_cmd *cmd,
+ struct iscsit_session *sess)
{
struct iscsi_conn_recovery *cr;
@@ -218,7 +161,7 @@ int iscsit_remove_cmd_from_connection_recovery(
}
cr = cmd->cr;
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
return --cr->cmd_count;
}
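
Every list_del(&cmd->i_conn_node) in this file becomes list_del_init(): the node is re-pointed at itself rather than poisoned, so later code may call list_empty(&cmd->i_conn_node) (or delete again) without touching LIST_POISON. A sketch using only the generic list API:

#include <linux/bug.h>
#include <linux/list.h>

struct demo_cmd {
	struct list_head i_conn_node;
};

static void demo_detach(struct demo_cmd *cmd)
{
	/* list_del() would poison next/prev; list_del_init() leaves a
	 * valid self-linked node behind. */
	list_del_init(&cmd->i_conn_node);

	/* Legal only because of list_del_init() above. */
	WARN_ON(!list_empty(&cmd->i_conn_node));
}
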
@@ -227,8 +170,8 @@ void iscsit_discard_cr_cmds_by_expstatsn(
u32 exp_statsn)
{
u32 dropped_count = 0;
- struct iscsi_cmd *cmd, *cmd_tmp;
- struct iscsi_session *sess = cr->sess;
+ struct iscsit_cmd *cmd, *cmd_tmp;
+ struct iscsit_session *sess = cr->sess;
spin_lock(&cr->conn_recovery_cmd_lock);
list_for_each_entry_safe(cmd, cmd_tmp,
@@ -272,12 +215,12 @@ void iscsit_discard_cr_cmds_by_expstatsn(
}
}
-int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn)
+int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsit_conn *conn)
{
u32 dropped_count = 0;
- struct iscsi_cmd *cmd, *cmd_tmp;
+ struct iscsit_cmd *cmd, *cmd_tmp;
struct iscsi_ooo_cmdsn *ooo_cmdsn, *ooo_cmdsn_tmp;
- struct iscsi_session *sess = conn->sess;
+ struct iscsit_session *sess = conn->sess;
mutex_lock(&sess->cmdsn_mutex);
list_for_each_entry_safe(ooo_cmdsn, ooo_cmdsn_tmp,
@@ -299,7 +242,7 @@ int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn)
if (!(cmd->cmd_flags & ICF_OOO_CMDSN))
continue;
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
iscsit_free_cmd(cmd, true);
@@ -313,16 +256,16 @@ int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn)
return 0;
}
-int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
+int iscsit_prepare_cmds_for_reallegiance(struct iscsit_conn *conn)
{
u32 cmd_count = 0;
- struct iscsi_cmd *cmd, *cmd_tmp;
+ struct iscsit_cmd *cmd, *cmd_tmp;
struct iscsi_conn_recovery *cr;
/*
* Allocate a struct iscsi_conn_recovery for this connection.
- * Each struct iscsi_cmd contains an struct iscsi_conn_recovery pointer
- * (struct iscsi_cmd->cr) so we need to allocate this before preparing the
+ * Each struct iscsit_cmd contains a struct iscsi_conn_recovery pointer
+ * (struct iscsit_cmd->cr) so we need to allocate this before preparing the
* connection's command list for connection recovery.
*/
cr = kzalloc(sizeof(struct iscsi_conn_recovery), GFP_KERNEL);
@@ -337,7 +280,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
/*
* Only perform connection recovery on ISCSI_OP_SCSI_CMD or
* ISCSI_OP_NOOP_OUT opcodes. For all other opcodes call
- * list_del(&cmd->i_conn_node); to release the command to the
+ * list_del_init(&cmd->i_conn_node); to release the command to the
* session pool and remove it from the connection's list.
*
* Also stop the DataOUT timer, which will be restarted after
@@ -348,12 +291,12 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
if ((cmd->iscsi_opcode != ISCSI_OP_SCSI_CMD) &&
(cmd->iscsi_opcode != ISCSI_OP_NOOP_OUT)) {
- pr_debug("Not performing realligence on"
+ pr_debug("Not performing reallegiance on"
" Opcode: 0x%02x, ITT: 0x%08x, CmdSN: 0x%08x,"
" CID: %hu\n", cmd->iscsi_opcode,
cmd->init_task_tag, cmd->cmd_sn, conn->cid);
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
iscsit_free_cmd(cmd, true);
spin_lock_bh(&conn->cmd_lock);
@@ -373,7 +316,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
*/
if (!(cmd->cmd_flags & ICF_OOO_CMDSN) && !cmd->immediate_cmd &&
iscsi_sna_gte(cmd->cmd_sn, conn->sess->exp_cmd_sn)) {
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
iscsit_free_cmd(cmd, true);
spin_lock_bh(&conn->cmd_lock);
@@ -383,7 +326,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
cmd_count++;
pr_debug("Preparing Opcode: 0x%02x, ITT: 0x%08x,"
" CmdSN: 0x%08x, StatSN: 0x%08x, CID: %hu for"
- " realligence.\n", cmd->iscsi_opcode,
+ " reallegiance.\n", cmd->iscsi_opcode,
cmd->init_task_tag, cmd->cmd_sn, cmd->stat_sn,
conn->cid);
@@ -395,14 +338,14 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
cmd->sess = conn->sess;
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
iscsit_free_all_datain_reqs(cmd);
transport_wait_for_tasks(&cmd->se_cmd);
/*
- * Add the struct iscsi_cmd to the connection recovery cmd list
+ * Add the struct iscsit_cmd to the connection recovery cmd list
*/
spin_lock(&cr->conn_recovery_cmd_lock);
list_add_tail(&cmd->i_conn_node, &cr->conn_recovery_cmd_list);
@@ -427,7 +370,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
return 0;
}
-int iscsit_connection_recovery_transport_reset(struct iscsi_conn *conn)
+int iscsit_connection_recovery_transport_reset(struct iscsit_conn *conn)
{
atomic_set(&conn->connection_recovery, 1);
diff --git a/drivers/target/iscsi/iscsi_target_erl2.h b/drivers/target/iscsi/iscsi_target_erl2.h
index 63f2501f3fe0..9064c74eef7a 100644
--- a/drivers/target/iscsi/iscsi_target_erl2.h
+++ b/drivers/target/iscsi/iscsi_target_erl2.h
@@ -1,18 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ISCSI_TARGET_ERL2_H
#define ISCSI_TARGET_ERL2_H
-extern void iscsit_create_conn_recovery_datain_values(struct iscsi_cmd *, __be32);
-extern void iscsit_create_conn_recovery_dataout_values(struct iscsi_cmd *);
+#include <linux/types.h>
+
+struct iscsit_cmd;
+struct iscsit_conn;
+struct iscsi_conn_recovery;
+struct iscsit_session;
+
extern struct iscsi_conn_recovery *iscsit_get_inactive_connection_recovery_entry(
- struct iscsi_session *, u16);
-extern void iscsit_free_connection_recovery_entires(struct iscsi_session *);
+ struct iscsit_session *, u16);
+extern void iscsit_free_connection_recovery_entries(struct iscsit_session *);
extern int iscsit_remove_active_connection_recovery_entry(
- struct iscsi_conn_recovery *, struct iscsi_session *);
-extern int iscsit_remove_cmd_from_connection_recovery(struct iscsi_cmd *,
- struct iscsi_session *);
+ struct iscsi_conn_recovery *, struct iscsit_session *);
+extern int iscsit_remove_cmd_from_connection_recovery(struct iscsit_cmd *,
+ struct iscsit_session *);
extern void iscsit_discard_cr_cmds_by_expstatsn(struct iscsi_conn_recovery *, u32);
-extern int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *);
-extern int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *);
-extern int iscsit_connection_recovery_transport_reset(struct iscsi_conn *);
+extern int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsit_conn *);
+extern int iscsit_prepare_cmds_for_reallegiance(struct iscsit_conn *);
+extern int iscsit_connection_recovery_transport_reset(struct iscsit_conn *);
#endif /*** ISCSI_TARGET_ERL2_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 3402241be87c..53aca059dc16 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -1,39 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* This file contains the login functions used by the iSCSI Target driver.
*
- * © Copyright 2007-2011 RisingTide Systems LLC.
- *
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ * (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
******************************************************************************/
+#include <linux/module.h>
#include <linux/string.h>
#include <linux/kthread.h>
-#include <linux/crypto.h>
+#include <linux/sched/signal.h>
#include <linux/idr.h>
+#include <linux/tcp.h> /* TCP_NODELAY */
+#include <net/ip.h>
+#include <net/ipv6.h> /* ipv6_addr_v4mapped() */
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
-#include "iscsi_target_core.h"
-#include "iscsi_target_tq.h"
+#include <target/iscsi/iscsi_target_core.h>
+#include <target/iscsi/iscsi_target_stat.h>
#include "iscsi_target_device.h"
#include "iscsi_target_nego.h"
#include "iscsi_target_erl0.h"
#include "iscsi_target_erl2.h"
#include "iscsi_target_login.h"
-#include "iscsi_target_stat.h"
#include "iscsi_target_tpg.h"
#include "iscsi_target_util.h"
#include "iscsi_target.h"
@@ -41,7 +34,7 @@
#include <target/iscsi/iscsi_transport.h>
-static struct iscsi_login *iscsi_login_init_conn(struct iscsi_conn *conn)
+static struct iscsi_login *iscsi_login_init_conn(struct iscsit_conn *conn)
{
struct iscsi_login *login;
@@ -50,6 +43,7 @@ static struct iscsi_login *iscsi_login_init_conn(struct iscsi_conn *conn)
pr_err("Unable to allocate memory for struct iscsi_login.\n");
return NULL;
}
+ conn->login = login;
login->conn = conn;
login->first_request = 1;
@@ -65,44 +59,10 @@ static struct iscsi_login *iscsi_login_init_conn(struct iscsi_conn *conn)
goto out_req_buf;
}
- conn->conn_ops = kzalloc(sizeof(struct iscsi_conn_ops), GFP_KERNEL);
- if (!conn->conn_ops) {
- pr_err("Unable to allocate memory for"
- " struct iscsi_conn_ops.\n");
- goto out_rsp_buf;
- }
-
- init_waitqueue_head(&conn->queues_wq);
- INIT_LIST_HEAD(&conn->conn_list);
- INIT_LIST_HEAD(&conn->conn_cmd_list);
- INIT_LIST_HEAD(&conn->immed_queue_list);
- INIT_LIST_HEAD(&conn->response_queue_list);
- init_completion(&conn->conn_post_wait_comp);
- init_completion(&conn->conn_wait_comp);
- init_completion(&conn->conn_wait_rcfr_comp);
- init_completion(&conn->conn_waiting_on_uc_comp);
- init_completion(&conn->conn_logout_comp);
- init_completion(&conn->rx_half_close_comp);
- init_completion(&conn->tx_half_close_comp);
- spin_lock_init(&conn->cmd_lock);
- spin_lock_init(&conn->conn_usage_lock);
- spin_lock_init(&conn->immed_queue_lock);
- spin_lock_init(&conn->nopin_timer_lock);
- spin_lock_init(&conn->response_queue_lock);
- spin_lock_init(&conn->state_lock);
-
- if (!zalloc_cpumask_var(&conn->conn_cpumask, GFP_KERNEL)) {
- pr_err("Unable to allocate conn->conn_cpumask\n");
- goto out_conn_ops;
- }
conn->conn_login = login;
return login;
-out_conn_ops:
- kfree(conn->conn_ops);
-out_rsp_buf:
- kfree(login->rsp_buf);
out_req_buf:
kfree(login->req_buf);
out_login:
@@ -110,39 +70,8 @@ out_login:
return NULL;
}
-/*
- * Used by iscsi_target_nego.c:iscsi_target_locate_portal() to setup
- * per struct iscsi_conn libcrypto contexts for crc32c and crc32-intel
- */
-int iscsi_login_setup_crypto(struct iscsi_conn *conn)
-{
- /*
- * Setup slicing by CRC32C algorithm for RX and TX libcrypto contexts
- * which will default to crc32c_intel.ko for cpu_has_xmm4_2, or fallback
- * to software 1x8 byte slicing from crc32c.ko
- */
- conn->conn_rx_hash.flags = 0;
- conn->conn_rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
- CRYPTO_ALG_ASYNC);
- if (IS_ERR(conn->conn_rx_hash.tfm)) {
- pr_err("crypto_alloc_hash() failed for conn_rx_tfm\n");
- return -ENOMEM;
- }
-
- conn->conn_tx_hash.flags = 0;
- conn->conn_tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
- CRYPTO_ALG_ASYNC);
- if (IS_ERR(conn->conn_tx_hash.tfm)) {
- pr_err("crypto_alloc_hash() failed for conn_tx_tfm\n");
- crypto_free_hash(conn->conn_rx_hash.tfm);
- return -ENOMEM;
- }
-
- return 0;
-}
-
static int iscsi_login_check_initiator_version(
- struct iscsi_conn *conn,
+ struct iscsit_conn *conn,
u8 version_max,
u8 version_min)
{
@@ -158,12 +87,12 @@ static int iscsi_login_check_initiator_version(
return 0;
}
-int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
+int iscsi_check_for_session_reinstatement(struct iscsit_conn *conn)
{
int sessiontype;
struct iscsi_param *initiatorname_param = NULL, *sessiontype_param = NULL;
struct iscsi_portal_group *tpg = conn->tpg;
- struct iscsi_session *sess = NULL, *sess_p = NULL;
+ struct iscsit_session *sess = NULL, *sess_p = NULL;
struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
struct se_session *se_sess, *se_sess_tmp;
@@ -187,6 +116,7 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
spin_lock(&sess_p->conn_lock);
if (atomic_read(&sess_p->session_fall_back_to_erl0) ||
atomic_read(&sess_p->session_logout) ||
+ atomic_read(&sess_p->session_close) ||
(sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
spin_unlock(&sess_p->conn_lock);
continue;
@@ -196,6 +126,8 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
initiatorname_param->value) &&
(sess_p->sess_ops->SessionType == sessiontype))) {
atomic_set(&sess_p->session_reinstatement, 1);
+ atomic_set(&sess_p->session_fall_back_to_erl0, 1);
+ atomic_set(&sess_p->session_close, 1);
spin_unlock(&sess_p->conn_lock);
iscsit_inc_session_usage_count(sess_p);
iscsit_stop_time2retain_timer(sess_p);
@@ -212,7 +144,7 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
return 0;
pr_debug("%s iSCSI Session SID %u is still active for %s,"
- " preforming session reinstatement.\n", (sessiontype) ?
+ " performing session reinstatement.\n", (sessiontype) ?
"Discovery" : "Normal", sess->sid,
sess->sess_ops->InitiatorName);
@@ -220,7 +152,6 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
if (sess->session_state == TARG_SESS_STATE_FAILED) {
spin_unlock_bh(&sess->conn_lock);
iscsit_dec_session_usage_count(sess);
- target_put_session(sess->se_sess);
return 0;
}
spin_unlock_bh(&sess->conn_lock);
@@ -228,41 +159,67 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
iscsit_stop_session(sess, 1, 1);
iscsit_dec_session_usage_count(sess);
- target_put_session(sess->se_sess);
return 0;
}
-static void iscsi_login_set_conn_values(
- struct iscsi_session *sess,
- struct iscsi_conn *conn,
+static int iscsi_login_set_conn_values(
+ struct iscsit_session *sess,
+ struct iscsit_conn *conn,
__be16 cid)
{
+ int ret;
conn->sess = sess;
conn->cid = be16_to_cpu(cid);
/*
* Generate a random Status sequence number (statsn) for the new
* iSCSI connection.
*/
- get_random_bytes(&conn->stat_sn, sizeof(u32));
+ ret = get_random_bytes_wait(&conn->stat_sn, sizeof(u32));
+ if (unlikely(ret))
+ return ret;
mutex_lock(&auth_id_lock);
conn->auth_id = iscsit_global->auth_id++;
mutex_unlock(&auth_id_lock);
+ return 0;
}
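
iscsi_login_set_conn_values() now returns int because get_random_bytes_wait(), unlike get_random_bytes(), blocks until the CRNG is seeded and can fail with -ERESTARTSYS on a pending signal. The pattern in isolation:

#include <linux/random.h>

static int demo_init_stat_sn(u32 *stat_sn)
{
	int ret = get_random_bytes_wait(stat_sn, sizeof(*stat_sn));

	if (unlikely(ret))	/* e.g. -ERESTARTSYS while waiting */
		return ret;
	return 0;
}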
+__printf(2, 3) int iscsi_change_param_sprintf(
+ struct iscsit_conn *conn,
+ const char *fmt, ...)
+{
+ va_list args;
+ unsigned char buf[64];
+
+ memset(buf, 0, sizeof buf);
+
+ va_start(args, fmt);
+ vsnprintf(buf, sizeof buf, fmt, args);
+ va_end(args);
+
+ if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ return -1;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(iscsi_change_param_sprintf);
+
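A typical call site, as seen later in this patch: the helper formats the key=value pair, applies it, and on failure has already queued the NO_RESOURCES login response, so callers simply bail out:

	if (iscsi_change_param_sprintf(conn, "ErrorRecoveryLevel=%d",
				       na->default_erl))
		return -1;
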
/*
* This is the leading connection of a new session,
* or session reinstatement.
*/
static int iscsi_login_zero_tsih_s1(
- struct iscsi_conn *conn,
+ struct iscsit_conn *conn,
unsigned char *buf)
{
- struct iscsi_session *sess = NULL;
+ struct iscsit_session *sess = NULL;
struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
int ret;
- sess = kzalloc(sizeof(struct iscsi_session), GFP_KERNEL);
+ sess = kzalloc(sizeof(struct iscsit_session), GFP_KERNEL);
if (!sess) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
@@ -270,7 +227,9 @@ static int iscsi_login_zero_tsih_s1(
return -ENOMEM;
}
- iscsi_login_set_conn_values(sess, conn, pdu->cid);
+ if (iscsi_login_set_conn_values(sess, conn, pdu->cid))
+ goto free_sess;
+
sess->init_task_tag = pdu->itt;
memcpy(&sess->isid, pdu->isid, 6);
sess->exp_cmd_sn = be32_to_cpu(pdu->cmdsn);
@@ -289,29 +248,24 @@ static int iscsi_login_zero_tsih_s1(
spin_lock_init(&sess->session_usage_lock);
spin_lock_init(&sess->ttt_lock);
- idr_preload(GFP_KERNEL);
- spin_lock_bh(&sess_idr_lock);
- ret = idr_alloc(&sess_idr, NULL, 0, 0, GFP_NOWAIT);
- if (ret >= 0)
- sess->session_index = ret;
- spin_unlock_bh(&sess_idr_lock);
- idr_preload_end();
+ timer_setup(&sess->time2retain_timer,
+ iscsit_handle_time2retain_timeout, 0);
+ ret = ida_alloc(&sess_ida, GFP_KERNEL);
if (ret < 0) {
- pr_err("idr_alloc() for sess_idr failed\n");
+ pr_err("Session ID allocation failed %d\n", ret);
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
- kfree(sess);
- return -ENOMEM;
+ goto free_sess;
}
+ sess->session_index = ret;
sess->creation_time = get_jiffies_64();
- spin_lock_init(&sess->session_stats_lock);
/*
* The FFP CmdSN window values will be allocated from the TPG's
* Initiator Node's ACL once the login has been successfully completed.
*/
- sess->max_cmd_sn = be32_to_cpu(pdu->cmdsn);
+ atomic_set(&sess->max_cmd_sn, be32_to_cpu(pdu->cmdsn));
sess->sess_ops = kzalloc(sizeof(struct iscsi_sess_ops), GFP_KERNEL);
if (!sess->sess_ops) {
@@ -319,27 +273,34 @@ static int iscsi_login_zero_tsih_s1(
ISCSI_LOGIN_STATUS_NO_RESOURCES);
pr_err("Unable to allocate memory for"
" struct iscsi_sess_ops.\n");
- kfree(sess);
- return -ENOMEM;
+ goto free_id;
}
- sess->se_sess = transport_init_session();
+ sess->se_sess = transport_alloc_session(TARGET_PROT_NORMAL);
if (IS_ERR(sess->se_sess)) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
- kfree(sess);
- return -ENOMEM;
+ goto free_ops;
}
return 0;
+
+free_ops:
+ kfree(sess->sess_ops);
+free_id:
+ ida_free(&sess_ida, sess->session_index);
+free_sess:
+ kfree(sess);
+ conn->sess = NULL;
+ return -ENOMEM;
}
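
The new unwind labels release resources in exact reverse order of acquisition; the idiom, sketched with hypothetical alloc_a/alloc_b/alloc_c helpers:

static int demo_setup(struct demo_ctx *c)
{
	if (alloc_a(c))
		return -ENOMEM;
	if (alloc_b(c))
		goto free_a;		/* undo only what succeeded */
	if (alloc_c(c))
		goto free_b;
	return 0;

free_b:
	release_b(c);
free_a:
	release_a(c);
	return -ENOMEM;
}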
static int iscsi_login_zero_tsih_s2(
- struct iscsi_conn *conn)
+ struct iscsit_conn *conn)
{
struct iscsi_node_attrib *na;
- struct iscsi_session *sess = conn->sess;
- unsigned char buf[32];
+ struct iscsit_session *sess = conn->sess;
+ struct iscsi_param *param;
bool iser = false;
sess->tpg = conn->tpg;
@@ -348,15 +309,15 @@ static int iscsi_login_zero_tsih_s2(
* Assign a new TPG Session Handle. Note this is protected with
* struct iscsi_portal_group->np_login_sem from iscsit_access_np().
*/
- sess->tsih = ++ISCSI_TPG_S(sess)->ntsih;
+ sess->tsih = ++sess->tpg->ntsih;
if (!sess->tsih)
- sess->tsih = ++ISCSI_TPG_S(sess)->ntsih;
+ sess->tsih = ++sess->tpg->ntsih;
/*
* Create the default params from user defined values..
*/
if (iscsi_copy_param_list(&conn->param_list,
- ISCSI_TPG_C(conn)->param_list, 1) < 0) {
+ conn->tpg->param_list, 1) < 0) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
return -1;
@@ -374,35 +335,35 @@ static int iscsi_login_zero_tsih_s2(
na = iscsit_tpg_get_node_attrib(sess);
/*
+ * If ACL allows non-authorized access in TPG with CHAP,
+ * then set None to AuthMethod.
+ */
+ param = iscsi_find_param_from_key(AUTHMETHOD, conn->param_list);
+ if (param && !strstr(param->value, NONE)) {
+ if (!iscsi_conn_auth_required(conn))
+ if (iscsi_change_param_sprintf(conn, "AuthMethod=%s",
+ NONE))
+ return -1;
+ }
+
+ /*
* Need to send TargetPortalGroupTag back in first login response
* on any iSCSI connection where the Initiator provides TargetName.
* See 5.3.1. Login Phase Start
*
* In our case, we have already located the struct iscsi_tiqn at this point.
*/
- memset(buf, 0, 32);
- sprintf(buf, "TargetPortalGroupTag=%hu", ISCSI_TPG_S(sess)->tpgt);
- if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
- iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
- ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ if (iscsi_change_param_sprintf(conn, "TargetPortalGroupTag=%hu", sess->tpg->tpgt))
return -1;
- }
/*
* Workaround for Initiators that have broken connection recovery logic.
*
* "We would really like to get rid of this." Linux-iSCSI.org team
*/
- memset(buf, 0, 32);
- sprintf(buf, "ErrorRecoveryLevel=%d", na->default_erl);
- if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
- iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
- ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ if (iscsi_change_param_sprintf(conn, "ErrorRecoveryLevel=%d", na->default_erl))
return -1;
- }
- if (iscsi_login_disable_FIM_keys(conn->param_list, conn) < 0)
- return -1;
/*
* Set RDMAExtensions=Yes by default for iSER enabled network portals
*/
@@ -411,15 +372,12 @@ static int iscsi_login_zero_tsih_s2(
unsigned long mrdsl, off;
int rc;
- sprintf(buf, "RDMAExtensions=Yes");
- if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
- iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
- ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ if (iscsi_change_param_sprintf(conn, "RDMAExtensions=Yes"))
return -1;
- }
+
/*
* Make MaxRecvDataSegmentLength PAGE_SIZE aligned for
- * Immediate Data + Unsolicitied Data-OUT if necessary..
+ * Immediate Data + Unsolicited Data-OUT if necessary..
*/
param = iscsi_find_param_from_key("MaxRecvDataSegmentLength",
conn->param_list);
@@ -428,7 +386,7 @@ static int iscsi_login_zero_tsih_s2(
ISCSI_LOGIN_STATUS_NO_RESOURCES);
return -1;
}
- rc = strict_strtoul(param->value, 0, &mrdsl);
+ rc = kstrtoul(param->value, 0, &mrdsl);
if (rc < 0) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
@@ -436,7 +394,7 @@ static int iscsi_login_zero_tsih_s2(
}
off = mrdsl % PAGE_SIZE;
if (!off)
- return 0;
+ goto check_prot;
if (mrdsl < PAGE_SIZE)
mrdsl = PAGE_SIZE;
@@ -446,89 +404,49 @@ static int iscsi_login_zero_tsih_s2(
pr_warn("Aligning ISER MaxRecvDataSegmentLength: %lu down"
" to PAGE_SIZE\n", mrdsl);
- sprintf(buf, "MaxRecvDataSegmentLength=%lu\n", mrdsl);
- if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
- iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
- ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ if (iscsi_change_param_sprintf(conn, "MaxRecvDataSegmentLength=%lu\n", mrdsl))
return -1;
- }
- }
-
- return 0;
-}
-
-/*
- * Remove PSTATE_NEGOTIATE for the four FIM related keys.
- * The Initiator node will be able to enable FIM by proposing them itself.
- */
-int iscsi_login_disable_FIM_keys(
- struct iscsi_param_list *param_list,
- struct iscsi_conn *conn)
-{
- struct iscsi_param *param;
-
- param = iscsi_find_param_from_key("OFMarker", param_list);
- if (!param) {
- pr_err("iscsi_find_param_from_key() for"
- " OFMarker failed\n");
- iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
- ISCSI_LOGIN_STATUS_NO_RESOURCES);
- return -1;
- }
- param->state &= ~PSTATE_NEGOTIATE;
+ /*
+ * ISER currently requires that ImmediateData + Unsolicited
+ * Data be disabled when protection / signature MRs are enabled.
+ */
+check_prot:
+ if (sess->se_sess->sup_prot_ops &
+ (TARGET_PROT_DOUT_STRIP | TARGET_PROT_DOUT_PASS |
+ TARGET_PROT_DOUT_INSERT)) {
- param = iscsi_find_param_from_key("OFMarkInt", param_list);
- if (!param) {
- pr_err("iscsi_find_param_from_key() for"
- " IFMarker failed\n");
- iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
- ISCSI_LOGIN_STATUS_NO_RESOURCES);
- return -1;
- }
- param->state &= ~PSTATE_NEGOTIATE;
+ if (iscsi_change_param_sprintf(conn, "ImmediateData=No"))
+ return -1;
- param = iscsi_find_param_from_key("IFMarker", param_list);
- if (!param) {
- pr_err("iscsi_find_param_from_key() for"
- " IFMarker failed\n");
- iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
- ISCSI_LOGIN_STATUS_NO_RESOURCES);
- return -1;
- }
- param->state &= ~PSTATE_NEGOTIATE;
+ if (iscsi_change_param_sprintf(conn, "InitialR2T=Yes"))
+ return -1;
- param = iscsi_find_param_from_key("IFMarkInt", param_list);
- if (!param) {
- pr_err("iscsi_find_param_from_key() for"
- " IFMarker failed\n");
- iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
- ISCSI_LOGIN_STATUS_NO_RESOURCES);
- return -1;
+ pr_debug("Forcing ImmediateData=No + InitialR2T=Yes for"
+ " T10-PI enabled ISER session\n");
+ }
}
- param->state &= ~PSTATE_NEGOTIATE;
return 0;
}
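
The MRDSL handling above rounds the negotiated value down to a PAGE_SIZE multiple but never below one page; the arithmetic in isolation:

static unsigned long demo_align_mrdsl(unsigned long mrdsl)
{
	unsigned long off = mrdsl % PAGE_SIZE;

	if (!off)			/* already page aligned */
		return mrdsl;
	if (mrdsl < PAGE_SIZE)		/* clamp tiny values up */
		return PAGE_SIZE;
	return mrdsl - off;		/* otherwise round down */
}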
static int iscsi_login_non_zero_tsih_s1(
- struct iscsi_conn *conn,
+ struct iscsit_conn *conn,
unsigned char *buf)
{
struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
- iscsi_login_set_conn_values(NULL, conn, pdu->cid);
- return 0;
+ return iscsi_login_set_conn_values(NULL, conn, pdu->cid);
}
/*
* Add a new connection to an existing session.
*/
static int iscsi_login_non_zero_tsih_s2(
- struct iscsi_conn *conn,
+ struct iscsit_conn *conn,
unsigned char *buf)
{
struct iscsi_portal_group *tpg = conn->tpg;
- struct iscsi_session *sess = NULL, *sess_p = NULL;
+ struct iscsit_session *sess = NULL, *sess_p = NULL;
struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
struct se_session *se_sess, *se_sess_tmp;
struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
@@ -538,9 +456,10 @@ static int iscsi_login_non_zero_tsih_s2(
list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list,
sess_list) {
- sess_p = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ sess_p = (struct iscsit_session *)se_sess->fabric_sess_ptr;
if (atomic_read(&sess_p->session_fall_back_to_erl0) ||
atomic_read(&sess_p->session_logout) ||
+ atomic_read(&sess_p->session_close) ||
(sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED))
continue;
if (!memcmp(sess_p->isid, pdu->isid, 6) &&
@@ -573,10 +492,9 @@ static int iscsi_login_non_zero_tsih_s2(
atomic_set(&sess->session_continuation, 1);
spin_unlock_bh(&sess->conn_lock);
- iscsi_login_set_conn_values(sess, conn, pdu->cid);
-
- if (iscsi_copy_param_list(&conn->param_list,
- ISCSI_TPG_C(conn)->param_list, 0) < 0) {
+ if (iscsi_login_set_conn_values(sess, conn, pdu->cid) < 0 ||
+ iscsi_copy_param_list(&conn->param_list,
+ conn->tpg->param_list, 0) < 0) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
return -1;
@@ -593,25 +511,20 @@ static int iscsi_login_non_zero_tsih_s2(
*
* In our case, we have already located the struct iscsi_tiqn at this point.
*/
- memset(buf, 0, 32);
- sprintf(buf, "TargetPortalGroupTag=%hu", ISCSI_TPG_S(sess)->tpgt);
- if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
- iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
- ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ if (iscsi_change_param_sprintf(conn, "TargetPortalGroupTag=%hu", sess->tpg->tpgt))
return -1;
- }
- return iscsi_login_disable_FIM_keys(conn->param_list, conn);
+ return 0;
}
int iscsi_login_post_auth_non_zero_tsih(
- struct iscsi_conn *conn,
+ struct iscsit_conn *conn,
u16 cid,
u32 exp_statsn)
{
- struct iscsi_conn *conn_ptr = NULL;
+ struct iscsit_conn *conn_ptr = NULL;
struct iscsi_conn_recovery *cr = NULL;
- struct iscsi_session *sess = conn->sess;
+ struct iscsit_session *sess = conn->sess;
/*
* By following item 5 in the login table, if we have found
@@ -631,7 +544,7 @@ int iscsi_login_post_auth_non_zero_tsih(
}
/*
- * Check for any connection recovery entires containing CID.
+ * Check for any connection recovery entries containing CID.
* We use the original ExpStatSN sent in the first login request
* to acknowledge commands for the failed connection.
*
@@ -671,11 +584,11 @@ int iscsi_login_post_auth_non_zero_tsih(
return 0;
}
-static void iscsi_post_login_start_timers(struct iscsi_conn *conn)
+static void iscsi_post_login_start_timers(struct iscsit_conn *conn)
{
- struct iscsi_session *sess = conn->sess;
+ struct iscsit_session *sess = conn->sess;
/*
- * FIXME: Unsolicitied NopIN support for ISER
+ * FIXME: Unsolicited NopIN support for ISER
*/
if (conn->conn_transport->transport_type == ISCSI_INFINIBAND)
return;
@@ -684,17 +597,62 @@ static void iscsi_post_login_start_timers(struct iscsi_conn *conn)
iscsit_start_nopin_timer(conn);
}
-static int iscsi_post_login_handler(
+int iscsit_start_kthreads(struct iscsit_conn *conn)
+{
+ int ret = 0;
+
+ spin_lock(&iscsit_global->ts_bitmap_lock);
+ conn->bitmap_id = bitmap_find_free_region(iscsit_global->ts_bitmap,
+ ISCSIT_BITMAP_BITS, get_order(1));
+ spin_unlock(&iscsit_global->ts_bitmap_lock);
+
+ if (conn->bitmap_id < 0) {
+ pr_err("bitmap_find_free_region() failed for"
+ " iscsit_start_kthreads()\n");
+ return -ENOMEM;
+ }
+
+ conn->tx_thread = kthread_run(iscsi_target_tx_thread, conn,
+ "%s", ISCSI_TX_THREAD_NAME);
+ if (IS_ERR(conn->tx_thread)) {
+ pr_err("Unable to start iscsi_target_tx_thread\n");
+ ret = PTR_ERR(conn->tx_thread);
+ goto out_bitmap;
+ }
+ conn->tx_thread_active = true;
+
+ conn->rx_thread = kthread_run(iscsi_target_rx_thread, conn,
+ "%s", ISCSI_RX_THREAD_NAME);
+ if (IS_ERR(conn->rx_thread)) {
+ pr_err("Unable to start iscsi_target_rx_thread\n");
+ ret = PTR_ERR(conn->rx_thread);
+ goto out_tx;
+ }
+ conn->rx_thread_active = true;
+
+ return 0;
+out_tx:
+ send_sig(SIGINT, conn->tx_thread, 1);
+ kthread_stop(conn->tx_thread);
+ conn->tx_thread_active = false;
+out_bitmap:
+ spin_lock(&iscsit_global->ts_bitmap_lock);
+ bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id,
+ get_order(1));
+ spin_unlock(&iscsit_global->ts_bitmap_lock);
+ return ret;
+}
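
kthread_run() returns an ERR_PTR(), never NULL, on failure, which is why iscsit_start_kthreads() tests with IS_ERR()/PTR_ERR(). The minimal pattern, with a hypothetical worker_fn:

#include <linux/kthread.h>
#include <linux/err.h>

	struct task_struct *t;

	t = kthread_run(worker_fn, data, "demo_worker");
	if (IS_ERR(t))
		return PTR_ERR(t);	/* thread never started */
	/* ... shutdown path: kthread_stop(t) waits for worker_fn
	 * to observe kthread_should_stop() and return. */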
+
+void iscsi_post_login_handler(
struct iscsi_np *np,
- struct iscsi_conn *conn,
+ struct iscsit_conn *conn,
u8 zero_tsih)
{
int stop_timer = 0;
- struct iscsi_session *sess = conn->sess;
+ struct iscsit_session *sess = conn->sess;
struct se_session *se_sess = sess->se_sess;
- struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
+ struct iscsi_portal_group *tpg = sess->tpg;
struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
- struct iscsi_thread_set *ts;
iscsit_inc_conn_usage_count(conn);
@@ -705,11 +663,9 @@ static int iscsi_post_login_handler(
conn->conn_state = TARG_CONN_STATE_LOGGED_IN;
iscsi_set_connection_parameters(conn->conn_ops, conn->param_list);
- iscsit_set_sync_and_steering_values(conn);
/*
* SCSI Initiator -> SCSI Target Port Mapping
*/
- ts = iscsi_get_thread_set();
if (!zero_tsih) {
iscsi_set_session_parameters(sess->sess_ops,
conn->param_list, 0);
@@ -725,20 +681,18 @@ static int iscsi_post_login_handler(
stop_timer = 1;
}
- pr_debug("iSCSI Login successful on CID: %hu from %s to"
- " %s:%hu,%hu\n", conn->cid, conn->login_ip,
- conn->local_ip, conn->local_port, tpg->tpgt);
+ pr_debug("iSCSI Login successful on CID: %hu from %pISpc to"
+ " %pISpc,%hu\n", conn->cid, &conn->login_sockaddr,
+ &conn->local_sockaddr, tpg->tpgt);
list_add_tail(&conn->conn_list, &sess->sess_conn_list);
atomic_inc(&sess->nconn);
- pr_debug("Incremented iSCSI Connection count to %hu"
+ pr_debug("Incremented iSCSI Connection count to %d"
" from node: %s\n", atomic_read(&sess->nconn),
sess->sess_ops->InitiatorName);
spin_unlock_bh(&sess->conn_lock);
iscsi_post_login_start_timers(conn);
-
- iscsi_activate_thread_set(conn, ts);
/*
* Determine CPU mask to ensure connection's RX and TX kthreads
* are scheduled on the same CPU.
@@ -746,15 +700,20 @@ static int iscsi_post_login_handler(
iscsit_thread_get_cpumask(conn);
conn->conn_rx_reset_cpumask = 1;
conn->conn_tx_reset_cpumask = 1;
-
+ /*
+ * Wakeup the sleeping iscsi_target_rx_thread() now that
+ * iscsit_conn is in TARG_CONN_STATE_LOGGED_IN state.
+ */
+ complete(&conn->rx_login_comp);
iscsit_dec_conn_usage_count(conn);
+
if (stop_timer) {
spin_lock_bh(&se_tpg->session_lock);
iscsit_stop_time2retain_timer(sess);
spin_unlock_bh(&se_tpg->session_lock);
}
iscsit_dec_session_usage_count(sess);
- return 0;
+ return;
}
iscsi_set_session_parameters(sess->sess_ops, conn->param_list, 1);
@@ -769,14 +728,14 @@ static int iscsi_post_login_handler(
pr_debug("Moving to TARG_SESS_STATE_LOGGED_IN.\n");
sess->session_state = TARG_SESS_STATE_LOGGED_IN;
- pr_debug("iSCSI Login successful on CID: %hu from %s to %s:%hu,%hu\n",
- conn->cid, conn->login_ip, conn->local_ip, conn->local_port,
+ pr_debug("iSCSI Login successful on CID: %hu from %pISpc to %pISpc,%hu\n",
+ conn->cid, &conn->login_sockaddr, &conn->local_sockaddr,
tpg->tpgt);
spin_lock_bh(&sess->conn_lock);
list_add_tail(&conn->conn_list, &sess->sess_conn_list);
atomic_inc(&sess->nconn);
- pr_debug("Incremented iSCSI Connection count to %hu from node:"
+ pr_debug("Incremented iSCSI Connection count to %d from node:"
" %s\n", atomic_read(&sess->nconn),
sess->sess_ops->InitiatorName);
spin_unlock_bh(&sess->conn_lock);
@@ -796,7 +755,6 @@ static int iscsi_post_login_handler(
spin_unlock_bh(&se_tpg->session_lock);
iscsi_post_login_start_timers(conn);
- iscsi_activate_thread_set(conn, ts);
/*
* Determine CPU mask to ensure connection's RX and TX kthreads
* are scheduled on the same CPU.
@@ -804,75 +762,20 @@ static int iscsi_post_login_handler(
iscsit_thread_get_cpumask(conn);
conn->conn_rx_reset_cpumask = 1;
conn->conn_tx_reset_cpumask = 1;
-
- iscsit_dec_conn_usage_count(conn);
-
- return 0;
-}
-
-static void iscsi_handle_login_thread_timeout(unsigned long data)
-{
- struct iscsi_np *np = (struct iscsi_np *) data;
-
- spin_lock_bh(&np->np_thread_lock);
- pr_err("iSCSI Login timeout on Network Portal %s:%hu\n",
- np->np_ip, np->np_port);
-
- if (np->np_login_timer_flags & ISCSI_TF_STOP) {
- spin_unlock_bh(&np->np_thread_lock);
- return;
- }
-
- if (np->np_thread)
- send_sig(SIGINT, np->np_thread, 1);
-
- np->np_login_timer_flags &= ~ISCSI_TF_RUNNING;
- spin_unlock_bh(&np->np_thread_lock);
-}
-
-static void iscsi_start_login_thread_timer(struct iscsi_np *np)
-{
/*
- * This used the TA_LOGIN_TIMEOUT constant because at this
- * point we do not have access to ISCSI_TPG_ATTRIB(tpg)->login_timeout
+ * Wakeup the sleeping iscsi_target_rx_thread() now that
+ * iscsit_conn is in TARG_CONN_STATE_LOGGED_IN state.
*/
- spin_lock_bh(&np->np_thread_lock);
- init_timer(&np->np_login_timer);
- np->np_login_timer.expires = (get_jiffies_64() + TA_LOGIN_TIMEOUT * HZ);
- np->np_login_timer.data = (unsigned long)np;
- np->np_login_timer.function = iscsi_handle_login_thread_timeout;
- np->np_login_timer_flags &= ~ISCSI_TF_STOP;
- np->np_login_timer_flags |= ISCSI_TF_RUNNING;
- add_timer(&np->np_login_timer);
-
- pr_debug("Added timeout timer to iSCSI login request for"
- " %u seconds.\n", TA_LOGIN_TIMEOUT);
- spin_unlock_bh(&np->np_thread_lock);
-}
-
-static void iscsi_stop_login_thread_timer(struct iscsi_np *np)
-{
- spin_lock_bh(&np->np_thread_lock);
- if (!(np->np_login_timer_flags & ISCSI_TF_RUNNING)) {
- spin_unlock_bh(&np->np_thread_lock);
- return;
- }
- np->np_login_timer_flags |= ISCSI_TF_STOP;
- spin_unlock_bh(&np->np_thread_lock);
-
- del_timer_sync(&np->np_login_timer);
-
- spin_lock_bh(&np->np_thread_lock);
- np->np_login_timer_flags &= ~ISCSI_TF_RUNNING;
- spin_unlock_bh(&np->np_thread_lock);
+ complete(&conn->rx_login_comp);
+ iscsit_dec_conn_usage_count(conn);
}
int iscsit_setup_np(
struct iscsi_np *np,
- struct __kernel_sockaddr_storage *sockaddr)
+ struct sockaddr_storage *sockaddr)
{
struct socket *sock = NULL;
- int backlog = 5, ret, opt = 0, len;
+ int backlog = ISCSIT_TCP_BACKLOG, ret, len;
switch (np->np_network_transport) {
case ISCSI_TCP:
@@ -893,9 +796,6 @@ int iscsit_setup_np(
return -EINVAL;
}
- np->np_ip_proto = IPPROTO_TCP;
- np->np_sock_type = SOCK_STREAM;
-
ret = sock_create(sockaddr->ss_family, np->np_sock_type,
np->np_ip_proto, &sock);
if (ret < 0) {
@@ -908,45 +808,21 @@ int iscsit_setup_np(
* in iscsi_target_configfs.c code..
*/
memcpy(&np->np_sockaddr, sockaddr,
- sizeof(struct __kernel_sockaddr_storage));
+ sizeof(struct sockaddr_storage));
if (sockaddr->ss_family == AF_INET6)
len = sizeof(struct sockaddr_in6);
else
len = sizeof(struct sockaddr_in);
/*
- * Set SO_REUSEADDR, and disable Nagel Algorithm with TCP_NODELAY.
+ * Set SO_REUSEADDR, and disable Nagle Algorithm with TCP_NODELAY.
*/
- /* FIXME: Someone please explain why this is endian-safe */
- opt = 1;
- if (np->np_network_transport == ISCSI_TCP) {
- ret = kernel_setsockopt(sock, IPPROTO_TCP, TCP_NODELAY,
- (char *)&opt, sizeof(opt));
- if (ret < 0) {
- pr_err("kernel_setsockopt() for TCP_NODELAY"
- " failed: %d\n", ret);
- goto fail;
- }
- }
+ if (np->np_network_transport == ISCSI_TCP)
+ tcp_sock_set_nodelay(sock->sk);
+ sock_set_reuseaddr(sock->sk);
+ ip_sock_set_freebind(sock->sk);
- /* FIXME: Someone please explain why this is endian-safe */
- ret = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
- (char *)&opt, sizeof(opt));
- if (ret < 0) {
- pr_err("kernel_setsockopt() for SO_REUSEADDR"
- " failed\n");
- goto fail;
- }
-
- ret = kernel_setsockopt(sock, IPPROTO_IP, IP_FREEBIND,
- (char *)&opt, sizeof(opt));
- if (ret < 0) {
- pr_err("kernel_setsockopt() for IP_FREEBIND"
- " failed\n");
- goto fail;
- }
-
- ret = kernel_bind(sock, (struct sockaddr *)&np->np_sockaddr, len);
+ ret = kernel_bind(sock, (struct sockaddr_unsized *)&np->np_sockaddr, len);
if (ret < 0) {
pr_err("kernel_bind() failed: %d\n", ret);
goto fail;
@@ -961,14 +837,13 @@ int iscsit_setup_np(
return 0;
fail:
np->np_socket = NULL;
- if (sock)
- sock_release(sock);
+ sock_release(sock);
return ret;
}
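
tcp_sock_set_nodelay(), sock_set_reuseaddr() and ip_sock_set_freebind() are the typed in-kernel successors to the removed kernel_setsockopt() calls; they operate on the struct sock directly and cannot fail, so no error handling is needed:

#include <linux/tcp.h>
#include <net/ip.h>

static void demo_tune_listen_sock(struct socket *sock)
{
	tcp_sock_set_nodelay(sock->sk);	/* disable Nagle */
	sock_set_reuseaddr(sock->sk);	/* fast rebind after restart */
	ip_sock_set_freebind(sock->sk);	/* bind before addr exists */
}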
int iscsi_target_setup_login_socket(
struct iscsi_np *np,
- struct __kernel_sockaddr_storage *sockaddr)
+ struct sockaddr_storage *sockaddr)
{
struct iscsit_transport *t;
int rc;
@@ -984,15 +859,16 @@ int iscsi_target_setup_login_socket(
}
np->np_transport = t;
+ np->enabled = true;
return 0;
}
-int iscsit_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
+int iscsit_accept_np(struct iscsi_np *np, struct iscsit_conn *conn)
{
struct socket *new_sock, *sock = np->np_socket;
struct sockaddr_in sock_in;
struct sockaddr_in6 sock_in6;
- int rc, err;
+ int rc;
rc = kernel_accept(sock, &new_sock, 0);
if (rc < 0)
@@ -1005,44 +881,50 @@ int iscsit_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
memset(&sock_in6, 0, sizeof(struct sockaddr_in6));
rc = conn->sock->ops->getname(conn->sock,
- (struct sockaddr *)&sock_in6, &err, 1);
- if (!rc) {
- snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
- &sock_in6.sin6_addr.in6_u);
- conn->login_port = ntohs(sock_in6.sin6_port);
+ (struct sockaddr *)&sock_in6, 1);
+ if (rc >= 0) {
+ if (!ipv6_addr_v4mapped(&sock_in6.sin6_addr)) {
+ memcpy(&conn->login_sockaddr, &sock_in6, sizeof(sock_in6));
+ } else {
+ /* Pretend to be an ipv4 socket */
+ sock_in.sin_family = AF_INET;
+ sock_in.sin_port = sock_in6.sin6_port;
+ memcpy(&sock_in.sin_addr, &sock_in6.sin6_addr.s6_addr32[3], 4);
+ memcpy(&conn->login_sockaddr, &sock_in, sizeof(sock_in));
+ }
}
rc = conn->sock->ops->getname(conn->sock,
- (struct sockaddr *)&sock_in6, &err, 0);
- if (!rc) {
- snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c",
- &sock_in6.sin6_addr.in6_u);
- conn->local_port = ntohs(sock_in6.sin6_port);
+ (struct sockaddr *)&sock_in6, 0);
+ if (rc >= 0) {
+ if (!ipv6_addr_v4mapped(&sock_in6.sin6_addr)) {
+ memcpy(&conn->local_sockaddr, &sock_in6, sizeof(sock_in6));
+ } else {
+ /* Pretend to be an ipv4 socket */
+ sock_in.sin_family = AF_INET;
+ sock_in.sin_port = sock_in6.sin6_port;
+ memcpy(&sock_in.sin_addr, &sock_in6.sin6_addr.s6_addr32[3], 4);
+ memcpy(&conn->local_sockaddr, &sock_in, sizeof(sock_in));
+ }
}
} else {
memset(&sock_in, 0, sizeof(struct sockaddr_in));
rc = conn->sock->ops->getname(conn->sock,
- (struct sockaddr *)&sock_in, &err, 1);
- if (!rc) {
- sprintf(conn->login_ip, "%pI4",
- &sock_in.sin_addr.s_addr);
- conn->login_port = ntohs(sock_in.sin_port);
- }
+ (struct sockaddr *)&sock_in, 1);
+ if (rc >= 0)
+ memcpy(&conn->login_sockaddr, &sock_in, sizeof(sock_in));
rc = conn->sock->ops->getname(conn->sock,
- (struct sockaddr *)&sock_in, &err, 0);
- if (!rc) {
- sprintf(conn->local_ip, "%pI4",
- &sock_in.sin_addr.s_addr);
- conn->local_port = ntohs(sock_in.sin_port);
- }
+ (struct sockaddr *)&sock_in, 0);
+ if (rc >= 0)
+ memcpy(&conn->local_sockaddr, &sock_in, sizeof(sock_in));
}
return 0;
}
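
A v4-mapped peer (::ffff:a.b.c.d) is rewritten as a plain sockaddr_in so that %pISpc logging shows the familiar IPv4 form; the conversion in isolation, as a standalone sketch:

#include <net/ipv6.h>

static void demo_unmap_v4(const struct sockaddr_in6 *in6,
			  struct sockaddr_storage *out)
{
	struct sockaddr_in in4 = { .sin_family = AF_INET };

	if (!ipv6_addr_v4mapped(&in6->sin6_addr)) {
		memcpy(out, in6, sizeof(*in6));
		return;
	}
	/* The low 32 bits of a mapped address carry the IPv4 one. */
	in4.sin_port = in6->sin6_port;
	memcpy(&in4.sin_addr, &in6->sin6_addr.s6_addr32[3], 4);
	memcpy(out, &in4, sizeof(in4));
}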
-int iscsit_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
+int iscsit_get_login_rx(struct iscsit_conn *conn, struct iscsi_login *login)
{
struct iscsi_login_req *login_req;
u32 padding = 0, payload_length;
@@ -1087,7 +969,7 @@ int iscsit_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
return 0;
}
-int iscsit_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
+int iscsit_put_login_tx(struct iscsit_conn *conn, struct iscsi_login *login,
u32 length)
{
if (iscsi_login_tx_data(conn, login->rsp, login->rsp_buf, length) < 0)
@@ -1097,7 +979,7 @@ int iscsit_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
}
static int
-iscsit_conn_set_transport(struct iscsi_conn *conn, struct iscsit_transport *t)
+iscsit_conn_set_transport(struct iscsit_conn *conn, struct iscsit_transport *t)
{
int rc;
@@ -1116,67 +998,203 @@ iscsit_conn_set_transport(struct iscsi_conn *conn, struct iscsit_transport *t)
return 0;
}
+static struct iscsit_conn *iscsit_alloc_conn(struct iscsi_np *np)
+{
+ struct iscsit_conn *conn;
+
+ conn = kzalloc(sizeof(struct iscsit_conn), GFP_KERNEL);
+ if (!conn) {
+ pr_err("Could not allocate memory for new connection\n");
+ return NULL;
+ }
+ pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
+ conn->conn_state = TARG_CONN_STATE_FREE;
+
+ init_waitqueue_head(&conn->queues_wq);
+ INIT_LIST_HEAD(&conn->conn_list);
+ INIT_LIST_HEAD(&conn->conn_cmd_list);
+ INIT_LIST_HEAD(&conn->immed_queue_list);
+ INIT_LIST_HEAD(&conn->response_queue_list);
+ init_completion(&conn->conn_post_wait_comp);
+ init_completion(&conn->conn_wait_comp);
+ init_completion(&conn->conn_wait_rcfr_comp);
+ init_completion(&conn->conn_waiting_on_uc_comp);
+ init_completion(&conn->conn_logout_comp);
+ init_completion(&conn->rx_half_close_comp);
+ init_completion(&conn->tx_half_close_comp);
+ init_completion(&conn->rx_login_comp);
+ spin_lock_init(&conn->cmd_lock);
+ spin_lock_init(&conn->conn_usage_lock);
+ spin_lock_init(&conn->immed_queue_lock);
+ spin_lock_init(&conn->nopin_timer_lock);
+ spin_lock_init(&conn->response_queue_lock);
+ spin_lock_init(&conn->state_lock);
+ spin_lock_init(&conn->login_worker_lock);
+ spin_lock_init(&conn->login_timer_lock);
+
+ timer_setup(&conn->nopin_response_timer,
+ iscsit_handle_nopin_response_timeout, 0);
+ timer_setup(&conn->nopin_timer, iscsit_handle_nopin_timeout, 0);
+ timer_setup(&conn->login_timer, iscsit_login_timeout, 0);
+
+ if (iscsit_conn_set_transport(conn, np->np_transport) < 0)
+ goto free_conn;
+
+ conn->conn_ops = kzalloc(sizeof(struct iscsi_conn_ops), GFP_KERNEL);
+ if (!conn->conn_ops) {
+ pr_err("Unable to allocate memory for struct iscsi_conn_ops.\n");
+ goto put_transport;
+ }
+
+ if (!zalloc_cpumask_var(&conn->conn_cpumask, GFP_KERNEL)) {
+ pr_err("Unable to allocate conn->conn_cpumask\n");
+ goto free_conn_ops;
+ }
+
+ if (!zalloc_cpumask_var(&conn->allowed_cpumask, GFP_KERNEL)) {
+ pr_err("Unable to allocate conn->allowed_cpumask\n");
+ goto free_conn_cpumask;
+ }
+
+ conn->cmd_cnt = target_alloc_cmd_counter();
+ if (!conn->cmd_cnt)
+ goto free_conn_allowed_cpumask;
+
+ return conn;
+
+free_conn_allowed_cpumask:
+ free_cpumask_var(conn->allowed_cpumask);
+free_conn_cpumask:
+ free_cpumask_var(conn->conn_cpumask);
+free_conn_ops:
+ kfree(conn->conn_ops);
+put_transport:
+ iscsit_put_transport(conn->conn_transport);
+free_conn:
+ kfree(conn);
+ return NULL;
+}
+
+void iscsit_free_conn(struct iscsit_conn *conn)
+{
+ target_free_cmd_counter(conn->cmd_cnt);
+ free_cpumask_var(conn->allowed_cpumask);
+ free_cpumask_var(conn->conn_cpumask);
+ kfree(conn->conn_ops);
+ iscsit_put_transport(conn->conn_transport);
+ kfree(conn);
+}
+
+void iscsi_target_login_sess_out(struct iscsit_conn *conn,
+ bool zero_tsih, bool new_sess)
+{
+ if (!new_sess)
+ goto old_sess_out;
+
+ pr_err("iSCSI Login negotiation failed.\n");
+ iscsit_collect_login_stats(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_INIT_ERR);
+ if (!zero_tsih || !conn->sess)
+ goto old_sess_out;
+
+ transport_free_session(conn->sess->se_sess);
+ ida_free(&sess_ida, conn->sess->session_index);
+ kfree(conn->sess->sess_ops);
+ kfree(conn->sess);
+ conn->sess = NULL;
+
+old_sess_out:
+ /*
+ * If login negotiation fails check if the Time2Retain timer
+ * needs to be restarted.
+ */
+ if (!zero_tsih && conn->sess) {
+ spin_lock_bh(&conn->sess->conn_lock);
+ if (conn->sess->session_state == TARG_SESS_STATE_FAILED) {
+ struct se_portal_group *se_tpg =
+ &conn->tpg->tpg_se_tpg;
+
+ atomic_set(&conn->sess->session_continuation, 0);
+ spin_unlock_bh(&conn->sess->conn_lock);
+ spin_lock_bh(&se_tpg->session_lock);
+ iscsit_start_time2retain_handler(conn->sess);
+ spin_unlock_bh(&se_tpg->session_lock);
+ } else
+ spin_unlock_bh(&conn->sess->conn_lock);
+ iscsit_dec_session_usage_count(conn->sess);
+ }
+
+ if (conn->param_list) {
+ iscsi_release_param_list(conn->param_list);
+ conn->param_list = NULL;
+ }
+ iscsi_target_nego_release(conn);
+
+ if (conn->sock) {
+ sock_release(conn->sock);
+ conn->sock = NULL;
+ }
+
+ if (conn->conn_transport->iscsit_wait_conn)
+ conn->conn_transport->iscsit_wait_conn(conn);
+
+ if (conn->conn_transport->iscsit_free_conn)
+ conn->conn_transport->iscsit_free_conn(conn);
+
+ iscsit_free_conn(conn);
+}
+
static int __iscsi_target_login_thread(struct iscsi_np *np)
{
u8 *buffer, zero_tsih = 0;
- int ret = 0, rc, stop;
- struct iscsi_conn *conn = NULL;
+ int ret = 0, rc;
+ struct iscsit_conn *conn = NULL;
struct iscsi_login *login;
struct iscsi_portal_group *tpg = NULL;
struct iscsi_login_req *pdu;
+ struct iscsi_tpg_np *tpg_np;
+ bool new_sess = false;
flush_signals(current);
spin_lock_bh(&np->np_thread_lock);
- if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
+ if (atomic_dec_if_positive(&np->np_reset_count) >= 0) {
np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
+ spin_unlock_bh(&np->np_thread_lock);
complete(&np->np_restart_comp);
+ return 1;
+ } else if (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN) {
+ spin_unlock_bh(&np->np_thread_lock);
+ goto exit;
} else {
np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
}
spin_unlock_bh(&np->np_thread_lock);
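
atomic_dec_if_positive() decrements only when the counter is greater than zero and returns the would-be new value, so a result >= 0 means this thread consumed exactly one pending reset request:

	/* np_reset_count is a budget of pending restarts. */
	if (atomic_dec_if_positive(&np->np_reset_count) >= 0) {
		/* was > 0: claimed one reset, handle it */
	} else {
		/* was already 0: counter untouched, nothing pending */
	}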
- conn = kzalloc(sizeof(struct iscsi_conn), GFP_KERNEL);
+ conn = iscsit_alloc_conn(np);
if (!conn) {
- pr_err("Could not allocate memory for"
- " new connection\n");
/* Get another socket */
return 1;
}
- pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
- conn->conn_state = TARG_CONN_STATE_FREE;
-
- if (iscsit_conn_set_transport(conn, np->np_transport) < 0) {
- kfree(conn);
- return 1;
- }
rc = np->np_transport->iscsit_accept_np(np, conn);
if (rc == -ENOSYS) {
complete(&np->np_restart_comp);
- iscsit_put_transport(conn->conn_transport);
- kfree(conn);
- conn = NULL;
+ iscsit_free_conn(conn);
goto exit;
} else if (rc < 0) {
spin_lock_bh(&np->np_thread_lock);
- if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
+ if (atomic_dec_if_positive(&np->np_reset_count) >= 0) {
+ np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
spin_unlock_bh(&np->np_thread_lock);
complete(&np->np_restart_comp);
- if (ret == -ENODEV) {
- iscsit_put_transport(conn->conn_transport);
- kfree(conn);
- conn = NULL;
- goto out;
- }
+ iscsit_free_conn(conn);
/* Get another socket */
return 1;
}
spin_unlock_bh(&np->np_thread_lock);
- iscsit_put_transport(conn->conn_transport);
- kfree(conn);
- conn = NULL;
- goto out;
+ iscsit_free_conn(conn);
+ return 1;
}
/*
* Perform the remaining iSCSI connection initialization items..
@@ -1186,7 +1204,7 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
goto new_sess_out;
}
- iscsi_start_login_thread_timer(np);
+ iscsit_start_login_timer(conn, current);
pr_debug("Moving to TARG_CONN_STATE_XPT_UP.\n");
conn->conn_state = TARG_CONN_STATE_XPT_UP;
@@ -1210,8 +1228,8 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
spin_lock_bh(&np->np_thread_lock);
if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
spin_unlock_bh(&np->np_thread_lock);
- pr_err("iSCSI Network Portal on %s:%hu currently not"
- " active.\n", np->np_ip, np->np_port);
+ pr_err("iSCSI Network Portal on %pISpc currently not"
+ " active.\n", &np->np_sockaddr);
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
goto new_sess_out;
@@ -1220,9 +1238,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
conn->network_transport = np->np_network_transport;
- pr_debug("Received iSCSI login request from %s on %s Network"
- " Portal %s:%hu\n", conn->login_ip, np->np_transport->name,
- conn->local_ip, conn->local_port);
+ pr_debug("Received iSCSI login request from %pISpc on %s Network"
+ " Portal %pISpc\n", &conn->login_sockaddr, np->np_transport->name,
+ &conn->local_sockaddr);
pr_debug("Moving to TARG_CONN_STATE_IN_LOGIN.\n");
conn->conn_state = TARG_CONN_STATE_IN_LOGIN;
@@ -1265,10 +1283,15 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
tpg = conn->tpg;
goto new_sess_out;
}
+ login->zero_tsih = zero_tsih;
+
+ if (conn->sess)
+ conn->sess->se_sess->sup_prot_ops =
+ conn->conn_transport->iscsit_get_sup_prot_ops(conn);
tpg = conn->tpg;
if (!tpg) {
- pr_err("Unable to locate struct iscsi_conn->tpg\n");
+ pr_err("Unable to locate struct iscsit_conn->tpg\n");
goto new_sess_out;
}
@@ -1280,114 +1303,51 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
goto old_sess_out;
}
- if (iscsi_target_start_negotiation(login, conn) < 0)
- goto new_sess_out;
-
- if (!conn->sess) {
- pr_err("struct iscsi_conn session pointer is NULL!\n");
- goto new_sess_out;
+ if (conn->conn_transport->iscsit_validate_params) {
+ ret = conn->conn_transport->iscsit_validate_params(conn);
+ if (ret < 0) {
+ if (zero_tsih)
+ goto new_sess_out;
+ else
+ goto old_sess_out;
+ }
}
- iscsi_stop_login_thread_timer(np);
-
- if (signal_pending(current))
+ ret = iscsi_target_start_negotiation(login, conn);
+ if (ret < 0)
goto new_sess_out;
- ret = iscsi_post_login_handler(np, conn, zero_tsih);
+ if (ret == 1) {
+ tpg_np = conn->tpg_np;
- if (ret < 0)
- goto new_sess_out;
+ iscsi_post_login_handler(np, conn, zero_tsih);
+ iscsit_deaccess_np(np, tpg, tpg_np);
+ }
- iscsit_deaccess_np(np, tpg);
tpg = NULL;
+ tpg_np = NULL;
/* Get another socket */
return 1;
new_sess_out:
- pr_err("iSCSI Login negotiation failed.\n");
- iscsit_collect_login_stats(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
- ISCSI_LOGIN_STATUS_INIT_ERR);
- if (!zero_tsih || !conn->sess)
- goto old_sess_out;
- if (conn->sess->se_sess)
- transport_free_session(conn->sess->se_sess);
- if (conn->sess->session_index != 0) {
- spin_lock_bh(&sess_idr_lock);
- idr_remove(&sess_idr, conn->sess->session_index);
- spin_unlock_bh(&sess_idr_lock);
- }
- kfree(conn->sess->sess_ops);
- kfree(conn->sess);
+ new_sess = true;
old_sess_out:
- iscsi_stop_login_thread_timer(np);
- /*
- * If login negotiation fails check if the Time2Retain timer
- * needs to be restarted.
- */
- if (!zero_tsih && conn->sess) {
- spin_lock_bh(&conn->sess->conn_lock);
- if (conn->sess->session_state == TARG_SESS_STATE_FAILED) {
- struct se_portal_group *se_tpg =
- &ISCSI_TPG_C(conn)->tpg_se_tpg;
-
- atomic_set(&conn->sess->session_continuation, 0);
- spin_unlock_bh(&conn->sess->conn_lock);
- spin_lock_bh(&se_tpg->session_lock);
- iscsit_start_time2retain_handler(conn->sess);
- spin_unlock_bh(&se_tpg->session_lock);
- } else
- spin_unlock_bh(&conn->sess->conn_lock);
- iscsit_dec_session_usage_count(conn->sess);
- }
-
- if (!IS_ERR(conn->conn_rx_hash.tfm))
- crypto_free_hash(conn->conn_rx_hash.tfm);
- if (!IS_ERR(conn->conn_tx_hash.tfm))
- crypto_free_hash(conn->conn_tx_hash.tfm);
-
- if (conn->conn_cpumask)
- free_cpumask_var(conn->conn_cpumask);
-
- kfree(conn->conn_ops);
-
- if (conn->param_list) {
- iscsi_release_param_list(conn->param_list);
- conn->param_list = NULL;
- }
- iscsi_target_nego_release(conn);
-
- if (conn->sock) {
- sock_release(conn->sock);
- conn->sock = NULL;
- }
-
- if (conn->conn_transport->iscsit_free_conn)
- conn->conn_transport->iscsit_free_conn(conn);
-
- iscsit_put_transport(conn->conn_transport);
-
- kfree(conn);
+ iscsit_stop_login_timer(conn);
+ tpg_np = conn->tpg_np;
+ iscsi_target_login_sess_out(conn, zero_tsih, new_sess);
+ new_sess = false;
if (tpg) {
- iscsit_deaccess_np(np, tpg);
+ iscsit_deaccess_np(np, tpg, tpg_np);
tpg = NULL;
+ tpg_np = NULL;
}
-out:
- stop = kthread_should_stop();
- if (!stop && signal_pending(current)) {
- spin_lock_bh(&np->np_thread_lock);
- stop = (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN);
- spin_unlock_bh(&np->np_thread_lock);
- }
- /* Wait for another socket.. */
- if (!stop)
- return 1;
+ return 1;
+
exit:
- iscsi_stop_login_thread_timer(np);
spin_lock_bh(&np->np_thread_lock);
np->np_thread_state = ISCSI_NP_THREAD_EXIT;
- np->np_thread = NULL;
spin_unlock_bh(&np->np_thread_lock);
return 0;
@@ -1400,7 +1360,7 @@ int iscsi_target_login_thread(void *arg)
allow_signal(SIGINT);
- while (!kthread_should_stop()) {
+ while (1) {
ret = __iscsi_target_login_thread(np);
/*
* We break and exit here unless another sock_accept() call
@@ -1410,5 +1370,9 @@ int iscsi_target_login_thread(void *arg)
break;
}
+ while (!kthread_should_stop()) {
+ msleep(100);
+ }
+
return 0;
}
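
The closing msleep() loop keeps the thread function alive until kthread_stop() is invoked; returning earlier would let kthread_stop() race with the task exiting on its own. The general shape, with a hypothetical do_work():

static int demo_thread(void *arg)
{
	while (do_work(arg) > 0)
		;
	/* Park until whoever owns us calls kthread_stop(). */
	while (!kthread_should_stop())
		msleep(100);
	return 0;
}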
diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h
index 63efd2878451..03c7d695d58f 100644
--- a/drivers/target/iscsi/iscsi_target_login.h
+++ b/drivers/target/iscsi/iscsi_target_login.h
@@ -1,18 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ISCSI_TARGET_LOGIN_H
#define ISCSI_TARGET_LOGIN_H
-extern int iscsi_login_setup_crypto(struct iscsi_conn *);
-extern int iscsi_check_for_session_reinstatement(struct iscsi_conn *);
-extern int iscsi_login_post_auth_non_zero_tsih(struct iscsi_conn *, u16, u32);
+#include <linux/types.h>
+
+struct iscsit_conn;
+struct iscsi_login;
+struct iscsi_np;
+struct sockaddr_storage;
+
+extern int iscsi_check_for_session_reinstatement(struct iscsit_conn *);
+extern int iscsi_login_post_auth_non_zero_tsih(struct iscsit_conn *, u16, u32);
extern int iscsit_setup_np(struct iscsi_np *,
- struct __kernel_sockaddr_storage *);
+ struct sockaddr_storage *);
extern int iscsi_target_setup_login_socket(struct iscsi_np *,
- struct __kernel_sockaddr_storage *);
-extern int iscsit_accept_np(struct iscsi_np *, struct iscsi_conn *);
-extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
-extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32);
-extern void iscsit_free_conn(struct iscsi_np *, struct iscsi_conn *);
+ struct sockaddr_storage *);
+extern int iscsit_accept_np(struct iscsi_np *, struct iscsit_conn *);
+extern int iscsit_get_login_rx(struct iscsit_conn *, struct iscsi_login *);
+extern int iscsit_put_login_tx(struct iscsit_conn *, struct iscsi_login *, u32);
+extern void iscsit_free_conn(struct iscsit_conn *);
+extern int iscsit_start_kthreads(struct iscsit_conn *);
+extern void iscsi_post_login_handler(struct iscsi_np *, struct iscsit_conn *, u8);
+extern void iscsi_target_login_sess_out(struct iscsit_conn *, bool, bool);
extern int iscsi_target_login_thread(void *);
-extern int iscsi_login_disable_FIM_keys(struct iscsi_param_list *, struct iscsi_conn *);
#endif /*** ISCSI_TARGET_LOGIN_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index c4675b4ceb49..832588f21f91 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -1,30 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* This file contains main functions related to iSCSI Parameter negotiation.
*
- * © Copyright 2007-2011 RisingTide Systems LLC.
- *
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ * (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
******************************************************************************/
#include <linux/ctype.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <linux/sched/signal.h>
+#include <net/sock.h>
+#include <trace/events/sock.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_parameters.h"
#include "iscsi_target_login.h"
#include "iscsi_target_nego.h"
@@ -34,7 +29,6 @@
#include "iscsi_target_auth.h"
#define MAX_LOGIN_PDUS 7
-#define TEXT_LEN 4096
void convert_null_to_semi(char *buf, int len)
{
@@ -69,31 +63,34 @@ int extract_param(
int len;
if (!in_buf || !pattern || !out_buf || !type)
- return -1;
+ return -EINVAL;
ptr = strstr(in_buf, pattern);
if (!ptr)
- return -1;
+ return -ENOENT;
ptr = strstr(ptr, "=");
if (!ptr)
- return -1;
+ return -EINVAL;
ptr += 1;
if (*ptr == '0' && (*(ptr+1) == 'x' || *(ptr+1) == 'X')) {
ptr += 2; /* skip 0x */
*type = HEX;
+ } else if (*ptr == '0' && (*(ptr+1) == 'b' || *(ptr+1) == 'B')) {
+ ptr += 2; /* skip 0b */
+ *type = BASE64;
} else
*type = DECIMAL;
len = strlen_semi(ptr);
if (len < 0)
- return -1;
+ return -EINVAL;
- if (len > max_length) {
+ if (len >= max_length) {
pr_err("Length of input: %d exceeds max_length:"
" %d\n", len, max_length);
- return -1;
+ return -EINVAL;
}
memcpy(out_buf, ptr, len);
out_buf[len] = '\0';
@@ -101,55 +98,44 @@ int extract_param(
return 0;
}
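
extract_param() now returns distinct errno values and recognizes a 0b prefix for base64-encoded values (used by large CHAP keys). A sketch of typical use from the CHAP code, assuming the usual buffer names and sizes:

	unsigned char chap_n[MAX_CHAP_N_SIZE];
	unsigned char type;

	/* Pull "CHAP_N=<name>" out of the login payload; *type
	 * reports whether the value was HEX, BASE64 or DECIMAL. */
	if (extract_param(nego_buf, "CHAP_N", MAX_CHAP_N_SIZE,
			  chap_n, &type) < 0)
		return -1;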
+static struct iscsi_node_auth *iscsi_get_node_auth(struct iscsit_conn *conn)
+{
+ struct iscsi_portal_group *tpg;
+ struct iscsi_node_acl *nacl;
+ struct se_node_acl *se_nacl;
+
+ if (conn->sess->sess_ops->SessionType)
+ return &iscsit_global->discovery_acl.node_auth;
+
+ se_nacl = conn->sess->se_sess->se_node_acl;
+ if (!se_nacl) {
+ pr_err("Unable to locate struct se_node_acl for CHAP auth\n");
+ return NULL;
+ }
+
+ if (se_nacl->dynamic_node_acl) {
+ tpg = to_iscsi_tpg(se_nacl->se_tpg);
+ return &tpg->tpg_demo_auth;
+ }
+
+ nacl = to_iscsi_nacl(se_nacl);
+
+ return &nacl->node_auth;
+}
+
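to_iscsi_tpg() and to_iscsi_nacl() are thin container_of() wrappers replacing the open-coded container_of() calls removed below; the shape of such a helper, sketched here:

static inline struct iscsi_portal_group *
demo_to_iscsi_tpg(struct se_portal_group *se_tpg)
{
	/* Recover the wrapping structure from its embedded member. */
	return container_of(se_tpg, struct iscsi_portal_group,
			    tpg_se_tpg);
}
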
static u32 iscsi_handle_authentication(
- struct iscsi_conn *conn,
+ struct iscsit_conn *conn,
char *in_buf,
char *out_buf,
int in_length,
int *out_length,
unsigned char *authtype)
{
- struct iscsi_session *sess = conn->sess;
struct iscsi_node_auth *auth;
- struct iscsi_node_acl *iscsi_nacl;
- struct iscsi_portal_group *iscsi_tpg;
- struct se_node_acl *se_nacl;
- if (!sess->sess_ops->SessionType) {
- /*
- * For SessionType=Normal
- */
- se_nacl = conn->sess->se_sess->se_node_acl;
- if (!se_nacl) {
- pr_err("Unable to locate struct se_node_acl for"
- " CHAP auth\n");
- return -1;
- }
- iscsi_nacl = container_of(se_nacl, struct iscsi_node_acl,
- se_node_acl);
- if (!iscsi_nacl) {
- pr_err("Unable to locate struct iscsi_node_acl for"
- " CHAP auth\n");
- return -1;
- }
-
- if (se_nacl->dynamic_node_acl) {
- iscsi_tpg = container_of(se_nacl->se_tpg,
- struct iscsi_portal_group, tpg_se_tpg);
-
- auth = &iscsi_tpg->tpg_demo_auth;
- } else {
- iscsi_nacl = container_of(se_nacl, struct iscsi_node_acl,
- se_node_acl);
-
- auth = ISCSI_NODE_AUTH(iscsi_nacl);
- }
- } else {
- /*
- * For SessionType=Discovery
- */
- auth = &iscsit_global->discovery_acl.node_auth;
- }
+ auth = iscsi_get_node_auth(conn);
+ if (!auth)
+ return -1;
if (strstr("CHAP", authtype))
strcpy(conn->sess->auth_type, "CHAP");
@@ -158,31 +144,20 @@ static u32 iscsi_handle_authentication(
if (strstr("None", authtype))
return 1;
-#ifdef CANSRP
- else if (strstr("SRP", authtype))
- return srp_main_loop(conn, auth, in_buf, out_buf,
- &in_length, out_length);
-#endif
else if (strstr("CHAP", authtype))
return chap_main_loop(conn, auth, in_buf, out_buf,
&in_length, out_length);
- else if (strstr("SPKM1", authtype))
- return 2;
- else if (strstr("SPKM2", authtype))
- return 2;
- else if (strstr("KRB5", authtype))
- return 2;
- else
- return 2;
+ /* SRP, SPKM1, SPKM2 and KRB5 are unsupported */
+ return 2;
}
-static void iscsi_remove_failed_auth_entry(struct iscsi_conn *conn)
+static void iscsi_remove_failed_auth_entry(struct iscsit_conn *conn)
{
kfree(conn->auth_protocol);
}
int iscsi_target_check_login_request(
- struct iscsi_conn *conn,
+ struct iscsit_conn *conn,
struct iscsi_login *login)
{
int req_csg, req_nsg;
@@ -237,7 +212,7 @@ int iscsi_target_check_login_request(
if ((login_req->max_version != login->version_max) ||
(login_req->min_version != login->version_min)) {
- pr_err("Login request changed Version Max/Nin"
+ pr_err("Login request changed Version Max/Min"
" unexpectedly to 0x%02x/0x%02x, protocol error\n",
login_req->max_version, login_req->min_version);
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
@@ -270,9 +245,10 @@ int iscsi_target_check_login_request(
return 0;
}
+EXPORT_SYMBOL(iscsi_target_check_login_request);
static int iscsi_target_check_first_request(
- struct iscsi_conn *conn,
+ struct iscsit_conn *conn,
struct iscsi_login *login)
{
struct iscsi_param *param = NULL;
@@ -339,10 +315,9 @@ static int iscsi_target_check_first_request(
return 0;
}
-static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_login *login)
+static int iscsi_target_do_tx_login_io(struct iscsit_conn *conn, struct iscsi_login *login)
{
u32 padding = 0;
- struct iscsi_session *sess = conn->sess;
struct iscsi_login_rsp *login_rsp;
login_rsp = (struct iscsi_login_rsp *) login->rsp;
@@ -354,7 +329,7 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log
login_rsp->itt = login->init_task_tag;
login_rsp->statsn = cpu_to_be32(conn->stat_sn++);
login_rsp->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
- login_rsp->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
+ login_rsp->max_cmdsn = cpu_to_be32((u32) atomic_read(&conn->sess->max_cmd_sn));
pr_debug("Sending Login Response, Flags: 0x%02x, ITT: 0x%08x,"
" ExpCmdSN; 0x%08x, MaxCmdSN: 0x%08x, StatSN: 0x%08x, Length:"
@@ -363,29 +338,394 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log
ntohl(login_rsp->statsn), login->rsp_length);
padding = ((-login->rsp_length) & 3);
+ /*
+ * Before sending the last login response containing the transition
+ * bit for full-feature-phase, go ahead and start up TX/RX threads
+ * now to avoid potential resource allocation failures after the
+ * final login response has been sent.
+ */
+ if (login->login_complete) {
+ int rc = iscsit_start_kthreads(conn);
+ if (rc) {
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ return -1;
+ }
+ }
if (conn->conn_transport->iscsit_put_login_tx(conn, login,
login->rsp_length + padding) < 0)
- return -1;
+ goto err;
login->rsp_length = 0;
- mutex_lock(&sess->cmdsn_mutex);
- login_rsp->exp_cmdsn = cpu_to_be32(sess->exp_cmd_sn);
- login_rsp->max_cmdsn = cpu_to_be32(sess->max_cmd_sn);
- mutex_unlock(&sess->cmdsn_mutex);
return 0;
+
+err:
+ if (login->login_complete) {
+ if (conn->rx_thread && conn->rx_thread_active) {
+ send_sig(SIGINT, conn->rx_thread, 1);
+ complete(&conn->rx_login_comp);
+ kthread_stop(conn->rx_thread);
+ }
+ if (conn->tx_thread && conn->tx_thread_active) {
+ send_sig(SIGINT, conn->tx_thread, 1);
+ kthread_stop(conn->tx_thread);
+ }
+ spin_lock(&iscsit_global->ts_bitmap_lock);
+ bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id,
+ get_order(1));
+ spin_unlock(&iscsit_global->ts_bitmap_lock);
+ }
+ return -1;
}
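
The error path above is the inverse of iscsit_start_kthreads(); a condensed pairing as a reading aid, based on the mainline iscsit_start_kthreads() of this era (verify against your tree):

    /*
     * iscsit_start_kthreads()        err path above
     * -----------------------        ---------------------------------
     * reserve conn->bitmap_id    ->  bitmap_release_region()
     * spawn conn->tx_thread      ->  send_sig(SIGINT) + kthread_stop()
     * spawn conn->rx_thread      ->  send_sig(SIGINT) +
     *                                complete(&conn->rx_login_comp) +
     *                                kthread_stop()
     *
     * rx_login_comp must be completed first because the RX thread
     * blocks on it until the final login response has gone out.
     */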
-static int iscsi_target_do_login_io(struct iscsi_conn *conn, struct iscsi_login *login)
+static void iscsi_target_sk_data_ready(struct sock *sk)
{
- if (iscsi_target_do_tx_login_io(conn, login) < 0)
- return -1;
+ struct iscsit_conn *conn = sk->sk_user_data;
+ bool rc;
- if (conn->conn_transport->iscsit_get_login_rx(conn, login) < 0)
- return -1;
+ trace_sk_data_ready(sk);
+ pr_debug("Entering iscsi_target_sk_data_ready: conn: %p\n", conn);
- return 0;
+ write_lock_bh(&sk->sk_callback_lock);
+ if (!sk->sk_user_data) {
+ write_unlock_bh(&sk->sk_callback_lock);
+ return;
+ }
+ if (!test_bit(LOGIN_FLAGS_READY, &conn->login_flags)) {
+ write_unlock_bh(&sk->sk_callback_lock);
+ pr_debug("Got LOGIN_FLAGS_READY=0, conn: %p >>>>\n", conn);
+ return;
+ }
+ if (test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) {
+ write_unlock_bh(&sk->sk_callback_lock);
+ pr_debug("Got LOGIN_FLAGS_CLOSED=1, conn: %p >>>>\n", conn);
+ return;
+ }
+ if (test_and_set_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags)) {
+ write_unlock_bh(&sk->sk_callback_lock);
+ pr_debug("Got LOGIN_FLAGS_READ_ACTIVE=1, conn: %p >>>>\n", conn);
+ if (iscsi_target_sk_data_ready == conn->orig_data_ready)
+ return;
+ conn->orig_data_ready(sk);
+ return;
+ }
+
+ rc = schedule_delayed_work(&conn->login_work, 0);
+ if (!rc) {
+ pr_debug("iscsi_target_sk_data_ready, schedule_delayed_work"
+ " got false\n");
+ }
+ write_unlock_bh(&sk->sk_callback_lock);
+}
+
+static void iscsi_target_sk_state_change(struct sock *);
+
+static void iscsi_target_set_sock_callbacks(struct iscsit_conn *conn)
+{
+ struct sock *sk;
+
+ if (!conn->sock)
+ return;
+
+ sk = conn->sock->sk;
+ pr_debug("Entering iscsi_target_set_sock_callbacks: conn: %p\n", conn);
+
+ write_lock_bh(&sk->sk_callback_lock);
+ sk->sk_user_data = conn;
+ conn->orig_data_ready = sk->sk_data_ready;
+ conn->orig_state_change = sk->sk_state_change;
+ sk->sk_data_ready = iscsi_target_sk_data_ready;
+ sk->sk_state_change = iscsi_target_sk_state_change;
+ write_unlock_bh(&sk->sk_callback_lock);
+
+ sk->sk_sndtimeo = TA_LOGIN_TIMEOUT * HZ;
+ sk->sk_rcvtimeo = TA_LOGIN_TIMEOUT * HZ;
+}
+
+static void iscsi_target_restore_sock_callbacks(struct iscsit_conn *conn)
+{
+ struct sock *sk;
+
+ if (!conn->sock)
+ return;
+
+ sk = conn->sock->sk;
+ pr_debug("Entering iscsi_target_restore_sock_callbacks: conn: %p\n", conn);
+
+ write_lock_bh(&sk->sk_callback_lock);
+ if (!sk->sk_user_data) {
+ write_unlock_bh(&sk->sk_callback_lock);
+ return;
+ }
+ sk->sk_user_data = NULL;
+ sk->sk_data_ready = conn->orig_data_ready;
+ sk->sk_state_change = conn->orig_state_change;
+ write_unlock_bh(&sk->sk_callback_lock);
+
+ sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
+ sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
+}
+
+static int iscsi_target_do_login(struct iscsit_conn *, struct iscsi_login *);
+
+static bool __iscsi_target_sk_check_close(struct sock *sk)
+{
+ if (sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) {
+ pr_debug("__iscsi_target_sk_check_close: TCP_CLOSE_WAIT|TCP_CLOSE,"
+			" returning TRUE\n");
+ return true;
+ }
+ return false;
+}
+
+static bool iscsi_target_sk_check_close(struct iscsit_conn *conn)
+{
+ bool state = false;
+
+ if (conn->sock) {
+ struct sock *sk = conn->sock->sk;
+
+ read_lock_bh(&sk->sk_callback_lock);
+ state = (__iscsi_target_sk_check_close(sk) ||
+ test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags));
+ read_unlock_bh(&sk->sk_callback_lock);
+ }
+ return state;
+}
+
+static bool iscsi_target_sk_check_flag(struct iscsit_conn *conn, unsigned int flag)
+{
+ bool state = false;
+
+ if (conn->sock) {
+ struct sock *sk = conn->sock->sk;
+
+ read_lock_bh(&sk->sk_callback_lock);
+ state = test_bit(flag, &conn->login_flags);
+ read_unlock_bh(&sk->sk_callback_lock);
+ }
+ return state;
+}
+
+static bool iscsi_target_sk_check_and_clear(struct iscsit_conn *conn, unsigned int flag)
+{
+ bool state = false;
+
+ if (conn->sock) {
+ struct sock *sk = conn->sock->sk;
+
+ write_lock_bh(&sk->sk_callback_lock);
+ state = (__iscsi_target_sk_check_close(sk) ||
+ test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags));
+ if (!state)
+ clear_bit(flag, &conn->login_flags);
+ write_unlock_bh(&sk->sk_callback_lock);
+ }
+ return state;
+}
+
+static void iscsi_target_login_drop(struct iscsit_conn *conn, struct iscsi_login *login)
+{
+ bool zero_tsih = login->zero_tsih;
+
+ iscsi_remove_failed_auth_entry(conn);
+ iscsi_target_nego_release(conn);
+ iscsi_target_login_sess_out(conn, zero_tsih, true);
+}
+
+static void iscsi_target_do_login_rx(struct work_struct *work)
+{
+ struct iscsit_conn *conn = container_of(work,
+ struct iscsit_conn, login_work.work);
+ struct iscsi_login *login = conn->login;
+ struct iscsi_np *np = login->np;
+ struct iscsi_portal_group *tpg = conn->tpg;
+ struct iscsi_tpg_np *tpg_np = conn->tpg_np;
+ int rc, zero_tsih = login->zero_tsih;
+ bool state;
+
+ pr_debug("entering iscsi_target_do_login_rx, conn: %p, %s:%d\n",
+ conn, current->comm, current->pid);
+
+ spin_lock(&conn->login_worker_lock);
+ set_bit(LOGIN_FLAGS_WORKER_RUNNING, &conn->login_flags);
+ spin_unlock(&conn->login_worker_lock);
+ /*
+ * If iscsi_target_do_login_rx() has been invoked by ->sk_data_ready()
+ * before initial PDU processing in iscsi_target_start_negotiation()
+ * has completed, go ahead and retry until it's cleared.
+ *
+ * Otherwise if the TCP connection drops while this is occurring,
+ * iscsi_target_start_negotiation() will detect the failure, call
+	 * cancel_delayed_work_sync(&conn->login_work), and clean up the
+ * remaining iscsi connection resources from iscsi_np process context.
+ */
+ if (iscsi_target_sk_check_flag(conn, LOGIN_FLAGS_INITIAL_PDU)) {
+ schedule_delayed_work(&conn->login_work, msecs_to_jiffies(10));
+ return;
+ }
+
+ spin_lock(&tpg->tpg_state_lock);
+ state = (tpg->tpg_state == TPG_STATE_ACTIVE);
+ spin_unlock(&tpg->tpg_state_lock);
+
+ if (!state) {
+ pr_debug("iscsi_target_do_login_rx: tpg_state != TPG_STATE_ACTIVE\n");
+ goto err;
+ }
+
+ if (iscsi_target_sk_check_close(conn)) {
+ pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n");
+ goto err;
+ }
+
+ allow_signal(SIGINT);
+ rc = iscsit_set_login_timer_kworker(conn, current);
+ if (rc < 0) {
+ /* The login timer has already expired */
+ pr_debug("iscsi_target_do_login_rx, login failed\n");
+ goto err;
+ }
+
+ rc = conn->conn_transport->iscsit_get_login_rx(conn, login);
+ flush_signals(current);
+
+ if (rc < 0)
+ goto err;
+
+ pr_debug("iscsi_target_do_login_rx after rx_login_io, %p, %s:%d\n",
+ conn, current->comm, current->pid);
+
+ /*
+ * LOGIN_FLAGS_READ_ACTIVE is cleared so that sk_data_ready
+ * could be triggered again after this.
+ *
+ * LOGIN_FLAGS_WRITE_ACTIVE is cleared after we successfully
+	 * process a login PDU, so that sk_state_change can do login
+	 * cleanup as needed if the socket is closed. While a delayed work
+	 * is ongoing (LOGIN_FLAGS_WRITE_ACTIVE or LOGIN_FLAGS_READ_ACTIVE),
+	 * sk_state_change leaves the cleanup to that work; otherwise it
+	 * schedules a delayed work to do the cleanup.
+ */
+ if (conn->sock) {
+ struct sock *sk = conn->sock->sk;
+
+ write_lock_bh(&sk->sk_callback_lock);
+ if (!test_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags)) {
+ clear_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags);
+ set_bit(LOGIN_FLAGS_WRITE_ACTIVE, &conn->login_flags);
+ }
+ write_unlock_bh(&sk->sk_callback_lock);
+ }
+
+ rc = iscsi_target_do_login(conn, login);
+ if (rc < 0) {
+ goto err;
+ } else if (!rc) {
+ if (iscsi_target_sk_check_and_clear(conn,
+ LOGIN_FLAGS_WRITE_ACTIVE))
+ goto err;
+
+ /*
+ * Set the login timer thread pointer to NULL to prevent the
+ * login process from getting stuck if the initiator
+ * stops sending data.
+ */
+ rc = iscsit_set_login_timer_kworker(conn, NULL);
+ if (rc < 0)
+ goto err;
+ } else if (rc == 1) {
+ iscsit_stop_login_timer(conn);
+ cancel_delayed_work(&conn->login_work);
+ iscsi_target_nego_release(conn);
+ iscsi_post_login_handler(np, conn, zero_tsih);
+ iscsit_deaccess_np(np, tpg, tpg_np);
+ }
+ return;
+
+err:
+ iscsi_target_restore_sock_callbacks(conn);
+ iscsit_stop_login_timer(conn);
+ cancel_delayed_work(&conn->login_work);
+ iscsi_target_login_drop(conn, login);
+ iscsit_deaccess_np(np, tpg, tpg_np);
+}
+
+static void iscsi_target_sk_state_change(struct sock *sk)
+{
+ struct iscsit_conn *conn;
+ void (*orig_state_change)(struct sock *);
+ bool state;
+
+ pr_debug("Entering iscsi_target_sk_state_change\n");
+
+ write_lock_bh(&sk->sk_callback_lock);
+ conn = sk->sk_user_data;
+ if (!conn) {
+ write_unlock_bh(&sk->sk_callback_lock);
+ return;
+ }
+ orig_state_change = conn->orig_state_change;
+
+ if (!test_bit(LOGIN_FLAGS_READY, &conn->login_flags)) {
+ pr_debug("Got LOGIN_FLAGS_READY=0 sk_state_change conn: %p\n",
+ conn);
+ write_unlock_bh(&sk->sk_callback_lock);
+ orig_state_change(sk);
+ return;
+ }
+ state = __iscsi_target_sk_check_close(sk);
+	pr_debug("__iscsi_target_sk_check_close: state: %d\n", state);
+
+ if (test_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags) ||
+ test_bit(LOGIN_FLAGS_WRITE_ACTIVE, &conn->login_flags)) {
+ pr_debug("Got LOGIN_FLAGS_{READ|WRITE}_ACTIVE=1"
+ " sk_state_change conn: %p\n", conn);
+ if (state)
+ set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags);
+ write_unlock_bh(&sk->sk_callback_lock);
+ orig_state_change(sk);
+ return;
+ }
+ if (test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) {
+ pr_debug("Got LOGIN_FLAGS_CLOSED=1 sk_state_change conn: %p\n",
+ conn);
+ write_unlock_bh(&sk->sk_callback_lock);
+ orig_state_change(sk);
+ return;
+ }
+ /*
+ * If the TCP connection has dropped, go ahead and set LOGIN_FLAGS_CLOSED,
+ * but only queue conn->login_work -> iscsi_target_do_login_rx()
+ * processing if LOGIN_FLAGS_INITIAL_PDU has already been cleared.
+ *
+ * When iscsi_target_do_login_rx() runs, iscsi_target_sk_check_close()
+ * will detect the dropped TCP connection from delayed workqueue context.
+ *
+ * If LOGIN_FLAGS_INITIAL_PDU is still set, which means the initial
+ * iscsi_target_start_negotiation() is running, iscsi_target_do_login()
+ * via iscsi_target_sk_check_close() or iscsi_target_start_negotiation()
+ * via iscsi_target_sk_check_and_clear() is responsible for detecting the
+ * dropped TCP connection in iscsi_np process context, and cleaning up
+ * the remaining iscsi connection resources.
+ */
+ if (state) {
+ pr_debug("iscsi_target_sk_state_change got failed state\n");
+ set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags);
+ state = test_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags);
+ write_unlock_bh(&sk->sk_callback_lock);
+
+ orig_state_change(sk);
+
+ if (!state)
+ schedule_delayed_work(&conn->login_work, 0);
+ return;
+ }
+ write_unlock_bh(&sk->sk_callback_lock);
+
+ orig_state_change(sk);
}
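
Taken together, the socket callbacks and the login worker coordinate entirely through conn->login_flags. As a reading aid (the authoritative bit definitions live in iscsi_target_core.h), the bits are used in this file roughly as follows:

    /*
     * LOGIN_FLAGS_READY           socket callbacks installed; login may
     *                             proceed via sk_data_ready
     * LOGIN_FLAGS_INITIAL_PDU     initial PDU still being handled in
     *                             iscsi_target_start_negotiation()
     * LOGIN_FLAGS_READ_ACTIVE     login_work is receiving a PDU;
     *                             sk_data_ready must not requeue it
     * LOGIN_FLAGS_WRITE_ACTIVE    a login PDU is being processed and a
     *                             response sent
     * LOGIN_FLAGS_CLOSED          TCP connection dropped; whichever
     *                             context holds an *_ACTIVE bit owns
     *                             the cleanup
     * LOGIN_FLAGS_WORKER_RUNNING  login_work has started running at
     *                             least once
     */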
/*
@@ -394,7 +734,7 @@ static int iscsi_target_do_login_io(struct iscsi_conn *conn, struct iscsi_login
* ISID/TSIH combinations.
*/
static int iscsi_target_check_for_existing_instances(
- struct iscsi_conn *conn,
+ struct iscsit_conn *conn,
struct iscsi_login *login)
{
if (login->checked_for_existing)
@@ -410,7 +750,7 @@ static int iscsi_target_check_for_existing_instances(
}
static int iscsi_target_do_authentication(
- struct iscsi_conn *conn,
+ struct iscsit_conn *conn,
struct iscsi_login *login)
{
int authret;
@@ -468,8 +808,44 @@ static int iscsi_target_do_authentication(
return 0;
}
+bool iscsi_conn_auth_required(struct iscsit_conn *conn)
+{
+ struct iscsi_node_acl *nacl;
+ struct se_node_acl *se_nacl;
+
+ if (conn->sess->sess_ops->SessionType) {
+ /*
+ * For SessionType=Discovery
+ */
+ return conn->tpg->tpg_attrib.authentication;
+ }
+ /*
+ * For SessionType=Normal
+ */
+ se_nacl = conn->sess->se_sess->se_node_acl;
+ if (!se_nacl) {
+ pr_debug("Unknown ACL is trying to connect\n");
+ return true;
+ }
+
+ if (se_nacl->dynamic_node_acl) {
+ pr_debug("Dynamic ACL %s is trying to connect\n",
+ se_nacl->initiatorname);
+ return conn->tpg->tpg_attrib.authentication;
+ }
+
+ pr_debug("Known ACL %s is trying to connect\n",
+ se_nacl->initiatorname);
+
+ nacl = to_iscsi_nacl(se_nacl);
+ if (nacl->node_attrib.authentication == NA_AUTHENTICATION_INHERITED)
+ return conn->tpg->tpg_attrib.authentication;
+
+ return nacl->node_attrib.authentication;
+}
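
In summary, the helper above resolves whether authentication is mandatory in the following order (derived directly from the code, not a separate spec):

    /*
     * SessionType=Discovery       -> tpg_attrib.authentication
     * Normal, no se_node_acl      -> require auth (fail safe)
     * Normal, dynamic (demo) ACL  -> tpg_attrib.authentication
     * Normal, explicit ACL        -> node_attrib.authentication,
     *                                falling back to the TPG attribute
     *                                when set to
     *                                NA_AUTHENTICATION_INHERITED
     */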
+
static int iscsi_target_handle_csg_zero(
- struct iscsi_conn *conn,
+ struct iscsit_conn *conn,
struct iscsi_login *login)
{
int ret;
@@ -506,6 +882,12 @@ static int iscsi_target_handle_csg_zero(
}
goto do_auth;
+ } else if (!payload_length) {
+ pr_err("Initiator sent zero length security payload,"
+ " login failed\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_AUTH_FAILED);
+ return -1;
}
if (login->first_request)
@@ -517,27 +899,32 @@ static int iscsi_target_handle_csg_zero(
SENDER_TARGET,
login->rsp_buf,
&login->rsp_length,
- conn->param_list);
+ conn->param_list,
+ conn->tpg->tpg_attrib.login_keys_workaround);
if (ret < 0)
return -1;
if (!iscsi_check_negotiated_keys(conn->param_list)) {
- if (ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication &&
- !strncmp(param->value, NONE, 4)) {
- pr_err("Initiator sent AuthMethod=None but"
- " Target is enforcing iSCSI Authentication,"
- " login failed.\n");
- iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
- ISCSI_LOGIN_STATUS_AUTH_FAILED);
- return -1;
- }
+ bool auth_required = iscsi_conn_auth_required(conn);
+
+ if (auth_required) {
+ if (!strncmp(param->value, NONE, 4)) {
+ pr_err("Initiator sent AuthMethod=None but"
+ " Target is enforcing iSCSI Authentication,"
+ " login failed.\n");
+ iscsit_tx_login_rsp(conn,
+ ISCSI_STATUS_CLS_INITIATOR_ERR,
+ ISCSI_LOGIN_STATUS_AUTH_FAILED);
+ return -1;
+ }
- if (ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication &&
- !login->auth_complete)
- return 0;
+ if (!login->auth_complete)
+ return 0;
- if (strncmp(param->value, NONE, 4) && !login->auth_complete)
- return 0;
+ if (strncmp(param->value, NONE, 4) &&
+ !login->auth_complete)
+ return 0;
+ }
if ((login_req->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE1) &&
(login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT)) {
@@ -552,7 +939,19 @@ do_auth:
return iscsi_target_do_authentication(conn, login);
}
-static int iscsi_target_handle_csg_one(struct iscsi_conn *conn, struct iscsi_login *login)
+static bool iscsi_conn_authenticated(struct iscsit_conn *conn,
+ struct iscsi_login *login)
+{
+ if (!iscsi_conn_auth_required(conn))
+ return true;
+
+ if (login->auth_complete)
+ return true;
+
+ return false;
+}
+
+static int iscsi_target_handle_csg_one(struct iscsit_conn *conn, struct iscsi_login *login)
{
int ret;
u32 payload_length;
@@ -587,18 +986,18 @@ static int iscsi_target_handle_csg_one(struct iscsi_conn *conn, struct iscsi_log
SENDER_TARGET,
login->rsp_buf,
&login->rsp_length,
- conn->param_list);
+ conn->param_list,
+ conn->tpg->tpg_attrib.login_keys_workaround);
if (ret < 0) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
ISCSI_LOGIN_STATUS_INIT_ERR);
return -1;
}
- if (!login->auth_complete &&
- ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication) {
+ if (!iscsi_conn_authenticated(conn, login)) {
pr_err("Initiator is requesting CSG: 1, has not been"
- " successfully authenticated, and the Target is"
- " enforcing iSCSI Authentication, login failed.\n");
+ " successfully authenticated, and the Target is"
+ " enforcing iSCSI Authentication, login failed.\n");
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
ISCSI_LOGIN_STATUS_AUTH_FAILED);
return -1;
@@ -613,7 +1012,14 @@ static int iscsi_target_handle_csg_one(struct iscsi_conn *conn, struct iscsi_log
return 0;
}
-static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *login)
+/*
+ * RETURN VALUE:
+ *
+ * 1 = Login successful
+ * -1 = Login failed
+ * 0 = More PDU exchanges required
+ */
+static int iscsi_target_do_login(struct iscsit_conn *conn, struct iscsi_login *login)
{
int pdu_count = 0;
struct iscsi_login_req *login_req;
@@ -641,12 +1047,22 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *lo
if (iscsi_target_handle_csg_one(conn, login) < 0)
return -1;
if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) {
+ /*
+ * Check to make sure the TCP connection has not
+ * dropped asynchronously while session reinstatement
+ * was occurring in this kthread context, before
+ * transitioning to full feature phase operation.
+ */
+ if (iscsi_target_sk_check_close(conn))
+ return -1;
+
login->tsih = conn->sess->tsih;
login->login_complete = 1;
+ iscsi_target_restore_sock_callbacks(conn);
if (iscsi_target_do_tx_login_io(conn,
login) < 0)
return -1;
- return 0;
+ return 1;
}
break;
default:
@@ -656,13 +1072,14 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *lo
break;
}
- if (iscsi_target_do_login_io(conn, login) < 0)
+ if (iscsi_target_do_tx_login_io(conn, login) < 0)
return -1;
if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) {
login_rsp->flags &= ~ISCSI_FLAG_LOGIN_TRANSIT;
login_rsp->flags &= ~ISCSI_FLAG_LOGIN_NEXT_STAGE_MASK;
}
+ break;
}
return 0;
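
A condensed view of how the two call sites dispatch on these return values (a sketch distilled from the code in this patch, not a verbatim excerpt):

    /*
     * iscsi_target_start_negotiation() (iscsi_np process context):
     *     0  -> clear LOGIN_FLAGS_INITIAL_PDU, defer further PDU
     *           exchanges to login_work / sk_data_ready
     *    <0  -> restore sock callbacks, drop the failed login
     *
     * iscsi_target_do_login_rx() (delayed workqueue context):
     *     1  -> stop the login timer, iscsi_post_login_handler(),
     *           iscsit_deaccess_np()
     *     0  -> clear LOGIN_FLAGS_WRITE_ACTIVE, re-arm the login timer
     *    <0  -> goto err (restore callbacks, drop the login)
     */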
@@ -688,28 +1105,34 @@ static void iscsi_initiatorname_tolower(
*/
int iscsi_target_locate_portal(
struct iscsi_np *np,
- struct iscsi_conn *conn,
+ struct iscsit_conn *conn,
struct iscsi_login *login)
{
char *i_buf = NULL, *s_buf = NULL, *t_buf = NULL;
char *tmpbuf, *start = NULL, *end = NULL, *key, *value;
- struct iscsi_session *sess = conn->sess;
+ struct iscsit_session *sess = conn->sess;
struct iscsi_tiqn *tiqn;
+ struct iscsi_tpg_np *tpg_np = NULL;
struct iscsi_login_req *login_req;
- u32 payload_length;
- int sessiontype = 0, ret = 0;
+ struct se_node_acl *se_nacl;
+ u32 payload_length, queue_depth = 0;
+ int sessiontype = 0, ret = 0, tag_num, tag_size;
+
+ INIT_DELAYED_WORK(&conn->login_work, iscsi_target_do_login_rx);
+ iscsi_target_set_sock_callbacks(conn);
+
+ login->np = np;
+ conn->tpg = NULL;
login_req = (struct iscsi_login_req *) login->req;
payload_length = ntoh24(login_req->dlength);
- tmpbuf = kzalloc(payload_length + 1, GFP_KERNEL);
+ tmpbuf = kmemdup_nul(login->req_buf, payload_length, GFP_KERNEL);
if (!tmpbuf) {
pr_err("Unable to allocate memory for tmpbuf.\n");
return -1;
}
- memcpy(tmpbuf, login->req_buf, payload_length);
- tmpbuf[payload_length] = '\0';
start = tmpbuf;
end = (start + payload_length);
@@ -767,31 +1190,25 @@ int iscsi_target_locate_portal(
*/
sessiontype = strncmp(s_buf, DISCOVERY, 9);
if (!sessiontype) {
- conn->tpg = iscsit_global->discovery_tpg;
if (!login->leading_connection)
goto get_target;
sess->sess_ops->SessionType = 1;
- /*
- * Setup crc32c modules from libcrypto
- */
- if (iscsi_login_setup_crypto(conn) < 0) {
- pr_err("iscsi_login_setup_crypto() failed\n");
- ret = -1;
- goto out;
- }
+
/*
* Serialize access across the discovery struct iscsi_portal_group to
* process login attempt.
*/
+ conn->tpg = iscsit_global->discovery_tpg;
if (iscsit_access_np(np, conn->tpg) < 0) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
+ conn->tpg = NULL;
ret = -1;
goto out;
}
ret = 0;
- goto out;
+ goto alloc_tags;
}
get_target:
@@ -822,7 +1239,7 @@ get_target:
/*
* Locate Target Portal Group from Storage Node.
*/
- conn->tpg = iscsit_get_tpg_from_np(tiqn, np);
+ conn->tpg = iscsit_get_tpg_from_np(tiqn, np, &tpg_np);
if (!conn->tpg) {
pr_err("Unable to locate Target Portal Group"
" on %s\n", tiqn->tiqn);
@@ -832,31 +1249,26 @@ get_target:
ret = -1;
goto out;
}
+ conn->tpg_np = tpg_np;
pr_debug("Located Portal Group Object: %hu\n", conn->tpg->tpgt);
- /*
- * Setup crc32c modules from libcrypto
- */
- if (iscsi_login_setup_crypto(conn) < 0) {
- pr_err("iscsi_login_setup_crypto() failed\n");
- ret = -1;
- goto out;
- }
+
/*
* Serialize access across the struct iscsi_portal_group to
* process login attempt.
*/
if (iscsit_access_np(np, conn->tpg) < 0) {
+ kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put);
iscsit_put_tiqn_for_login(tiqn);
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
- ret = -1;
conn->tpg = NULL;
+ ret = -1;
goto out;
}
/*
* conn->sess->node_acl will be set when the referenced
- * struct iscsi_session is located from received ISID+TSIH in
+ * struct iscsit_session is located from received ISID+TSIH in
* iscsi_login_non_zero_tsih_s2().
*/
if (!login->leading_connection) {
@@ -883,8 +1295,27 @@ get_target:
ret = -1;
goto out;
}
+ se_nacl = sess->se_sess->se_node_acl;
+ queue_depth = se_nacl->queue_depth;
+ /*
+ * Setup pre-allocated tags based upon allowed per NodeACL CmdSN
+	 * depth for non-immediate commands, plus extra tags for immediate
+	 * commands.
+	 *
+	 * Also enforce an ISCSIT_MIN_TAGS floor to prevent unnecessary
+	 * contention in the per-cpu-ida tag allocation logic when
+	 * queue_depth is small.
+ */
+alloc_tags:
+ tag_num = max_t(u32, ISCSIT_MIN_TAGS, queue_depth);
+ tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS;
+ tag_size = sizeof(struct iscsit_cmd) + conn->conn_transport->priv_size;
- ret = 0;
+ ret = transport_alloc_session_tags(sess->se_sess, tag_num, tag_size);
+ if (ret < 0) {
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ ret = -1;
+ }
out:
kfree(tmpbuf);
return ret;
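
As a worked example of the tag sizing above — assuming ISCSIT_MIN_TAGS=16 and ISCSIT_EXTRA_TAGS=8, the values in iscsi_target_core.h at the time (verify against your tree):

    /*
     * queue_depth = 32: tag_num = max(16, 32) = 32; 32 * 2 + 8 = 72 tags
     * queue_depth =  1: tag_num = max(16,  1) = 16; 16 * 2 + 8 = 40 tags
     *
     * The doubling leaves headroom for immediate commands on top of the
     * CmdSN window; the floor avoids per-cpu-ida contention at tiny
     * queue depths. Each tag is sized for struct iscsit_cmd plus the
     * transport's priv_size.
     */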
@@ -892,19 +1323,60 @@ out:
int iscsi_target_start_negotiation(
struct iscsi_login *login,
- struct iscsi_conn *conn)
+ struct iscsit_conn *conn)
{
int ret;
+ if (conn->sock) {
+ struct sock *sk = conn->sock->sk;
+
+ write_lock_bh(&sk->sk_callback_lock);
+ set_bit(LOGIN_FLAGS_READY, &conn->login_flags);
+ set_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags);
+ write_unlock_bh(&sk->sk_callback_lock);
+ }
+ /*
+ * If iscsi_target_do_login returns zero to signal more PDU
+ * exchanges are required to complete the login, go ahead and
+ * clear LOGIN_FLAGS_INITIAL_PDU but only if the TCP connection
+ * is still active.
+ *
+	 * Otherwise, if the TCP connection dropped asynchronously, go ahead
+ * and perform connection cleanup now.
+ */
ret = iscsi_target_do_login(conn, login);
- if (ret != 0)
+ if (!ret) {
+ spin_lock(&conn->login_worker_lock);
+
+ if (iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_INITIAL_PDU))
+ ret = -1;
+ else if (!test_bit(LOGIN_FLAGS_WORKER_RUNNING, &conn->login_flags)) {
+ if (iscsit_set_login_timer_kworker(conn, NULL) < 0) {
+ /*
+ * The timeout has expired already.
+ * Schedule login_work to perform the cleanup.
+ */
+ schedule_delayed_work(&conn->login_work, 0);
+ }
+ }
+
+ spin_unlock(&conn->login_worker_lock);
+ }
+
+ if (ret < 0) {
+ iscsi_target_restore_sock_callbacks(conn);
iscsi_remove_failed_auth_entry(conn);
+ }
+ if (ret != 0) {
+ iscsit_stop_login_timer(conn);
+ cancel_delayed_work_sync(&conn->login_work);
+ iscsi_target_nego_release(conn);
+ }
- iscsi_target_nego_release(conn);
return ret;
}
-void iscsi_target_nego_release(struct iscsi_conn *conn)
+void iscsi_target_nego_release(struct iscsit_conn *conn)
{
struct iscsi_login *login = conn->conn_login;
diff --git a/drivers/target/iscsi/iscsi_target_nego.h b/drivers/target/iscsi/iscsi_target_nego.h
index f021cbd330e5..e60a46d34835 100644
--- a/drivers/target/iscsi/iscsi_target_nego.h
+++ b/drivers/target/iscsi/iscsi_target_nego.h
@@ -1,20 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ISCSI_TARGET_NEGO_H
#define ISCSI_TARGET_NEGO_H
#define DECIMAL 0
#define HEX 1
+#define BASE64 2
+
+struct iscsit_conn;
+struct iscsi_login;
+struct iscsi_np;
extern void convert_null_to_semi(char *, int);
extern int extract_param(const char *, const char *, unsigned int, char *,
unsigned char *);
-extern int iscsi_target_check_login_request(struct iscsi_conn *,
- struct iscsi_login *);
-extern int iscsi_target_get_initial_payload(struct iscsi_conn *,
+extern int iscsi_target_check_login_request(struct iscsit_conn *,
struct iscsi_login *);
-extern int iscsi_target_locate_portal(struct iscsi_np *, struct iscsi_conn *,
+extern int iscsi_target_locate_portal(struct iscsi_np *, struct iscsit_conn *,
struct iscsi_login *);
extern int iscsi_target_start_negotiation(
- struct iscsi_login *, struct iscsi_conn *);
-extern void iscsi_target_nego_release(struct iscsi_conn *);
-
+ struct iscsi_login *, struct iscsit_conn *);
+extern void iscsi_target_nego_release(struct iscsit_conn *);
+extern bool iscsi_conn_auth_required(struct iscsit_conn *conn);
#endif /* ISCSI_TARGET_NEGO_H */
diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.c b/drivers/target/iscsi/iscsi_target_nodeattrib.c
index 11dc2936af76..d63efdefb18e 100644
--- a/drivers/target/iscsi/iscsi_target_nodeattrib.c
+++ b/drivers/target/iscsi/iscsi_target_nodeattrib.c
@@ -1,26 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* This file contains the main functions related to Initiator Node Attributes.
*
- * © Copyright 2007-2011 RisingTide Systems LLC.
- *
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ * (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
******************************************************************************/
#include <target/target_core_base.h>
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_device.h"
#include "iscsi_target_tpg.h"
#include "iscsi_target_util.h"
@@ -35,10 +25,12 @@ static inline char *iscsit_na_get_initiatorname(
}
void iscsit_set_default_node_attribues(
- struct iscsi_node_acl *acl)
+ struct iscsi_node_acl *acl,
+ struct iscsi_portal_group *tpg)
{
struct iscsi_node_attrib *a = &acl->node_attrib;
+ a->authentication = NA_AUTHENTICATION_INHERITED;
a->dataout_timeout = NA_DATAOUT_TIMEOUT;
a->dataout_timeout_retries = NA_DATAOUT_TIMEOUT_RETRIES;
a->nopin_timeout = NA_NOPIN_TIMEOUT;
@@ -46,7 +38,7 @@ void iscsit_set_default_node_attribues(
a->random_datain_pdu_offsets = NA_RANDOM_DATAIN_PDU_OFFSETS;
a->random_datain_seq_offsets = NA_RANDOM_DATAIN_SEQ_OFFSETS;
a->random_r2t_offsets = NA_RANDOM_R2T_OFFSETS;
- a->default_erl = NA_DEFAULT_ERL;
+ a->default_erl = tpg->tpg_attrib.default_erl;
}
int iscsit_na_dataout_timeout(
@@ -105,8 +97,8 @@ int iscsit_na_nopin_timeout(
u32 nopin_timeout)
{
struct iscsi_node_attrib *a = &acl->node_attrib;
- struct iscsi_session *sess;
- struct iscsi_conn *conn;
+ struct iscsit_session *sess;
+ struct iscsit_conn *conn;
struct se_node_acl *se_nacl = &a->nacl->se_node_acl;
struct se_session *se_sess;
u32 orig_nopin_timeout = a->nopin_timeout;
diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.h b/drivers/target/iscsi/iscsi_target_nodeattrib.h
index c970b326ef23..ce074cb54579 100644
--- a/drivers/target/iscsi/iscsi_target_nodeattrib.h
+++ b/drivers/target/iscsi/iscsi_target_nodeattrib.h
@@ -1,7 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ISCSI_TARGET_NODEATTRIB_H
#define ISCSI_TARGET_NODEATTRIB_H
-extern void iscsit_set_default_node_attribues(struct iscsi_node_acl *);
+#include <linux/types.h>
+
+struct iscsi_node_acl;
+struct iscsi_portal_group;
+
+extern void iscsit_set_default_node_attribues(struct iscsi_node_acl *,
+ struct iscsi_portal_group *);
extern int iscsit_na_dataout_timeout(struct iscsi_node_acl *, u32);
extern int iscsit_na_dataout_timeout_retries(struct iscsi_node_acl *, u32);
extern int iscsit_na_nopin_timeout(struct iscsi_node_acl *, u32);
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index 35fd6439eb01..1d4e1788e073 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -1,31 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* This file contains main functions related to iSCSI Parameter negotiation.
*
- * © Copyright 2007-2011 RisingTide Systems LLC.
- *
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ * (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
******************************************************************************/
#include <linux/slab.h>
-
-#include "iscsi_target_core.h"
+#include <linux/uio.h> /* struct kvec */
+#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_util.h"
#include "iscsi_target_parameters.h"
int iscsi_login_rx_data(
- struct iscsi_conn *conn,
+ struct iscsit_conn *conn,
char *buf,
int length)
{
@@ -36,13 +26,6 @@ int iscsi_login_rx_data(
iov.iov_len = length;
iov.iov_base = buf;
- /*
- * Initial Marker-less Interval.
- * Add the values regardless of IFMarker/OFMarker, considering
- * it may not be negoitated yet.
- */
- conn->of_marker += length;
-
rx_got = rx_data(conn, &iov, 1, length);
if (rx_got != length) {
pr_err("rx_data returned %d, expecting %d.\n",
@@ -54,7 +37,7 @@ int iscsi_login_rx_data(
}
int iscsi_login_tx_data(
- struct iscsi_conn *conn,
+ struct iscsit_conn *conn,
char *pdu_buf,
char *text_buf,
int text_length)
@@ -74,13 +57,6 @@ int iscsi_login_tx_data(
iov_cnt++;
}
- /*
- * Initial Marker-less Interval.
- * Add the values regardless of IFMarker/OFMarker, considering
- * it may not be negoitated yet.
- */
- conn->if_marker += length;
-
tx_sent = tx_data(conn, &iov[0], iov_cnt, length);
if (tx_sent != length) {
pr_err("tx_data returned %d, expecting %d.\n",
@@ -91,60 +67,6 @@ int iscsi_login_tx_data(
return 0;
}
-void iscsi_dump_conn_ops(struct iscsi_conn_ops *conn_ops)
-{
- pr_debug("HeaderDigest: %s\n", (conn_ops->HeaderDigest) ?
- "CRC32C" : "None");
- pr_debug("DataDigest: %s\n", (conn_ops->DataDigest) ?
- "CRC32C" : "None");
- pr_debug("MaxRecvDataSegmentLength: %u\n",
- conn_ops->MaxRecvDataSegmentLength);
- pr_debug("OFMarker: %s\n", (conn_ops->OFMarker) ? "Yes" : "No");
- pr_debug("IFMarker: %s\n", (conn_ops->IFMarker) ? "Yes" : "No");
- if (conn_ops->OFMarker)
- pr_debug("OFMarkInt: %u\n", conn_ops->OFMarkInt);
- if (conn_ops->IFMarker)
- pr_debug("IFMarkInt: %u\n", conn_ops->IFMarkInt);
-}
-
-void iscsi_dump_sess_ops(struct iscsi_sess_ops *sess_ops)
-{
- pr_debug("InitiatorName: %s\n", sess_ops->InitiatorName);
- pr_debug("InitiatorAlias: %s\n", sess_ops->InitiatorAlias);
- pr_debug("TargetName: %s\n", sess_ops->TargetName);
- pr_debug("TargetAlias: %s\n", sess_ops->TargetAlias);
- pr_debug("TargetPortalGroupTag: %hu\n",
- sess_ops->TargetPortalGroupTag);
- pr_debug("MaxConnections: %hu\n", sess_ops->MaxConnections);
- pr_debug("InitialR2T: %s\n",
- (sess_ops->InitialR2T) ? "Yes" : "No");
- pr_debug("ImmediateData: %s\n", (sess_ops->ImmediateData) ?
- "Yes" : "No");
- pr_debug("MaxBurstLength: %u\n", sess_ops->MaxBurstLength);
- pr_debug("FirstBurstLength: %u\n", sess_ops->FirstBurstLength);
- pr_debug("DefaultTime2Wait: %hu\n", sess_ops->DefaultTime2Wait);
- pr_debug("DefaultTime2Retain: %hu\n",
- sess_ops->DefaultTime2Retain);
- pr_debug("MaxOutstandingR2T: %hu\n",
- sess_ops->MaxOutstandingR2T);
- pr_debug("DataPDUInOrder: %s\n",
- (sess_ops->DataPDUInOrder) ? "Yes" : "No");
- pr_debug("DataSequenceInOrder: %s\n",
- (sess_ops->DataSequenceInOrder) ? "Yes" : "No");
- pr_debug("ErrorRecoveryLevel: %hu\n",
- sess_ops->ErrorRecoveryLevel);
- pr_debug("SessionType: %s\n", (sess_ops->SessionType) ?
- "Discovery" : "Normal");
-}
-
-void iscsi_print_params(struct iscsi_param_list *param_list)
-{
- struct iscsi_param *param;
-
- list_for_each_entry(param, &param_list->param_list, p_list)
- pr_debug("%s: %s\n", param->name, param->value);
-}
-
static struct iscsi_param *iscsi_set_default_param(struct iscsi_param_list *param_list,
char *name, char *value, u8 phase, u8 scope, u8 sender,
u16 type_range, u8 use)
@@ -196,10 +118,6 @@ static struct iscsi_param *iscsi_set_default_param(struct iscsi_param_list *para
case TYPERANGE_DIGEST:
param->type = TYPE_VALUE_LIST | TYPE_STRING;
break;
- case TYPERANGE_MARKINT:
- param->type = TYPE_NUMBER_RANGE;
- param->type_range |= TYPERANGE_1_TO_65535;
- break;
case TYPERANGE_ISCSINAME:
case TYPERANGE_SESSIONTYPE:
case TYPERANGE_TARGETADDRESS:
@@ -234,7 +152,7 @@ int iscsi_create_default_params(struct iscsi_param_list **param_list_ptr)
if (!pl) {
pr_err("Unable to allocate memory for"
" struct iscsi_param_list.\n");
- return -1 ;
+ return -ENOMEM;
}
INIT_LIST_HEAD(&pl->param_list);
INIT_LIST_HEAD(&pl->extra_response_list);
@@ -424,15 +342,16 @@ int iscsi_create_default_params(struct iscsi_param_list **param_list_ptr)
param = iscsi_set_default_param(pl, IFMARKINT, INITIAL_IFMARKINT,
PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
- TYPERANGE_MARKINT, USE_INITIAL_ONLY);
+ TYPERANGE_UTF8, USE_INITIAL_ONLY);
if (!param)
goto out;
param = iscsi_set_default_param(pl, OFMARKINT, INITIAL_OFMARKINT,
PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
- TYPERANGE_MARKINT, USE_INITIAL_ONLY);
+ TYPERANGE_UTF8, USE_INITIAL_ONLY);
if (!param)
goto out;
+
/*
* Extra parameters for ISER from RFC-5046
*/
@@ -476,10 +395,10 @@ int iscsi_set_keys_to_negotiate(
if (!strcmp(param->name, AUTHMETHOD)) {
SET_PSTATE_NEGOTIATE(param);
} else if (!strcmp(param->name, HEADERDIGEST)) {
- if (iser == false)
+ if (!iser)
SET_PSTATE_NEGOTIATE(param);
} else if (!strcmp(param->name, DATADIGEST)) {
- if (iser == false)
+ if (!iser)
SET_PSTATE_NEGOTIATE(param);
} else if (!strcmp(param->name, MAXCONNECTIONS)) {
SET_PSTATE_NEGOTIATE(param);
@@ -499,7 +418,7 @@ int iscsi_set_keys_to_negotiate(
} else if (!strcmp(param->name, IMMEDIATEDATA)) {
SET_PSTATE_NEGOTIATE(param);
} else if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) {
- if (iser == false)
+ if (!iser)
SET_PSTATE_NEGOTIATE(param);
} else if (!strcmp(param->name, MAXXMITDATASEGMENTLENGTH)) {
continue;
@@ -522,21 +441,21 @@ int iscsi_set_keys_to_negotiate(
} else if (!strcmp(param->name, SESSIONTYPE)) {
SET_PSTATE_NEGOTIATE(param);
} else if (!strcmp(param->name, IFMARKER)) {
- SET_PSTATE_NEGOTIATE(param);
+ SET_PSTATE_REJECT(param);
} else if (!strcmp(param->name, OFMARKER)) {
- SET_PSTATE_NEGOTIATE(param);
+ SET_PSTATE_REJECT(param);
} else if (!strcmp(param->name, IFMARKINT)) {
- SET_PSTATE_NEGOTIATE(param);
+ SET_PSTATE_REJECT(param);
} else if (!strcmp(param->name, OFMARKINT)) {
- SET_PSTATE_NEGOTIATE(param);
+ SET_PSTATE_REJECT(param);
} else if (!strcmp(param->name, RDMAEXTENSIONS)) {
- if (iser == true)
+ if (iser)
SET_PSTATE_NEGOTIATE(param);
} else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH)) {
- if (iser == true)
+ if (iser)
SET_PSTATE_NEGOTIATE(param);
} else if (!strcmp(param->name, TARGETRECVDATASEGMENTLENGTH)) {
- if (iser == true)
+ if (iser)
SET_PSTATE_NEGOTIATE(param);
}
}
@@ -603,7 +522,7 @@ int iscsi_copy_param_list(
param_list = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL);
if (!param_list) {
pr_err("Unable to allocate memory for struct iscsi_param_list.\n");
- goto err_out;
+ return -ENOMEM;
}
INIT_LIST_HEAD(&param_list->param_list);
INIT_LIST_HEAD(&param_list->extra_response_list);
@@ -654,7 +573,7 @@ int iscsi_copy_param_list(
err_out:
iscsi_release_param_list(param_list);
- return -1;
+ return -ENOMEM;
}
static void iscsi_release_extra_responses(struct iscsi_param_list *param_list)
@@ -705,6 +624,7 @@ struct iscsi_param *iscsi_find_param_from_key(
pr_err("Unable to locate key \"%s\".\n", key);
return NULL;
}
+EXPORT_SYMBOL(iscsi_find_param_from_key);
int iscsi_extract_key_value(char *textbuf, char **key, char **value)
{
@@ -754,12 +674,12 @@ static int iscsi_add_notunderstood_response(
if (!extra_response) {
pr_err("Unable to allocate memory for"
" struct iscsi_extra_response.\n");
- return -1;
+ return -ENOMEM;
}
INIT_LIST_HEAD(&extra_response->er_list);
- strlcpy(extra_response->key, key, sizeof(extra_response->key));
- strlcpy(extra_response->value, NOTUNDERSTOOD,
+ strscpy(extra_response->key, key, sizeof(extra_response->key));
+ strscpy(extra_response->value, NOTUNDERSTOOD,
sizeof(extra_response->value));
list_add_tail(&extra_response->er_list,
@@ -789,7 +709,8 @@ static int iscsi_check_for_auth_key(char *key)
return 0;
}
-static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param)
+static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param,
+ bool keys_workaround)
{
if (IS_TYPE_BOOL_AND(param)) {
if (!strcmp(param->value, NO))
@@ -797,35 +718,31 @@ static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param)
} else if (IS_TYPE_BOOL_OR(param)) {
if (!strcmp(param->value, YES))
SET_PSTATE_REPLY_OPTIONAL(param);
- /*
- * Required for gPXE iSCSI boot client
- */
- if (!strcmp(param->name, IMMEDIATEDATA))
- SET_PSTATE_REPLY_OPTIONAL(param);
+
+ if (keys_workaround) {
+ /*
+ * Required for gPXE iSCSI boot client
+ */
+ if (!strcmp(param->name, IMMEDIATEDATA))
+ SET_PSTATE_REPLY_OPTIONAL(param);
+ }
} else if (IS_TYPE_NUMBER(param)) {
if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH))
SET_PSTATE_REPLY_OPTIONAL(param);
- /*
- * The GlobalSAN iSCSI Initiator for MacOSX does
- * not respond to MaxBurstLength, FirstBurstLength,
- * DefaultTime2Wait or DefaultTime2Retain parameter keys.
- * So, we set them to 'reply optional' here, and assume the
- * the defaults from iscsi_parameters.h if the initiator
- * is not RFC compliant and the keys are not negotiated.
- */
- if (!strcmp(param->name, MAXBURSTLENGTH))
- SET_PSTATE_REPLY_OPTIONAL(param);
- if (!strcmp(param->name, FIRSTBURSTLENGTH))
- SET_PSTATE_REPLY_OPTIONAL(param);
- if (!strcmp(param->name, DEFAULTTIME2WAIT))
- SET_PSTATE_REPLY_OPTIONAL(param);
- if (!strcmp(param->name, DEFAULTTIME2RETAIN))
- SET_PSTATE_REPLY_OPTIONAL(param);
- /*
- * Required for gPXE iSCSI boot client
- */
- if (!strcmp(param->name, MAXCONNECTIONS))
- SET_PSTATE_REPLY_OPTIONAL(param);
+
+ if (keys_workaround) {
+ /*
+ * Required for Mellanox Flexboot PXE boot ROM
+ */
+ if (!strcmp(param->name, FIRSTBURSTLENGTH))
+ SET_PSTATE_REPLY_OPTIONAL(param);
+
+ /*
+ * Required for gPXE iSCSI boot client
+ */
+ if (!strcmp(param->name, MAXCONNECTIONS))
+ SET_PSTATE_REPLY_OPTIONAL(param);
+ }
} else if (IS_PHASE_DECLARATIVE(param))
SET_PSTATE_REPLY_OPTIONAL(param);
}
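
The keys_workaround argument is driven by the per-TPG login_keys_workaround attribute introduced alongside this change (enabled by default in mainline, as far as I can tell); clearing it skips the gPXE and Mellanox Flexboot reply-optional quirks above and falls back to strict negotiation. Presumably it is toggled through the usual configfs attrib path for the TPG:

    /*
     * Assumed toggle path (illustrative):
     *   /sys/kernel/config/target/iscsi/<target_iqn>/tpgt_<n>/attrib/
     *       login_keys_workaround
     */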
@@ -908,91 +825,6 @@ static int iscsi_check_numerical_value(struct iscsi_param *param, char *value_pt
return 0;
}
-static int iscsi_check_numerical_range_value(struct iscsi_param *param, char *value)
-{
- char *left_val_ptr = NULL, *right_val_ptr = NULL;
- char *tilde_ptr = NULL;
- u32 left_val, right_val, local_left_val;
-
- if (strcmp(param->name, IFMARKINT) &&
- strcmp(param->name, OFMARKINT)) {
- pr_err("Only parameters \"%s\" or \"%s\" may contain a"
- " numerical range value.\n", IFMARKINT, OFMARKINT);
- return -1;
- }
-
- if (IS_PSTATE_PROPOSER(param))
- return 0;
-
- tilde_ptr = strchr(value, '~');
- if (!tilde_ptr) {
- pr_err("Unable to locate numerical range indicator"
- " \"~\" for \"%s\".\n", param->name);
- return -1;
- }
- *tilde_ptr = '\0';
-
- left_val_ptr = value;
- right_val_ptr = value + strlen(left_val_ptr) + 1;
-
- if (iscsi_check_numerical_value(param, left_val_ptr) < 0)
- return -1;
- if (iscsi_check_numerical_value(param, right_val_ptr) < 0)
- return -1;
-
- left_val = simple_strtoul(left_val_ptr, NULL, 0);
- right_val = simple_strtoul(right_val_ptr, NULL, 0);
- *tilde_ptr = '~';
-
- if (right_val < left_val) {
- pr_err("Numerical range for parameter \"%s\" contains"
- " a right value which is less than the left.\n",
- param->name);
- return -1;
- }
-
- /*
- * For now, enforce reasonable defaults for [I,O]FMarkInt.
- */
- tilde_ptr = strchr(param->value, '~');
- if (!tilde_ptr) {
- pr_err("Unable to locate numerical range indicator"
- " \"~\" for \"%s\".\n", param->name);
- return -1;
- }
- *tilde_ptr = '\0';
-
- left_val_ptr = param->value;
- right_val_ptr = param->value + strlen(left_val_ptr) + 1;
-
- local_left_val = simple_strtoul(left_val_ptr, NULL, 0);
- *tilde_ptr = '~';
-
- if (param->set_param) {
- if ((left_val < local_left_val) ||
- (right_val < local_left_val)) {
- pr_err("Passed value range \"%u~%u\" is below"
- " minimum left value \"%u\" for key \"%s\","
- " rejecting.\n", left_val, right_val,
- local_left_val, param->name);
- return -1;
- }
- } else {
- if ((left_val < local_left_val) &&
- (right_val < local_left_val)) {
- pr_err("Received value range \"%u~%u\" is"
- " below minimum left value \"%u\" for key"
- " \"%s\", rejecting.\n", left_val, right_val,
- local_left_val, param->name);
- SET_PSTATE_REJECT(param);
- if (iscsi_update_param_value(param, REJECT) < 0)
- return -1;
- }
- }
-
- return 0;
-}
-
static int iscsi_check_string_or_list_value(struct iscsi_param *param, char *value)
{
if (IS_PSTATE_PROPOSER(param))
@@ -1029,33 +861,6 @@ static int iscsi_check_string_or_list_value(struct iscsi_param *param, char *val
return 0;
}
-/*
- * This function is used to pick a value range number, currently just
- * returns the lesser of both right values.
- */
-static char *iscsi_get_value_from_number_range(
- struct iscsi_param *param,
- char *value)
-{
- char *end_ptr, *tilde_ptr1 = NULL, *tilde_ptr2 = NULL;
- u32 acceptor_right_value, proposer_right_value;
-
- tilde_ptr1 = strchr(value, '~');
- if (!tilde_ptr1)
- return NULL;
- *tilde_ptr1++ = '\0';
- proposer_right_value = simple_strtoul(tilde_ptr1, &end_ptr, 0);
-
- tilde_ptr2 = strchr(param->value, '~');
- if (!tilde_ptr2)
- return NULL;
- *tilde_ptr2++ = '\0';
- acceptor_right_value = simple_strtoul(tilde_ptr2, &end_ptr, 0);
-
- return (acceptor_right_value >= proposer_right_value) ?
- tilde_ptr1 : tilde_ptr2;
-}
-
static char *iscsi_check_valuelist_for_support(
struct iscsi_param *param,
char *value)
@@ -1102,10 +907,10 @@ out:
}
static int iscsi_check_acceptor_state(struct iscsi_param *param, char *value,
- struct iscsi_conn *conn)
+ struct iscsit_conn *conn)
{
u8 acceptor_boolean_value = 0, proposer_boolean_value = 0;
- char *negoitated_value = NULL;
+ char *negotiated_value = NULL;
if (IS_PSTATE_ACCEPTOR(param)) {
pr_err("Received key \"%s\" twice, protocol error.\n",
@@ -1182,7 +987,7 @@ static int iscsi_check_acceptor_state(struct iscsi_param *param, char *value,
unsigned long long tmp;
int rc;
- rc = strict_strtoull(param->value, 0, &tmp);
+ rc = kstrtoull(param->value, 0, &tmp);
if (rc < 0)
return -1;
@@ -1205,24 +1010,16 @@ static int iscsi_check_acceptor_state(struct iscsi_param *param, char *value,
pr_debug("Updated %s to target MXDSL value: %s\n",
param->name, param->value);
}
-
- } else if (IS_TYPE_NUMBER_RANGE(param)) {
- negoitated_value = iscsi_get_value_from_number_range(
- param, value);
- if (!negoitated_value)
- return -1;
- if (iscsi_update_param_value(param, negoitated_value) < 0)
- return -1;
} else if (IS_TYPE_VALUE_LIST(param)) {
- negoitated_value = iscsi_check_valuelist_for_support(
+ negotiated_value = iscsi_check_valuelist_for_support(
param, value);
- if (!negoitated_value) {
+ if (!negotiated_value) {
pr_err("Proposer's value list \"%s\" contains"
" no valid values from Acceptor's value list"
" \"%s\".\n", value, param->value);
return -1;
}
- if (iscsi_update_param_value(param, negoitated_value) < 0)
+ if (iscsi_update_param_value(param, negotiated_value) < 0)
return -1;
} else if (IS_PHASE_DECLARATIVE(param)) {
if (iscsi_update_param_value(param, value) < 0)
@@ -1241,47 +1038,7 @@ static int iscsi_check_proposer_state(struct iscsi_param *param, char *value)
return -1;
}
- if (IS_TYPE_NUMBER_RANGE(param)) {
- u32 left_val = 0, right_val = 0, recieved_value = 0;
- char *left_val_ptr = NULL, *right_val_ptr = NULL;
- char *tilde_ptr = NULL;
-
- if (!strcmp(value, IRRELEVANT) || !strcmp(value, REJECT)) {
- if (iscsi_update_param_value(param, value) < 0)
- return -1;
- return 0;
- }
-
- tilde_ptr = strchr(value, '~');
- if (tilde_ptr) {
- pr_err("Illegal \"~\" in response for \"%s\".\n",
- param->name);
- return -1;
- }
- tilde_ptr = strchr(param->value, '~');
- if (!tilde_ptr) {
- pr_err("Unable to locate numerical range"
- " indicator \"~\" for \"%s\".\n", param->name);
- return -1;
- }
- *tilde_ptr = '\0';
-
- left_val_ptr = param->value;
- right_val_ptr = param->value + strlen(left_val_ptr) + 1;
- left_val = simple_strtoul(left_val_ptr, NULL, 0);
- right_val = simple_strtoul(right_val_ptr, NULL, 0);
- recieved_value = simple_strtoul(value, NULL, 0);
-
- *tilde_ptr = '~';
-
- if ((recieved_value < left_val) ||
- (recieved_value > right_val)) {
- pr_err("Illegal response \"%s=%u\", value must"
- " be between %u and %u.\n", param->name,
- recieved_value, left_val, right_val);
- return -1;
- }
- } else if (IS_TYPE_VALUE_LIST(param)) {
+ if (IS_TYPE_VALUE_LIST(param)) {
char *comma_ptr = NULL, *tmp_ptr = NULL;
comma_ptr = strchr(value, ',');
@@ -1363,9 +1120,6 @@ static int iscsi_check_value(struct iscsi_param *param, char *value)
} else if (IS_TYPE_NUMBER(param)) {
if (iscsi_check_numerical_value(param, value) < 0)
return -1;
- } else if (IS_TYPE_NUMBER_RANGE(param)) {
- if (iscsi_check_numerical_range_value(param, value) < 0)
- return -1;
} else if (IS_TYPE_STRING(param) || IS_TYPE_VALUE_LIST(param)) {
if (iscsi_check_string_or_list_value(param, value) < 0)
return -1;
@@ -1460,18 +1214,20 @@ static struct iscsi_param *iscsi_check_key(
return param;
if (!(param->phase & phase)) {
- pr_err("Key \"%s\" may not be negotiated during ",
- param->name);
+ char *phase_name;
+
switch (phase) {
case PHASE_SECURITY:
- pr_debug("Security phase.\n");
+ phase_name = "Security";
break;
case PHASE_OPERATIONAL:
- pr_debug("Operational phase.\n");
+ phase_name = "Operational";
break;
default:
- pr_debug("Unknown phase.\n");
+ phase_name = "Unknown";
}
+ pr_err("Key \"%s\" may not be negotiated during %s phase.\n",
+ param->name, phase_name);
return NULL;
}
@@ -1485,8 +1241,6 @@ static int iscsi_enforce_integrity_rules(
char *tmpptr;
u8 DataSequenceInOrder = 0;
u8 ErrorRecoveryLevel = 0, SessionType = 0;
- u8 IFMarker = 0, OFMarker = 0;
- u8 IFMarkInt_Reject = 1, OFMarkInt_Reject = 1;
u32 FirstBurstLength = 0, MaxBurstLength = 0;
struct iscsi_param *param = NULL;
@@ -1505,28 +1259,12 @@ static int iscsi_enforce_integrity_rules(
if (!strcmp(param->name, MAXBURSTLENGTH))
MaxBurstLength = simple_strtoul(param->value,
&tmpptr, 0);
- if (!strcmp(param->name, IFMARKER))
- if (!strcmp(param->value, YES))
- IFMarker = 1;
- if (!strcmp(param->name, OFMARKER))
- if (!strcmp(param->value, YES))
- OFMarker = 1;
- if (!strcmp(param->name, IFMARKINT))
- if (!strcmp(param->value, REJECT))
- IFMarkInt_Reject = 1;
- if (!strcmp(param->name, OFMARKINT))
- if (!strcmp(param->value, REJECT))
- OFMarkInt_Reject = 1;
}
list_for_each_entry(param, &param_list->param_list, p_list) {
if (!(param->phase & phase))
continue;
- if (!SessionType && (!IS_PSTATE_ACCEPTOR(param) &&
- (strcmp(param->name, IFMARKER) &&
- strcmp(param->name, OFMARKER) &&
- strcmp(param->name, IFMARKINT) &&
- strcmp(param->name, OFMARKINT))))
+ if (!SessionType && !IS_PSTATE_ACCEPTOR(param))
continue;
if (!strcmp(param->name, MAXOUTSTANDINGR2T) &&
DataSequenceInOrder && (ErrorRecoveryLevel > 0)) {
@@ -1558,38 +1296,6 @@ static int iscsi_enforce_integrity_rules(
param->name, param->value);
}
}
- if (!strcmp(param->name, IFMARKER) && IFMarkInt_Reject) {
- if (iscsi_update_param_value(param, NO) < 0)
- return -1;
- IFMarker = 0;
- pr_debug("Reset \"%s\" to \"%s\".\n",
- param->name, param->value);
- }
- if (!strcmp(param->name, OFMARKER) && OFMarkInt_Reject) {
- if (iscsi_update_param_value(param, NO) < 0)
- return -1;
- OFMarker = 0;
- pr_debug("Reset \"%s\" to \"%s\".\n",
- param->name, param->value);
- }
- if (!strcmp(param->name, IFMARKINT) && !IFMarker) {
- if (!strcmp(param->value, REJECT))
- continue;
- param->state &= ~PSTATE_NEGOTIATE;
- if (iscsi_update_param_value(param, IRRELEVANT) < 0)
- return -1;
- pr_debug("Reset \"%s\" to \"%s\".\n",
- param->name, param->value);
- }
- if (!strcmp(param->name, OFMARKINT) && !OFMarker) {
- if (!strcmp(param->value, REJECT))
- continue;
- param->state &= ~PSTATE_NEGOTIATE;
- if (iscsi_update_param_value(param, IRRELEVANT) < 0)
- return -1;
- pr_debug("Reset \"%s\" to \"%s\".\n",
- param->name, param->value);
- }
}
return 0;
@@ -1600,19 +1306,17 @@ int iscsi_decode_text_input(
u8 sender,
char *textbuf,
u32 length,
- struct iscsi_conn *conn)
+ struct iscsit_conn *conn)
{
struct iscsi_param_list *param_list = conn->param_list;
char *tmpbuf, *start = NULL, *end = NULL;
- tmpbuf = kzalloc(length + 1, GFP_KERNEL);
+ tmpbuf = kmemdup_nul(textbuf, length, GFP_KERNEL);
if (!tmpbuf) {
- pr_err("Unable to allocate memory for tmpbuf.\n");
- return -1;
+ pr_err("Unable to allocate %u + 1 bytes for tmpbuf.\n", length);
+ return -ENOMEM;
}
- memcpy(tmpbuf, textbuf, length);
- tmpbuf[length] = '\0';
start = tmpbuf;
end = (start + length);
@@ -1620,10 +1324,8 @@ int iscsi_decode_text_input(
char *key, *value;
struct iscsi_param *param;
- if (iscsi_extract_key_value(start, &key, &value) < 0) {
- kfree(tmpbuf);
- return -1;
- }
+ if (iscsi_extract_key_value(start, &key, &value) < 0)
+ goto free_buffer;
pr_debug("Got key: %s=%s\n", key, value);
@@ -1636,38 +1338,37 @@ int iscsi_decode_text_input(
param = iscsi_check_key(key, phase, sender, param_list);
if (!param) {
- if (iscsi_add_notunderstood_response(key,
- value, param_list) < 0) {
- kfree(tmpbuf);
- return -1;
- }
+ if (iscsi_add_notunderstood_response(key, value,
+ param_list) < 0)
+ goto free_buffer;
+
start += strlen(key) + strlen(value) + 2;
continue;
}
- if (iscsi_check_value(param, value) < 0) {
- kfree(tmpbuf);
- return -1;
- }
+ if (iscsi_check_value(param, value) < 0)
+ goto free_buffer;
start += strlen(key) + strlen(value) + 2;
if (IS_PSTATE_PROPOSER(param)) {
- if (iscsi_check_proposer_state(param, value) < 0) {
- kfree(tmpbuf);
- return -1;
- }
+ if (iscsi_check_proposer_state(param, value) < 0)
+ goto free_buffer;
+
SET_PSTATE_RESPONSE_GOT(param);
} else {
- if (iscsi_check_acceptor_state(param, value, conn) < 0) {
- kfree(tmpbuf);
- return -1;
- }
+ if (iscsi_check_acceptor_state(param, value, conn) < 0)
+ goto free_buffer;
+
SET_PSTATE_ACCEPTOR(param);
}
}
kfree(tmpbuf);
return 0;
+
+free_buffer:
+ kfree(tmpbuf);
+ return -1;
}
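
The buffer decoded above is the standard iSCSI login/text payload: NUL-separated key=value pairs (convert_null_to_semi() rewrites the NULs as ';' elsewhere for logging). For illustration:

    /*
     * Example input to iscsi_decode_text_input():
     *
     *   "InitiatorName=iqn.1993-08.org.debian:01:abcdef\0"
     *   "SessionType=Normal\0"
     *   "HeaderDigest=CRC32C,None\0"
     *
     * Each iteration advances start by strlen(key) + strlen(value) + 2,
     * accounting for the '=' separator and the terminating NUL.
     */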
int iscsi_encode_text_output(
@@ -1675,7 +1376,8 @@ int iscsi_encode_text_output(
u8 sender,
char *textbuf,
u32 *length,
- struct iscsi_param_list *param_list)
+ struct iscsi_param_list *param_list,
+ bool keys_workaround)
{
char *output_buf = NULL;
struct iscsi_extra_response *er;
@@ -1711,7 +1413,8 @@ int iscsi_encode_text_output(
*length += 1;
output_buf = textbuf + *length;
SET_PSTATE_PROPOSER(param);
- iscsi_check_proposer_for_optional_reply(param);
+ iscsi_check_proposer_for_optional_reply(param,
+ keys_workaround);
pr_debug("Sending key: %s=%s\n",
param->name, param->value);
}
@@ -1826,24 +1529,6 @@ void iscsi_set_connection_parameters(
*/
pr_debug("MaxRecvDataSegmentLength: %u\n",
ops->MaxRecvDataSegmentLength);
- } else if (!strcmp(param->name, OFMARKER)) {
- ops->OFMarker = !strcmp(param->value, YES);
- pr_debug("OFMarker: %s\n",
- param->value);
- } else if (!strcmp(param->name, IFMARKER)) {
- ops->IFMarker = !strcmp(param->value, YES);
- pr_debug("IFMarker: %s\n",
- param->value);
- } else if (!strcmp(param->name, OFMARKINT)) {
- ops->OFMarkInt =
- simple_strtoul(param->value, &tmpptr, 0);
- pr_debug("OFMarkInt: %s\n",
- param->value);
- } else if (!strcmp(param->name, IFMARKINT)) {
- ops->IFMarkInt =
- simple_strtoul(param->value, &tmpptr, 0);
- pr_debug("IFMarkInt: %s\n",
- param->value);
} else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH)) {
ops->InitiatorRecvDataSegmentLength =
simple_strtoul(param->value, &tmpptr, 0);
@@ -1924,7 +1609,7 @@ void iscsi_set_session_parameters(
param->value);
} else if (!strcmp(param->name, INITIALR2T)) {
ops->InitialR2T = !strcmp(param->value, YES);
- pr_debug("InitialR2T: %s\n",
+ pr_debug("InitialR2T: %s\n",
param->value);
} else if (!strcmp(param->name, IMMEDIATEDATA)) {
ops->ImmediateData = !strcmp(param->value, YES);
diff --git a/drivers/target/iscsi/iscsi_target_parameters.h b/drivers/target/iscsi/iscsi_target_parameters.h
index a47046a752aa..c672a971fcb7 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.h
+++ b/drivers/target/iscsi/iscsi_target_parameters.h
@@ -1,6 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ISCSI_PARAMETERS_H
#define ISCSI_PARAMETERS_H
+#include <linux/types.h>
#include <scsi/iscsi_proto.h>
struct iscsi_extra_response {
@@ -23,11 +25,13 @@ struct iscsi_param {
struct list_head p_list;
} ____cacheline_aligned;
-extern int iscsi_login_rx_data(struct iscsi_conn *, char *, int);
-extern int iscsi_login_tx_data(struct iscsi_conn *, char *, char *, int);
-extern void iscsi_dump_conn_ops(struct iscsi_conn_ops *);
-extern void iscsi_dump_sess_ops(struct iscsi_sess_ops *);
-extern void iscsi_print_params(struct iscsi_param_list *);
+struct iscsit_conn;
+struct iscsi_conn_ops;
+struct iscsi_param_list;
+struct iscsi_sess_ops;
+
+extern int iscsi_login_rx_data(struct iscsit_conn *, char *, int);
+extern int iscsi_login_tx_data(struct iscsit_conn *, char *, char *, int);
extern int iscsi_create_default_params(struct iscsi_param_list **);
extern int iscsi_set_keys_to_negotiate(struct iscsi_param_list *, bool);
extern int iscsi_set_keys_irrelevant_for_discovery(struct iscsi_param_list *);
@@ -38,9 +42,9 @@ extern void iscsi_release_param_list(struct iscsi_param_list *);
extern struct iscsi_param *iscsi_find_param_from_key(char *, struct iscsi_param_list *);
extern int iscsi_extract_key_value(char *, char **, char **);
extern int iscsi_update_param_value(struct iscsi_param *, char *);
-extern int iscsi_decode_text_input(u8, u8, char *, u32, struct iscsi_conn *);
+extern int iscsi_decode_text_input(u8, u8, char *, u32, struct iscsit_conn *);
extern int iscsi_encode_text_output(u8, u8, char *, u32 *,
- struct iscsi_param_list *);
+ struct iscsi_param_list *, bool);
extern int iscsi_check_negotiated_keys(struct iscsi_param_list *);
extern void iscsi_set_connection_parameters(struct iscsi_conn_ops *,
struct iscsi_param_list *);
@@ -86,9 +90,6 @@ extern void iscsi_set_session_parameters(struct iscsi_sess_ops *,
#define OFMARKER "OFMarker"
#define IFMARKINT "IFMarkInt"
#define OFMARKINT "OFMarkInt"
-#define X_EXTENSIONKEY "X-com.sbei.version"
-#define X_EXTENSIONKEY_CISCO_NEW "X-com.cisco.protocol"
-#define X_EXTENSIONKEY_CISCO_OLD "X-com.cisco.iscsi.draft"
/*
* Parameter names of iSCSI Extentions for RDMA (iSER). See RFC-5046
@@ -138,8 +139,8 @@ extern void iscsi_set_session_parameters(struct iscsi_sess_ops *,
#define INITIAL_SESSIONTYPE NORMAL
#define INITIAL_IFMARKER NO
#define INITIAL_OFMARKER NO
-#define INITIAL_IFMARKINT "2048~65535"
-#define INITIAL_OFMARKINT "2048~65535"
+#define INITIAL_IFMARKINT REJECT
+#define INITIAL_OFMARKINT REJECT
/*
* Initial values for iSER parameters following RFC-5046 Section 6
@@ -239,10 +240,9 @@ extern void iscsi_set_session_parameters(struct iscsi_sess_ops *,
#define TYPERANGE_AUTH 0x0200
#define TYPERANGE_DIGEST 0x0400
#define TYPERANGE_ISCSINAME 0x0800
-#define TYPERANGE_MARKINT 0x1000
-#define TYPERANGE_SESSIONTYPE 0x2000
-#define TYPERANGE_TARGETADDRESS 0x4000
-#define TYPERANGE_UTF8 0x8000
+#define TYPERANGE_SESSIONTYPE 0x1000
+#define TYPERANGE_TARGETADDRESS 0x2000
+#define TYPERANGE_UTF8 0x4000
#define IS_TYPERANGE_0_TO_2(p) ((p)->type_range & TYPERANGE_0_TO_2)
#define IS_TYPERANGE_0_TO_3600(p) ((p)->type_range & TYPERANGE_0_TO_3600)
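The header hunk above also cuts include coupling: because the touched prototypes only pass pointers, bare forward declarations (struct iscsit_conn; and friends) suffice, and users of iscsi_target_parameters.h no longer drag in the full core definitions. A small self-contained sketch of the idiom follows; the demo_ names are hypothetical.

#include <stdio.h>

/* In a header, a forward declaration is all a prototype needs
 * when the type is only handled by pointer: */
struct demo_conn;				/* opaque to includers */
int demo_decode(struct demo_conn *conn, const char *buf);

/* Only the implementation file needs the full definition, so
 * changes to the struct no longer rebuild every includer: */
struct demo_conn { int cid; };

int demo_decode(struct demo_conn *conn, const char *buf)
{
	printf("cid %d: %s\n", conn->cid, buf);
	return 0;
}

int main(void)
{
	struct demo_conn c = { .cid = 1 };
	return demo_decode(&c, "InitialR2T=Yes");
}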
diff --git a/drivers/target/iscsi/iscsi_target_seq_pdu_list.c b/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
index edb592a368ef..66de2b8de463 100644
--- a/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
+++ b/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
@@ -1,36 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* This file contains main functions related to iSCSI DataSequenceInOrder=No
* and DataPDUInOrder=No.
*
- * © Copyright 2007-2011 RisingTide Systems LLC.
- *
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ * (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
******************************************************************************/
#include <linux/slab.h>
#include <linux/random.h>
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_util.h"
#include "iscsi_target_tpg.h"
#include "iscsi_target_seq_pdu_list.h"
-#define OFFLOAD_BUF_SIZE 32768
-
#ifdef DEBUG
-static void iscsit_dump_seq_list(struct iscsi_cmd *cmd)
+static void iscsit_dump_seq_list(struct iscsit_cmd *cmd)
{
int i;
struct iscsi_seq *seq;
@@ -48,7 +36,7 @@ static void iscsit_dump_seq_list(struct iscsi_cmd *cmd)
}
}
-static void iscsit_dump_pdu_list(struct iscsi_cmd *cmd)
+static void iscsit_dump_pdu_list(struct iscsit_cmd *cmd)
{
int i;
struct iscsi_pdu *pdu;
@@ -64,12 +52,12 @@ static void iscsit_dump_pdu_list(struct iscsi_cmd *cmd)
}
}
#else
-static void iscsit_dump_seq_list(struct iscsi_cmd *cmd) {}
-static void iscsit_dump_pdu_list(struct iscsi_cmd *cmd) {}
+static void iscsit_dump_seq_list(struct iscsit_cmd *cmd) {}
+static void iscsit_dump_pdu_list(struct iscsit_cmd *cmd) {}
#endif
static void iscsit_ordered_seq_lists(
- struct iscsi_cmd *cmd,
+ struct iscsit_cmd *cmd,
u8 type)
{
u32 i, seq_count = 0;
@@ -82,7 +70,7 @@ static void iscsit_ordered_seq_lists(
}
static void iscsit_ordered_pdu_lists(
- struct iscsi_cmd *cmd,
+ struct iscsit_cmd *cmd,
u8 type)
{
u32 i, pdu_send_order = 0, seq_no = 0;
@@ -129,7 +117,7 @@ redo:
}
static int iscsit_randomize_pdu_lists(
- struct iscsi_cmd *cmd,
+ struct iscsit_cmd *cmd,
u8 type)
{
int i = 0;
@@ -179,7 +167,7 @@ redo:
}
static int iscsit_randomize_seq_lists(
- struct iscsi_cmd *cmd,
+ struct iscsit_cmd *cmd,
u8 type)
{
int i, j = 0;
@@ -211,7 +199,7 @@ static int iscsit_randomize_seq_lists(
}
static void iscsit_determine_counts_for_list(
- struct iscsi_cmd *cmd,
+ struct iscsit_cmd *cmd,
struct iscsi_build_list *bl,
u32 *seq_count,
u32 *pdu_count)
@@ -220,7 +208,7 @@ static void iscsit_determine_counts_for_list(
u32 burstlength = 0, offset = 0;
u32 unsolicited_data_length = 0;
u32 mdsl;
- struct iscsi_conn *conn = cmd->conn;
+ struct iscsit_conn *conn = cmd->conn;
if (cmd->se_cmd.data_direction == DMA_TO_DEVICE)
mdsl = cmd->conn->conn_ops->MaxXmitDataSegmentLength;
@@ -295,13 +283,13 @@ static void iscsit_determine_counts_for_list(
* or DataPDUInOrder=No.
*/
static int iscsit_do_build_pdu_and_seq_lists(
- struct iscsi_cmd *cmd,
+ struct iscsit_cmd *cmd,
struct iscsi_build_list *bl)
{
int check_immediate = 0, datapduinorder, datasequenceinorder;
u32 burstlength = 0, offset = 0, i = 0, mdsl;
u32 pdu_count = 0, seq_no = 0, unsolicited_data_length = 0;
- struct iscsi_conn *conn = cmd->conn;
+ struct iscsit_conn *conn = cmd->conn;
struct iscsi_pdu *pdu = cmd->pdu_list;
struct iscsi_seq *seq = cmd->seq_list;
@@ -496,16 +484,16 @@ static int iscsit_do_build_pdu_and_seq_lists(
}
int iscsit_build_pdu_and_seq_lists(
- struct iscsi_cmd *cmd,
+ struct iscsit_cmd *cmd,
u32 immediate_data_length)
{
struct iscsi_build_list bl;
u32 pdu_count = 0, seq_count = 1;
- struct iscsi_conn *conn = cmd->conn;
+ struct iscsit_conn *conn = cmd->conn;
struct iscsi_pdu *pdu = NULL;
struct iscsi_seq *seq = NULL;
- struct iscsi_session *sess = conn->sess;
+ struct iscsit_session *sess = conn->sess;
struct iscsi_node_attrib *na;
/*
@@ -571,7 +559,7 @@ int iscsit_build_pdu_and_seq_lists(
}
struct iscsi_pdu *iscsit_get_pdu_holder(
- struct iscsi_cmd *cmd,
+ struct iscsit_cmd *cmd,
u32 offset,
u32 length)
{
@@ -579,7 +567,7 @@ struct iscsi_pdu *iscsit_get_pdu_holder(
struct iscsi_pdu *pdu = NULL;
if (!cmd->pdu_list) {
- pr_err("struct iscsi_cmd->pdu_list is NULL!\n");
+ pr_err("struct iscsit_cmd->pdu_list is NULL!\n");
return NULL;
}
@@ -595,15 +583,15 @@ struct iscsi_pdu *iscsit_get_pdu_holder(
}
struct iscsi_pdu *iscsit_get_pdu_holder_for_seq(
- struct iscsi_cmd *cmd,
+ struct iscsit_cmd *cmd,
struct iscsi_seq *seq)
{
u32 i;
- struct iscsi_conn *conn = cmd->conn;
+ struct iscsit_conn *conn = cmd->conn;
struct iscsi_pdu *pdu = NULL;
if (!cmd->pdu_list) {
- pr_err("struct iscsi_cmd->pdu_list is NULL!\n");
+ pr_err("struct iscsit_cmd->pdu_list is NULL!\n");
return NULL;
}
@@ -672,14 +660,14 @@ redo:
}
struct iscsi_seq *iscsit_get_seq_holder(
- struct iscsi_cmd *cmd,
+ struct iscsit_cmd *cmd,
u32 offset,
u32 length)
{
u32 i;
if (!cmd->seq_list) {
- pr_err("struct iscsi_cmd->seq_list is NULL!\n");
+ pr_err("struct iscsit_cmd->seq_list is NULL!\n");
return NULL;
}
diff --git a/drivers/target/iscsi/iscsi_target_seq_pdu_list.h b/drivers/target/iscsi/iscsi_target_seq_pdu_list.h
index d5b153751a8d..288298f9f1b4 100644
--- a/drivers/target/iscsi/iscsi_target_seq_pdu_list.h
+++ b/drivers/target/iscsi/iscsi_target_seq_pdu_list.h
@@ -1,6 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ISCSI_SEQ_AND_PDU_LIST_H
#define ISCSI_SEQ_AND_PDU_LIST_H
+#include <linux/types.h>
+#include <linux/cache.h>
+
/* struct iscsi_pdu->status */
#define DATAOUT_PDU_SENT 1
@@ -78,9 +82,11 @@ struct iscsi_seq {
u32 xfer_len;
} ____cacheline_aligned;
-extern int iscsit_build_pdu_and_seq_lists(struct iscsi_cmd *, u32);
-extern struct iscsi_pdu *iscsit_get_pdu_holder(struct iscsi_cmd *, u32, u32);
-extern struct iscsi_pdu *iscsit_get_pdu_holder_for_seq(struct iscsi_cmd *, struct iscsi_seq *);
-extern struct iscsi_seq *iscsit_get_seq_holder(struct iscsi_cmd *, u32, u32);
+struct iscsit_cmd;
+
+extern int iscsit_build_pdu_and_seq_lists(struct iscsit_cmd *, u32);
+extern struct iscsi_pdu *iscsit_get_pdu_holder(struct iscsit_cmd *, u32, u32);
+extern struct iscsi_pdu *iscsit_get_pdu_holder_for_seq(struct iscsit_cmd *, struct iscsi_seq *);
+extern struct iscsi_seq *iscsit_get_seq_holder(struct iscsit_cmd *, u32, u32);
#endif /* ISCSI_SEQ_AND_PDU_LIST_H */
diff --git a/drivers/target/iscsi/iscsi_target_stat.c b/drivers/target/iscsi/iscsi_target_stat.c
index 464b4206a51e..367c6468b8e1 100644
--- a/drivers/target/iscsi/iscsi_target_stat.c
+++ b/drivers/target/iscsi/iscsi_target_stat.c
@@ -1,36 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* Modern ConfigFS group context specific iSCSI statistics based on original
* iscsi_target_mib.c code
*
- * Copyright (c) 2011 Rising Tide Systems
- *
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ * Copyright (c) 2011-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
******************************************************************************/
#include <linux/configfs.h>
#include <linux/export.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
-#include <target/configfs_macros.h>
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_parameters.h"
#include "iscsi_target_device.h"
#include "iscsi_target_tpg.h"
#include "iscsi_target_util.h"
-#include "iscsi_target_stat.h"
+#include <target/iscsi/iscsi_target_stat.h>
#ifndef INITIAL_JIFFIES
#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
@@ -39,7 +28,6 @@
/* Instance Attributes Table */
#define ISCSI_INST_NUM_NODES 1
#define ISCSI_INST_DESCR "Storage Engine Target"
-#define ISCSI_INST_LAST_FAILURE_TYPE 0
#define ISCSI_DISCONTINUITY_TIME 0
#define ISCSI_NODE_INDEX 1
@@ -52,76 +40,56 @@
/*
* Instance Attributes Table
*/
-CONFIGFS_EATTR_STRUCT(iscsi_stat_instance, iscsi_wwn_stat_grps);
-#define ISCSI_STAT_INSTANCE_ATTR(_name, _mode) \
-static struct iscsi_stat_instance_attribute \
- iscsi_stat_instance_##_name = \
- __CONFIGFS_EATTR(_name, _mode, \
- iscsi_stat_instance_show_attr_##_name, \
- iscsi_stat_instance_store_attr_##_name);
-
-#define ISCSI_STAT_INSTANCE_ATTR_RO(_name) \
-static struct iscsi_stat_instance_attribute \
- iscsi_stat_instance_##_name = \
- __CONFIGFS_EATTR_RO(_name, \
- iscsi_stat_instance_show_attr_##_name);
-
-static ssize_t iscsi_stat_instance_show_attr_inst(
- struct iscsi_wwn_stat_grps *igrps, char *page)
-{
- struct iscsi_tiqn *tiqn = container_of(igrps,
- struct iscsi_tiqn, tiqn_stat_grps);
+static struct iscsi_tiqn *iscsi_instance_tiqn(struct config_item *item)
+{
+ struct iscsi_wwn_stat_grps *igrps = container_of(to_config_group(item),
+ struct iscsi_wwn_stat_grps, iscsi_instance_group);
+ return container_of(igrps, struct iscsi_tiqn, tiqn_stat_grps);
+}
- return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
+static ssize_t iscsi_stat_instance_inst_show(struct config_item *item,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%u\n",
+ iscsi_instance_tiqn(item)->tiqn_index);
}
-ISCSI_STAT_INSTANCE_ATTR_RO(inst);
-static ssize_t iscsi_stat_instance_show_attr_min_ver(
- struct iscsi_wwn_stat_grps *igrps, char *page)
+static ssize_t iscsi_stat_instance_min_ver_show(struct config_item *item,
+ char *page)
{
return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DRAFT20_VERSION);
}
-ISCSI_STAT_INSTANCE_ATTR_RO(min_ver);
-static ssize_t iscsi_stat_instance_show_attr_max_ver(
- struct iscsi_wwn_stat_grps *igrps, char *page)
+static ssize_t iscsi_stat_instance_max_ver_show(struct config_item *item,
+ char *page)
{
return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DRAFT20_VERSION);
}
-ISCSI_STAT_INSTANCE_ATTR_RO(max_ver);
-static ssize_t iscsi_stat_instance_show_attr_portals(
- struct iscsi_wwn_stat_grps *igrps, char *page)
+static ssize_t iscsi_stat_instance_portals_show(struct config_item *item,
+ char *page)
{
- struct iscsi_tiqn *tiqn = container_of(igrps,
- struct iscsi_tiqn, tiqn_stat_grps);
-
- return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_num_tpg_nps);
+ return snprintf(page, PAGE_SIZE, "%u\n",
+ iscsi_instance_tiqn(item)->tiqn_num_tpg_nps);
}
-ISCSI_STAT_INSTANCE_ATTR_RO(portals);
-static ssize_t iscsi_stat_instance_show_attr_nodes(
- struct iscsi_wwn_stat_grps *igrps, char *page)
+static ssize_t iscsi_stat_instance_nodes_show(struct config_item *item,
+ char *page)
{
return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_INST_NUM_NODES);
}
-ISCSI_STAT_INSTANCE_ATTR_RO(nodes);
-static ssize_t iscsi_stat_instance_show_attr_sessions(
- struct iscsi_wwn_stat_grps *igrps, char *page)
+static ssize_t iscsi_stat_instance_sessions_show(struct config_item *item,
+ char *page)
{
- struct iscsi_tiqn *tiqn = container_of(igrps,
- struct iscsi_tiqn, tiqn_stat_grps);
-
- return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_nsessions);
+ return snprintf(page, PAGE_SIZE, "%u\n",
+ iscsi_instance_tiqn(item)->tiqn_nsessions);
}
-ISCSI_STAT_INSTANCE_ATTR_RO(sessions);
-static ssize_t iscsi_stat_instance_show_attr_fail_sess(
- struct iscsi_wwn_stat_grps *igrps, char *page)
+static ssize_t iscsi_stat_instance_fail_sess_show(struct config_item *item,
+ char *page)
{
- struct iscsi_tiqn *tiqn = container_of(igrps,
- struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_tiqn *tiqn = iscsi_instance_tiqn(item);
struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
u32 sess_err_count;
@@ -133,88 +101,84 @@ static ssize_t iscsi_stat_instance_show_attr_fail_sess(
return snprintf(page, PAGE_SIZE, "%u\n", sess_err_count);
}
-ISCSI_STAT_INSTANCE_ATTR_RO(fail_sess);
-static ssize_t iscsi_stat_instance_show_attr_fail_type(
- struct iscsi_wwn_stat_grps *igrps, char *page)
+static ssize_t iscsi_stat_instance_fail_type_show(struct config_item *item,
+ char *page)
{
- struct iscsi_tiqn *tiqn = container_of(igrps,
- struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_tiqn *tiqn = iscsi_instance_tiqn(item);
struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
return snprintf(page, PAGE_SIZE, "%u\n",
sess_err->last_sess_failure_type);
}
-ISCSI_STAT_INSTANCE_ATTR_RO(fail_type);
-static ssize_t iscsi_stat_instance_show_attr_fail_rem_name(
- struct iscsi_wwn_stat_grps *igrps, char *page)
+static ssize_t iscsi_stat_instance_fail_rem_name_show(struct config_item *item,
+ char *page)
{
- struct iscsi_tiqn *tiqn = container_of(igrps,
- struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_tiqn *tiqn = iscsi_instance_tiqn(item);
struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
return snprintf(page, PAGE_SIZE, "%s\n",
sess_err->last_sess_fail_rem_name[0] ?
sess_err->last_sess_fail_rem_name : NONE);
}
-ISCSI_STAT_INSTANCE_ATTR_RO(fail_rem_name);
-static ssize_t iscsi_stat_instance_show_attr_disc_time(
- struct iscsi_wwn_stat_grps *igrps, char *page)
+static ssize_t iscsi_stat_instance_disc_time_show(struct config_item *item,
+ char *page)
{
return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DISCONTINUITY_TIME);
}
-ISCSI_STAT_INSTANCE_ATTR_RO(disc_time);
-static ssize_t iscsi_stat_instance_show_attr_description(
- struct iscsi_wwn_stat_grps *igrps, char *page)
+static ssize_t iscsi_stat_instance_description_show(struct config_item *item,
+ char *page)
{
return snprintf(page, PAGE_SIZE, "%s\n", ISCSI_INST_DESCR);
}
-ISCSI_STAT_INSTANCE_ATTR_RO(description);
-static ssize_t iscsi_stat_instance_show_attr_vendor(
- struct iscsi_wwn_stat_grps *igrps, char *page)
+static ssize_t iscsi_stat_instance_vendor_show(struct config_item *item,
+ char *page)
{
- return snprintf(page, PAGE_SIZE, "RisingTide Systems iSCSI-Target\n");
+ return snprintf(page, PAGE_SIZE, "Datera, Inc. iSCSI-Target\n");
}
-ISCSI_STAT_INSTANCE_ATTR_RO(vendor);
-static ssize_t iscsi_stat_instance_show_attr_version(
- struct iscsi_wwn_stat_grps *igrps, char *page)
+static ssize_t iscsi_stat_instance_version_show(struct config_item *item,
+ char *page)
{
return snprintf(page, PAGE_SIZE, "%s\n", ISCSIT_VERSION);
}
-ISCSI_STAT_INSTANCE_ATTR_RO(version);
-CONFIGFS_EATTR_OPS(iscsi_stat_instance, iscsi_wwn_stat_grps,
- iscsi_instance_group);
+CONFIGFS_ATTR_RO(iscsi_stat_instance_, inst);
+CONFIGFS_ATTR_RO(iscsi_stat_instance_, min_ver);
+CONFIGFS_ATTR_RO(iscsi_stat_instance_, max_ver);
+CONFIGFS_ATTR_RO(iscsi_stat_instance_, portals);
+CONFIGFS_ATTR_RO(iscsi_stat_instance_, nodes);
+CONFIGFS_ATTR_RO(iscsi_stat_instance_, sessions);
+CONFIGFS_ATTR_RO(iscsi_stat_instance_, fail_sess);
+CONFIGFS_ATTR_RO(iscsi_stat_instance_, fail_type);
+CONFIGFS_ATTR_RO(iscsi_stat_instance_, fail_rem_name);
+CONFIGFS_ATTR_RO(iscsi_stat_instance_, disc_time);
+CONFIGFS_ATTR_RO(iscsi_stat_instance_, description);
+CONFIGFS_ATTR_RO(iscsi_stat_instance_, vendor);
+CONFIGFS_ATTR_RO(iscsi_stat_instance_, version);
static struct configfs_attribute *iscsi_stat_instance_attrs[] = {
- &iscsi_stat_instance_inst.attr,
- &iscsi_stat_instance_min_ver.attr,
- &iscsi_stat_instance_max_ver.attr,
- &iscsi_stat_instance_portals.attr,
- &iscsi_stat_instance_nodes.attr,
- &iscsi_stat_instance_sessions.attr,
- &iscsi_stat_instance_fail_sess.attr,
- &iscsi_stat_instance_fail_type.attr,
- &iscsi_stat_instance_fail_rem_name.attr,
- &iscsi_stat_instance_disc_time.attr,
- &iscsi_stat_instance_description.attr,
- &iscsi_stat_instance_vendor.attr,
- &iscsi_stat_instance_version.attr,
+ &iscsi_stat_instance_attr_inst,
+ &iscsi_stat_instance_attr_min_ver,
+ &iscsi_stat_instance_attr_max_ver,
+ &iscsi_stat_instance_attr_portals,
+ &iscsi_stat_instance_attr_nodes,
+ &iscsi_stat_instance_attr_sessions,
+ &iscsi_stat_instance_attr_fail_sess,
+ &iscsi_stat_instance_attr_fail_type,
+ &iscsi_stat_instance_attr_fail_rem_name,
+ &iscsi_stat_instance_attr_disc_time,
+ &iscsi_stat_instance_attr_description,
+ &iscsi_stat_instance_attr_vendor,
+ &iscsi_stat_instance_attr_version,
NULL,
};
-static struct configfs_item_operations iscsi_stat_instance_item_ops = {
- .show_attribute = iscsi_stat_instance_attr_show,
- .store_attribute = iscsi_stat_instance_attr_store,
-};
-
-struct config_item_type iscsi_stat_instance_cit = {
- .ct_item_ops = &iscsi_stat_instance_item_ops,
+const struct config_item_type iscsi_stat_instance_cit = {
.ct_attrs = iscsi_stat_instance_attrs,
.ct_owner = THIS_MODULE,
};
@@ -222,81 +186,61 @@ struct config_item_type iscsi_stat_instance_cit = {
/*
* Instance Session Failure Stats Table
*/
-CONFIGFS_EATTR_STRUCT(iscsi_stat_sess_err, iscsi_wwn_stat_grps);
-#define ISCSI_STAT_SESS_ERR_ATTR(_name, _mode) \
-static struct iscsi_stat_sess_err_attribute \
- iscsi_stat_sess_err_##_name = \
- __CONFIGFS_EATTR(_name, _mode, \
- iscsi_stat_sess_err_show_attr_##_name, \
- iscsi_stat_sess_err_store_attr_##_name);
-
-#define ISCSI_STAT_SESS_ERR_ATTR_RO(_name) \
-static struct iscsi_stat_sess_err_attribute \
- iscsi_stat_sess_err_##_name = \
- __CONFIGFS_EATTR_RO(_name, \
- iscsi_stat_sess_err_show_attr_##_name);
-
-static ssize_t iscsi_stat_sess_err_show_attr_inst(
- struct iscsi_wwn_stat_grps *igrps, char *page)
-{
- struct iscsi_tiqn *tiqn = container_of(igrps,
- struct iscsi_tiqn, tiqn_stat_grps);
+static struct iscsi_tiqn *iscsi_sess_err_tiqn(struct config_item *item)
+{
+ struct iscsi_wwn_stat_grps *igrps = container_of(to_config_group(item),
+ struct iscsi_wwn_stat_grps, iscsi_sess_err_group);
+ return container_of(igrps, struct iscsi_tiqn, tiqn_stat_grps);
+}
- return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
+static ssize_t iscsi_stat_sess_err_inst_show(struct config_item *item,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%u\n",
+ iscsi_sess_err_tiqn(item)->tiqn_index);
}
-ISCSI_STAT_SESS_ERR_ATTR_RO(inst);
-static ssize_t iscsi_stat_sess_err_show_attr_digest_errors(
- struct iscsi_wwn_stat_grps *igrps, char *page)
+static ssize_t iscsi_stat_sess_err_digest_errors_show(struct config_item *item,
+ char *page)
{
- struct iscsi_tiqn *tiqn = container_of(igrps,
- struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_tiqn *tiqn = iscsi_sess_err_tiqn(item);
struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
return snprintf(page, PAGE_SIZE, "%u\n", sess_err->digest_errors);
}
-ISCSI_STAT_SESS_ERR_ATTR_RO(digest_errors);
-static ssize_t iscsi_stat_sess_err_show_attr_cxn_errors(
- struct iscsi_wwn_stat_grps *igrps, char *page)
+static ssize_t iscsi_stat_sess_err_cxn_errors_show(struct config_item *item,
+ char *page)
{
- struct iscsi_tiqn *tiqn = container_of(igrps,
- struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_tiqn *tiqn = iscsi_sess_err_tiqn(item);
struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
return snprintf(page, PAGE_SIZE, "%u\n", sess_err->cxn_timeout_errors);
}
-ISCSI_STAT_SESS_ERR_ATTR_RO(cxn_errors);
-static ssize_t iscsi_stat_sess_err_show_attr_format_errors(
- struct iscsi_wwn_stat_grps *igrps, char *page)
+static ssize_t iscsi_stat_sess_err_format_errors_show(struct config_item *item,
+ char *page)
{
- struct iscsi_tiqn *tiqn = container_of(igrps,
- struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_tiqn *tiqn = iscsi_sess_err_tiqn(item);
struct iscsi_sess_err_stats *sess_err = &tiqn->sess_err_stats;
return snprintf(page, PAGE_SIZE, "%u\n", sess_err->pdu_format_errors);
}
-ISCSI_STAT_SESS_ERR_ATTR_RO(format_errors);
-CONFIGFS_EATTR_OPS(iscsi_stat_sess_err, iscsi_wwn_stat_grps,
- iscsi_sess_err_group);
+CONFIGFS_ATTR_RO(iscsi_stat_sess_err_, inst);
+CONFIGFS_ATTR_RO(iscsi_stat_sess_err_, digest_errors);
+CONFIGFS_ATTR_RO(iscsi_stat_sess_err_, cxn_errors);
+CONFIGFS_ATTR_RO(iscsi_stat_sess_err_, format_errors);
static struct configfs_attribute *iscsi_stat_sess_err_attrs[] = {
- &iscsi_stat_sess_err_inst.attr,
- &iscsi_stat_sess_err_digest_errors.attr,
- &iscsi_stat_sess_err_cxn_errors.attr,
- &iscsi_stat_sess_err_format_errors.attr,
+ &iscsi_stat_sess_err_attr_inst,
+ &iscsi_stat_sess_err_attr_digest_errors,
+ &iscsi_stat_sess_err_attr_cxn_errors,
+ &iscsi_stat_sess_err_attr_format_errors,
NULL,
};
-static struct configfs_item_operations iscsi_stat_sess_err_item_ops = {
- .show_attribute = iscsi_stat_sess_err_attr_show,
- .store_attribute = iscsi_stat_sess_err_attr_store,
-};
-
-struct config_item_type iscsi_stat_sess_err_cit = {
- .ct_item_ops = &iscsi_stat_sess_err_item_ops,
+const struct config_item_type iscsi_stat_sess_err_cit = {
.ct_attrs = iscsi_stat_sess_err_attrs,
.ct_owner = THIS_MODULE,
};
@@ -304,42 +248,30 @@ struct config_item_type iscsi_stat_sess_err_cit = {
/*
* Target Attributes Table
*/
-CONFIGFS_EATTR_STRUCT(iscsi_stat_tgt_attr, iscsi_wwn_stat_grps);
-#define ISCSI_STAT_TGT_ATTR(_name, _mode) \
-static struct iscsi_stat_tgt_attr_attribute \
- iscsi_stat_tgt_attr_##_name = \
- __CONFIGFS_EATTR(_name, _mode, \
- iscsi_stat_tgt-attr_show_attr_##_name, \
- iscsi_stat_tgt_attr_store_attr_##_name);
-
-#define ISCSI_STAT_TGT_ATTR_RO(_name) \
-static struct iscsi_stat_tgt_attr_attribute \
- iscsi_stat_tgt_attr_##_name = \
- __CONFIGFS_EATTR_RO(_name, \
- iscsi_stat_tgt_attr_show_attr_##_name);
-
-static ssize_t iscsi_stat_tgt_attr_show_attr_inst(
- struct iscsi_wwn_stat_grps *igrps, char *page)
-{
- struct iscsi_tiqn *tiqn = container_of(igrps,
- struct iscsi_tiqn, tiqn_stat_grps);
+static struct iscsi_tiqn *iscsi_tgt_attr_tiqn(struct config_item *item)
+{
+ struct iscsi_wwn_stat_grps *igrps = container_of(to_config_group(item),
+ struct iscsi_wwn_stat_grps, iscsi_tgt_attr_group);
+ return container_of(igrps, struct iscsi_tiqn, tiqn_stat_grps);
+}
- return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
+static ssize_t iscsi_stat_tgt_attr_inst_show(struct config_item *item,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%u\n",
+ iscsi_tgt_attr_tiqn(item)->tiqn_index);
}
-ISCSI_STAT_TGT_ATTR_RO(inst);
-static ssize_t iscsi_stat_tgt_attr_show_attr_indx(
- struct iscsi_wwn_stat_grps *igrps, char *page)
+static ssize_t iscsi_stat_tgt_attr_indx_show(struct config_item *item,
+ char *page)
{
return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_NODE_INDEX);
}
-ISCSI_STAT_TGT_ATTR_RO(indx);
-static ssize_t iscsi_stat_tgt_attr_show_attr_login_fails(
- struct iscsi_wwn_stat_grps *igrps, char *page)
+static ssize_t iscsi_stat_tgt_attr_login_fails_show(struct config_item *item,
+ char *page)
{
- struct iscsi_tiqn *tiqn = container_of(igrps,
- struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_tiqn *tiqn = iscsi_tgt_attr_tiqn(item);
struct iscsi_login_stats *lstat = &tiqn->login_stats;
u32 fail_count;
@@ -351,13 +283,11 @@ static ssize_t iscsi_stat_tgt_attr_show_attr_login_fails(
return snprintf(page, PAGE_SIZE, "%u\n", fail_count);
}
-ISCSI_STAT_TGT_ATTR_RO(login_fails);
-static ssize_t iscsi_stat_tgt_attr_show_attr_last_fail_time(
- struct iscsi_wwn_stat_grps *igrps, char *page)
+static ssize_t iscsi_stat_tgt_attr_last_fail_time_show(struct config_item *item,
+ char *page)
{
- struct iscsi_tiqn *tiqn = container_of(igrps,
- struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_tiqn *tiqn = iscsi_tgt_attr_tiqn(item);
struct iscsi_login_stats *lstat = &tiqn->login_stats;
u32 last_fail_time;
@@ -369,13 +299,11 @@ static ssize_t iscsi_stat_tgt_attr_show_attr_last_fail_time(
return snprintf(page, PAGE_SIZE, "%u\n", last_fail_time);
}
-ISCSI_STAT_TGT_ATTR_RO(last_fail_time);
-static ssize_t iscsi_stat_tgt_attr_show_attr_last_fail_type(
- struct iscsi_wwn_stat_grps *igrps, char *page)
+static ssize_t iscsi_stat_tgt_attr_last_fail_type_show(struct config_item *item,
+ char *page)
{
- struct iscsi_tiqn *tiqn = container_of(igrps,
- struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_tiqn *tiqn = iscsi_tgt_attr_tiqn(item);
struct iscsi_login_stats *lstat = &tiqn->login_stats;
u32 last_fail_type;
@@ -385,30 +313,26 @@ static ssize_t iscsi_stat_tgt_attr_show_attr_last_fail_type(
return snprintf(page, PAGE_SIZE, "%u\n", last_fail_type);
}
-ISCSI_STAT_TGT_ATTR_RO(last_fail_type);
-static ssize_t iscsi_stat_tgt_attr_show_attr_fail_intr_name(
- struct iscsi_wwn_stat_grps *igrps, char *page)
+static ssize_t iscsi_stat_tgt_attr_fail_intr_name_show(struct config_item *item,
+ char *page)
{
- struct iscsi_tiqn *tiqn = container_of(igrps,
- struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_tiqn *tiqn = iscsi_tgt_attr_tiqn(item);
struct iscsi_login_stats *lstat = &tiqn->login_stats;
- unsigned char buf[224];
+ unsigned char buf[ISCSI_IQN_LEN];
spin_lock(&lstat->lock);
- snprintf(buf, 224, "%s", lstat->last_intr_fail_name[0] ?
+ snprintf(buf, ISCSI_IQN_LEN, "%s", lstat->last_intr_fail_name[0] ?
lstat->last_intr_fail_name : NONE);
spin_unlock(&lstat->lock);
return snprintf(page, PAGE_SIZE, "%s\n", buf);
}
-ISCSI_STAT_TGT_ATTR_RO(fail_intr_name);
-static ssize_t iscsi_stat_tgt_attr_show_attr_fail_intr_addr_type(
- struct iscsi_wwn_stat_grps *igrps, char *page)
+static ssize_t iscsi_stat_tgt_attr_fail_intr_addr_type_show(struct config_item *item,
+ char *page)
{
- struct iscsi_tiqn *tiqn = container_of(igrps,
- struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_tiqn *tiqn = iscsi_tgt_attr_tiqn(item);
struct iscsi_login_stats *lstat = &tiqn->login_stats;
int ret;
@@ -421,52 +345,43 @@ static ssize_t iscsi_stat_tgt_attr_show_attr_fail_intr_addr_type(
return ret;
}
-ISCSI_STAT_TGT_ATTR_RO(fail_intr_addr_type);
-static ssize_t iscsi_stat_tgt_attr_show_attr_fail_intr_addr(
- struct iscsi_wwn_stat_grps *igrps, char *page)
+static ssize_t iscsi_stat_tgt_attr_fail_intr_addr_show(struct config_item *item,
+ char *page)
{
- struct iscsi_tiqn *tiqn = container_of(igrps,
- struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_tiqn *tiqn = iscsi_tgt_attr_tiqn(item);
struct iscsi_login_stats *lstat = &tiqn->login_stats;
int ret;
spin_lock(&lstat->lock);
- if (lstat->last_intr_fail_ip_family == AF_INET6) {
- ret = snprintf(page, PAGE_SIZE, "[%s]\n",
- lstat->last_intr_fail_ip_addr);
- } else {
- ret = snprintf(page, PAGE_SIZE, "%s\n",
- lstat->last_intr_fail_ip_addr);
- }
+ ret = snprintf(page, PAGE_SIZE, "%pISc\n", &lstat->last_intr_fail_sockaddr);
spin_unlock(&lstat->lock);
return ret;
}
-ISCSI_STAT_TGT_ATTR_RO(fail_intr_addr);
-CONFIGFS_EATTR_OPS(iscsi_stat_tgt_attr, iscsi_wwn_stat_grps,
- iscsi_tgt_attr_group);
+CONFIGFS_ATTR_RO(iscsi_stat_tgt_attr_, inst);
+CONFIGFS_ATTR_RO(iscsi_stat_tgt_attr_, indx);
+CONFIGFS_ATTR_RO(iscsi_stat_tgt_attr_, login_fails);
+CONFIGFS_ATTR_RO(iscsi_stat_tgt_attr_, last_fail_time);
+CONFIGFS_ATTR_RO(iscsi_stat_tgt_attr_, last_fail_type);
+CONFIGFS_ATTR_RO(iscsi_stat_tgt_attr_, fail_intr_name);
+CONFIGFS_ATTR_RO(iscsi_stat_tgt_attr_, fail_intr_addr_type);
+CONFIGFS_ATTR_RO(iscsi_stat_tgt_attr_, fail_intr_addr);
static struct configfs_attribute *iscsi_stat_tgt_attr_attrs[] = {
- &iscsi_stat_tgt_attr_inst.attr,
- &iscsi_stat_tgt_attr_indx.attr,
- &iscsi_stat_tgt_attr_login_fails.attr,
- &iscsi_stat_tgt_attr_last_fail_time.attr,
- &iscsi_stat_tgt_attr_last_fail_type.attr,
- &iscsi_stat_tgt_attr_fail_intr_name.attr,
- &iscsi_stat_tgt_attr_fail_intr_addr_type.attr,
- &iscsi_stat_tgt_attr_fail_intr_addr.attr,
+ &iscsi_stat_tgt_attr_attr_inst,
+ &iscsi_stat_tgt_attr_attr_indx,
+ &iscsi_stat_tgt_attr_attr_login_fails,
+ &iscsi_stat_tgt_attr_attr_last_fail_time,
+ &iscsi_stat_tgt_attr_attr_last_fail_type,
+ &iscsi_stat_tgt_attr_attr_fail_intr_name,
+ &iscsi_stat_tgt_attr_attr_fail_intr_addr_type,
+ &iscsi_stat_tgt_attr_attr_fail_intr_addr,
NULL,
};
-static struct configfs_item_operations iscsi_stat_tgt_attr_item_ops = {
- .show_attribute = iscsi_stat_tgt_attr_attr_show,
- .store_attribute = iscsi_stat_tgt_attr_attr_store,
-};
-
-struct config_item_type iscsi_stat_tgt_attr_cit = {
- .ct_item_ops = &iscsi_stat_tgt_attr_item_ops,
+const struct config_item_type iscsi_stat_tgt_attr_cit = {
.ct_attrs = iscsi_stat_tgt_attr_attrs,
.ct_owner = THIS_MODULE,
};
@@ -474,42 +389,29 @@ struct config_item_type iscsi_stat_tgt_attr_cit = {
/*
* Target Login Stats Table
*/
-CONFIGFS_EATTR_STRUCT(iscsi_stat_login, iscsi_wwn_stat_grps);
-#define ISCSI_STAT_LOGIN(_name, _mode) \
-static struct iscsi_stat_login_attribute \
- iscsi_stat_login_##_name = \
- __CONFIGFS_EATTR(_name, _mode, \
- iscsi_stat_login_show_attr_##_name, \
- iscsi_stat_login_store_attr_##_name);
-
-#define ISCSI_STAT_LOGIN_RO(_name) \
-static struct iscsi_stat_login_attribute \
- iscsi_stat_login_##_name = \
- __CONFIGFS_EATTR_RO(_name, \
- iscsi_stat_login_show_attr_##_name);
-
-static ssize_t iscsi_stat_login_show_attr_inst(
- struct iscsi_wwn_stat_grps *igrps, char *page)
-{
- struct iscsi_tiqn *tiqn = container_of(igrps,
- struct iscsi_tiqn, tiqn_stat_grps);
+static struct iscsi_tiqn *iscsi_login_stat_tiqn(struct config_item *item)
+{
+ struct iscsi_wwn_stat_grps *igrps = container_of(to_config_group(item),
+ struct iscsi_wwn_stat_grps, iscsi_login_stats_group);
+ return container_of(igrps, struct iscsi_tiqn, tiqn_stat_grps);
+}
- return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
+static ssize_t iscsi_stat_login_inst_show(struct config_item *item, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%u\n",
+ iscsi_login_stat_tiqn(item)->tiqn_index);
}
-ISCSI_STAT_LOGIN_RO(inst);
-static ssize_t iscsi_stat_login_show_attr_indx(
- struct iscsi_wwn_stat_grps *igrps, char *page)
+static ssize_t iscsi_stat_login_indx_show(struct config_item *item,
+ char *page)
{
return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_NODE_INDEX);
}
-ISCSI_STAT_LOGIN_RO(indx);
-static ssize_t iscsi_stat_login_show_attr_accepts(
- struct iscsi_wwn_stat_grps *igrps, char *page)
+static ssize_t iscsi_stat_login_accepts_show(struct config_item *item,
+ char *page)
{
- struct iscsi_tiqn *tiqn = container_of(igrps,
- struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_tiqn *tiqn = iscsi_login_stat_tiqn(item);
struct iscsi_login_stats *lstat = &tiqn->login_stats;
ssize_t ret;
@@ -519,13 +421,11 @@ static ssize_t iscsi_stat_login_show_attr_accepts(
return ret;
}
-ISCSI_STAT_LOGIN_RO(accepts);
-static ssize_t iscsi_stat_login_show_attr_other_fails(
- struct iscsi_wwn_stat_grps *igrps, char *page)
+static ssize_t iscsi_stat_login_other_fails_show(struct config_item *item,
+ char *page)
{
- struct iscsi_tiqn *tiqn = container_of(igrps,
- struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_tiqn *tiqn = iscsi_login_stat_tiqn(item);
struct iscsi_login_stats *lstat = &tiqn->login_stats;
ssize_t ret;
@@ -535,13 +435,11 @@ static ssize_t iscsi_stat_login_show_attr_other_fails(
return ret;
}
-ISCSI_STAT_LOGIN_RO(other_fails);
-static ssize_t iscsi_stat_login_show_attr_redirects(
- struct iscsi_wwn_stat_grps *igrps, char *page)
+static ssize_t iscsi_stat_login_redirects_show(struct config_item *item,
+ char *page)
{
- struct iscsi_tiqn *tiqn = container_of(igrps,
- struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_tiqn *tiqn = iscsi_login_stat_tiqn(item);
struct iscsi_login_stats *lstat = &tiqn->login_stats;
ssize_t ret;
@@ -551,13 +449,11 @@ static ssize_t iscsi_stat_login_show_attr_redirects(
return ret;
}
-ISCSI_STAT_LOGIN_RO(redirects);
-static ssize_t iscsi_stat_login_show_attr_authorize_fails(
- struct iscsi_wwn_stat_grps *igrps, char *page)
+static ssize_t iscsi_stat_login_authorize_fails_show(struct config_item *item,
+ char *page)
{
- struct iscsi_tiqn *tiqn = container_of(igrps,
- struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_tiqn *tiqn = iscsi_login_stat_tiqn(item);
struct iscsi_login_stats *lstat = &tiqn->login_stats;
ssize_t ret;
@@ -567,13 +463,11 @@ static ssize_t iscsi_stat_login_show_attr_authorize_fails(
return ret;
}
-ISCSI_STAT_LOGIN_RO(authorize_fails);
-static ssize_t iscsi_stat_login_show_attr_authenticate_fails(
- struct iscsi_wwn_stat_grps *igrps, char *page)
+static ssize_t iscsi_stat_login_authenticate_fails_show(
+ struct config_item *item, char *page)
{
- struct iscsi_tiqn *tiqn = container_of(igrps,
- struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_tiqn *tiqn = iscsi_login_stat_tiqn(item);
struct iscsi_login_stats *lstat = &tiqn->login_stats;
ssize_t ret;
@@ -583,13 +477,11 @@ static ssize_t iscsi_stat_login_show_attr_authenticate_fails(
return ret;
}
-ISCSI_STAT_LOGIN_RO(authenticate_fails);
-static ssize_t iscsi_stat_login_show_attr_negotiate_fails(
- struct iscsi_wwn_stat_grps *igrps, char *page)
+static ssize_t iscsi_stat_login_negotiate_fails_show(struct config_item *item,
+ char *page)
{
- struct iscsi_tiqn *tiqn = container_of(igrps,
- struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_tiqn *tiqn = iscsi_login_stat_tiqn(item);
struct iscsi_login_stats *lstat = &tiqn->login_stats;
ssize_t ret;
@@ -599,30 +491,29 @@ static ssize_t iscsi_stat_login_show_attr_negotiate_fails(
return ret;
}
-ISCSI_STAT_LOGIN_RO(negotiate_fails);
-CONFIGFS_EATTR_OPS(iscsi_stat_login, iscsi_wwn_stat_grps,
- iscsi_login_stats_group);
+CONFIGFS_ATTR_RO(iscsi_stat_login_, inst);
+CONFIGFS_ATTR_RO(iscsi_stat_login_, indx);
+CONFIGFS_ATTR_RO(iscsi_stat_login_, accepts);
+CONFIGFS_ATTR_RO(iscsi_stat_login_, other_fails);
+CONFIGFS_ATTR_RO(iscsi_stat_login_, redirects);
+CONFIGFS_ATTR_RO(iscsi_stat_login_, authorize_fails);
+CONFIGFS_ATTR_RO(iscsi_stat_login_, authenticate_fails);
+CONFIGFS_ATTR_RO(iscsi_stat_login_, negotiate_fails);
static struct configfs_attribute *iscsi_stat_login_stats_attrs[] = {
- &iscsi_stat_login_inst.attr,
- &iscsi_stat_login_indx.attr,
- &iscsi_stat_login_accepts.attr,
- &iscsi_stat_login_other_fails.attr,
- &iscsi_stat_login_redirects.attr,
- &iscsi_stat_login_authorize_fails.attr,
- &iscsi_stat_login_authenticate_fails.attr,
- &iscsi_stat_login_negotiate_fails.attr,
+ &iscsi_stat_login_attr_inst,
+ &iscsi_stat_login_attr_indx,
+ &iscsi_stat_login_attr_accepts,
+ &iscsi_stat_login_attr_other_fails,
+ &iscsi_stat_login_attr_redirects,
+ &iscsi_stat_login_attr_authorize_fails,
+ &iscsi_stat_login_attr_authenticate_fails,
+ &iscsi_stat_login_attr_negotiate_fails,
NULL,
};
-static struct configfs_item_operations iscsi_stat_login_stats_item_ops = {
- .show_attribute = iscsi_stat_login_attr_show,
- .store_attribute = iscsi_stat_login_attr_store,
-};
-
-struct config_item_type iscsi_stat_login_cit = {
- .ct_item_ops = &iscsi_stat_login_stats_item_ops,
+const struct config_item_type iscsi_stat_login_cit = {
.ct_attrs = iscsi_stat_login_stats_attrs,
.ct_owner = THIS_MODULE,
};
@@ -630,78 +521,56 @@ struct config_item_type iscsi_stat_login_cit = {
/*
* Target Logout Stats Table
*/
-
-CONFIGFS_EATTR_STRUCT(iscsi_stat_logout, iscsi_wwn_stat_grps);
-#define ISCSI_STAT_LOGOUT(_name, _mode) \
-static struct iscsi_stat_logout_attribute \
- iscsi_stat_logout_##_name = \
- __CONFIGFS_EATTR(_name, _mode, \
- iscsi_stat_logout_show_attr_##_name, \
- iscsi_stat_logout_store_attr_##_name);
-
-#define ISCSI_STAT_LOGOUT_RO(_name) \
-static struct iscsi_stat_logout_attribute \
- iscsi_stat_logout_##_name = \
- __CONFIGFS_EATTR_RO(_name, \
- iscsi_stat_logout_show_attr_##_name);
-
-static ssize_t iscsi_stat_logout_show_attr_inst(
- struct iscsi_wwn_stat_grps *igrps, char *page)
+static struct iscsi_tiqn *iscsi_logout_stat_tiqn(struct config_item *item)
{
- struct iscsi_tiqn *tiqn = container_of(igrps,
- struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_wwn_stat_grps *igrps = container_of(to_config_group(item),
+ struct iscsi_wwn_stat_grps, iscsi_logout_stats_group);
+ return container_of(igrps, struct iscsi_tiqn, tiqn_stat_grps);
+}
- return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
+static ssize_t iscsi_stat_logout_inst_show(struct config_item *item, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%u\n",
+ iscsi_logout_stat_tiqn(item)->tiqn_index);
}
-ISCSI_STAT_LOGOUT_RO(inst);
-static ssize_t iscsi_stat_logout_show_attr_indx(
- struct iscsi_wwn_stat_grps *igrps, char *page)
+static ssize_t iscsi_stat_logout_indx_show(struct config_item *item, char *page)
{
return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_NODE_INDEX);
}
-ISCSI_STAT_LOGOUT_RO(indx);
-static ssize_t iscsi_stat_logout_show_attr_normal_logouts(
- struct iscsi_wwn_stat_grps *igrps, char *page)
+static ssize_t iscsi_stat_logout_normal_logouts_show(struct config_item *item,
+ char *page)
{
- struct iscsi_tiqn *tiqn = container_of(igrps,
- struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_tiqn *tiqn = iscsi_logout_stat_tiqn(item);
struct iscsi_logout_stats *lstats = &tiqn->logout_stats;
return snprintf(page, PAGE_SIZE, "%u\n", lstats->normal_logouts);
}
-ISCSI_STAT_LOGOUT_RO(normal_logouts);
-static ssize_t iscsi_stat_logout_show_attr_abnormal_logouts(
- struct iscsi_wwn_stat_grps *igrps, char *page)
+static ssize_t iscsi_stat_logout_abnormal_logouts_show(struct config_item *item,
+ char *page)
{
- struct iscsi_tiqn *tiqn = container_of(igrps,
- struct iscsi_tiqn, tiqn_stat_grps);
+ struct iscsi_tiqn *tiqn = iscsi_logout_stat_tiqn(item);
struct iscsi_logout_stats *lstats = &tiqn->logout_stats;
return snprintf(page, PAGE_SIZE, "%u\n", lstats->abnormal_logouts);
}
-ISCSI_STAT_LOGOUT_RO(abnormal_logouts);
-CONFIGFS_EATTR_OPS(iscsi_stat_logout, iscsi_wwn_stat_grps,
- iscsi_logout_stats_group);
+CONFIGFS_ATTR_RO(iscsi_stat_logout_, inst);
+CONFIGFS_ATTR_RO(iscsi_stat_logout_, indx);
+CONFIGFS_ATTR_RO(iscsi_stat_logout_, normal_logouts);
+CONFIGFS_ATTR_RO(iscsi_stat_logout_, abnormal_logouts);
static struct configfs_attribute *iscsi_stat_logout_stats_attrs[] = {
- &iscsi_stat_logout_inst.attr,
- &iscsi_stat_logout_indx.attr,
- &iscsi_stat_logout_normal_logouts.attr,
- &iscsi_stat_logout_abnormal_logouts.attr,
+ &iscsi_stat_logout_attr_inst,
+ &iscsi_stat_logout_attr_indx,
+ &iscsi_stat_logout_attr_normal_logouts,
+ &iscsi_stat_logout_attr_abnormal_logouts,
NULL,
};
-static struct configfs_item_operations iscsi_stat_logout_stats_item_ops = {
- .show_attribute = iscsi_stat_logout_attr_show,
- .store_attribute = iscsi_stat_logout_attr_store,
-};
-
-struct config_item_type iscsi_stat_logout_cit = {
- .ct_item_ops = &iscsi_stat_logout_stats_item_ops,
+const struct config_item_type iscsi_stat_logout_cit = {
.ct_attrs = iscsi_stat_logout_stats_attrs,
.ct_owner = THIS_MODULE,
};
@@ -709,41 +578,28 @@ struct config_item_type iscsi_stat_logout_cit = {
/*
* Session Stats Table
*/
+static struct iscsi_node_acl *iscsi_stat_nacl(struct config_item *item)
+{
+ struct iscsi_node_stat_grps *igrps = container_of(to_config_group(item),
+ struct iscsi_node_stat_grps, iscsi_sess_stats_group);
+ return container_of(igrps, struct iscsi_node_acl, node_stat_grps);
+}
-CONFIGFS_EATTR_STRUCT(iscsi_stat_sess, iscsi_node_stat_grps);
-#define ISCSI_STAT_SESS(_name, _mode) \
-static struct iscsi_stat_sess_attribute \
- iscsi_stat_sess_##_name = \
- __CONFIGFS_EATTR(_name, _mode, \
- iscsi_stat_sess_show_attr_##_name, \
- iscsi_stat_sess_store_attr_##_name);
-
-#define ISCSI_STAT_SESS_RO(_name) \
-static struct iscsi_stat_sess_attribute \
- iscsi_stat_sess_##_name = \
- __CONFIGFS_EATTR_RO(_name, \
- iscsi_stat_sess_show_attr_##_name);
-
-static ssize_t iscsi_stat_sess_show_attr_inst(
- struct iscsi_node_stat_grps *igrps, char *page)
-{
- struct iscsi_node_acl *acl = container_of(igrps,
- struct iscsi_node_acl, node_stat_grps);
+static ssize_t iscsi_stat_sess_inst_show(struct config_item *item, char *page)
+{
+ struct iscsi_node_acl *acl = iscsi_stat_nacl(item);
struct se_wwn *wwn = acl->se_node_acl.se_tpg->se_tpg_wwn;
struct iscsi_tiqn *tiqn = container_of(wwn,
struct iscsi_tiqn, tiqn_wwn);
return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);
}
-ISCSI_STAT_SESS_RO(inst);
-static ssize_t iscsi_stat_sess_show_attr_node(
- struct iscsi_node_stat_grps *igrps, char *page)
+static ssize_t iscsi_stat_sess_node_show(struct config_item *item, char *page)
{
- struct iscsi_node_acl *acl = container_of(igrps,
- struct iscsi_node_acl, node_stat_grps);
+ struct iscsi_node_acl *acl = iscsi_stat_nacl(item);
struct se_node_acl *se_nacl = &acl->se_node_acl;
- struct iscsi_session *sess;
+ struct iscsit_session *sess;
struct se_session *se_sess;
ssize_t ret = 0;
@@ -759,15 +615,12 @@ static ssize_t iscsi_stat_sess_show_attr_node(
return ret;
}
-ISCSI_STAT_SESS_RO(node);
-static ssize_t iscsi_stat_sess_show_attr_indx(
- struct iscsi_node_stat_grps *igrps, char *page)
+static ssize_t iscsi_stat_sess_indx_show(struct config_item *item, char *page)
{
- struct iscsi_node_acl *acl = container_of(igrps,
- struct iscsi_node_acl, node_stat_grps);
+ struct iscsi_node_acl *acl = iscsi_stat_nacl(item);
struct se_node_acl *se_nacl = &acl->se_node_acl;
- struct iscsi_session *sess;
+ struct iscsit_session *sess;
struct se_session *se_sess;
ssize_t ret = 0;
@@ -783,15 +636,13 @@ static ssize_t iscsi_stat_sess_show_attr_indx(
return ret;
}
-ISCSI_STAT_SESS_RO(indx);
-static ssize_t iscsi_stat_sess_show_attr_cmd_pdus(
- struct iscsi_node_stat_grps *igrps, char *page)
+static ssize_t iscsi_stat_sess_cmd_pdus_show(struct config_item *item,
+ char *page)
{
- struct iscsi_node_acl *acl = container_of(igrps,
- struct iscsi_node_acl, node_stat_grps);
+ struct iscsi_node_acl *acl = iscsi_stat_nacl(item);
struct se_node_acl *se_nacl = &acl->se_node_acl;
- struct iscsi_session *sess;
+ struct iscsit_session *sess;
struct se_session *se_sess;
ssize_t ret = 0;
@@ -800,21 +651,20 @@ static ssize_t iscsi_stat_sess_show_attr_cmd_pdus(
if (se_sess) {
sess = se_sess->fabric_sess_ptr;
if (sess)
- ret = snprintf(page, PAGE_SIZE, "%u\n", sess->cmd_pdus);
+ ret = snprintf(page, PAGE_SIZE, "%lu\n",
+ atomic_long_read(&sess->cmd_pdus));
}
spin_unlock_bh(&se_nacl->nacl_sess_lock);
return ret;
}
-ISCSI_STAT_SESS_RO(cmd_pdus);
-static ssize_t iscsi_stat_sess_show_attr_rsp_pdus(
- struct iscsi_node_stat_grps *igrps, char *page)
+static ssize_t iscsi_stat_sess_rsp_pdus_show(struct config_item *item,
+ char *page)
{
- struct iscsi_node_acl *acl = container_of(igrps,
- struct iscsi_node_acl, node_stat_grps);
+ struct iscsi_node_acl *acl = iscsi_stat_nacl(item);
struct se_node_acl *se_nacl = &acl->se_node_acl;
- struct iscsi_session *sess;
+ struct iscsit_session *sess;
struct se_session *se_sess;
ssize_t ret = 0;
@@ -823,21 +673,20 @@ static ssize_t iscsi_stat_sess_show_attr_rsp_pdus(
if (se_sess) {
sess = se_sess->fabric_sess_ptr;
if (sess)
- ret = snprintf(page, PAGE_SIZE, "%u\n", sess->rsp_pdus);
+ ret = snprintf(page, PAGE_SIZE, "%lu\n",
+ atomic_long_read(&sess->rsp_pdus));
}
spin_unlock_bh(&se_nacl->nacl_sess_lock);
return ret;
}
-ISCSI_STAT_SESS_RO(rsp_pdus);
-static ssize_t iscsi_stat_sess_show_attr_txdata_octs(
- struct iscsi_node_stat_grps *igrps, char *page)
+static ssize_t iscsi_stat_sess_txdata_octs_show(struct config_item *item,
+ char *page)
{
- struct iscsi_node_acl *acl = container_of(igrps,
- struct iscsi_node_acl, node_stat_grps);
+ struct iscsi_node_acl *acl = iscsi_stat_nacl(item);
struct se_node_acl *se_nacl = &acl->se_node_acl;
- struct iscsi_session *sess;
+ struct iscsit_session *sess;
struct se_session *se_sess;
ssize_t ret = 0;
@@ -846,22 +695,20 @@ static ssize_t iscsi_stat_sess_show_attr_txdata_octs(
if (se_sess) {
sess = se_sess->fabric_sess_ptr;
if (sess)
- ret = snprintf(page, PAGE_SIZE, "%llu\n",
- (unsigned long long)sess->tx_data_octets);
+ ret = snprintf(page, PAGE_SIZE, "%lu\n",
+ atomic_long_read(&sess->tx_data_octets));
}
spin_unlock_bh(&se_nacl->nacl_sess_lock);
return ret;
}
-ISCSI_STAT_SESS_RO(txdata_octs);
-static ssize_t iscsi_stat_sess_show_attr_rxdata_octs(
- struct iscsi_node_stat_grps *igrps, char *page)
+static ssize_t iscsi_stat_sess_rxdata_octs_show(struct config_item *item,
+ char *page)
{
- struct iscsi_node_acl *acl = container_of(igrps,
- struct iscsi_node_acl, node_stat_grps);
+ struct iscsi_node_acl *acl = iscsi_stat_nacl(item);
struct se_node_acl *se_nacl = &acl->se_node_acl;
- struct iscsi_session *sess;
+ struct iscsit_session *sess;
struct se_session *se_sess;
ssize_t ret = 0;
@@ -870,22 +717,20 @@ static ssize_t iscsi_stat_sess_show_attr_rxdata_octs(
if (se_sess) {
sess = se_sess->fabric_sess_ptr;
if (sess)
- ret = snprintf(page, PAGE_SIZE, "%llu\n",
- (unsigned long long)sess->rx_data_octets);
+ ret = snprintf(page, PAGE_SIZE, "%lu\n",
+ atomic_long_read(&sess->rx_data_octets));
}
spin_unlock_bh(&se_nacl->nacl_sess_lock);
return ret;
}
-ISCSI_STAT_SESS_RO(rxdata_octs);
-static ssize_t iscsi_stat_sess_show_attr_conn_digest_errors(
- struct iscsi_node_stat_grps *igrps, char *page)
+static ssize_t iscsi_stat_sess_conn_digest_errors_show(struct config_item *item,
+ char *page)
{
- struct iscsi_node_acl *acl = container_of(igrps,
- struct iscsi_node_acl, node_stat_grps);
+ struct iscsi_node_acl *acl = iscsi_stat_nacl(item);
struct se_node_acl *se_nacl = &acl->se_node_acl;
- struct iscsi_session *sess;
+ struct iscsit_session *sess;
struct se_session *se_sess;
ssize_t ret = 0;
@@ -894,22 +739,20 @@ static ssize_t iscsi_stat_sess_show_attr_conn_digest_errors(
if (se_sess) {
sess = se_sess->fabric_sess_ptr;
if (sess)
- ret = snprintf(page, PAGE_SIZE, "%u\n",
- sess->conn_digest_errors);
+ ret = snprintf(page, PAGE_SIZE, "%lu\n",
+ atomic_long_read(&sess->conn_digest_errors));
}
spin_unlock_bh(&se_nacl->nacl_sess_lock);
return ret;
}
-ISCSI_STAT_SESS_RO(conn_digest_errors);
-static ssize_t iscsi_stat_sess_show_attr_conn_timeout_errors(
- struct iscsi_node_stat_grps *igrps, char *page)
+static ssize_t iscsi_stat_sess_conn_timeout_errors_show(
+ struct config_item *item, char *page)
{
- struct iscsi_node_acl *acl = container_of(igrps,
- struct iscsi_node_acl, node_stat_grps);
+ struct iscsi_node_acl *acl = iscsi_stat_nacl(item);
struct se_node_acl *se_nacl = &acl->se_node_acl;
- struct iscsi_session *sess;
+ struct iscsit_session *sess;
struct se_session *se_sess;
ssize_t ret = 0;
@@ -918,38 +761,38 @@ static ssize_t iscsi_stat_sess_show_attr_conn_timeout_errors(
if (se_sess) {
sess = se_sess->fabric_sess_ptr;
if (sess)
- ret = snprintf(page, PAGE_SIZE, "%u\n",
- sess->conn_timeout_errors);
+ ret = snprintf(page, PAGE_SIZE, "%lu\n",
+ atomic_long_read(&sess->conn_timeout_errors));
}
spin_unlock_bh(&se_nacl->nacl_sess_lock);
return ret;
}
-ISCSI_STAT_SESS_RO(conn_timeout_errors);
-CONFIGFS_EATTR_OPS(iscsi_stat_sess, iscsi_node_stat_grps,
- iscsi_sess_stats_group);
+CONFIGFS_ATTR_RO(iscsi_stat_sess_, inst);
+CONFIGFS_ATTR_RO(iscsi_stat_sess_, node);
+CONFIGFS_ATTR_RO(iscsi_stat_sess_, indx);
+CONFIGFS_ATTR_RO(iscsi_stat_sess_, cmd_pdus);
+CONFIGFS_ATTR_RO(iscsi_stat_sess_, rsp_pdus);
+CONFIGFS_ATTR_RO(iscsi_stat_sess_, txdata_octs);
+CONFIGFS_ATTR_RO(iscsi_stat_sess_, rxdata_octs);
+CONFIGFS_ATTR_RO(iscsi_stat_sess_, conn_digest_errors);
+CONFIGFS_ATTR_RO(iscsi_stat_sess_, conn_timeout_errors);
static struct configfs_attribute *iscsi_stat_sess_stats_attrs[] = {
- &iscsi_stat_sess_inst.attr,
- &iscsi_stat_sess_node.attr,
- &iscsi_stat_sess_indx.attr,
- &iscsi_stat_sess_cmd_pdus.attr,
- &iscsi_stat_sess_rsp_pdus.attr,
- &iscsi_stat_sess_txdata_octs.attr,
- &iscsi_stat_sess_rxdata_octs.attr,
- &iscsi_stat_sess_conn_digest_errors.attr,
- &iscsi_stat_sess_conn_timeout_errors.attr,
+ &iscsi_stat_sess_attr_inst,
+ &iscsi_stat_sess_attr_node,
+ &iscsi_stat_sess_attr_indx,
+ &iscsi_stat_sess_attr_cmd_pdus,
+ &iscsi_stat_sess_attr_rsp_pdus,
+ &iscsi_stat_sess_attr_txdata_octs,
+ &iscsi_stat_sess_attr_rxdata_octs,
+ &iscsi_stat_sess_attr_conn_digest_errors,
+ &iscsi_stat_sess_attr_conn_timeout_errors,
NULL,
};
-static struct configfs_item_operations iscsi_stat_sess_stats_item_ops = {
- .show_attribute = iscsi_stat_sess_attr_show,
- .store_attribute = iscsi_stat_sess_attr_store,
-};
-
-struct config_item_type iscsi_stat_sess_cit = {
- .ct_item_ops = &iscsi_stat_sess_stats_item_ops,
+const struct config_item_type iscsi_stat_sess_cit = {
.ct_attrs = iscsi_stat_sess_stats_attrs,
.ct_owner = THIS_MODULE,
};
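Every stats table in the iscsi_target_stat.c diff above follows the same three-step recipe: a container_of() helper recovers the iscsi_tiqn (or iscsi_node_acl) from the config_item, each show routine is renamed to the <prefix><name>_show(struct config_item *, char *) shape, and CONFIGFS_ATTR_RO(<prefix>, <name>) emits the struct configfs_attribute named <prefix>attr_<name> that populates ct_attrs, letting the per-table item_ops boilerplate go away. Here is a minimal sketch of that modern configfs pattern; the demo_ names are hypothetical and the snippet is illustrative rather than a buildable module.

#include <linux/configfs.h>
#include <linux/module.h>

struct demo_stats {
	struct config_group group;
	u32 hits;
};

static struct demo_stats *to_demo_stats(struct config_item *item)
{
	return container_of(to_config_group(item), struct demo_stats, group);
}

/* CONFIGFS_ATTR_RO(demo_, hits) expects exactly this name/signature. */
static ssize_t demo_hits_show(struct config_item *item, char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", to_demo_stats(item)->hits);
}

CONFIGFS_ATTR_RO(demo_, hits);		/* emits demo_attr_hits */

static struct configfs_attribute *demo_attrs[] = {
	&demo_attr_hits,
	NULL,
};

/* No ct_item_ops needed: configfs dispatches to ->show directly. */
const struct config_item_type demo_stats_cit = {
	.ct_attrs	= demo_attrs,
	.ct_owner	= THIS_MODULE,
};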
diff --git a/drivers/target/iscsi/iscsi_target_stat.h b/drivers/target/iscsi/iscsi_target_stat.h
deleted file mode 100644
index 3ff76b4faad3..000000000000
--- a/drivers/target/iscsi/iscsi_target_stat.h
+++ /dev/null
@@ -1,64 +0,0 @@
-#ifndef ISCSI_TARGET_STAT_H
-#define ISCSI_TARGET_STAT_H
-
-/*
- * For struct iscsi_tiqn->tiqn_wwn default groups
- */
-extern struct config_item_type iscsi_stat_instance_cit;
-extern struct config_item_type iscsi_stat_sess_err_cit;
-extern struct config_item_type iscsi_stat_tgt_attr_cit;
-extern struct config_item_type iscsi_stat_login_cit;
-extern struct config_item_type iscsi_stat_logout_cit;
-
-/*
- * For struct iscsi_session->se_sess default groups
- */
-extern struct config_item_type iscsi_stat_sess_cit;
-
-/* iSCSI session error types */
-#define ISCSI_SESS_ERR_UNKNOWN 0
-#define ISCSI_SESS_ERR_DIGEST 1
-#define ISCSI_SESS_ERR_CXN_TIMEOUT 2
-#define ISCSI_SESS_ERR_PDU_FORMAT 3
-
-/* iSCSI session error stats */
-struct iscsi_sess_err_stats {
- spinlock_t lock;
- u32 digest_errors;
- u32 cxn_timeout_errors;
- u32 pdu_format_errors;
- u32 last_sess_failure_type;
- char last_sess_fail_rem_name[224];
-} ____cacheline_aligned;
-
-/* iSCSI login failure types (sub oids) */
-#define ISCSI_LOGIN_FAIL_OTHER 2
-#define ISCSI_LOGIN_FAIL_REDIRECT 3
-#define ISCSI_LOGIN_FAIL_AUTHORIZE 4
-#define ISCSI_LOGIN_FAIL_AUTHENTICATE 5
-#define ISCSI_LOGIN_FAIL_NEGOTIATE 6
-
-/* iSCSI login stats */
-struct iscsi_login_stats {
- spinlock_t lock;
- u32 accepts;
- u32 other_fails;
- u32 redirects;
- u32 authorize_fails;
- u32 authenticate_fails;
- u32 negotiate_fails; /* used for notifications */
- u64 last_fail_time; /* time stamp (jiffies) */
- u32 last_fail_type;
- int last_intr_fail_ip_family;
- unsigned char last_intr_fail_ip_addr[IPV6_ADDRESS_SPACE];
- char last_intr_fail_name[224];
-} ____cacheline_aligned;
-
-/* iSCSI logout stats */
-struct iscsi_logout_stats {
- spinlock_t lock;
- u32 normal_logouts;
- u32 abnormal_logouts;
-} ____cacheline_aligned;
-
-#endif /*** ISCSI_TARGET_STAT_H ***/
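The session-stats hunks above additionally switch the per-session counters (cmd_pdus, rsp_pdus, tx/rx octets, digest and timeout errors) from plain integers read under nacl_sess_lock to atomic_long_t, so the hot path can bump them without a counter lock and the show routines read them via atomic_long_read(), printed with %lu as the driver does. A sketch of that pairing, with hypothetical demo_ names:

#include <linux/atomic.h>
#include <linux/kernel.h>

struct demo_sess {
	atomic_long_t cmd_pdus;		/* was: u32 cmd_pdus + lock */
};

/* Fast path: no lock, just an atomic increment. */
static void demo_account_pdu(struct demo_sess *sess)
{
	atomic_long_inc(&sess->cmd_pdus);
}

/* Stats path: a lockless atomic read, mirroring the %lu format
 * the converted show routines use. */
static ssize_t demo_cmd_pdus_show(struct demo_sess *sess, char *page)
{
	return snprintf(page, PAGE_SIZE, "%lu\n",
			atomic_long_read(&sess->cmd_pdus));
}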
diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c
index b997e5da47d3..620de3910599 100644
--- a/drivers/target/iscsi/iscsi_target_tmr.c
+++ b/drivers/target/iscsi/iscsi_target_tmr.c
@@ -1,31 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* This file contains the iSCSI Target specific Task Management functions.
*
- * © Copyright 2007-2011 RisingTide Systems LLC.
- *
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ * (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
******************************************************************************/
-#include <asm/unaligned.h>
-#include <scsi/scsi_device.h>
+#include <linux/unaligned.h>
+#include <scsi/scsi_proto.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_seq_pdu_list.h"
#include "iscsi_target_datain_values.h"
#include "iscsi_target_device.h"
@@ -38,11 +28,11 @@
#include "iscsi_target.h"
u8 iscsit_tmr_abort_task(
- struct iscsi_cmd *cmd,
+ struct iscsit_cmd *cmd,
unsigned char *buf)
{
- struct iscsi_cmd *ref_cmd;
- struct iscsi_conn *conn = cmd->conn;
+ struct iscsit_cmd *ref_cmd;
+ struct iscsit_conn *conn = cmd->conn;
struct iscsi_tmr_req *tmr_req = cmd->tmr_req;
struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
struct iscsi_tm *hdr = (struct iscsi_tm *) buf;
@@ -52,7 +42,7 @@ u8 iscsit_tmr_abort_task(
pr_err("Unable to locate RefTaskTag: 0x%08x on CID:"
" %hu.\n", hdr->rtt, conn->cid);
return (iscsi_sna_gte(be32_to_cpu(hdr->refcmdsn), conn->sess->exp_cmd_sn) &&
- iscsi_sna_lte(be32_to_cpu(hdr->refcmdsn), conn->sess->max_cmd_sn)) ?
+ iscsi_sna_lte(be32_to_cpu(hdr->refcmdsn), (u32) atomic_read(&conn->sess->max_cmd_sn))) ?
ISCSI_TMF_RSP_COMPLETE : ISCSI_TMF_RSP_NO_TASK;
}
if (ref_cmd->cmd_sn != be32_to_cpu(hdr->refcmdsn)) {
@@ -73,18 +63,18 @@ u8 iscsit_tmr_abort_task(
* Called from iscsit_handle_task_mgt_cmd().
*/
int iscsit_tmr_task_warm_reset(
- struct iscsi_conn *conn,
+ struct iscsit_conn *conn,
struct iscsi_tmr_req *tmr_req,
unsigned char *buf)
{
- struct iscsi_session *sess = conn->sess;
+ struct iscsit_session *sess = conn->sess;
struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
if (!na->tmr_warm_reset) {
pr_err("TMR Opcode TARGET_WARM_RESET authorization"
" failed for Initiator Node: %s\n",
sess->se_sess->se_node_acl->initiatorname);
- return -1;
+ return -1;
}
/*
* Do the real work in transport_generic_do_tmr().
@@ -93,11 +83,11 @@ int iscsit_tmr_task_warm_reset(
}
int iscsit_tmr_task_cold_reset(
- struct iscsi_conn *conn,
+ struct iscsit_conn *conn,
struct iscsi_tmr_req *tmr_req,
unsigned char *buf)
{
- struct iscsi_session *sess = conn->sess;
+ struct iscsit_session *sess = conn->sess;
struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
if (!na->tmr_cold_reset) {
@@ -113,16 +103,17 @@ int iscsit_tmr_task_cold_reset(
}
u8 iscsit_tmr_task_reassign(
- struct iscsi_cmd *cmd,
+ struct iscsit_cmd *cmd,
unsigned char *buf)
{
- struct iscsi_cmd *ref_cmd = NULL;
- struct iscsi_conn *conn = cmd->conn;
+ struct iscsit_cmd *ref_cmd = NULL;
+ struct iscsit_conn *conn = cmd->conn;
struct iscsi_conn_recovery *cr = NULL;
struct iscsi_tmr_req *tmr_req = cmd->tmr_req;
struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
struct iscsi_tm *hdr = (struct iscsi_tm *) buf;
- int ret, ref_lun;
+ u64 ref_lun;
+ int ret;
pr_debug("Got TASK_REASSIGN TMR ITT: 0x%08x,"
" RefTaskTag: 0x%08x, ExpDataSN: 0x%08x, CID: %hu\n",
@@ -166,7 +157,7 @@ u8 iscsit_tmr_task_reassign(
ref_lun = scsilun_to_int(&hdr->lun);
if (ref_lun != ref_cmd->se_cmd.orig_fe_lun) {
pr_err("Unable to perform connection recovery for"
- " differing ref_lun: %d ref_cmd orig_fe_lun: %u\n",
+ " differing ref_lun: %llu ref_cmd orig_fe_lun: %llu\n",
ref_lun, ref_cmd->se_cmd.orig_fe_lun);
return ISCSI_TMF_RSP_REJECTED;
}
@@ -185,9 +176,9 @@ u8 iscsit_tmr_task_reassign(
}
static void iscsit_task_reassign_remove_cmd(
- struct iscsi_cmd *cmd,
+ struct iscsit_cmd *cmd,
struct iscsi_conn_recovery *cr,
- struct iscsi_session *sess)
+ struct iscsit_session *sess)
{
int ret;
@@ -203,9 +194,9 @@ static void iscsit_task_reassign_remove_cmd(
static int iscsit_task_reassign_complete_nop_out(
struct iscsi_tmr_req *tmr_req,
- struct iscsi_conn *conn)
+ struct iscsit_conn *conn)
{
- struct iscsi_cmd *cmd = tmr_req->ref_cmd;
+ struct iscsit_cmd *cmd = tmr_req->ref_cmd;
struct iscsi_conn_recovery *cr;
if (!cmd->cr) {
@@ -234,12 +225,12 @@ static int iscsit_task_reassign_complete_nop_out(
}
static int iscsit_task_reassign_complete_write(
- struct iscsi_cmd *cmd,
+ struct iscsit_cmd *cmd,
struct iscsi_tmr_req *tmr_req)
{
int no_build_r2ts = 0;
u32 length = 0, offset = 0;
- struct iscsi_conn *conn = cmd->conn;
+ struct iscsit_conn *conn = cmd->conn;
struct se_cmd *se_cmd = &cmd->se_cmd;
/*
* The Initiator must not send a R2T SNACK with a Begrun less than
@@ -306,10 +297,10 @@ static int iscsit_task_reassign_complete_write(
}
static int iscsit_task_reassign_complete_read(
- struct iscsi_cmd *cmd,
+ struct iscsit_cmd *cmd,
struct iscsi_tmr_req *tmr_req)
{
- struct iscsi_conn *conn = cmd->conn;
+ struct iscsit_conn *conn = cmd->conn;
struct iscsi_datain_req *dr;
struct se_cmd *se_cmd = &cmd->se_cmd;
/*
@@ -328,7 +319,7 @@ static int iscsit_task_reassign_complete_read(
pr_debug("READ ITT: 0x%08x: t_state: %d never sent to"
" transport\n", cmd->init_task_tag,
cmd->se_cmd.t_state);
- transport_handle_cdb_direct(se_cmd);
+ target_submit(se_cmd);
return 0;
}
@@ -359,10 +350,10 @@ static int iscsit_task_reassign_complete_read(
}
static int iscsit_task_reassign_complete_none(
- struct iscsi_cmd *cmd,
+ struct iscsit_cmd *cmd,
struct iscsi_tmr_req *tmr_req)
{
- struct iscsi_conn *conn = cmd->conn;
+ struct iscsit_conn *conn = cmd->conn;
cmd->i_state = ISTATE_SEND_STATUS;
iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
@@ -371,9 +362,9 @@ static int iscsit_task_reassign_complete_none(
static int iscsit_task_reassign_complete_scsi_cmnd(
struct iscsi_tmr_req *tmr_req,
- struct iscsi_conn *conn)
+ struct iscsit_conn *conn)
{
- struct iscsi_cmd *cmd = tmr_req->ref_cmd;
+ struct iscsit_cmd *cmd = tmr_req->ref_cmd;
struct iscsi_conn_recovery *cr;
if (!cmd->cr) {
@@ -420,13 +411,13 @@ static int iscsit_task_reassign_complete_scsi_cmnd(
static int iscsit_task_reassign_complete(
struct iscsi_tmr_req *tmr_req,
- struct iscsi_conn *conn)
+ struct iscsit_conn *conn)
{
- struct iscsi_cmd *cmd;
+ struct iscsit_cmd *cmd;
int ret = 0;
if (!tmr_req->ref_cmd) {
- pr_err("TMR Request is missing a RefCmd struct iscsi_cmd.\n");
+ pr_err("TMR Request is missing a RefCmd struct iscsit_cmd.\n");
return -1;
}
cmd = tmr_req->ref_cmd;
@@ -442,14 +433,14 @@ static int iscsit_task_reassign_complete(
break;
default:
pr_err("Illegal iSCSI Opcode 0x%02x during"
- " command realligence\n", cmd->iscsi_opcode);
+ " command reallegiance\n", cmd->iscsi_opcode);
return -1;
}
if (ret != 0)
return ret;
- pr_debug("Completed connection realligence for Opcode: 0x%02x,"
+ pr_debug("Completed connection reallegiance for Opcode: 0x%02x,"
" ITT: 0x%08x to CID: %hu.\n", cmd->iscsi_opcode,
cmd->init_task_tag, conn->cid);
@@ -461,7 +452,7 @@ static int iscsit_task_reassign_complete(
 * Right now the only one that it's really needed for is
 * connection recovery related TASK_REASSIGN.
*/
-int iscsit_tmr_post_handler(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+int iscsit_tmr_post_handler(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
{
struct iscsi_tmr_req *tmr_req = cmd->tmr_req;
struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
@@ -479,14 +470,14 @@ EXPORT_SYMBOL(iscsit_tmr_post_handler);
*/
static int iscsit_task_reassign_prepare_read(
struct iscsi_tmr_req *tmr_req,
- struct iscsi_conn *conn)
+ struct iscsit_conn *conn)
{
return 0;
}
static void iscsit_task_reassign_prepare_unsolicited_dataout(
- struct iscsi_cmd *cmd,
- struct iscsi_conn *conn)
+ struct iscsit_cmd *cmd,
+ struct iscsit_conn *conn)
{
int i, j;
struct iscsi_pdu *pdu = NULL;
@@ -554,9 +545,9 @@ static void iscsit_task_reassign_prepare_unsolicited_dataout(
static int iscsit_task_reassign_prepare_write(
struct iscsi_tmr_req *tmr_req,
- struct iscsi_conn *conn)
+ struct iscsit_conn *conn)
{
- struct iscsi_cmd *cmd = tmr_req->ref_cmd;
+ struct iscsit_cmd *cmd = tmr_req->ref_cmd;
struct iscsi_pdu *pdu = NULL;
struct iscsi_r2t *r2t = NULL, *r2t_tmp;
int first_incomplete_r2t = 1, i = 0;
@@ -585,7 +576,7 @@ static int iscsit_task_reassign_prepare_write(
*
* If we have not received all DataOUT in question, we must
* make sure to make the appropriate changes to values in
- * struct iscsi_cmd (and elsewhere depending on session parameters)
+ * struct iscsit_cmd (and elsewhere depending on session parameters)
* so iscsit_build_r2ts_for_cmd() in iscsit_task_reassign_complete_write()
* will resend a new R2T for the DataOUT sequences in question.
*/
@@ -718,7 +709,7 @@ next:
* to check that the Initiator is not requesting R2Ts for DataOUT
* sequences it has already completed.
*
- * Free each R2T in question and adjust values in struct iscsi_cmd
+ * Free each R2T in question and adjust values in struct iscsit_cmd
 * accordingly so iscsit_build_r2ts_for_cmd() does the rest of
* the work after the TMR TASK_REASSIGN Response is sent.
*/
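Freeing list entries while iterating requires the _safe list walker, since list_del() invalidates the iteration cursor. A generic sketch of the pattern used when discarding the completed R2Ts (types here are hypothetical):

    #include <linux/list.h>
    #include <linux/slab.h>

    struct r2t_entry {
            struct list_head node;
            /* ... */
    };

    /* Caller holds the lock protecting 'list'. */
    static void drop_completed_r2ts(struct list_head *list)
    {
            struct r2t_entry *e, *tmp;

            /* _safe variant: 'tmp' keeps the walk valid across list_del(). */
            list_for_each_entry_safe(e, tmp, list, node) {
                    list_del(&e->node);
                    kfree(e);
            }
    }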
@@ -783,13 +774,13 @@ drop_unacknowledged_r2ts:
/*
 * Performs sanity checks on TMR TASK_REASSIGN's ExpDataSN for
- * a given struct iscsi_cmd.
+ * a given struct iscsit_cmd.
*/
int iscsit_check_task_reassign_expdatasn(
struct iscsi_tmr_req *tmr_req,
- struct iscsi_conn *conn)
+ struct iscsit_conn *conn)
{
- struct iscsi_cmd *ref_cmd = tmr_req->ref_cmd;
+ struct iscsit_cmd *ref_cmd = tmr_req->ref_cmd;
if (ref_cmd->iscsi_opcode != ISCSI_OP_SCSI_CMD)
return 0;
diff --git a/drivers/target/iscsi/iscsi_target_tmr.h b/drivers/target/iscsi/iscsi_target_tmr.h
index 142e992cb097..3413d0f596c8 100644
--- a/drivers/target/iscsi/iscsi_target_tmr.h
+++ b/drivers/target/iscsi/iscsi_target_tmr.h
@@ -1,14 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ISCSI_TARGET_TMR_H
#define ISCSI_TARGET_TMR_H
-extern u8 iscsit_tmr_abort_task(struct iscsi_cmd *, unsigned char *);
-extern int iscsit_tmr_task_warm_reset(struct iscsi_conn *, struct iscsi_tmr_req *,
+#include <linux/types.h>
+
+struct iscsit_cmd;
+struct iscsit_conn;
+struct iscsi_tmr_req;
+
+extern u8 iscsit_tmr_abort_task(struct iscsit_cmd *, unsigned char *);
+extern int iscsit_tmr_task_warm_reset(struct iscsit_conn *, struct iscsi_tmr_req *,
unsigned char *);
-extern int iscsit_tmr_task_cold_reset(struct iscsi_conn *, struct iscsi_tmr_req *,
+extern int iscsit_tmr_task_cold_reset(struct iscsit_conn *, struct iscsi_tmr_req *,
unsigned char *);
-extern u8 iscsit_tmr_task_reassign(struct iscsi_cmd *, unsigned char *);
-extern int iscsit_tmr_post_handler(struct iscsi_cmd *, struct iscsi_conn *);
+extern u8 iscsit_tmr_task_reassign(struct iscsit_cmd *, unsigned char *);
+extern int iscsit_tmr_post_handler(struct iscsit_cmd *, struct iscsit_conn *);
extern int iscsit_check_task_reassign_expdatasn(struct iscsi_tmr_req *,
- struct iscsi_conn *);
+ struct iscsit_conn *);
#endif /* ISCSI_TARGET_TMR_H */
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 439260b7d87f..bf06cfdfb012 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -1,28 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* This file contains iSCSI Target Portal Group related functions.
*
- * © Copyright 2007-2011 RisingTide Systems LLC.
- *
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ * (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
******************************************************************************/
+#include <linux/slab.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
-#include <target/target_core_configfs.h>
-
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_erl0.h"
#include "iscsi_target_login.h"
#include "iscsi_target_nodeattrib.h"
@@ -49,7 +38,7 @@ struct iscsi_portal_group *iscsit_alloc_portal_group(struct iscsi_tiqn *tiqn, u1
INIT_LIST_HEAD(&tpg->tpg_gnp_list);
INIT_LIST_HEAD(&tpg->tpg_list);
mutex_init(&tpg->tpg_access_lock);
- mutex_init(&tpg->np_login_lock);
+ sema_init(&tpg->np_login_sem, 1);
spin_lock_init(&tpg->tpg_state_lock);
spin_lock_init(&tpg->tpg_np_lock);
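The np_login_lock to np_login_sem conversion matters because a kernel mutex must be released by the task that acquired it, whereas a semaphore may be up()'d from a different context, which is what the login path needs once setup and teardown can run in different threads. A minimal sketch of the binary-semaphore pattern (standalone names, assuming process context):

    #include <linux/semaphore.h>

    static struct semaphore login_sem;

    static void login_sem_setup(void)
    {
            sema_init(&login_sem, 1);       /* count 1 => mutual exclusion */
    }

    static int login_begin(void)
    {
            /* Returns -EINTR if a signal interrupts the wait. */
            return down_interruptible(&login_sem);
    }

    static void login_end(void)
    {
            up(&login_sem); /* unlike mutex_unlock(), legal from another task */
    }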
@@ -69,11 +58,12 @@ int iscsit_load_discovery_tpg(void)
pr_err("Unable to allocate struct iscsi_portal_group\n");
return -1;
}
-
- ret = core_tpg_register(
- &lio_target_fabric_configfs->tf_ops,
- NULL, &tpg->tpg_se_tpg, tpg,
- TRANSPORT_TPG_TYPE_DISCOVERY);
+ /*
+ * Save iscsi_ops pointer for special case discovery TPG that
+ * doesn't exist as se_wwn->wwn_group within configfs.
+ */
+ tpg->tpg_se_tpg.se_tpg_tfo = &iscsi_ops;
+ ret = core_tpg_register(NULL, &tpg->tpg_se_tpg, -1);
if (ret < 0) {
kfree(tpg);
return -1;
@@ -92,10 +82,10 @@ int iscsit_load_discovery_tpg(void)
*/
param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list);
if (!param)
- goto out;
+ goto free_pl_out;
if (iscsi_update_param_value(param, "CHAP,None") < 0)
- goto out;
+ goto free_pl_out;
tpg->tpg_attrib.authentication = 0;
@@ -107,6 +97,8 @@ int iscsit_load_discovery_tpg(void)
pr_debug("CORE[0] - Allocated Discovery TPG\n");
return 0;
+free_pl_out:
+ iscsi_release_param_list(tpg->param_list);
out:
if (tpg->sid == 1)
core_tpg_deregister(&tpg->tpg_se_tpg);
@@ -121,6 +113,7 @@ void iscsit_release_discovery_tpg(void)
if (!tpg)
return;
+ iscsi_release_param_list(tpg->param_list);
core_tpg_deregister(&tpg->tpg_se_tpg);
kfree(tpg);
@@ -129,7 +122,8 @@ void iscsit_release_discovery_tpg(void)
struct iscsi_portal_group *iscsit_get_tpg_from_np(
struct iscsi_tiqn *tiqn,
- struct iscsi_np *np)
+ struct iscsi_np *np,
+ struct iscsi_tpg_np **tpg_np_out)
{
struct iscsi_portal_group *tpg = NULL;
struct iscsi_tpg_np *tpg_np;
@@ -138,7 +132,7 @@ struct iscsi_portal_group *iscsit_get_tpg_from_np(
list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
spin_lock(&tpg->tpg_state_lock);
- if (tpg->tpg_state == TPG_STATE_FREE) {
+ if (tpg->tpg_state != TPG_STATE_ACTIVE) {
spin_unlock(&tpg->tpg_state_lock);
continue;
}
@@ -147,6 +141,8 @@ struct iscsi_portal_group *iscsit_get_tpg_from_np(
spin_lock(&tpg->tpg_np_lock);
list_for_each_entry(tpg_np, &tpg->tpg_gnp_list, tpg_np_list) {
if (tpg_np->tpg_np == np) {
+ *tpg_np_out = tpg_np;
+ kref_get(&tpg_np->tpg_np_kref);
spin_unlock(&tpg->tpg_np_lock);
spin_unlock(&tiqn->tiqn_tpg_lock);
return tpg;
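The new tpg_np_kref/tpg_np_comp pair is the usual kref-plus-completion teardown idiom: every lookup takes a reference while still under the list lock, and shutdown drops the initial reference and then waits for the last user to go away. A reduced sketch of that idiom (names hypothetical):

    #include <linux/kref.h>
    #include <linux/completion.h>

    struct portal_ref {
            struct kref kref;               /* kref_init()ed to 1 at creation */
            struct completion done;
    };

    static void portal_release(struct kref *kref)
    {
            struct portal_ref *p = container_of(kref, struct portal_ref, kref);

            complete(&p->done);     /* wake whoever waits in teardown */
    }

    static void portal_put(struct portal_ref *p)
    {
            kref_put(&p->kref, portal_release);
    }

    static void portal_shutdown(struct portal_ref *p)
    {
            portal_put(p);                  /* drop the initial reference */
            wait_for_completion(&p->done);  /* outstanding lookups drain here */
    }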
@@ -162,10 +158,7 @@ struct iscsi_portal_group *iscsit_get_tpg_from_np(
int iscsit_get_tpg(
struct iscsi_portal_group *tpg)
{
- int ret;
-
- ret = mutex_lock_interruptible(&tpg->tpg_access_lock);
- return ((ret != 0) || signal_pending(current)) ? -1 : 0;
+ return mutex_lock_interruptible(&tpg->tpg_access_lock);
}
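The simplification works because mutex_lock_interruptible() already reports a signal-aborted wait as -EINTR, so the extra signal_pending() test added nothing. Typical usage of the interruptible variant:

    #include <linux/mutex.h>

    static DEFINE_MUTEX(access_mutex);

    static int access_get(void)
    {
            /* 0 on success; -EINTR if a signal arrived while sleeping. */
            return mutex_lock_interruptible(&access_mutex);
    }

    static void access_put(void)
    {
            mutex_unlock(&access_mutex);
    }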
void iscsit_put_tpg(struct iscsi_portal_group *tpg)
@@ -175,18 +168,22 @@ void iscsit_put_tpg(struct iscsi_portal_group *tpg)
static void iscsit_clear_tpg_np_login_thread(
struct iscsi_tpg_np *tpg_np,
- struct iscsi_portal_group *tpg)
+ struct iscsi_portal_group *tpg,
+ bool shutdown)
{
if (!tpg_np->tpg_np) {
pr_err("struct iscsi_tpg_np->tpg_np is NULL!\n");
return;
}
- iscsit_reset_np_thread(tpg_np->tpg_np, tpg_np, tpg);
+ if (shutdown)
+ tpg_np->tpg_np->enabled = false;
+ iscsit_reset_np_thread(tpg_np->tpg_np, tpg_np, tpg, shutdown);
}
-void iscsit_clear_tpg_np_login_threads(
- struct iscsi_portal_group *tpg)
+static void iscsit_clear_tpg_np_login_threads(
+ struct iscsi_portal_group *tpg,
+ bool shutdown)
{
struct iscsi_tpg_np *tpg_np;
@@ -197,29 +194,29 @@ void iscsit_clear_tpg_np_login_threads(
continue;
}
spin_unlock(&tpg->tpg_np_lock);
- iscsit_clear_tpg_np_login_thread(tpg_np, tpg);
+ iscsit_clear_tpg_np_login_thread(tpg_np, tpg, shutdown);
spin_lock(&tpg->tpg_np_lock);
}
spin_unlock(&tpg->tpg_np_lock);
}
-void iscsit_tpg_dump_params(struct iscsi_portal_group *tpg)
-{
- iscsi_print_params(tpg->param_list);
-}
-
static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *tpg)
{
struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
a->authentication = TA_AUTHENTICATION;
a->login_timeout = TA_LOGIN_TIMEOUT;
- a->netif_timeout = TA_NETIF_TIMEOUT;
a->default_cmdsn_depth = TA_DEFAULT_CMDSN_DEPTH;
a->generate_node_acls = TA_GENERATE_NODE_ACLS;
a->cache_dynamic_acls = TA_CACHE_DYNAMIC_ACLS;
a->demo_mode_write_protect = TA_DEMO_MODE_WRITE_PROTECT;
a->prod_mode_write_protect = TA_PROD_MODE_WRITE_PROTECT;
+ a->demo_mode_discovery = TA_DEMO_MODE_DISCOVERY;
+ a->default_erl = TA_DEFAULT_ERL;
+ a->t10_pi = TA_DEFAULT_T10_PI;
+ a->fabric_prot_type = TA_DEFAULT_FABRIC_PROT_TYPE;
+ a->tpg_enabled_sendtargets = TA_DEFAULT_TPG_ENABLED_SENDTARGETS;
+ a->login_keys_workaround = TA_DEFAULT_LOGIN_KEYS_WORKAROUND;
}
int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg)
@@ -234,7 +231,7 @@ int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_gro
if (iscsi_create_default_params(&tpg->param_list) < 0)
goto err_out;
- ISCSI_TPG_ATTRIB(tpg)->tpg = tpg;
+ tpg->tpg_attrib.tpg = tpg;
spin_lock(&tpg->tpg_state_lock);
tpg->tpg_state = TPG_STATE_INACTIVE;
@@ -253,7 +250,6 @@ err_out:
iscsi_release_param_list(tpg->param_list);
tpg->param_list = NULL;
}
- kfree(tpg);
return -ENOMEM;
}
@@ -276,8 +272,6 @@ int iscsit_tpg_del_portal_group(
return -EPERM;
}
- core_tpg_clear_object_luns(&tpg->tpg_se_tpg);
-
if (tpg->param_list) {
iscsi_release_param_list(tpg->param_list);
tpg->param_list = NULL;
@@ -307,11 +301,9 @@ int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg)
struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
int ret;
- spin_lock(&tpg->tpg_state_lock);
if (tpg->tpg_state == TPG_STATE_ACTIVE) {
pr_err("iSCSI target portal group: %hu is already"
" active, ignoring request.\n", tpg->tpgt);
- spin_unlock(&tpg->tpg_state_lock);
return -EINVAL;
}
/*
@@ -320,12 +312,10 @@ int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg)
* is enforced (as per default), and remove the NONE option.
*/
param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list);
- if (!param) {
- spin_unlock(&tpg->tpg_state_lock);
+ if (!param)
return -EINVAL;
- }
- if (ISCSI_TPG_ATTRIB(tpg)->authentication) {
+ if (tpg->tpg_attrib.authentication) {
if (!strcmp(param->value, NONE)) {
ret = iscsi_update_param_value(param, CHAP);
if (ret)
@@ -337,6 +327,7 @@ int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg)
goto err;
}
+ spin_lock(&tpg->tpg_state_lock);
tpg->tpg_state = TPG_STATE_ACTIVE;
spin_unlock(&tpg->tpg_state_lock);
@@ -349,7 +340,6 @@ int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg)
return 0;
err:
- spin_unlock(&tpg->tpg_state_lock);
return ret;
}
@@ -368,7 +358,7 @@ int iscsit_tpg_disable_portal_group(struct iscsi_portal_group *tpg, int force)
tpg->tpg_state = TPG_STATE_INACTIVE;
spin_unlock(&tpg->tpg_state_lock);
- iscsit_clear_tpg_np_login_threads(tpg);
+ iscsit_clear_tpg_np_login_threads(tpg, false);
if (iscsit_release_sessions_for_tpg(tpg, force) < 0) {
spin_lock(&tpg->tpg_state_lock);
@@ -394,12 +384,11 @@ int iscsit_tpg_disable_portal_group(struct iscsi_portal_group *tpg, int force)
}
struct iscsi_node_attrib *iscsit_tpg_get_node_attrib(
- struct iscsi_session *sess)
+ struct iscsit_session *sess)
{
struct se_session *se_sess = sess->se_sess;
struct se_node_acl *se_nacl = se_sess->se_node_acl;
- struct iscsi_node_acl *acl = container_of(se_nacl, struct iscsi_node_acl,
- se_node_acl);
+ struct iscsi_node_acl *acl = to_iscsi_nacl(se_nacl);
return &acl->node_attrib;
}
@@ -426,7 +415,7 @@ struct iscsi_tpg_np *iscsit_tpg_locate_child_np(
static bool iscsit_tpg_check_network_portal(
struct iscsi_tiqn *tiqn,
- struct __kernel_sockaddr_storage *sockaddr,
+ struct sockaddr_storage *sockaddr,
int network_transport)
{
struct iscsi_portal_group *tpg;
@@ -443,10 +432,13 @@ static bool iscsit_tpg_check_network_portal(
match = iscsit_check_np_match(sockaddr, np,
network_transport);
- if (match == true)
+ if (match)
break;
}
spin_unlock(&tpg->tpg_np_lock);
+
+ if (match)
+ break;
}
spin_unlock(&tiqn->tiqn_tpg_lock);
@@ -455,8 +447,7 @@ static bool iscsit_tpg_check_network_portal(
struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
struct iscsi_portal_group *tpg,
- struct __kernel_sockaddr_storage *sockaddr,
- char *ip_str,
+ struct sockaddr_storage *sockaddr,
struct iscsi_tpg_np *tpg_np_parent,
int network_transport)
{
@@ -465,9 +456,9 @@ struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
if (!tpg_np_parent) {
if (iscsit_tpg_check_network_portal(tpg->tpg_tiqn, sockaddr,
- network_transport) == true) {
- pr_err("Network Portal: %s already exists on a"
- " different TPG on %s\n", ip_str,
+ network_transport)) {
+ pr_err("Network Portal: %pISc already exists on a"
+ " different TPG on %s\n", sockaddr,
tpg->tpg_tiqn->tiqn);
return ERR_PTR(-EEXIST);
}
@@ -480,7 +471,7 @@ struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
return ERR_PTR(-ENOMEM);
}
- np = iscsit_add_np(sockaddr, ip_str, network_transport);
+ np = iscsit_add_np(sockaddr, network_transport);
if (IS_ERR(np)) {
kfree(tpg_np);
return ERR_CAST(np);
@@ -490,6 +481,8 @@ struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
INIT_LIST_HEAD(&tpg_np->tpg_np_child_list);
INIT_LIST_HEAD(&tpg_np->tpg_np_parent_list);
spin_lock_init(&tpg_np->tpg_np_parent_lock);
+ init_completion(&tpg_np->tpg_np_comp);
+ kref_init(&tpg_np->tpg_np_kref);
tpg_np->tpg_np = np;
tpg_np->tpg = tpg;
@@ -508,8 +501,8 @@ struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
spin_unlock(&tpg_np_parent->tpg_np_parent_lock);
}
- pr_debug("CORE[%s] - Added Network Portal: %s:%hu,%hu on %s\n",
- tpg->tpg_tiqn->tiqn, np->np_ip, np->np_port, tpg->tpgt,
+ pr_debug("CORE[%s] - Added Network Portal: %pISpc,%hu on %s\n",
+ tpg->tpg_tiqn->tiqn, &np->np_sockaddr, tpg->tpgt,
np->np_transport->name);
return tpg_np;
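Dropping the pre-formatted ip_str in favour of %pIS works because printk can format a struct sockaddr directly: the 'c' flag compresses IPv6 addresses and the 'p' flag appends the port. A small usage sketch:

    #include <linux/printk.h>
    #include <linux/socket.h>

    static void show_portal(struct sockaddr_storage *ss)
    {
            pr_debug("portal: %pISc\n", ss);   /* e.g. "10.0.0.1" or "fe80::1" */
            pr_debug("portal: %pISpc\n", ss);  /* e.g. "10.0.0.1:3260" */
    }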
@@ -520,10 +513,10 @@ static int iscsit_tpg_release_np(
struct iscsi_portal_group *tpg,
struct iscsi_np *np)
{
- iscsit_clear_tpg_np_login_thread(tpg_np, tpg);
+ iscsit_clear_tpg_np_login_thread(tpg_np, tpg, true);
- pr_debug("CORE[%s] - Removed Network Portal: %s:%hu,%hu on %s\n",
- tpg->tpg_tiqn->tiqn, np->np_ip, np->np_port, tpg->tpgt,
+ pr_debug("CORE[%s] - Removed Network Portal: %pISpc,%hu on %s\n",
+ tpg->tpg_tiqn->tiqn, &np->np_sockaddr, tpg->tpgt,
np->np_transport->name);
tpg_np->tpg_np = NULL;
@@ -584,16 +577,6 @@ int iscsit_tpg_del_network_portal(
return iscsit_tpg_release_np(tpg_np, tpg, np);
}
-int iscsit_tpg_set_initiator_node_queue_depth(
- struct iscsi_portal_group *tpg,
- unsigned char *initiatorname,
- u32 queue_depth,
- int force)
-{
- return core_tpg_set_initiator_node_queue_depth(&tpg->tpg_se_tpg,
- initiatorname, queue_depth, force);
-}
-
int iscsit_ta_authentication(struct iscsi_portal_group *tpg, u32 authentication)
{
unsigned char buf1[256], buf2[256], *none = NULL;
@@ -641,8 +624,7 @@ int iscsit_ta_authentication(struct iscsi_portal_group *tpg, u32 authentication)
none = strstr(buf1, NONE);
if (none)
goto out;
- strncat(buf1, ",", strlen(","));
- strncat(buf1, NONE, strlen(NONE));
+ strlcat(buf1, "," NONE, sizeof(buf1));
if (iscsi_update_param_value(param, buf1) < 0)
return -EINVAL;
}
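The strncat() to strlcat() switch fixes a classic misuse: strncat()'s third argument bounds how much of the source is appended (so passing strlen(src) imposed no limit at all), while strlcat() bounds the total size of the destination and NUL-terminates. A sketch of the difference:

    #include <linux/string.h>

    static void append_none(char *buf, size_t bufsz)
    {
            /*
             * Broken: bounds the source, not the destination. Equivalent
             * to plain strcat() and can overflow buf:
             *
             *      strncat(buf, ",None", strlen(",None"));
             */

            /* Safe: never writes past bufsz, always NUL-terminates. */
            strlcat(buf, ",None", bufsz);
    }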
@@ -678,31 +660,6 @@ int iscsit_ta_login_timeout(
return 0;
}
-int iscsit_ta_netif_timeout(
- struct iscsi_portal_group *tpg,
- u32 netif_timeout)
-{
- struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
-
- if (netif_timeout > TA_NETIF_TIMEOUT_MAX) {
- pr_err("Requested Network Interface Timeout %u larger"
- " than maximum %u\n", netif_timeout,
- TA_NETIF_TIMEOUT_MAX);
- return -EINVAL;
- } else if (netif_timeout < TA_NETIF_TIMEOUT_MIN) {
- pr_err("Requested Network Interface Timeout %u smaller"
- " than minimum %u\n", netif_timeout,
- TA_NETIF_TIMEOUT_MIN);
- return -EINVAL;
- }
-
- a->netif_timeout = netif_timeout;
- pr_debug("Set Network Interface Timeout to %u for"
- " Target Portal Group %hu\n", a->netif_timeout, tpg->tpgt);
-
- return 0;
-}
-
int iscsit_ta_generate_node_acls(
struct iscsi_portal_group *tpg,
u32 flag)
@@ -813,3 +770,112 @@ int iscsit_ta_prod_mode_write_protect(
return 0;
}
+
+int iscsit_ta_demo_mode_discovery(
+ struct iscsi_portal_group *tpg,
+ u32 flag)
+{
+ struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+ if ((flag != 0) && (flag != 1)) {
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
+ }
+
+ a->demo_mode_discovery = flag;
+ pr_debug("iSCSI_TPG[%hu] - Demo Mode Discovery bit:"
+ " %s\n", tpg->tpgt, (a->demo_mode_discovery) ?
+ "ON" : "OFF");
+
+ return 0;
+}
+
+int iscsit_ta_default_erl(
+ struct iscsi_portal_group *tpg,
+ u32 default_erl)
+{
+ struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+ if ((default_erl != 0) && (default_erl != 1) && (default_erl != 2)) {
+ pr_err("Illegal value for default_erl: %u\n", default_erl);
+ return -EINVAL;
+ }
+
+ a->default_erl = default_erl;
+ pr_debug("iSCSI_TPG[%hu] - DefaultERL: %u\n", tpg->tpgt, a->default_erl);
+
+ return 0;
+}
+
+int iscsit_ta_t10_pi(
+ struct iscsi_portal_group *tpg,
+ u32 flag)
+{
+ struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+ if ((flag != 0) && (flag != 1)) {
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
+ }
+
+ a->t10_pi = flag;
+ pr_debug("iSCSI_TPG[%hu] - T10 Protection information bit:"
+ " %s\n", tpg->tpgt, (a->t10_pi) ?
+ "ON" : "OFF");
+
+ return 0;
+}
+
+int iscsit_ta_fabric_prot_type(
+ struct iscsi_portal_group *tpg,
+ u32 prot_type)
+{
+ struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+ if ((prot_type != 0) && (prot_type != 1) && (prot_type != 3)) {
+ pr_err("Illegal value for fabric_prot_type: %u\n", prot_type);
+ return -EINVAL;
+ }
+
+ a->fabric_prot_type = prot_type;
+ pr_debug("iSCSI_TPG[%hu] - T10 Fabric Protection Type: %u\n",
+ tpg->tpgt, prot_type);
+
+ return 0;
+}
+
+int iscsit_ta_tpg_enabled_sendtargets(
+ struct iscsi_portal_group *tpg,
+ u32 flag)
+{
+ struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+ if ((flag != 0) && (flag != 1)) {
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
+ }
+
+ a->tpg_enabled_sendtargets = flag;
+ pr_debug("iSCSI_TPG[%hu] - TPG enabled bit required for SendTargets:"
+ " %s\n", tpg->tpgt, (a->tpg_enabled_sendtargets) ? "ON" : "OFF");
+
+ return 0;
+}
+
+int iscsit_ta_login_keys_workaround(
+ struct iscsi_portal_group *tpg,
+ u32 flag)
+{
+ struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+ if ((flag != 0) && (flag != 1)) {
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
+ }
+
+ a->login_keys_workaround = flag;
+ pr_debug("iSCSI_TPG[%hu] - TPG enabled bit for login keys workaround: %s ",
+ tpg->tpgt, (a->login_keys_workaround) ? "ON" : "OFF");
+
+ return 0;
+}
diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h
index dda48c141a8c..1155b7b3164a 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.h
+++ b/drivers/target/iscsi/iscsi_target_tpg.h
@@ -1,41 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ISCSI_TARGET_TPG_H
#define ISCSI_TARGET_TPG_H
+#include <linux/types.h>
+
+struct iscsi_np;
+struct iscsit_session;
+struct iscsi_tiqn;
+struct iscsi_tpg_np;
+struct se_node_acl;
+struct sockaddr_storage;
+
extern struct iscsi_portal_group *iscsit_alloc_portal_group(struct iscsi_tiqn *, u16);
extern int iscsit_load_discovery_tpg(void);
extern void iscsit_release_discovery_tpg(void);
extern struct iscsi_portal_group *iscsit_get_tpg_from_np(struct iscsi_tiqn *,
- struct iscsi_np *);
+ struct iscsi_np *, struct iscsi_tpg_np **);
extern int iscsit_get_tpg(struct iscsi_portal_group *);
extern void iscsit_put_tpg(struct iscsi_portal_group *);
-extern void iscsit_clear_tpg_np_login_threads(struct iscsi_portal_group *);
-extern void iscsit_tpg_dump_params(struct iscsi_portal_group *);
extern int iscsit_tpg_add_portal_group(struct iscsi_tiqn *, struct iscsi_portal_group *);
extern int iscsit_tpg_del_portal_group(struct iscsi_tiqn *, struct iscsi_portal_group *,
int);
extern int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *);
extern int iscsit_tpg_disable_portal_group(struct iscsi_portal_group *, int);
-extern struct iscsi_node_acl *iscsit_tpg_add_initiator_node_acl(
- struct iscsi_portal_group *, const char *, u32);
-extern void iscsit_tpg_del_initiator_node_acl(struct iscsi_portal_group *,
- struct se_node_acl *);
-extern struct iscsi_node_attrib *iscsit_tpg_get_node_attrib(struct iscsi_session *);
-extern void iscsit_tpg_del_external_nps(struct iscsi_tpg_np *);
+extern struct iscsi_node_attrib *iscsit_tpg_get_node_attrib(struct iscsit_session *);
extern struct iscsi_tpg_np *iscsit_tpg_locate_child_np(struct iscsi_tpg_np *, int);
extern struct iscsi_tpg_np *iscsit_tpg_add_network_portal(struct iscsi_portal_group *,
- struct __kernel_sockaddr_storage *, char *, struct iscsi_tpg_np *,
+ struct sockaddr_storage *, struct iscsi_tpg_np *,
int);
extern int iscsit_tpg_del_network_portal(struct iscsi_portal_group *,
struct iscsi_tpg_np *);
-extern int iscsit_tpg_set_initiator_node_queue_depth(struct iscsi_portal_group *,
- unsigned char *, u32, int);
extern int iscsit_ta_authentication(struct iscsi_portal_group *, u32);
extern int iscsit_ta_login_timeout(struct iscsi_portal_group *, u32);
-extern int iscsit_ta_netif_timeout(struct iscsi_portal_group *, u32);
extern int iscsit_ta_generate_node_acls(struct iscsi_portal_group *, u32);
extern int iscsit_ta_default_cmdsn_depth(struct iscsi_portal_group *, u32);
extern int iscsit_ta_cache_dynamic_acls(struct iscsi_portal_group *, u32);
extern int iscsit_ta_demo_mode_write_protect(struct iscsi_portal_group *, u32);
extern int iscsit_ta_prod_mode_write_protect(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_demo_mode_discovery(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_default_erl(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_t10_pi(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_fabric_prot_type(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_tpg_enabled_sendtargets(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_login_keys_workaround(struct iscsi_portal_group *, u32);
#endif /* ISCSI_TARGET_TPG_H */
diff --git a/drivers/target/iscsi/iscsi_target_tq.c b/drivers/target/iscsi/iscsi_target_tq.c
deleted file mode 100644
index 81289520f96b..000000000000
--- a/drivers/target/iscsi/iscsi_target_tq.c
+++ /dev/null
@@ -1,544 +0,0 @@
-/*******************************************************************************
- * This file contains the iSCSI Login Thread and Thread Queue functions.
- *
- * © Copyright 2007-2011 RisingTide Systems LLC.
- *
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
- *
- * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- ******************************************************************************/
-
-#include <linux/kthread.h>
-#include <linux/list.h>
-#include <linux/bitmap.h>
-
-#include "iscsi_target_core.h"
-#include "iscsi_target_tq.h"
-#include "iscsi_target.h"
-
-static LIST_HEAD(active_ts_list);
-static LIST_HEAD(inactive_ts_list);
-static DEFINE_SPINLOCK(active_ts_lock);
-static DEFINE_SPINLOCK(inactive_ts_lock);
-static DEFINE_SPINLOCK(ts_bitmap_lock);
-
-static void iscsi_add_ts_to_active_list(struct iscsi_thread_set *ts)
-{
- spin_lock(&active_ts_lock);
- list_add_tail(&ts->ts_list, &active_ts_list);
- iscsit_global->active_ts++;
- spin_unlock(&active_ts_lock);
-}
-
-static void iscsi_add_ts_to_inactive_list(struct iscsi_thread_set *ts)
-{
- spin_lock(&inactive_ts_lock);
- list_add_tail(&ts->ts_list, &inactive_ts_list);
- iscsit_global->inactive_ts++;
- spin_unlock(&inactive_ts_lock);
-}
-
-static void iscsi_del_ts_from_active_list(struct iscsi_thread_set *ts)
-{
- spin_lock(&active_ts_lock);
- list_del(&ts->ts_list);
- iscsit_global->active_ts--;
- spin_unlock(&active_ts_lock);
-}
-
-static struct iscsi_thread_set *iscsi_get_ts_from_inactive_list(void)
-{
- struct iscsi_thread_set *ts;
-
- spin_lock(&inactive_ts_lock);
- if (list_empty(&inactive_ts_list)) {
- spin_unlock(&inactive_ts_lock);
- return NULL;
- }
-
- ts = list_first_entry(&inactive_ts_list, struct iscsi_thread_set, ts_list);
-
- list_del(&ts->ts_list);
- iscsit_global->inactive_ts--;
- spin_unlock(&inactive_ts_lock);
-
- return ts;
-}
-
-int iscsi_allocate_thread_sets(u32 thread_pair_count)
-{
- int allocated_thread_pair_count = 0, i, thread_id;
- struct iscsi_thread_set *ts = NULL;
-
- for (i = 0; i < thread_pair_count; i++) {
- ts = kzalloc(sizeof(struct iscsi_thread_set), GFP_KERNEL);
- if (!ts) {
- pr_err("Unable to allocate memory for"
- " thread set.\n");
- return allocated_thread_pair_count;
- }
- /*
- * Locate the next available region in the thread_set_bitmap
- */
- spin_lock(&ts_bitmap_lock);
- thread_id = bitmap_find_free_region(iscsit_global->ts_bitmap,
- iscsit_global->ts_bitmap_count, get_order(1));
- spin_unlock(&ts_bitmap_lock);
- if (thread_id < 0) {
- pr_err("bitmap_find_free_region() failed for"
- " thread_set_bitmap\n");
- kfree(ts);
- return allocated_thread_pair_count;
- }
-
- ts->thread_id = thread_id;
- ts->status = ISCSI_THREAD_SET_FREE;
- INIT_LIST_HEAD(&ts->ts_list);
- spin_lock_init(&ts->ts_state_lock);
- init_completion(&ts->rx_post_start_comp);
- init_completion(&ts->tx_post_start_comp);
- init_completion(&ts->rx_restart_comp);
- init_completion(&ts->tx_restart_comp);
- init_completion(&ts->rx_start_comp);
- init_completion(&ts->tx_start_comp);
-
- ts->create_threads = 1;
- ts->tx_thread = kthread_run(iscsi_target_tx_thread, ts, "%s",
- ISCSI_TX_THREAD_NAME);
- if (IS_ERR(ts->tx_thread)) {
- dump_stack();
- pr_err("Unable to start iscsi_target_tx_thread\n");
- break;
- }
-
- ts->rx_thread = kthread_run(iscsi_target_rx_thread, ts, "%s",
- ISCSI_RX_THREAD_NAME);
- if (IS_ERR(ts->rx_thread)) {
- kthread_stop(ts->tx_thread);
- pr_err("Unable to start iscsi_target_rx_thread\n");
- break;
- }
- ts->create_threads = 0;
-
- iscsi_add_ts_to_inactive_list(ts);
- allocated_thread_pair_count++;
- }
-
- pr_debug("Spawned %d thread set(s) (%d total threads).\n",
- allocated_thread_pair_count, allocated_thread_pair_count * 2);
- return allocated_thread_pair_count;
-}
-
-void iscsi_deallocate_thread_sets(void)
-{
- u32 released_count = 0;
- struct iscsi_thread_set *ts = NULL;
-
- while ((ts = iscsi_get_ts_from_inactive_list())) {
-
- spin_lock_bh(&ts->ts_state_lock);
- ts->status = ISCSI_THREAD_SET_DIE;
- spin_unlock_bh(&ts->ts_state_lock);
-
- if (ts->rx_thread) {
- send_sig(SIGINT, ts->rx_thread, 1);
- kthread_stop(ts->rx_thread);
- }
- if (ts->tx_thread) {
- send_sig(SIGINT, ts->tx_thread, 1);
- kthread_stop(ts->tx_thread);
- }
- /*
- * Release this thread_id in the thread_set_bitmap
- */
- spin_lock(&ts_bitmap_lock);
- bitmap_release_region(iscsit_global->ts_bitmap,
- ts->thread_id, get_order(1));
- spin_unlock(&ts_bitmap_lock);
-
- released_count++;
- kfree(ts);
- }
-
- if (released_count)
- pr_debug("Stopped %d thread set(s) (%d total threads)."
- "\n", released_count, released_count * 2);
-}
-
-static void iscsi_deallocate_extra_thread_sets(void)
-{
- u32 orig_count, released_count = 0;
- struct iscsi_thread_set *ts = NULL;
-
- orig_count = TARGET_THREAD_SET_COUNT;
-
- while ((iscsit_global->inactive_ts + 1) > orig_count) {
- ts = iscsi_get_ts_from_inactive_list();
- if (!ts)
- break;
-
- spin_lock_bh(&ts->ts_state_lock);
- ts->status = ISCSI_THREAD_SET_DIE;
- spin_unlock_bh(&ts->ts_state_lock);
-
- if (ts->rx_thread) {
- send_sig(SIGINT, ts->rx_thread, 1);
- kthread_stop(ts->rx_thread);
- }
- if (ts->tx_thread) {
- send_sig(SIGINT, ts->tx_thread, 1);
- kthread_stop(ts->tx_thread);
- }
- /*
- * Release this thread_id in the thread_set_bitmap
- */
- spin_lock(&ts_bitmap_lock);
- bitmap_release_region(iscsit_global->ts_bitmap,
- ts->thread_id, get_order(1));
- spin_unlock(&ts_bitmap_lock);
-
- released_count++;
- kfree(ts);
- }
-
- if (released_count) {
- pr_debug("Stopped %d thread set(s) (%d total threads)."
- "\n", released_count, released_count * 2);
- }
-}
-
-void iscsi_activate_thread_set(struct iscsi_conn *conn, struct iscsi_thread_set *ts)
-{
- iscsi_add_ts_to_active_list(ts);
-
- spin_lock_bh(&ts->ts_state_lock);
- conn->thread_set = ts;
- ts->conn = conn;
- spin_unlock_bh(&ts->ts_state_lock);
- /*
- * Start up the RX thread and wait on rx_post_start_comp. The RX
- * Thread will then do the same for the TX Thread in
- * iscsi_rx_thread_pre_handler().
- */
- complete(&ts->rx_start_comp);
- wait_for_completion(&ts->rx_post_start_comp);
-}
-
-struct iscsi_thread_set *iscsi_get_thread_set(void)
-{
- int allocate_ts = 0;
- struct completion comp;
- struct iscsi_thread_set *ts = NULL;
- /*
- * If no inactive thread set is available on the first call to
- * iscsi_get_ts_from_inactive_list(), sleep for a second and
- * try again. If still none are available after two attempts,
- * allocate a set ourselves.
- */
-get_set:
- ts = iscsi_get_ts_from_inactive_list();
- if (!ts) {
- if (allocate_ts == 2)
- iscsi_allocate_thread_sets(1);
-
- init_completion(&comp);
- wait_for_completion_timeout(&comp, 1 * HZ);
-
- allocate_ts++;
- goto get_set;
- }
-
- ts->delay_inactive = 1;
- ts->signal_sent = 0;
- ts->thread_count = 2;
- init_completion(&ts->rx_restart_comp);
- init_completion(&ts->tx_restart_comp);
-
- return ts;
-}
-
-void iscsi_set_thread_clear(struct iscsi_conn *conn, u8 thread_clear)
-{
- struct iscsi_thread_set *ts = NULL;
-
- if (!conn->thread_set) {
- pr_err("struct iscsi_conn->thread_set is NULL\n");
- return;
- }
- ts = conn->thread_set;
-
- spin_lock_bh(&ts->ts_state_lock);
- ts->thread_clear &= ~thread_clear;
-
- if ((thread_clear & ISCSI_CLEAR_RX_THREAD) &&
- (ts->blocked_threads & ISCSI_BLOCK_RX_THREAD))
- complete(&ts->rx_restart_comp);
- else if ((thread_clear & ISCSI_CLEAR_TX_THREAD) &&
- (ts->blocked_threads & ISCSI_BLOCK_TX_THREAD))
- complete(&ts->tx_restart_comp);
- spin_unlock_bh(&ts->ts_state_lock);
-}
-
-void iscsi_set_thread_set_signal(struct iscsi_conn *conn, u8 signal_sent)
-{
- struct iscsi_thread_set *ts = NULL;
-
- if (!conn->thread_set) {
- pr_err("struct iscsi_conn->thread_set is NULL\n");
- return;
- }
- ts = conn->thread_set;
-
- spin_lock_bh(&ts->ts_state_lock);
- ts->signal_sent |= signal_sent;
- spin_unlock_bh(&ts->ts_state_lock);
-}
-
-int iscsi_release_thread_set(struct iscsi_conn *conn)
-{
- int thread_called = 0;
- struct iscsi_thread_set *ts = NULL;
-
- if (!conn || !conn->thread_set) {
- pr_err("connection or thread set pointer is NULL\n");
- BUG();
- }
- ts = conn->thread_set;
-
- spin_lock_bh(&ts->ts_state_lock);
- ts->status = ISCSI_THREAD_SET_RESET;
-
- if (!strncmp(current->comm, ISCSI_RX_THREAD_NAME,
- strlen(ISCSI_RX_THREAD_NAME)))
- thread_called = ISCSI_RX_THREAD;
- else if (!strncmp(current->comm, ISCSI_TX_THREAD_NAME,
- strlen(ISCSI_TX_THREAD_NAME)))
- thread_called = ISCSI_TX_THREAD;
-
- if (ts->rx_thread && (thread_called == ISCSI_TX_THREAD) &&
- (ts->thread_clear & ISCSI_CLEAR_RX_THREAD)) {
-
- if (!(ts->signal_sent & ISCSI_SIGNAL_RX_THREAD)) {
- send_sig(SIGINT, ts->rx_thread, 1);
- ts->signal_sent |= ISCSI_SIGNAL_RX_THREAD;
- }
- ts->blocked_threads |= ISCSI_BLOCK_RX_THREAD;
- spin_unlock_bh(&ts->ts_state_lock);
- wait_for_completion(&ts->rx_restart_comp);
- spin_lock_bh(&ts->ts_state_lock);
- ts->blocked_threads &= ~ISCSI_BLOCK_RX_THREAD;
- }
- if (ts->tx_thread && (thread_called == ISCSI_RX_THREAD) &&
- (ts->thread_clear & ISCSI_CLEAR_TX_THREAD)) {
-
- if (!(ts->signal_sent & ISCSI_SIGNAL_TX_THREAD)) {
- send_sig(SIGINT, ts->tx_thread, 1);
- ts->signal_sent |= ISCSI_SIGNAL_TX_THREAD;
- }
- ts->blocked_threads |= ISCSI_BLOCK_TX_THREAD;
- spin_unlock_bh(&ts->ts_state_lock);
- wait_for_completion(&ts->tx_restart_comp);
- spin_lock_bh(&ts->ts_state_lock);
- ts->blocked_threads &= ~ISCSI_BLOCK_TX_THREAD;
- }
-
- ts->conn = NULL;
- ts->status = ISCSI_THREAD_SET_FREE;
- spin_unlock_bh(&ts->ts_state_lock);
-
- return 0;
-}
-
-int iscsi_thread_set_force_reinstatement(struct iscsi_conn *conn)
-{
- struct iscsi_thread_set *ts;
-
- if (!conn->thread_set)
- return -1;
- ts = conn->thread_set;
-
- spin_lock_bh(&ts->ts_state_lock);
- if (ts->status != ISCSI_THREAD_SET_ACTIVE) {
- spin_unlock_bh(&ts->ts_state_lock);
- return -1;
- }
-
- if (ts->tx_thread && (!(ts->signal_sent & ISCSI_SIGNAL_TX_THREAD))) {
- send_sig(SIGINT, ts->tx_thread, 1);
- ts->signal_sent |= ISCSI_SIGNAL_TX_THREAD;
- }
- if (ts->rx_thread && (!(ts->signal_sent & ISCSI_SIGNAL_RX_THREAD))) {
- send_sig(SIGINT, ts->rx_thread, 1);
- ts->signal_sent |= ISCSI_SIGNAL_RX_THREAD;
- }
- spin_unlock_bh(&ts->ts_state_lock);
-
- return 0;
-}
-
-static void iscsi_check_to_add_additional_sets(void)
-{
- int thread_sets_add;
-
- spin_lock(&inactive_ts_lock);
- thread_sets_add = iscsit_global->inactive_ts;
- spin_unlock(&inactive_ts_lock);
- if (thread_sets_add == 1)
- iscsi_allocate_thread_sets(1);
-}
-
-static int iscsi_signal_thread_pre_handler(struct iscsi_thread_set *ts)
-{
- spin_lock_bh(&ts->ts_state_lock);
- if ((ts->status == ISCSI_THREAD_SET_DIE) || signal_pending(current)) {
- spin_unlock_bh(&ts->ts_state_lock);
- return -1;
- }
- spin_unlock_bh(&ts->ts_state_lock);
-
- return 0;
-}
-
-struct iscsi_conn *iscsi_rx_thread_pre_handler(struct iscsi_thread_set *ts)
-{
- int ret;
-
- spin_lock_bh(&ts->ts_state_lock);
- if (ts->create_threads) {
- spin_unlock_bh(&ts->ts_state_lock);
- goto sleep;
- }
-
- flush_signals(current);
-
- if (ts->delay_inactive && (--ts->thread_count == 0)) {
- spin_unlock_bh(&ts->ts_state_lock);
- iscsi_del_ts_from_active_list(ts);
-
- if (!iscsit_global->in_shutdown)
- iscsi_deallocate_extra_thread_sets();
-
- iscsi_add_ts_to_inactive_list(ts);
- spin_lock_bh(&ts->ts_state_lock);
- }
-
- if ((ts->status == ISCSI_THREAD_SET_RESET) &&
- (ts->thread_clear & ISCSI_CLEAR_RX_THREAD))
- complete(&ts->rx_restart_comp);
-
- ts->thread_clear &= ~ISCSI_CLEAR_RX_THREAD;
- spin_unlock_bh(&ts->ts_state_lock);
-sleep:
- ret = wait_for_completion_interruptible(&ts->rx_start_comp);
- if (ret != 0)
- return NULL;
-
- if (iscsi_signal_thread_pre_handler(ts) < 0)
- return NULL;
-
- if (!ts->conn) {
- pr_err("struct iscsi_thread_set->conn is NULL for"
- " thread_id: %d, going back to sleep\n", ts->thread_id);
- goto sleep;
- }
- iscsi_check_to_add_additional_sets();
- /*
- * The RX Thread starts up the TX Thread and sleeps.
- */
- ts->thread_clear |= ISCSI_CLEAR_RX_THREAD;
- complete(&ts->tx_start_comp);
- wait_for_completion(&ts->tx_post_start_comp);
-
- return ts->conn;
-}
-
-struct iscsi_conn *iscsi_tx_thread_pre_handler(struct iscsi_thread_set *ts)
-{
- int ret;
-
- spin_lock_bh(&ts->ts_state_lock);
- if (ts->create_threads) {
- spin_unlock_bh(&ts->ts_state_lock);
- goto sleep;
- }
-
- flush_signals(current);
-
- if (ts->delay_inactive && (--ts->thread_count == 0)) {
- spin_unlock_bh(&ts->ts_state_lock);
- iscsi_del_ts_from_active_list(ts);
-
- if (!iscsit_global->in_shutdown)
- iscsi_deallocate_extra_thread_sets();
-
- iscsi_add_ts_to_inactive_list(ts);
- spin_lock_bh(&ts->ts_state_lock);
- }
- if ((ts->status == ISCSI_THREAD_SET_RESET) &&
- (ts->thread_clear & ISCSI_CLEAR_TX_THREAD))
- complete(&ts->tx_restart_comp);
-
- ts->thread_clear &= ~ISCSI_CLEAR_TX_THREAD;
- spin_unlock_bh(&ts->ts_state_lock);
-sleep:
- ret = wait_for_completion_interruptible(&ts->tx_start_comp);
- if (ret != 0)
- return NULL;
-
- if (iscsi_signal_thread_pre_handler(ts) < 0)
- return NULL;
-
- if (!ts->conn) {
- pr_err("struct iscsi_thread_set->conn is NULL for "
- " thread_id: %d, going back to sleep\n",
- ts->thread_id);
- goto sleep;
- }
-
- iscsi_check_to_add_additional_sets();
- /*
- * From the TX thread, up the tx_post_start_comp that the RX Thread is
- * sleeping on in iscsi_rx_thread_pre_handler(), then up the
- * rx_post_start_comp that iscsi_activate_thread_set() is sleeping on.
- */
- ts->thread_clear |= ISCSI_CLEAR_TX_THREAD;
- complete(&ts->tx_post_start_comp);
- complete(&ts->rx_post_start_comp);
-
- spin_lock_bh(&ts->ts_state_lock);
- ts->status = ISCSI_THREAD_SET_ACTIVE;
- spin_unlock_bh(&ts->ts_state_lock);
-
- return ts->conn;
-}
-
-int iscsi_thread_set_init(void)
-{
- int size;
-
- iscsit_global->ts_bitmap_count = ISCSI_TS_BITMAP_BITS;
-
- size = BITS_TO_LONGS(iscsit_global->ts_bitmap_count) * sizeof(long);
- iscsit_global->ts_bitmap = kzalloc(size, GFP_KERNEL);
- if (!iscsit_global->ts_bitmap) {
- pr_err("Unable to allocate iscsit_global->ts_bitmap\n");
- return -ENOMEM;
- }
-
- return 0;
-}
-
-void iscsi_thread_set_free(void)
-{
- kfree(iscsit_global->ts_bitmap);
-}
diff --git a/drivers/target/iscsi/iscsi_target_tq.h b/drivers/target/iscsi/iscsi_target_tq.h
deleted file mode 100644
index 547d11831282..000000000000
--- a/drivers/target/iscsi/iscsi_target_tq.h
+++ /dev/null
@@ -1,87 +0,0 @@
-#ifndef ISCSI_THREAD_QUEUE_H
-#define ISCSI_THREAD_QUEUE_H
-
-/*
- * Defines for thread sets.
- */
-extern int iscsi_thread_set_force_reinstatement(struct iscsi_conn *);
-extern int iscsi_allocate_thread_sets(u32);
-extern void iscsi_deallocate_thread_sets(void);
-extern void iscsi_activate_thread_set(struct iscsi_conn *, struct iscsi_thread_set *);
-extern struct iscsi_thread_set *iscsi_get_thread_set(void);
-extern void iscsi_set_thread_clear(struct iscsi_conn *, u8);
-extern void iscsi_set_thread_set_signal(struct iscsi_conn *, u8);
-extern int iscsi_release_thread_set(struct iscsi_conn *);
-extern struct iscsi_conn *iscsi_rx_thread_pre_handler(struct iscsi_thread_set *);
-extern struct iscsi_conn *iscsi_tx_thread_pre_handler(struct iscsi_thread_set *);
-extern int iscsi_thread_set_init(void);
-extern void iscsi_thread_set_free(void);
-
-extern int iscsi_target_tx_thread(void *);
-extern int iscsi_target_rx_thread(void *);
-
-#define TARGET_THREAD_SET_COUNT 4
-
-#define ISCSI_RX_THREAD 1
-#define ISCSI_TX_THREAD 2
-#define ISCSI_RX_THREAD_NAME "iscsi_trx"
-#define ISCSI_TX_THREAD_NAME "iscsi_ttx"
-#define ISCSI_BLOCK_RX_THREAD 0x1
-#define ISCSI_BLOCK_TX_THREAD 0x2
-#define ISCSI_CLEAR_RX_THREAD 0x1
-#define ISCSI_CLEAR_TX_THREAD 0x2
-#define ISCSI_SIGNAL_RX_THREAD 0x1
-#define ISCSI_SIGNAL_TX_THREAD 0x2
-
-/* struct iscsi_thread_set->status */
-#define ISCSI_THREAD_SET_FREE 1
-#define ISCSI_THREAD_SET_ACTIVE 2
-#define ISCSI_THREAD_SET_DIE 3
-#define ISCSI_THREAD_SET_RESET 4
-#define ISCSI_THREAD_SET_DEALLOCATE_THREADS 5
-
-/* By default allow a maximum of 32K iSCSI connections */
-#define ISCSI_TS_BITMAP_BITS 32768
-
-struct iscsi_thread_set {
- /* flags used for blocking and restarting sets */
- int blocked_threads;
- /* flag for creating threads */
- int create_threads;
- /* flag for delaying re-adding to inactive list */
- int delay_inactive;
- /* status for thread set */
- int status;
- /* which threads have had signals sent */
- int signal_sent;
- /* flag for which threads exited first */
- int thread_clear;
- /* Active threads in the thread set */
- int thread_count;
- /* Unique thread ID */
- u32 thread_id;
- /* pointer to connection if set is active */
- struct iscsi_conn *conn;
- /* used for controlling ts state accesses */
- spinlock_t ts_state_lock;
- /* Used for rx side post startup */
- struct completion rx_post_start_comp;
- /* Used for tx side post startup */
- struct completion tx_post_start_comp;
- /* used for restarting thread queue */
- struct completion rx_restart_comp;
- /* used for restarting thread queue */
- struct completion tx_restart_comp;
- /* used for normal unused blocking */
- struct completion rx_start_comp;
- /* used for normal unused blocking */
- struct completion tx_start_comp;
- /* OS descriptor for rx thread */
- struct task_struct *rx_thread;
- /* OS descriptor for tx thread */
- struct task_struct *tx_thread;
- /* struct iscsi_thread_set list head */
- struct list_head ts_list;
-};
-
-#endif /*** ISCSI_THREAD_QUEUE_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_transport.c b/drivers/target/iscsi/iscsi_target_transport.c
index 882728fac30c..27c85f260459 100644
--- a/drivers/target/iscsi/iscsi_target_transport.c
+++ b/drivers/target/iscsi/iscsi_target_transport.c
@@ -1,5 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/spinlock.h>
#include <linux/list.h>
+#include <linux/module.h>
#include <target/iscsi/iscsi_transport.h>
static LIST_HEAD(g_transport_list);
@@ -26,11 +28,10 @@ struct iscsit_transport *iscsit_get_transport(int type)
void iscsit_put_transport(struct iscsit_transport *t)
{
- if (t->owner)
- module_put(t->owner);
+ module_put(t->owner);
}
-int iscsit_register_transport(struct iscsit_transport *t)
+void iscsit_register_transport(struct iscsit_transport *t)
{
INIT_LIST_HEAD(&t->t_node);
@@ -39,8 +40,6 @@ int iscsit_register_transport(struct iscsit_transport *t)
mutex_unlock(&transport_mutex);
pr_debug("Registered iSCSI transport: %s\n", t->name);
-
- return 0;
}
EXPORT_SYMBOL(iscsit_register_transport);
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 1df06d5e4e01..5e6cf34929b5 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -1,32 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* This file contains the iSCSI Target specific utility functions.
*
- * © Copyright 2007-2011 RisingTide Systems LLC.
- *
- * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ * (c) Copyright 2007-2013 Datera, Inc.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
******************************************************************************/
#include <linux/list.h>
+#include <linux/sched/signal.h>
+#include <net/ipv6.h> /* ipv6_addr_equal() */
#include <scsi/scsi_tcq.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
-#include <target/target_core_configfs.h>
#include <target/iscsi/iscsi_transport.h>
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_parameters.h"
#include "iscsi_target_seq_pdu_list.h"
#include "iscsi_target_datain_values.h"
@@ -34,35 +25,14 @@
#include "iscsi_target_erl1.h"
#include "iscsi_target_erl2.h"
#include "iscsi_target_tpg.h"
-#include "iscsi_target_tq.h"
#include "iscsi_target_util.h"
#include "iscsi_target.h"
-#define PRINT_BUFF(buff, len) \
-{ \
- int zzz; \
- \
- pr_debug("%d:\n", __LINE__); \
- for (zzz = 0; zzz < len; zzz++) { \
- if (zzz % 16 == 0) { \
- if (zzz) \
- pr_debug("\n"); \
- pr_debug("%4i: ", zzz); \
- } \
- pr_debug("%02x ", (unsigned char) (buff)[zzz]); \
- } \
- if ((len + 1) % 16) \
- pr_debug("\n"); \
-}
-
extern struct list_head g_tiqn_list;
extern spinlock_t tiqn_lock;
-/*
- * Called with cmd->r2t_lock held.
- */
int iscsit_add_r2t_to_list(
- struct iscsi_cmd *cmd,
+ struct iscsit_cmd *cmd,
u32 offset,
u32 xfer_len,
int recovery,
@@ -70,6 +40,10 @@ int iscsit_add_r2t_to_list(
{
struct iscsi_r2t *r2t;
+ lockdep_assert_held(&cmd->r2t_lock);
+
+ WARN_ON_ONCE((s32)xfer_len < 0);
+
r2t = kmem_cache_zalloc(lio_r2t_cache, GFP_ATOMIC);
if (!r2t) {
pr_err("Unable to allocate memory for struct iscsi_r2t.\n");
@@ -91,7 +65,7 @@ int iscsit_add_r2t_to_list(
}
struct iscsi_r2t *iscsit_get_r2t_for_eos(
- struct iscsi_cmd *cmd,
+ struct iscsit_cmd *cmd,
u32 offset,
u32 length)
{
@@ -112,7 +86,7 @@ struct iscsi_r2t *iscsit_get_r2t_for_eos(
return NULL;
}
-struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *cmd)
+struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsit_cmd *cmd)
{
struct iscsi_r2t *r2t;
@@ -130,16 +104,15 @@ struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *cmd)
return NULL;
}
-/*
- * Called with cmd->r2t_lock held.
- */
-void iscsit_free_r2t(struct iscsi_r2t *r2t, struct iscsi_cmd *cmd)
+void iscsit_free_r2t(struct iscsi_r2t *r2t, struct iscsit_cmd *cmd)
{
+ lockdep_assert_held(&cmd->r2t_lock);
+
list_del(&r2t->r2t_list);
kmem_cache_free(lio_r2t_cache, r2t);
}
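The old "Called with cmd->r2t_lock held" comments become lockdep_assert_held() calls: the locking contract is now checked at runtime under CONFIG_PROVE_LOCKING and compiles away otherwise. A minimal sketch:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_lock);

    static void needs_demo_lock(void)
    {
            /* Splats if the caller forgot the lock (lockdep kernels only). */
            lockdep_assert_held(&demo_lock);
            /* ... mutate state protected by demo_lock ... */
    }

    static void demo_caller(void)
    {
            spin_lock_bh(&demo_lock);
            needs_demo_lock();
            spin_unlock_bh(&demo_lock);
    }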
-void iscsit_free_r2ts_from_list(struct iscsi_cmd *cmd)
+void iscsit_free_r2ts_from_list(struct iscsit_cmd *cmd)
{
struct iscsi_r2t *r2t, *r2t_tmp;
@@ -149,32 +122,56 @@ void iscsit_free_r2ts_from_list(struct iscsi_cmd *cmd)
spin_unlock_bh(&cmd->r2t_lock);
}
-struct iscsi_cmd *iscsit_alloc_cmd(struct iscsi_conn *conn, gfp_t gfp_mask)
+static int iscsit_wait_for_tag(struct se_session *se_sess, int state, int *cpup)
{
- struct iscsi_cmd *cmd;
-
- cmd = kmem_cache_zalloc(lio_cmd_cache, gfp_mask);
- if (!cmd)
- return NULL;
+ int tag = -1;
+ DEFINE_SBQ_WAIT(wait);
+ struct sbq_wait_state *ws;
+ struct sbitmap_queue *sbq;
+
+ if (state == TASK_RUNNING)
+ return tag;
+
+ sbq = &se_sess->sess_tag_pool;
+ ws = &sbq->ws[0];
+ for (;;) {
+ sbitmap_prepare_to_wait(sbq, ws, &wait, state);
+ if (signal_pending_state(state, current))
+ break;
+ tag = sbitmap_queue_get(sbq, cpup);
+ if (tag >= 0)
+ break;
+ schedule();
+ }
- cmd->release_cmd = &iscsit_release_cmd;
- return cmd;
+ sbitmap_finish_wait(sbq, ws, &wait);
+ return tag;
}
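The loop above is the stock sbitmap_queue consumer pattern: hook onto a wait state, retry sbitmap_queue_get(), and schedule() until a tag frees up; passing TASK_RUNNING short-circuits so atomic callers never sleep. A hedged sketch of how callers would pick the state argument (the wrapper names are hypothetical):

    #include <linux/sched.h>

    /* Timer/softirq context (e.g. NopIN allocation): must not sleep. */
    static struct iscsit_cmd *alloc_cmd_atomic(struct iscsit_conn *conn)
    {
            return iscsit_allocate_cmd(conn, TASK_RUNNING);
    }

    /* Process context (RX thread): sleep, but let a signal abort the wait. */
    static struct iscsit_cmd *alloc_cmd_blocking(struct iscsit_conn *conn)
    {
            return iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
    }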
/*
* May be called from software interrupt (timer) context for allocating
* iSCSI NopINs.
*/
-struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp_mask)
+struct iscsit_cmd *iscsit_allocate_cmd(struct iscsit_conn *conn, int state)
{
- struct iscsi_cmd *cmd;
-
- cmd = conn->conn_transport->iscsit_alloc_cmd(conn, gfp_mask);
- if (!cmd) {
- pr_err("Unable to allocate memory for struct iscsi_cmd.\n");
+ struct iscsit_cmd *cmd;
+ struct se_session *se_sess = conn->sess->se_sess;
+ int size, tag, cpu;
+
+ tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
+ if (tag < 0)
+ tag = iscsit_wait_for_tag(se_sess, state, &cpu);
+ if (tag < 0)
return NULL;
- }
+
+ size = sizeof(struct iscsit_cmd) + conn->conn_transport->priv_size;
+ cmd = (struct iscsit_cmd *)(se_sess->sess_cmd_map + (tag * size));
+ memset(cmd, 0, size);
+
+ cmd->se_cmd.map_tag = tag;
+ cmd->se_cmd.map_cpu = cpu;
cmd->conn = conn;
+ cmd->data_direction = DMA_NONE;
INIT_LIST_HEAD(&cmd->i_conn_node);
INIT_LIST_HEAD(&cmd->datain_list);
INIT_LIST_HEAD(&cmd->cmd_r2t_list);
@@ -183,13 +180,14 @@ struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp_mask)
spin_lock_init(&cmd->istate_lock);
spin_lock_init(&cmd->error_lock);
spin_lock_init(&cmd->r2t_lock);
+ timer_setup(&cmd->dataout_timer, iscsit_handle_dataout_timeout, 0);
return cmd;
}
EXPORT_SYMBOL(iscsit_allocate_cmd);
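With the slab cache gone, each command is a fixed-size slot in the session's preallocated sess_cmd_map: the sbitmap tag indexes the slot, and the transport's private data lives in the slot's tail. A worked sketch of the slot arithmetic (standalone, names hypothetical):

    #include <stdint.h>
    #include <stddef.h>
    #include <string.h>

    struct cmd {
            int tag;
            /* ... */
    };

    /* slot size = command + transport-private area appended at the tail */
    static struct cmd *slot_for_tag(void *cmd_map, int tag, size_t priv_size)
    {
            size_t slot = sizeof(struct cmd) + priv_size;
            struct cmd *c = (struct cmd *)((char *)cmd_map + (size_t)tag * slot);

            memset(c, 0, slot);     /* fresh slot each time; no constructor runs */
            c->tag = tag;
            return c;
    }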
struct iscsi_seq *iscsit_get_seq_holder_for_datain(
- struct iscsi_cmd *cmd,
+ struct iscsit_cmd *cmd,
u32 seq_send_order)
{
u32 i;
@@ -201,12 +199,12 @@ struct iscsi_seq *iscsit_get_seq_holder_for_datain(
return NULL;
}
-struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *cmd)
+struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsit_cmd *cmd)
{
u32 i;
if (!cmd->seq_list) {
- pr_err("struct iscsi_cmd->seq_list is NULL!\n");
+ pr_err("struct iscsit_cmd->seq_list is NULL!\n");
return NULL;
}
@@ -223,7 +221,7 @@ struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *cmd)
}
struct iscsi_r2t *iscsit_get_holder_for_r2tsn(
- struct iscsi_cmd *cmd,
+ struct iscsit_cmd *cmd,
u32 r2t_sn)
{
struct iscsi_r2t *r2t;
@@ -240,8 +238,9 @@ struct iscsi_r2t *iscsit_get_holder_for_r2tsn(
return NULL;
}
-static inline int iscsit_check_received_cmdsn(struct iscsi_session *sess, u32 cmdsn)
+static inline int iscsit_check_received_cmdsn(struct iscsit_session *sess, u32 cmdsn)
{
+ u32 max_cmdsn;
int ret;
/*
@@ -250,11 +249,11 @@ static inline int iscsit_check_received_cmdsn(struct iscsi_session *sess, u32 cm
* or order CmdSNs due to multiple connection sessions and/or
* CRC failures.
*/
- if (iscsi_sna_gt(cmdsn, sess->max_cmd_sn)) {
+ max_cmdsn = atomic_read(&sess->max_cmd_sn);
+ if (iscsi_sna_gt(cmdsn, max_cmdsn)) {
pr_err("Received CmdSN: 0x%08x is greater than"
- " MaxCmdSN: 0x%08x, protocol error.\n", cmdsn,
- sess->max_cmd_sn);
- ret = CMDSN_ERROR_CANNOT_RECOVER;
+ " MaxCmdSN: 0x%08x, ignoring.\n", cmdsn, max_cmdsn);
+ ret = CMDSN_MAXCMDSN_OVERRUN;
} else if (cmdsn == sess->exp_cmd_sn) {
sess->exp_cmd_sn++;
@@ -283,7 +282,7 @@ static inline int iscsit_check_received_cmdsn(struct iscsi_session *sess, u32 cm
* Commands may be received out of order if MC/S is in use.
* Ensure they are executed in CmdSN order.
*/
-int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+int iscsit_sequence_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
unsigned char *buf, __be32 cmdsn)
{
int ret, cmdsn_ret;
@@ -313,14 +312,16 @@ int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
ret = CMDSN_HIGHER_THAN_EXP;
break;
case CMDSN_LOWER_THAN_EXP:
+ case CMDSN_MAXCMDSN_OVERRUN:
+ default:
cmd->i_state = ISTATE_REMOVE;
iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
- ret = cmdsn_ret;
- break;
- default:
- reason = ISCSI_REASON_PROTOCOL_ERROR;
- reject = true;
- ret = cmdsn_ret;
+ /*
+ * Existing callers for iscsit_sequence_cmd() will silently
+ * ignore commands with CMDSN_LOWER_THAN_EXP, so force this
+	 * return for CMDSN_MAXCMDSN_OVERRUN as well.
+ */
+ ret = CMDSN_LOWER_THAN_EXP;
break;
}
mutex_unlock(&conn->sess->cmdsn_mutex);
@@ -332,55 +333,11 @@ int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
}
EXPORT_SYMBOL(iscsit_sequence_cmd);
-int iscsit_check_unsolicited_dataout(struct iscsi_cmd *cmd, unsigned char *buf)
-{
- struct iscsi_conn *conn = cmd->conn;
- struct se_cmd *se_cmd = &cmd->se_cmd;
- struct iscsi_data *hdr = (struct iscsi_data *) buf;
- u32 payload_length = ntoh24(hdr->dlength);
-
- if (conn->sess->sess_ops->InitialR2T) {
- pr_err("Received unexpected unsolicited data"
- " while InitialR2T=Yes, protocol error.\n");
- transport_send_check_condition_and_sense(se_cmd,
- TCM_UNEXPECTED_UNSOLICITED_DATA, 0);
- return -1;
- }
-
- if ((cmd->first_burst_len + payload_length) >
- conn->sess->sess_ops->FirstBurstLength) {
- pr_err("Total %u bytes exceeds FirstBurstLength: %u"
- " for this Unsolicited DataOut Burst.\n",
- (cmd->first_burst_len + payload_length),
- conn->sess->sess_ops->FirstBurstLength);
- transport_send_check_condition_and_sense(se_cmd,
- TCM_INCORRECT_AMOUNT_OF_DATA, 0);
- return -1;
- }
-
- if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))
- return 0;
-
- if (((cmd->first_burst_len + payload_length) != cmd->se_cmd.data_length) &&
- ((cmd->first_burst_len + payload_length) !=
- conn->sess->sess_ops->FirstBurstLength)) {
- pr_err("Unsolicited non-immediate data received %u"
- " does not equal FirstBurstLength: %u, and does"
- " not equal ExpXferLen %u.\n",
- (cmd->first_burst_len + payload_length),
- conn->sess->sess_ops->FirstBurstLength, cmd->se_cmd.data_length);
- transport_send_check_condition_and_sense(se_cmd,
- TCM_INCORRECT_AMOUNT_OF_DATA, 0);
- return -1;
- }
- return 0;
-}
-
-struct iscsi_cmd *iscsit_find_cmd_from_itt(
- struct iscsi_conn *conn,
+struct iscsit_cmd *iscsit_find_cmd_from_itt(
+ struct iscsit_conn *conn,
itt_t init_task_tag)
{
- struct iscsi_cmd *cmd;
+ struct iscsit_cmd *cmd;
spin_lock_bh(&conn->cmd_lock);
list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
@@ -395,16 +352,19 @@ struct iscsi_cmd *iscsit_find_cmd_from_itt(
init_task_tag, conn->cid);
return NULL;
}
+EXPORT_SYMBOL(iscsit_find_cmd_from_itt);
-struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(
- struct iscsi_conn *conn,
+struct iscsit_cmd *iscsit_find_cmd_from_itt_or_dump(
+ struct iscsit_conn *conn,
itt_t init_task_tag,
u32 length)
{
- struct iscsi_cmd *cmd;
+ struct iscsit_cmd *cmd;
spin_lock_bh(&conn->cmd_lock);
list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
+ if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT)
+ continue;
if (cmd->init_task_tag == init_task_tag) {
spin_unlock_bh(&conn->cmd_lock);
return cmd;
@@ -419,12 +379,13 @@ struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(
return NULL;
}
+EXPORT_SYMBOL(iscsit_find_cmd_from_itt_or_dump);
-struct iscsi_cmd *iscsit_find_cmd_from_ttt(
- struct iscsi_conn *conn,
+struct iscsit_cmd *iscsit_find_cmd_from_ttt(
+ struct iscsit_conn *conn,
u32 targ_xfer_tag)
{
- struct iscsi_cmd *cmd = NULL;
+ struct iscsit_cmd *cmd = NULL;
spin_lock_bh(&conn->cmd_lock);
list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
@@ -441,12 +402,12 @@ struct iscsi_cmd *iscsit_find_cmd_from_ttt(
}
int iscsit_find_cmd_for_recovery(
- struct iscsi_session *sess,
- struct iscsi_cmd **cmd_ptr,
+ struct iscsit_session *sess,
+ struct iscsit_cmd **cmd_ptr,
struct iscsi_conn_recovery **cr_ptr,
itt_t init_task_tag)
{
- struct iscsi_cmd *cmd = NULL;
+ struct iscsit_cmd *cmd = NULL;
struct iscsi_conn_recovery *cr;
/*
* Scan through the inactive connection recovery list's command list.
@@ -493,8 +454,8 @@ int iscsit_find_cmd_for_recovery(
}
void iscsit_add_cmd_to_immediate_queue(
- struct iscsi_cmd *cmd,
- struct iscsi_conn *conn,
+ struct iscsit_cmd *cmd,
+ struct iscsit_conn *conn,
u8 state)
{
struct iscsi_queue_req *qr;
@@ -517,8 +478,9 @@ void iscsit_add_cmd_to_immediate_queue(
wake_up(&conn->queues_wq);
}
+EXPORT_SYMBOL(iscsit_add_cmd_to_immediate_queue);
-struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *conn)
+struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsit_conn *conn)
{
struct iscsi_queue_req *qr;
@@ -539,8 +501,8 @@ struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *c
}
static void iscsit_remove_cmd_from_immediate_queue(
- struct iscsi_cmd *cmd,
- struct iscsi_conn *conn)
+ struct iscsit_cmd *cmd,
+ struct iscsit_conn *conn)
{
struct iscsi_queue_req *qr, *qr_tmp;
@@ -567,9 +529,9 @@ static void iscsit_remove_cmd_from_immediate_queue(
}
}
-void iscsit_add_cmd_to_response_queue(
- struct iscsi_cmd *cmd,
- struct iscsi_conn *conn,
+int iscsit_add_cmd_to_response_queue(
+ struct iscsit_cmd *cmd,
+ struct iscsit_conn *conn,
u8 state)
{
struct iscsi_queue_req *qr;
@@ -578,7 +540,7 @@ void iscsit_add_cmd_to_response_queue(
if (!qr) {
pr_err("Unable to allocate memory for"
" struct iscsi_queue_req\n");
- return;
+ return -ENOMEM;
}
INIT_LIST_HEAD(&qr->qr_list);
qr->cmd = cmd;
@@ -590,9 +552,10 @@ void iscsit_add_cmd_to_response_queue(
spin_unlock_bh(&conn->response_queue_lock);
wake_up(&conn->queues_wq);
+ return 0;
}
-struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *conn)
+struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsit_conn *conn)
{
struct iscsi_queue_req *qr;
@@ -614,8 +577,8 @@ struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *co
}
static void iscsit_remove_cmd_from_response_queue(
- struct iscsi_cmd *cmd,
- struct iscsi_conn *conn)
+ struct iscsit_cmd *cmd,
+ struct iscsit_conn *conn)
{
struct iscsi_queue_req *qr, *qr_tmp;
@@ -643,7 +606,7 @@ static void iscsit_remove_cmd_from_response_queue(
}
}
-bool iscsit_conn_all_queues_empty(struct iscsi_conn *conn)
+bool iscsit_conn_all_queues_empty(struct iscsit_conn *conn)
{
bool empty;
@@ -661,7 +624,7 @@ bool iscsit_conn_all_queues_empty(struct iscsi_conn *conn)
return empty;
}
-void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *conn)
+void iscsit_free_queue_reqs_for_conn(struct iscsit_conn *conn)
{
struct iscsi_queue_req *qr, *qr_tmp;
@@ -687,103 +650,93 @@ void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *conn)
spin_unlock_bh(&conn->response_queue_lock);
}
-void iscsit_release_cmd(struct iscsi_cmd *cmd)
+void iscsit_release_cmd(struct iscsit_cmd *cmd)
{
+ struct iscsit_session *sess;
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+
+ WARN_ON(!list_empty(&cmd->i_conn_node));
+
+ if (cmd->conn)
+ sess = cmd->conn->sess;
+ else
+ sess = cmd->sess;
+
+ BUG_ON(!sess || !sess->se_sess);
+
kfree(cmd->buf_ptr);
kfree(cmd->pdu_list);
kfree(cmd->seq_list);
kfree(cmd->tmr_req);
+ kfree(cmd->overflow_buf);
kfree(cmd->iov_data);
kfree(cmd->text_in_ptr);
- kmem_cache_free(lio_cmd_cache, cmd);
+ target_free_tag(sess->se_sess, se_cmd);
}
+EXPORT_SYMBOL(iscsit_release_cmd);
-static void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd,
- bool check_queues)
+void __iscsit_free_cmd(struct iscsit_cmd *cmd, bool check_queues)
{
- struct iscsi_conn *conn = cmd->conn;
+ struct iscsit_conn *conn = cmd->conn;
- if (scsi_cmd) {
- if (cmd->data_direction == DMA_TO_DEVICE) {
- iscsit_stop_dataout_timer(cmd);
- iscsit_free_r2ts_from_list(cmd);
- }
- if (cmd->data_direction == DMA_FROM_DEVICE)
- iscsit_free_all_datain_reqs(cmd);
+ WARN_ON(!list_empty(&cmd->i_conn_node));
+
+ if (cmd->data_direction == DMA_TO_DEVICE) {
+ iscsit_stop_dataout_timer(cmd);
+ iscsit_free_r2ts_from_list(cmd);
}
+ if (cmd->data_direction == DMA_FROM_DEVICE)
+ iscsit_free_all_datain_reqs(cmd);
if (conn && check_queues) {
iscsit_remove_cmd_from_immediate_queue(cmd, conn);
iscsit_remove_cmd_from_response_queue(cmd, conn);
}
+
+ if (conn && conn->conn_transport->iscsit_unmap_cmd)
+ conn->conn_transport->iscsit_unmap_cmd(conn, cmd);
}
-void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
+void iscsit_free_cmd(struct iscsit_cmd *cmd, bool shutdown)
{
- struct se_cmd *se_cmd = NULL;
+ struct se_cmd *se_cmd = cmd->se_cmd.se_tfo ? &cmd->se_cmd : NULL;
int rc;
- /*
- * Determine if a struct se_cmd is associated with
- * this struct iscsi_cmd.
- */
- switch (cmd->iscsi_opcode) {
- case ISCSI_OP_SCSI_CMD:
- se_cmd = &cmd->se_cmd;
- __iscsit_free_cmd(cmd, true, shutdown);
- /*
- * Fallthrough
- */
- case ISCSI_OP_SCSI_TMFUNC:
- rc = transport_generic_free_cmd(&cmd->se_cmd, 1);
- if (!rc && shutdown && se_cmd && se_cmd->se_sess) {
- __iscsit_free_cmd(cmd, true, shutdown);
- target_put_sess_cmd(se_cmd->se_sess, se_cmd);
- }
- break;
- case ISCSI_OP_REJECT:
- /*
- * Handle special case for REJECT when iscsi_add_reject*() has
- * overwritten the original iscsi_opcode assignment, and the
- * associated cmd->se_cmd needs to be released.
- */
- if (cmd->se_cmd.se_tfo != NULL) {
- se_cmd = &cmd->se_cmd;
- __iscsit_free_cmd(cmd, true, shutdown);
-
- rc = transport_generic_free_cmd(&cmd->se_cmd, 1);
- if (!rc && shutdown && se_cmd->se_sess) {
- __iscsit_free_cmd(cmd, true, shutdown);
- target_put_sess_cmd(se_cmd->se_sess, se_cmd);
- }
- break;
+
+ WARN_ON(!list_empty(&cmd->i_conn_node));
+
+ __iscsit_free_cmd(cmd, shutdown);
+ if (se_cmd) {
+ rc = transport_generic_free_cmd(se_cmd, shutdown);
+ if (!rc && shutdown && se_cmd->se_sess) {
+ __iscsit_free_cmd(cmd, shutdown);
+ target_put_sess_cmd(se_cmd);
}
- /* Fall-through */
- default:
- __iscsit_free_cmd(cmd, false, shutdown);
- cmd->release_cmd(cmd);
- break;
+ } else {
+ iscsit_release_cmd(cmd);
}
}
+EXPORT_SYMBOL(iscsit_free_cmd);
-int iscsit_check_session_usage_count(struct iscsi_session *sess)
+bool iscsit_check_session_usage_count(struct iscsit_session *sess,
+ bool can_sleep)
{
spin_lock_bh(&sess->session_usage_lock);
if (sess->session_usage_count != 0) {
sess->session_waiting_on_uc = 1;
spin_unlock_bh(&sess->session_usage_lock);
- if (in_interrupt())
- return 2;
+ if (!can_sleep)
+ return true;
wait_for_completion(&sess->session_waiting_on_uc_comp);
- return 1;
+ return false;
}
spin_unlock_bh(&sess->session_usage_lock);
- return 0;
+ return false;
}
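With the old in_interrupt() heuristic replaced by an explicit can_sleep argument, each caller now states up front whether it may block. A sketch of the two intended call patterns (function names illustrative):

/* Process context: block until the usage count drains to zero. */
static void session_close_sketch(struct iscsit_session *sess)
{
	iscsit_check_session_usage_count(sess, true);
	/* safe to tear the session down here */
}

/* Atomic context: only register a waiter and report whether one exists. */
static bool session_busy_sketch(struct iscsit_session *sess)
{
	return iscsit_check_session_usage_count(sess, false);
}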
-void iscsit_dec_session_usage_count(struct iscsi_session *sess)
+void iscsit_dec_session_usage_count(struct iscsit_session *sess)
{
spin_lock_bh(&sess->session_usage_lock);
sess->session_usage_count--;
@@ -794,64 +747,16 @@ void iscsit_dec_session_usage_count(struct iscsi_session *sess)
spin_unlock_bh(&sess->session_usage_lock);
}
-void iscsit_inc_session_usage_count(struct iscsi_session *sess)
+void iscsit_inc_session_usage_count(struct iscsit_session *sess)
{
spin_lock_bh(&sess->session_usage_lock);
sess->session_usage_count++;
spin_unlock_bh(&sess->session_usage_lock);
}
-/*
- * Setup conn->if_marker and conn->of_marker values based upon
- * the initial marker-less interval. (see iSCSI v19 A.2)
- */
-int iscsit_set_sync_and_steering_values(struct iscsi_conn *conn)
+struct iscsit_conn *iscsit_get_conn_from_cid(struct iscsit_session *sess, u16 cid)
{
- int login_ifmarker_count = 0, login_ofmarker_count = 0, next_marker = 0;
- /*
- * IFMarkInt and OFMarkInt are negotiated as 32-bit words.
- */
- u32 IFMarkInt = (conn->conn_ops->IFMarkInt * 4);
- u32 OFMarkInt = (conn->conn_ops->OFMarkInt * 4);
-
- if (conn->conn_ops->OFMarker) {
- /*
- * Account for the first Login Command received not
- * via iscsi_recv_msg().
- */
- conn->of_marker += ISCSI_HDR_LEN;
- if (conn->of_marker <= OFMarkInt) {
- conn->of_marker = (OFMarkInt - conn->of_marker);
- } else {
- login_ofmarker_count = (conn->of_marker / OFMarkInt);
- next_marker = (OFMarkInt * (login_ofmarker_count + 1)) +
- (login_ofmarker_count * MARKER_SIZE);
- conn->of_marker = (next_marker - conn->of_marker);
- }
- conn->of_marker_offset = 0;
- pr_debug("Setting OFMarker value to %u based on Initial"
- " Markerless Interval.\n", conn->of_marker);
- }
-
- if (conn->conn_ops->IFMarker) {
- if (conn->if_marker <= IFMarkInt) {
- conn->if_marker = (IFMarkInt - conn->if_marker);
- } else {
- login_ifmarker_count = (conn->if_marker / IFMarkInt);
- next_marker = (IFMarkInt * (login_ifmarker_count + 1)) +
- (login_ifmarker_count * MARKER_SIZE);
- conn->if_marker = (next_marker - conn->if_marker);
- }
- pr_debug("Setting IFMarker value to %u based on Initial"
- " Markerless Interval.\n", conn->if_marker);
- }
-
- return 0;
-}
-
-struct iscsi_conn *iscsit_get_conn_from_cid(struct iscsi_session *sess, u16 cid)
-{
- struct iscsi_conn *conn;
+ struct iscsit_conn *conn;
spin_lock_bh(&sess->conn_lock);
list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
@@ -867,9 +772,9 @@ struct iscsi_conn *iscsit_get_conn_from_cid(struct iscsi_session *sess, u16 cid)
return NULL;
}
-struct iscsi_conn *iscsit_get_conn_from_cid_rcfr(struct iscsi_session *sess, u16 cid)
+struct iscsit_conn *iscsit_get_conn_from_cid_rcfr(struct iscsit_session *sess, u16 cid)
{
- struct iscsi_conn *conn;
+ struct iscsit_conn *conn;
spin_lock_bh(&sess->conn_lock);
list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
@@ -887,7 +792,7 @@ struct iscsi_conn *iscsit_get_conn_from_cid_rcfr(struct iscsi_session *sess, u16
return NULL;
}
-void iscsit_check_conn_usage_count(struct iscsi_conn *conn)
+void iscsit_check_conn_usage_count(struct iscsit_conn *conn)
{
spin_lock_bh(&conn->conn_usage_lock);
if (conn->conn_usage_count != 0) {
@@ -900,7 +805,7 @@ void iscsit_check_conn_usage_count(struct iscsi_conn *conn)
spin_unlock_bh(&conn->conn_usage_lock);
}
-void iscsit_dec_conn_usage_count(struct iscsi_conn *conn)
+void iscsit_dec_conn_usage_count(struct iscsit_conn *conn)
{
spin_lock_bh(&conn->conn_usage_lock);
conn->conn_usage_count--;
@@ -911,19 +816,19 @@ void iscsit_dec_conn_usage_count(struct iscsi_conn *conn)
spin_unlock_bh(&conn->conn_usage_lock);
}
-void iscsit_inc_conn_usage_count(struct iscsi_conn *conn)
+void iscsit_inc_conn_usage_count(struct iscsit_conn *conn)
{
spin_lock_bh(&conn->conn_usage_lock);
conn->conn_usage_count++;
spin_unlock_bh(&conn->conn_usage_lock);
}
-static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response)
+static int iscsit_add_nopin(struct iscsit_conn *conn, int want_response)
{
u8 state;
- struct iscsi_cmd *cmd;
+ struct iscsit_cmd *cmd;
- cmd = iscsit_allocate_cmd(conn, GFP_ATOMIC);
+ cmd = iscsit_allocate_cmd(conn, TASK_RUNNING);
if (!cmd)
return -1;
@@ -931,13 +836,8 @@ static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response)
state = (want_response) ? ISTATE_SEND_NOPIN_WANT_RESPONSE :
ISTATE_SEND_NOPIN_NO_RESPONSE;
cmd->init_task_tag = RESERVED_ITT;
- spin_lock_bh(&conn->sess->ttt_lock);
- cmd->targ_xfer_tag = (want_response) ? conn->sess->targ_xfer_tag++ :
- 0xFFFFFFFF;
- if (want_response && (cmd->targ_xfer_tag == 0xFFFFFFFF))
- cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
- spin_unlock_bh(&conn->sess->ttt_lock);
-
+ cmd->targ_xfer_tag = (want_response) ?
+ session_get_next_ttt(conn->sess) : 0xFFFFFFFF;
spin_lock_bh(&conn->cmd_lock);
list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
spin_unlock_bh(&conn->cmd_lock);
@@ -949,9 +849,11 @@ static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response)
return 0;
}
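The open-coded ttt_lock sequence gives way to session_get_next_ttt(), a helper presumably along these lines, handing out increasing Target Transfer Tags while skipping the reserved 0xFFFFFFFF value (field names taken from the removed code):

static inline u32 session_get_next_ttt_sketch(struct iscsit_session *session)
{
	u32 ttt;

	spin_lock_bh(&session->ttt_lock);
	ttt = session->targ_xfer_tag++;
	if (ttt == 0xFFFFFFFF)	/* reserved TTT: hand out the next one */
		ttt = session->targ_xfer_tag++;
	spin_unlock_bh(&session->ttt_lock);

	return ttt;
}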
-static void iscsit_handle_nopin_response_timeout(unsigned long data)
+void iscsit_handle_nopin_response_timeout(struct timer_list *t)
{
- struct iscsi_conn *conn = (struct iscsi_conn *) data;
+ struct iscsit_conn *conn = timer_container_of(conn, t,
+ nopin_response_timer);
+ struct iscsit_session *sess = conn->sess;
iscsit_inc_conn_usage_count(conn);
@@ -962,35 +864,21 @@ static void iscsit_handle_nopin_response_timeout(unsigned long data)
return;
}
- pr_debug("Did not receive response to NOPIN on CID: %hu on"
- " SID: %u, failing connection.\n", conn->cid,
- conn->sess->sid);
+ pr_err("Did not receive response to NOPIN on CID: %hu, failing"
+ " connection for I_T Nexus %s,i,0x%6phN,%s,t,0x%02x\n",
+ conn->cid, sess->sess_ops->InitiatorName, sess->isid,
+ sess->tpg->tpg_tiqn->tiqn, (u32)sess->tpg->tpgt);
conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
spin_unlock_bh(&conn->nopin_timer_lock);
- {
- struct iscsi_portal_group *tpg = conn->sess->tpg;
- struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
-
- if (tiqn) {
- spin_lock_bh(&tiqn->sess_err_stats.lock);
- strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
- conn->sess->sess_ops->InitiatorName);
- tiqn->sess_err_stats.last_sess_failure_type =
- ISCSI_SESS_ERR_CXN_TIMEOUT;
- tiqn->sess_err_stats.cxn_timeout_errors++;
- conn->sess->conn_timeout_errors++;
- spin_unlock_bh(&tiqn->sess_err_stats.lock);
- }
- }
-
+ iscsit_fill_cxn_timeout_err_stats(sess);
iscsit_cause_connection_reinstatement(conn, 0);
iscsit_dec_conn_usage_count(conn);
}
-void iscsit_mod_nopin_response_timer(struct iscsi_conn *conn)
+void iscsit_mod_nopin_response_timer(struct iscsit_conn *conn)
{
- struct iscsi_session *sess = conn->sess;
+ struct iscsit_session *sess = conn->sess;
struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
spin_lock_bh(&conn->nopin_timer_lock);
@@ -1004,12 +892,9 @@ void iscsit_mod_nopin_response_timer(struct iscsi_conn *conn)
spin_unlock_bh(&conn->nopin_timer_lock);
}
-/*
- * Called with conn->nopin_timer_lock held.
- */
-void iscsit_start_nopin_response_timer(struct iscsi_conn *conn)
+void iscsit_start_nopin_response_timer(struct iscsit_conn *conn)
{
- struct iscsi_session *sess = conn->sess;
+ struct iscsit_session *sess = conn->sess;
struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
spin_lock_bh(&conn->nopin_timer_lock);
@@ -1018,21 +903,17 @@ void iscsit_start_nopin_response_timer(struct iscsi_conn *conn)
return;
}
- init_timer(&conn->nopin_response_timer);
- conn->nopin_response_timer.expires =
- (get_jiffies_64() + na->nopin_response_timeout * HZ);
- conn->nopin_response_timer.data = (unsigned long)conn;
- conn->nopin_response_timer.function = iscsit_handle_nopin_response_timeout;
conn->nopin_response_timer_flags &= ~ISCSI_TF_STOP;
conn->nopin_response_timer_flags |= ISCSI_TF_RUNNING;
- add_timer(&conn->nopin_response_timer);
+ mod_timer(&conn->nopin_response_timer,
+ jiffies + na->nopin_response_timeout * HZ);
pr_debug("Started NOPIN Response Timer on CID: %d to %u"
" seconds\n", conn->cid, na->nopin_response_timeout);
spin_unlock_bh(&conn->nopin_timer_lock);
}
-void iscsit_stop_nopin_response_timer(struct iscsi_conn *conn)
+void iscsit_stop_nopin_response_timer(struct iscsit_conn *conn)
{
spin_lock_bh(&conn->nopin_timer_lock);
if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) {
@@ -1042,16 +923,16 @@ void iscsit_stop_nopin_response_timer(struct iscsi_conn *conn)
conn->nopin_response_timer_flags |= ISCSI_TF_STOP;
spin_unlock_bh(&conn->nopin_timer_lock);
- del_timer_sync(&conn->nopin_response_timer);
+ timer_delete_sync(&conn->nopin_response_timer);
spin_lock_bh(&conn->nopin_timer_lock);
conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
spin_unlock_bh(&conn->nopin_timer_lock);
}
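These hunks follow the kernel-wide timer API conversion: the callback takes a struct timer_list and recovers its container, the timer is prepared once with timer_setup(), and arming becomes a single mod_timer() call. A condensed sketch of the pattern with an illustrative owner structure:

#include <linux/jiffies.h>
#include <linux/timer.h>

struct owner_sketch {
	struct timer_list demo_timer;
};

static void demo_timeout(struct timer_list *t)
{
	/* Recover the enclosing object from the embedded timer_list. */
	struct owner_sketch *o = timer_container_of(o, t, demo_timer);

	(void)o;	/* handle the expiry */
}

static void demo_arm(struct owner_sketch *o)
{
	timer_setup(&o->demo_timer, demo_timeout, 0);	/* once, at init */
	mod_timer(&o->demo_timer, jiffies + 30 * HZ);	/* (re)arm */
}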
-static void iscsit_handle_nopin_timeout(unsigned long data)
+void iscsit_handle_nopin_timeout(struct timer_list *t)
{
- struct iscsi_conn *conn = (struct iscsi_conn *) data;
+ struct iscsit_conn *conn = timer_container_of(conn, t, nopin_timer);
iscsit_inc_conn_usage_count(conn);
@@ -1068,13 +949,13 @@ static void iscsit_handle_nopin_timeout(unsigned long data)
iscsit_dec_conn_usage_count(conn);
}
-/*
- * Called with conn->nopin_timer_lock held.
- */
-void __iscsit_start_nopin_timer(struct iscsi_conn *conn)
+void __iscsit_start_nopin_timer(struct iscsit_conn *conn)
{
- struct iscsi_session *sess = conn->sess;
+ struct iscsit_session *sess = conn->sess;
struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
+
+ lockdep_assert_held(&conn->nopin_timer_lock);
+
/*
* NOPIN timeout is disabled.
*/
@@ -1084,48 +965,22 @@ void __iscsit_start_nopin_timer(struct iscsi_conn *conn)
if (conn->nopin_timer_flags & ISCSI_TF_RUNNING)
return;
- init_timer(&conn->nopin_timer);
- conn->nopin_timer.expires = (get_jiffies_64() + na->nopin_timeout * HZ);
- conn->nopin_timer.data = (unsigned long)conn;
- conn->nopin_timer.function = iscsit_handle_nopin_timeout;
conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
- add_timer(&conn->nopin_timer);
+ mod_timer(&conn->nopin_timer, jiffies + na->nopin_timeout * HZ);
pr_debug("Started NOPIN Timer on CID: %d at %u second"
" interval\n", conn->cid, na->nopin_timeout);
}
-void iscsit_start_nopin_timer(struct iscsi_conn *conn)
+void iscsit_start_nopin_timer(struct iscsit_conn *conn)
{
- struct iscsi_session *sess = conn->sess;
- struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
- /*
- * NOPIN timeout is disabled..
- */
- if (!na->nopin_timeout)
- return;
-
spin_lock_bh(&conn->nopin_timer_lock);
- if (conn->nopin_timer_flags & ISCSI_TF_RUNNING) {
- spin_unlock_bh(&conn->nopin_timer_lock);
- return;
- }
-
- init_timer(&conn->nopin_timer);
- conn->nopin_timer.expires = (get_jiffies_64() + na->nopin_timeout * HZ);
- conn->nopin_timer.data = (unsigned long)conn;
- conn->nopin_timer.function = iscsit_handle_nopin_timeout;
- conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
- conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
- add_timer(&conn->nopin_timer);
-
- pr_debug("Started NOPIN Timer on CID: %d at %u second"
- " interval\n", conn->cid, na->nopin_timeout);
+ __iscsit_start_nopin_timer(conn);
spin_unlock_bh(&conn->nopin_timer_lock);
}
-void iscsit_stop_nopin_timer(struct iscsi_conn *conn)
+void iscsit_stop_nopin_timer(struct iscsit_conn *conn)
{
spin_lock_bh(&conn->nopin_timer_lock);
if (!(conn->nopin_timer_flags & ISCSI_TF_RUNNING)) {
@@ -1135,16 +990,67 @@ void iscsit_stop_nopin_timer(struct iscsi_conn *conn)
conn->nopin_timer_flags |= ISCSI_TF_STOP;
spin_unlock_bh(&conn->nopin_timer_lock);
- del_timer_sync(&conn->nopin_timer);
+ timer_delete_sync(&conn->nopin_timer);
spin_lock_bh(&conn->nopin_timer_lock);
conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING;
spin_unlock_bh(&conn->nopin_timer_lock);
}
+void iscsit_login_timeout(struct timer_list *t)
+{
+ struct iscsit_conn *conn = timer_container_of(conn, t, login_timer);
+ struct iscsi_login *login = conn->login;
+
+ pr_debug("Entering iscsi_target_login_timeout >>>>>>>>>>>>>>>>>>>\n");
+
+ spin_lock_bh(&conn->login_timer_lock);
+ login->login_failed = 1;
+
+ if (conn->login_kworker) {
+ pr_debug("Sending SIGINT to conn->login_kworker %s/%d\n",
+ conn->login_kworker->comm, conn->login_kworker->pid);
+ send_sig(SIGINT, conn->login_kworker, 1);
+ } else {
+ schedule_delayed_work(&conn->login_work, 0);
+ }
+ spin_unlock_bh(&conn->login_timer_lock);
+}
+
+void iscsit_start_login_timer(struct iscsit_conn *conn, struct task_struct *kthr)
+{
+ pr_debug("Login timer started\n");
+
+ conn->login_kworker = kthr;
+ mod_timer(&conn->login_timer, jiffies + TA_LOGIN_TIMEOUT * HZ);
+}
+
+int iscsit_set_login_timer_kworker(struct iscsit_conn *conn, struct task_struct *kthr)
+{
+ struct iscsi_login *login = conn->login;
+ int ret = 0;
+
+ spin_lock_bh(&conn->login_timer_lock);
+ if (login->login_failed) {
+ /* The timer has already expired */
+ ret = -1;
+ } else {
+ conn->login_kworker = kthr;
+ }
+ spin_unlock_bh(&conn->login_timer_lock);
+
+ return ret;
+}
+
+void iscsit_stop_login_timer(struct iscsit_conn *conn)
+{
+ pr_debug("Login timer stopped\n");
+ timer_delete_sync(&conn->login_timer);
+}
+
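Because the login timer can fire while a worker thread is being attached, iscsit_set_login_timer_kworker() re-checks login_failed under login_timer_lock, and a caller must treat -1 as "already expired". A sketch of the intended call sequence from a login worker (caller shape illustrative):

static int login_worker_sketch(struct iscsit_conn *conn)
{
	/* Adopt the timer; bail out if it has already fired. */
	if (iscsit_set_login_timer_kworker(conn, current) < 0)
		return -1;

	/* ... interruptible login I/O; SIGINT arrives on timeout ... */

	iscsit_stop_login_timer(conn);
	return 0;
}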
int iscsit_send_tx_data(
- struct iscsi_cmd *cmd,
- struct iscsi_conn *conn,
+ struct iscsit_cmd *cmd,
+ struct iscsit_conn *conn,
int use_misc)
{
int tx_sent, tx_size;
@@ -1176,10 +1082,12 @@ send_data:
}
int iscsit_fe_sendpage_sg(
- struct iscsi_cmd *cmd,
- struct iscsi_conn *conn)
+ struct iscsit_cmd *cmd,
+ struct iscsit_conn *conn)
{
struct scatterlist *sg = cmd->first_data_sg;
+ struct bio_vec bvec;
+ struct msghdr msghdr = { .msg_flags = MSG_SPLICE_PAGES, };
struct kvec iov;
u32 tx_hdr_size, data_len;
u32 offset = cmd->first_data_sg_off;
@@ -1223,17 +1131,18 @@ send_hdr:
u32 space = (sg->length - offset);
u32 sub_len = min_t(u32, data_len, space);
send_pg:
- tx_sent = conn->sock->ops->sendpage(conn->sock,
- sg_page(sg), sg->offset + offset, sub_len, 0);
+ bvec_set_page(&bvec, sg_page(sg), sub_len, sg->offset + offset);
+ iov_iter_bvec(&msghdr.msg_iter, ITER_SOURCE, &bvec, 1, sub_len);
+
+ tx_sent = conn->sock->ops->sendmsg(conn->sock, &msghdr,
+ sub_len);
if (tx_sent != sub_len) {
if (tx_sent == -EAGAIN) {
- pr_err("tcp_sendpage() returned"
- " -EAGAIN\n");
+ pr_err("sendmsg/splice returned -EAGAIN\n");
goto send_pg;
}
- pr_err("tcp_sendpage() failure: %d\n",
- tx_sent);
+ pr_err("sendmsg/splice failure: %d\n", tx_sent);
return -1;
}
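The removed ->sendpage() call reflects its deletion from the socket API; zero-copy page transmission is now expressed as sendmsg() with MSG_SPLICE_PAGES over a bio_vec iterator. The idiom in isolation (a sketch, retry handling elided):

#include <linux/bvec.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/uio.h>

static int splice_page_sketch(struct socket *sock, struct page *page,
			      unsigned int offset, unsigned int len)
{
	struct bio_vec bvec;
	struct msghdr msg = { .msg_flags = MSG_SPLICE_PAGES, };

	/* Describe the page fragment, then let the socket splice it out. */
	bvec_set_page(&bvec, page, len, offset);
	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);

	return sock->ops->sendmsg(sock, &msg, len);
}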
@@ -1281,7 +1190,7 @@ send_datacrc:
* Parameters: iSCSI Connection, Status Class, Status Detail.
* Returns: 0 on success, -1 on error.
*/
-int iscsit_tx_login_rsp(struct iscsi_conn *conn, u8 status_class, u8 status_detail)
+int iscsit_tx_login_rsp(struct iscsit_conn *conn, u8 status_class, u8 status_detail)
{
struct iscsi_login_rsp *hdr;
struct iscsi_login *login = conn->conn_login;
@@ -1289,6 +1198,8 @@ int iscsit_tx_login_rsp(struct iscsi_conn *conn, u8 status_class, u8 status_deta
login->login_failed = 1;
iscsit_collect_login_stats(conn, status_class, status_detail);
+ memset(&login->rsp[0], 0, ISCSI_HDR_LEN);
+
hdr = (struct iscsi_login_rsp *)&login->rsp[0];
hdr->opcode = ISCSI_OP_LOGIN_RSP;
hdr->status_class = status_class;
@@ -1298,39 +1209,23 @@ int iscsit_tx_login_rsp(struct iscsi_conn *conn, u8 status_class, u8 status_deta
return conn->conn_transport->iscsit_put_login_tx(conn, login, 0);
}
-void iscsit_print_session_params(struct iscsi_session *sess)
-{
- struct iscsi_conn *conn;
-
- pr_debug("-----------------------------[Session Params for"
- " SID: %u]-----------------------------\n", sess->sid);
- spin_lock_bh(&sess->conn_lock);
- list_for_each_entry(conn, &sess->sess_conn_list, conn_list)
- iscsi_dump_conn_ops(conn->conn_ops);
- spin_unlock_bh(&sess->conn_lock);
-
- iscsi_dump_sess_ops(sess->sess_ops);
-}
-
-static int iscsit_do_rx_data(
- struct iscsi_conn *conn,
- struct iscsi_data_count *count)
+int rx_data(
+ struct iscsit_conn *conn,
+ struct kvec *iov,
+ int iov_count,
+ int data)
{
- int data = count->data_length, rx_loop = 0, total_rx = 0, iov_len;
- struct kvec *iov_p;
+ int rx_loop = 0, total_rx = 0;
struct msghdr msg;
if (!conn || !conn->sock || !conn->conn_ops)
return -1;
memset(&msg, 0, sizeof(struct msghdr));
+ iov_iter_kvec(&msg.msg_iter, ITER_DEST, iov, iov_count, data);
- iov_p = count->iov;
- iov_len = count->iov_count;
-
- while (total_rx < data) {
- rx_loop = kernel_recvmsg(conn->sock, &msg, iov_p, iov_len,
- (data - total_rx), MSG_WAITALL);
+ while (msg_data_left(&msg)) {
+ rx_loop = sock_recvmsg(conn->sock, &msg, MSG_WAITALL);
if (rx_loop <= 0) {
pr_debug("rx_loop: %d total_rx: %d\n",
rx_loop, total_rx);
@@ -1344,13 +1239,14 @@ static int iscsit_do_rx_data(
return total_rx;
}
-static int iscsit_do_tx_data(
- struct iscsi_conn *conn,
- struct iscsi_data_count *count)
+int tx_data(
+ struct iscsit_conn *conn,
+ struct kvec *iov,
+ int iov_count,
+ int data)
{
- int data = count->data_length, total_tx = 0, tx_loop = 0, iov_len;
- struct kvec *iov_p;
struct msghdr msg;
+ int total_tx = 0;
if (!conn || !conn->sock || !conn->conn_ops)
return -1;
@@ -1362,12 +1258,10 @@ static int iscsit_do_tx_data(
memset(&msg, 0, sizeof(struct msghdr));
- iov_p = count->iov;
- iov_len = count->iov_count;
+ iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, iov, iov_count, data);
- while (total_tx < data) {
- tx_loop = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
- (data - total_tx));
+ while (msg_data_left(&msg)) {
+ int tx_loop = sock_sendmsg(conn->sock, &msg);
if (tx_loop <= 0) {
pr_debug("tx_loop: %d total_tx %d\n",
tx_loop, total_tx);
@@ -1381,48 +1275,8 @@ static int iscsit_do_tx_data(
return total_tx;
}
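Both helpers now bind their kvec array to the msghdr once and let the iov_iter track progress, so a short transfer resumes exactly where it stopped instead of re-deriving per-iovec offsets. The receive half of the pattern as a standalone sketch:

#include <linux/net.h>
#include <linux/socket.h>
#include <linux/uio.h>

static int recv_all_sketch(struct socket *sock, struct kvec *iov,
			   int iov_count, int bytes)
{
	struct msghdr msg = { };
	int total = 0;

	iov_iter_kvec(&msg.msg_iter, ITER_DEST, iov, iov_count, bytes);

	while (msg_data_left(&msg)) {
		/* The iterator advances internally on each partial read. */
		int rc = sock_recvmsg(sock, &msg, MSG_WAITALL);

		if (rc <= 0)
			return -1;
		total += rc;
	}
	return total;
}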
-int rx_data(
- struct iscsi_conn *conn,
- struct kvec *iov,
- int iov_count,
- int data)
-{
- struct iscsi_data_count c;
-
- if (!conn || !conn->sock || !conn->conn_ops)
- return -1;
-
- memset(&c, 0, sizeof(struct iscsi_data_count));
- c.iov = iov;
- c.iov_count = iov_count;
- c.data_length = data;
- c.type = ISCSI_RX_DATA;
-
- return iscsit_do_rx_data(conn, &c);
-}
-
-int tx_data(
- struct iscsi_conn *conn,
- struct kvec *iov,
- int iov_count,
- int data)
-{
- struct iscsi_data_count c;
-
- if (!conn || !conn->sock || !conn->conn_ops)
- return -1;
-
- memset(&c, 0, sizeof(struct iscsi_data_count));
- c.iov = iov;
- c.iov_count = iov_count;
- c.data_length = data;
- c.type = ISCSI_TX_DATA;
-
- return iscsit_do_tx_data(conn, &c);
-}
-
void iscsit_collect_login_stats(
- struct iscsi_conn *conn,
+ struct iscsit_conn *conn,
u8 status_class,
u8 status_detail)
{
@@ -1437,13 +1291,6 @@ void iscsit_collect_login_stats(
ls = &tiqn->login_stats;
spin_lock(&ls->lock);
- if (!strcmp(conn->login_ip, ls->last_intr_fail_ip_addr) &&
- ((get_jiffies_64() - ls->last_fail_time) < 10)) {
- /* We already have the failure info for this login */
- spin_unlock(&ls->lock);
- return;
- }
-
if (status_class == ISCSI_STATUS_CLS_SUCCESS)
ls->accepts++;
else if (status_class == ISCSI_STATUS_CLS_REDIRECT) {
@@ -1471,27 +1318,27 @@ void iscsit_collect_login_stats(
if (conn->param_list)
intrname = iscsi_find_param_from_key(INITIATORNAME,
conn->param_list);
- strcpy(ls->last_intr_fail_name,
- (intrname ? intrname->value : "Unknown"));
+ strscpy(ls->last_intr_fail_name,
+ (intrname ? intrname->value : "Unknown"),
+ sizeof(ls->last_intr_fail_name));
ls->last_intr_fail_ip_family = conn->login_family;
- snprintf(ls->last_intr_fail_ip_addr, IPV6_ADDRESS_SPACE,
- "%s", conn->login_ip);
+ ls->last_intr_fail_sockaddr = conn->login_sockaddr;
ls->last_fail_time = get_jiffies_64();
}
spin_unlock(&ls->lock);
}
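strscpy() is the bounds-checked replacement for strcpy() here: it always NUL-terminates the destination and reports truncation instead of overrunning the fixed-size stats field. Its semantics in brief (sketch):

#include <linux/printk.h>
#include <linux/string.h>

static void copy_initiator_name_sketch(char *dst, size_t dst_size,
				       const char *src)
{
	/* Returns strlen() of the result, or -E2BIG if src was truncated. */
	ssize_t n = strscpy(dst, src, dst_size);

	if (n == -E2BIG)
		pr_debug("initiator name truncated\n");
}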
-struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsi_conn *conn)
+struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsit_conn *conn)
{
struct iscsi_portal_group *tpg;
- if (!conn || !conn->sess)
+ if (!conn)
return NULL;
- tpg = conn->sess->tpg;
+ tpg = conn->tpg;
if (!tpg)
return NULL;
@@ -1500,3 +1347,22 @@ struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsi_conn *conn)
return tpg->tpg_tiqn;
}
+
+void iscsit_fill_cxn_timeout_err_stats(struct iscsit_session *sess)
+{
+ struct iscsi_portal_group *tpg = sess->tpg;
+ struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
+
+ if (!tiqn)
+ return;
+
+ spin_lock_bh(&tiqn->sess_err_stats.lock);
+ strscpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
+ sess->sess_ops->InitiatorName,
+ sizeof(tiqn->sess_err_stats.last_sess_fail_rem_name));
+ tiqn->sess_err_stats.last_sess_failure_type =
+ ISCSI_SESS_ERR_CXN_TIMEOUT;
+ tiqn->sess_err_stats.cxn_timeout_errors++;
+ atomic_long_inc(&sess->conn_timeout_errors);
+ spin_unlock_bh(&tiqn->sess_err_stats.lock);
+}
diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
index e4fc34a02f57..7ae48a8a5cbf 100644
--- a/drivers/target/iscsi/iscsi_target_util.h
+++ b/drivers/target/iscsi/iscsi_target_util.h
@@ -1,61 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ISCSI_TARGET_UTIL_H
#define ISCSI_TARGET_UTIL_H
+#include <linux/types.h>
+#include <scsi/iscsi_proto.h> /* itt_t */
+
#define MARKER_SIZE 8
-extern int iscsit_add_r2t_to_list(struct iscsi_cmd *, u32, u32, int, u32);
-extern struct iscsi_r2t *iscsit_get_r2t_for_eos(struct iscsi_cmd *, u32, u32);
-extern struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *);
-extern void iscsit_free_r2t(struct iscsi_r2t *, struct iscsi_cmd *);
-extern void iscsit_free_r2ts_from_list(struct iscsi_cmd *);
-extern struct iscsi_cmd *iscsit_alloc_cmd(struct iscsi_conn *, gfp_t);
-extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, gfp_t);
-extern struct iscsi_seq *iscsit_get_seq_holder_for_datain(struct iscsi_cmd *, u32);
-extern struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *);
-extern struct iscsi_r2t *iscsit_get_holder_for_r2tsn(struct iscsi_cmd *, u32);
-extern int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+struct iscsit_cmd;
+struct iscsit_conn;
+struct iscsi_conn_recovery;
+struct iscsit_session;
+
+extern int iscsit_add_r2t_to_list(struct iscsit_cmd *, u32, u32, int, u32);
+extern struct iscsi_r2t *iscsit_get_r2t_for_eos(struct iscsit_cmd *, u32, u32);
+extern struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsit_cmd *);
+extern void iscsit_free_r2t(struct iscsi_r2t *, struct iscsit_cmd *);
+extern void iscsit_free_r2ts_from_list(struct iscsit_cmd *);
+extern struct iscsit_cmd *iscsit_allocate_cmd(struct iscsit_conn *, int);
+extern struct iscsi_seq *iscsit_get_seq_holder_for_datain(struct iscsit_cmd *, u32);
+extern struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsit_cmd *);
+extern struct iscsi_r2t *iscsit_get_holder_for_r2tsn(struct iscsit_cmd *, u32);
+extern int iscsit_sequence_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
unsigned char *, __be32 cmdsn);
-extern int iscsit_check_unsolicited_dataout(struct iscsi_cmd *, unsigned char *);
-extern struct iscsi_cmd *iscsit_find_cmd_from_itt(struct iscsi_conn *, itt_t);
-extern struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(struct iscsi_conn *,
+extern struct iscsit_cmd *iscsit_find_cmd_from_itt_or_dump(struct iscsit_conn *,
itt_t, u32);
-extern struct iscsi_cmd *iscsit_find_cmd_from_ttt(struct iscsi_conn *, u32);
-extern int iscsit_find_cmd_for_recovery(struct iscsi_session *, struct iscsi_cmd **,
+extern struct iscsit_cmd *iscsit_find_cmd_from_ttt(struct iscsit_conn *, u32);
+extern int iscsit_find_cmd_for_recovery(struct iscsit_session *, struct iscsit_cmd **,
struct iscsi_conn_recovery **, itt_t);
-extern void iscsit_add_cmd_to_immediate_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
-extern struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *);
-extern void iscsit_add_cmd_to_response_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
-extern struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *);
-extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_conn *);
-extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *);
-extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *);
-extern void iscsit_release_cmd(struct iscsi_cmd *);
-extern void iscsit_free_cmd(struct iscsi_cmd *, bool);
-extern int iscsit_check_session_usage_count(struct iscsi_session *);
-extern void iscsit_dec_session_usage_count(struct iscsi_session *);
-extern void iscsit_inc_session_usage_count(struct iscsi_session *);
-extern int iscsit_set_sync_and_steering_values(struct iscsi_conn *);
-extern struct iscsi_conn *iscsit_get_conn_from_cid(struct iscsi_session *, u16);
-extern struct iscsi_conn *iscsit_get_conn_from_cid_rcfr(struct iscsi_session *, u16);
-extern void iscsit_check_conn_usage_count(struct iscsi_conn *);
-extern void iscsit_dec_conn_usage_count(struct iscsi_conn *);
-extern void iscsit_inc_conn_usage_count(struct iscsi_conn *);
-extern void iscsit_mod_nopin_response_timer(struct iscsi_conn *);
-extern void iscsit_start_nopin_response_timer(struct iscsi_conn *);
-extern void iscsit_stop_nopin_response_timer(struct iscsi_conn *);
-extern void __iscsit_start_nopin_timer(struct iscsi_conn *);
-extern void iscsit_start_nopin_timer(struct iscsi_conn *);
-extern void iscsit_stop_nopin_timer(struct iscsi_conn *);
-extern int iscsit_send_tx_data(struct iscsi_cmd *, struct iscsi_conn *, int);
-extern int iscsit_fe_sendpage_sg(struct iscsi_cmd *, struct iscsi_conn *);
-extern int iscsit_tx_login_rsp(struct iscsi_conn *, u8, u8);
-extern void iscsit_print_session_params(struct iscsi_session *);
-extern int iscsit_print_dev_to_proc(char *, char **, off_t, int);
-extern int iscsit_print_sessions_to_proc(char *, char **, off_t, int);
-extern int iscsit_print_tpg_to_proc(char *, char **, off_t, int);
-extern int rx_data(struct iscsi_conn *, struct kvec *, int, int);
-extern int tx_data(struct iscsi_conn *, struct kvec *, int, int);
-extern void iscsit_collect_login_stats(struct iscsi_conn *, u8, u8);
-extern struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsi_conn *);
+extern void iscsit_add_cmd_to_immediate_queue(struct iscsit_cmd *, struct iscsit_conn *, u8);
+extern struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsit_conn *);
+extern int iscsit_add_cmd_to_response_queue(struct iscsit_cmd *, struct iscsit_conn *, u8);
+extern struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsit_conn *);
+extern bool iscsit_conn_all_queues_empty(struct iscsit_conn *);
+extern void iscsit_free_queue_reqs_for_conn(struct iscsit_conn *);
+extern void iscsit_release_cmd(struct iscsit_cmd *);
+extern void __iscsit_free_cmd(struct iscsit_cmd *, bool);
+extern void iscsit_free_cmd(struct iscsit_cmd *, bool);
+extern bool iscsit_check_session_usage_count(struct iscsit_session *sess, bool can_sleep);
+extern void iscsit_dec_session_usage_count(struct iscsit_session *);
+extern void iscsit_inc_session_usage_count(struct iscsit_session *);
+extern struct iscsit_conn *iscsit_get_conn_from_cid(struct iscsit_session *, u16);
+extern struct iscsit_conn *iscsit_get_conn_from_cid_rcfr(struct iscsit_session *, u16);
+extern void iscsit_check_conn_usage_count(struct iscsit_conn *);
+extern void iscsit_dec_conn_usage_count(struct iscsit_conn *);
+extern void iscsit_inc_conn_usage_count(struct iscsit_conn *);
+extern void iscsit_handle_nopin_response_timeout(struct timer_list *t);
+extern void iscsit_mod_nopin_response_timer(struct iscsit_conn *);
+extern void iscsit_start_nopin_response_timer(struct iscsit_conn *);
+extern void iscsit_stop_nopin_response_timer(struct iscsit_conn *);
+extern void iscsit_handle_nopin_timeout(struct timer_list *t);
+extern void __iscsit_start_nopin_timer(struct iscsit_conn *);
+extern void iscsit_start_nopin_timer(struct iscsit_conn *);
+extern void iscsit_stop_nopin_timer(struct iscsit_conn *);
+extern void iscsit_login_timeout(struct timer_list *t);
+extern void iscsit_start_login_timer(struct iscsit_conn *, struct task_struct *kthr);
+extern void iscsit_stop_login_timer(struct iscsit_conn *);
+extern int iscsit_set_login_timer_kworker(struct iscsit_conn *, struct task_struct *kthr);
+extern int iscsit_send_tx_data(struct iscsit_cmd *, struct iscsit_conn *, int);
+extern int iscsit_fe_sendpage_sg(struct iscsit_cmd *, struct iscsit_conn *);
+extern int iscsit_tx_login_rsp(struct iscsit_conn *, u8, u8);
+extern int rx_data(struct iscsit_conn *, struct kvec *, int, int);
+extern int tx_data(struct iscsit_conn *, struct kvec *, int, int);
+extern void iscsit_collect_login_stats(struct iscsit_conn *, u8, u8);
+extern struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsit_conn *);
+extern void iscsit_fill_cxn_timeout_err_stats(struct iscsit_session *);
#endif /*** ISCSI_TARGET_UTIL_H ***/
diff --git a/drivers/target/loopback/Kconfig b/drivers/target/loopback/Kconfig
index abe8ecbcdf06..f40f3160731b 100644
--- a/drivers/target/loopback/Kconfig
+++ b/drivers/target/loopback/Kconfig
@@ -1,5 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
config LOOPBACK_TARGET
tristate "TCM Virtual SAS target and Linux/SCSI LDD fabric loopback module"
+ depends on SCSI
help
Say Y here to enable the TCM Virtual SAS target and Linux/SCSI LLD
fabric loopback module.
diff --git a/drivers/target/loopback/Makefile b/drivers/target/loopback/Makefile
index 6abebdf95659..336bd44bf9d2 100644
--- a/drivers/target/loopback/Makefile
+++ b/drivers/target/loopback/Makefile
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_LOOPBACK_TARGET) += tcm_loop.o
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 568ad25f25d3..01a8e349dc4d 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -3,7 +3,7 @@
* This file contains the Linux/SCSI LLD virtual SCSI initiator driver
* for emulated SAS initiator ports
*
- * © Copyright 2011 RisingTide Systems LLC.
+ * © Copyright 2011-2013 Datera, Inc.
*
* Licensed to the Linux Foundation under the General Public License (GPL) version 2.
*
@@ -34,69 +34,57 @@
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
-#include <target/target_core_fabric_configfs.h>
-#include <target/target_core_configfs.h>
#include "tcm_loop.h"
#define to_tcm_loop_hba(hba) container_of(hba, struct tcm_loop_hba, dev)
-/* Local pointer to allocated TCM configfs fabric module */
-static struct target_fabric_configfs *tcm_loop_fabric_configfs;
-
-static struct workqueue_struct *tcm_loop_workqueue;
static struct kmem_cache *tcm_loop_cmd_cache;
static int tcm_loop_hba_no_cnt;
static int tcm_loop_queue_status(struct se_cmd *se_cmd);
+static unsigned int tcm_loop_nr_hw_queues = 1;
+module_param_named(nr_hw_queues, tcm_loop_nr_hw_queues, uint, 0644);
+
+static unsigned int tcm_loop_can_queue = 1024;
+module_param_named(can_queue, tcm_loop_can_queue, uint, 0644);
+
+static unsigned int tcm_loop_cmd_per_lun = 1024;
+module_param_named(cmd_per_lun, tcm_loop_cmd_per_lun, uint, 0644);
+
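These three knobs expose the previously hard-coded host limits at load time, e.g. modprobe tcm_loop nr_hw_queues=4 can_queue=2048 (values illustrative); they are read per emulated HBA in tcm_loop_driver_probe() below.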
/*
* Called from struct target_core_fabric_ops->check_stop_free()
*/
static int tcm_loop_check_stop_free(struct se_cmd *se_cmd)
{
- /*
- * Do not release struct se_cmd's containing a valid TMR
- * pointer. These will be released directly in tcm_loop_device_reset()
- * with transport_generic_free_cmd().
- */
- if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
- return 0;
- /*
- * Release the struct se_cmd, which will make a callback to release
- * struct tcm_loop_cmd * in tcm_loop_deallocate_core_cmd()
- */
- transport_generic_free_cmd(se_cmd, 0);
- return 1;
+ return transport_generic_free_cmd(se_cmd, 0);
}
static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
{
struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
struct tcm_loop_cmd, tl_se_cmd);
+ struct scsi_cmnd *sc = tl_cmd->sc;
- kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
+ if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
+ kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
+ else
+ scsi_done(sc);
}
static int tcm_loop_show_info(struct seq_file *m, struct Scsi_Host *host)
{
- seq_printf(m, "tcm_loop_proc_info()\n");
+ seq_puts(m, "tcm_loop_proc_info()\n");
return 0;
}
static int tcm_loop_driver_probe(struct device *);
-static int tcm_loop_driver_remove(struct device *);
+static void tcm_loop_driver_remove(struct device *);
-static int pseudo_lld_bus_match(struct device *dev,
- struct device_driver *dev_driver)
-{
- return 1;
-}
-
-static struct bus_type tcm_loop_lld_bus = {
+static const struct bus_type tcm_loop_lld_bus = {
.name = "tcm_loop_bus",
- .match = pseudo_lld_bus_match,
.probe = tcm_loop_driver_probe,
.remove = tcm_loop_driver_remove,
};
@@ -108,64 +96,17 @@ static struct device_driver tcm_loop_driverfs = {
/*
* Used with root_device_register() in tcm_loop_alloc_core_bus() below
*/
-struct device *tcm_loop_primary;
-
-/*
- * Copied from drivers/scsi/libfc/fc_fcp.c:fc_change_queue_depth() and
- * drivers/scsi/libiscsi.c:iscsi_change_queue_depth()
- */
-static int tcm_loop_change_queue_depth(
- struct scsi_device *sdev,
- int depth,
- int reason)
-{
- switch (reason) {
- case SCSI_QDEPTH_DEFAULT:
- scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
- break;
- case SCSI_QDEPTH_QFULL:
- scsi_track_queue_full(sdev, depth);
- break;
- case SCSI_QDEPTH_RAMP_UP:
- scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
- break;
- default:
- return -EOPNOTSUPP;
- }
- return sdev->queue_depth;
-}
-
-/*
- * Locate the SAM Task Attr from struct scsi_cmnd *
- */
-static int tcm_loop_sam_attr(struct scsi_cmnd *sc)
-{
- if (sc->device->tagged_supported) {
- switch (sc->tag) {
- case HEAD_OF_QUEUE_TAG:
- return MSG_HEAD_TAG;
- case ORDERED_QUEUE_TAG:
- return MSG_ORDERED_TAG;
- default:
- break;
- }
- }
-
- return MSG_SIMPLE_TAG;
-}
+static struct device *tcm_loop_primary;
-static void tcm_loop_submission_work(struct work_struct *work)
+static void tcm_loop_target_queue_cmd(struct tcm_loop_cmd *tl_cmd)
{
- struct tcm_loop_cmd *tl_cmd =
- container_of(work, struct tcm_loop_cmd, work);
struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
struct scsi_cmnd *sc = tl_cmd->sc;
struct tcm_loop_nexus *tl_nexus;
struct tcm_loop_hba *tl_hba;
struct tcm_loop_tpg *tl_tpg;
struct scatterlist *sgl_bidi = NULL;
- u32 sgl_bidi_count = 0;
- int rc;
+ u32 sgl_bidi_count = 0, transfer_length;
tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
@@ -178,37 +119,46 @@ static void tcm_loop_submission_work(struct work_struct *work)
set_host_byte(sc, DID_NO_CONNECT);
goto out_done;
}
-
- tl_nexus = tl_hba->tl_nexus;
+ if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) {
+ set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
+ goto out_done;
+ }
+ tl_nexus = tl_tpg->tl_nexus;
if (!tl_nexus) {
- scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
- " does not exist\n");
+ scmd_printk(KERN_ERR, sc,
+ "TCM_Loop I_T Nexus does not exist\n");
set_host_byte(sc, DID_ERROR);
goto out_done;
}
- if (scsi_bidi_cmnd(sc)) {
- struct scsi_data_buffer *sdb = scsi_in(sc);
-
- sgl_bidi = sdb->table.sgl;
- sgl_bidi_count = sdb->table.nents;
- se_cmd->se_cmd_flags |= SCF_BIDI;
+ transfer_length = scsi_transfer_length(sc);
+ if (!scsi_prot_sg_count(sc) &&
+ scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) {
+ se_cmd->prot_pto = true;
+ /*
+		 * The loopback transport doesn't support the WRITE_GENERATE
+		 * and READ_STRIP protection information operations, so go
+		 * ahead unprotected.
+ */
+ transfer_length = scsi_bufflen(sc);
}
- rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
- &tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
- scsi_bufflen(sc), tcm_loop_sam_attr(sc),
- sc->sc_data_direction, 0,
- scsi_sglist(sc), scsi_sg_count(sc),
- sgl_bidi, sgl_bidi_count);
- if (rc < 0) {
- set_host_byte(sc, DID_NO_CONNECT);
- goto out_done;
- }
+
+ se_cmd->tag = tl_cmd->sc_cmd_tag;
+ target_init_cmd(se_cmd, tl_nexus->se_sess, &tl_cmd->tl_sense_buf[0],
+ tl_cmd->sc->device->lun, transfer_length,
+ TCM_SIMPLE_TAG, sc->sc_data_direction, 0);
+
+ if (target_submit_prep(se_cmd, sc->cmnd, scsi_sglist(sc),
+ scsi_sg_count(sc), sgl_bidi, sgl_bidi_count,
+ scsi_prot_sglist(sc), scsi_prot_sg_count(sc),
+ GFP_ATOMIC))
+ return;
+
+ target_submit(se_cmd);
return;
out_done:
- sc->scsi_done(sc);
- return;
+ scsi_done(sc);
}
/*
@@ -217,24 +167,18 @@ out_done:
*/
static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
{
- struct tcm_loop_cmd *tl_cmd;
+ struct tcm_loop_cmd *tl_cmd = scsi_cmd_priv(sc);
- pr_debug("tcm_loop_queuecommand() %d:%d:%d:%d got CDB: 0x%02x"
- " scsi_buf_len: %u\n", sc->device->host->host_no,
- sc->device->id, sc->device->channel, sc->device->lun,
- sc->cmnd[0], scsi_bufflen(sc));
-
- tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
- if (!tl_cmd) {
- pr_err("Unable to allocate struct tcm_loop_cmd\n");
- set_host_byte(sc, DID_ERROR);
- sc->scsi_done(sc);
- return 0;
- }
+ pr_debug("%s() %d:%d:%d:%llu got CDB: 0x%02x scsi_buf_len: %u\n",
+ __func__, sc->device->host->host_no, sc->device->id,
+ sc->device->channel, sc->device->lun, sc->cmnd[0],
+ scsi_bufflen(sc));
+ memset(tl_cmd, 0, sizeof(*tl_cmd));
tl_cmd->sc = sc;
- INIT_WORK(&tl_cmd->work, tcm_loop_submission_work);
- queue_work(tcm_loop_workqueue, &tl_cmd->work);
+ tl_cmd->sc_cmd_tag = blk_mq_unique_tag(scsi_cmd_to_rq(sc));
+
+ tcm_loop_target_queue_cmd(tl_cmd);
return 0;
}
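Setting .cmd_size in the host template makes the SCSI midlayer co-allocate the driver's per-command state with each scsi_cmnd, so queuecommand drops its failable GFP_ATOMIC slab allocation and simply fetches the embedded area. The pairing in miniature (structure and names illustrative):

#include <linux/string.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

struct demo_cmd {
	struct scsi_cmnd *sc;
};

/* In the host template: .cmd_size = sizeof(struct demo_cmd), */

static int demo_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
{
	struct demo_cmd *dc = scsi_cmd_priv(sc);	/* never NULL */

	memset(dc, 0, sizeof(*dc));
	dc->sc = sc;
	return 0;
}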
@@ -242,121 +186,135 @@ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
* Called from SCSI EH process context to issue a LUN_RESET TMR
* to struct scsi_device
*/
-static int tcm_loop_device_reset(struct scsi_cmnd *sc)
+static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
+ u64 lun, int task, enum tcm_tmreq_table tmr)
{
- struct se_cmd *se_cmd = NULL;
- struct se_portal_group *se_tpg;
+ struct se_cmd *se_cmd;
struct se_session *se_sess;
- struct tcm_loop_cmd *tl_cmd = NULL;
- struct tcm_loop_hba *tl_hba;
struct tcm_loop_nexus *tl_nexus;
- struct tcm_loop_tmr *tl_tmr = NULL;
- struct tcm_loop_tpg *tl_tpg;
- int ret = FAILED, rc;
- /*
- * Locate the tcm_loop_hba_t pointer
- */
- tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
+ struct tcm_loop_cmd *tl_cmd;
+ int ret = TMR_FUNCTION_FAILED, rc;
+
/*
* Locate the tl_nexus and se_sess pointers
*/
- tl_nexus = tl_hba->tl_nexus;
+ tl_nexus = tl_tpg->tl_nexus;
if (!tl_nexus) {
- pr_err("Unable to perform device reset without"
- " active I_T Nexus\n");
- return FAILED;
+ pr_err("Unable to perform device reset without active I_T Nexus\n");
+ return ret;
}
- se_sess = tl_nexus->se_sess;
- /*
- * Locate the tl_tpg and se_tpg pointers from TargetID in sc->device->id
- */
- tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
- se_tpg = &tl_tpg->tl_se_tpg;
tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
- if (!tl_cmd) {
- pr_err("Unable to allocate memory for tl_cmd\n");
- return FAILED;
- }
+ if (!tl_cmd)
+ return ret;
- tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL);
- if (!tl_tmr) {
- pr_err("Unable to allocate memory for tl_tmr\n");
- goto release;
- }
- init_waitqueue_head(&tl_tmr->tl_tmr_wait);
+ init_completion(&tl_cmd->tmr_done);
se_cmd = &tl_cmd->tl_se_cmd;
- /*
- * Initialize struct se_cmd descriptor from target_core_mod infrastructure
- */
- transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0,
- DMA_NONE, MSG_SIMPLE_TAG,
- &tl_cmd->tl_sense_buf[0]);
+ se_sess = tl_tpg->tl_nexus->se_sess;
- rc = core_tmr_alloc_req(se_cmd, tl_tmr, TMR_LUN_RESET, GFP_KERNEL);
+ rc = target_submit_tmr(se_cmd, se_sess, tl_cmd->tl_sense_buf, lun,
+ NULL, tmr, GFP_KERNEL, task,
+ TARGET_SCF_ACK_KREF);
if (rc < 0)
goto release;
+ wait_for_completion(&tl_cmd->tmr_done);
+ ret = se_cmd->se_tmr_req->response;
+ target_put_sess_cmd(se_cmd);
+
+out:
+ return ret;
+
+release:
+ kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
+ goto out;
+}
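The waitqueue-plus-atomic-flag handshake is gone; the submitter blocks on a completion that the fabric's TM response callback signals. Assuming the response side pairs with the wait as sketched here:

#include <linux/completion.h>

/* Sketch of the response-path counterpart to wait_for_completion() above. */
static void demo_queue_tm_rsp_sketch(struct tcm_loop_cmd *tl_cmd)
{
	complete(&tl_cmd->tmr_done);	/* wakes tcm_loop_issue_tmr() */
}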
+
+static int tcm_loop_abort_task(struct scsi_cmnd *sc)
+{
+ struct tcm_loop_hba *tl_hba;
+ struct tcm_loop_tpg *tl_tpg;
+ int ret;
+
/*
- * Locate the underlying TCM struct se_lun from sc->device->lun
- */
- if (transport_lookup_tmr_lun(se_cmd, sc->device->lun) < 0)
- goto release;
- /*
- * Queue the TMR to TCM Core and sleep waiting for tcm_loop_queue_tm_rsp()
- * to wake us up.
- */
- transport_generic_handle_tmr(se_cmd);
- wait_event(tl_tmr->tl_tmr_wait, atomic_read(&tl_tmr->tmr_complete));
- /*
- * The TMR LUN_RESET has completed, check the response status and
- * then release allocations.
+	 * Locate the struct tcm_loop_hba pointer
*/
- ret = (se_cmd->se_tmr_req->response == TMR_FUNCTION_COMPLETE) ?
- SUCCESS : FAILED;
-release:
- if (se_cmd)
- transport_generic_free_cmd(se_cmd, 1);
- else
- kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
- kfree(tl_tmr);
- return ret;
+ tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
+ tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
+ ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
+ blk_mq_unique_tag(scsi_cmd_to_rq(sc)),
+ TMR_ABORT_TASK);
+ return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
}
-static int tcm_loop_slave_alloc(struct scsi_device *sd)
+/*
+ * Called from SCSI EH process context to issue a LUN_RESET TMR
+ * to struct scsi_device
+ */
+static int tcm_loop_device_reset(struct scsi_cmnd *sc)
{
- set_bit(QUEUE_FLAG_BIDI, &sd->request_queue->queue_flags);
- return 0;
+ struct tcm_loop_hba *tl_hba;
+ struct tcm_loop_tpg *tl_tpg;
+ int ret;
+
+ /*
+ * Locate the tcm_loop_hba_t pointer
+ */
+ tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
+ tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
+
+ ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
+ 0, TMR_LUN_RESET);
+ return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
}
-static int tcm_loop_slave_configure(struct scsi_device *sd)
+static int tcm_loop_target_reset(struct scsi_cmnd *sc)
{
- return 0;
+ struct tcm_loop_hba *tl_hba;
+ struct tcm_loop_tpg *tl_tpg;
+
+ /*
+	 * Locate the struct tcm_loop_hba pointer
+ */
+ tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
+ if (!tl_hba) {
+ pr_err("Unable to perform device reset without active I_T Nexus\n");
+ return FAILED;
+ }
+ /*
+ * Locate the tl_tpg pointer from TargetID in sc->device->id
+ */
+ tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
+ if (tl_tpg) {
+ tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
+ return SUCCESS;
+ }
+ return FAILED;
}
-static struct scsi_host_template tcm_loop_driver_template = {
+static const struct scsi_host_template tcm_loop_driver_template = {
.show_info = tcm_loop_show_info,
.proc_name = "tcm_loopback",
.name = "TCM_Loopback",
.queuecommand = tcm_loop_queuecommand,
- .change_queue_depth = tcm_loop_change_queue_depth,
+ .change_queue_depth = scsi_change_queue_depth,
+ .eh_abort_handler = tcm_loop_abort_task,
.eh_device_reset_handler = tcm_loop_device_reset,
- .can_queue = 1024,
+ .eh_target_reset_handler = tcm_loop_target_reset,
.this_id = -1,
.sg_tablesize = 256,
- .cmd_per_lun = 1024,
.max_sectors = 0xFFFF,
- .use_clustering = DISABLE_CLUSTERING,
- .slave_alloc = tcm_loop_slave_alloc,
- .slave_configure = tcm_loop_slave_configure,
+ .dma_boundary = PAGE_SIZE - 1,
.module = THIS_MODULE,
+ .track_queue_depth = 1,
+ .cmd_size = sizeof(struct tcm_loop_cmd),
};
static int tcm_loop_driver_probe(struct device *dev)
{
struct tcm_loop_hba *tl_hba;
struct Scsi_Host *sh;
- int error;
+ int error, host_prot;
tl_hba = to_tcm_loop_hba(dev);
@@ -378,7 +336,17 @@ static int tcm_loop_driver_probe(struct device *dev)
sh->max_id = 2;
sh->max_lun = 0;
sh->max_channel = 0;
- sh->max_cmd_len = TL_SCSI_MAX_CMD_LEN;
+ sh->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;
+ sh->nr_hw_queues = tcm_loop_nr_hw_queues;
+ sh->can_queue = tcm_loop_can_queue;
+ sh->cmd_per_lun = tcm_loop_cmd_per_lun;
+
+ host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
+ SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
+ SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;
+
+ scsi_host_set_prot(sh, host_prot);
+ scsi_host_set_guard(sh, SHOST_DIX_GUARD_CRC);
error = scsi_add_host(sh, &tl_hba->dev);
if (error) {
@@ -389,7 +357,7 @@ static int tcm_loop_driver_probe(struct device *dev)
return 0;
}
-static int tcm_loop_driver_remove(struct device *dev)
+static void tcm_loop_driver_remove(struct device *dev)
{
struct tcm_loop_hba *tl_hba;
struct Scsi_Host *sh;
@@ -399,7 +367,6 @@ static int tcm_loop_driver_remove(struct device *dev)
scsi_remove_host(sh);
scsi_host_put(sh);
- return 0;
}
static void tcm_loop_release_adapter(struct device *dev)
@@ -423,8 +390,8 @@ static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host
ret = device_register(&tl_hba->dev);
if (ret) {
- pr_err("device_register() failed for"
- " tl_hba->dev: %d\n", ret);
+ pr_err("device_register() failed for tl_hba->dev: %d\n", ret);
+ put_device(&tl_hba->dev);
return -ENODEV;
}
@@ -453,8 +420,7 @@ static int tcm_loop_alloc_core_bus(void)
ret = driver_register(&tcm_loop_driverfs);
if (ret) {
- pr_err("driver_register() failed for"
- "tcm_loop_driverfs\n");
+ pr_err("driver_register() failed for tcm_loop_driverfs\n");
goto bus_unreg;
}
@@ -477,152 +443,26 @@ static void tcm_loop_release_core_bus(void)
pr_debug("Releasing TCM Loop Core BUS\n");
}
-static char *tcm_loop_get_fabric_name(void)
+static inline struct tcm_loop_tpg *tl_tpg(struct se_portal_group *se_tpg)
{
- return "loopback";
-}
-
-static u8 tcm_loop_get_fabric_proto_ident(struct se_portal_group *se_tpg)
-{
- struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
- struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
- /*
- * tl_proto_id is set at tcm_loop_configfs.c:tcm_loop_make_scsi_hba()
- * time based on the protocol dependent prefix of the passed configfs group.
- *
- * Based upon tl_proto_id, TCM_Loop emulates the requested fabric
- * ProtocolID using target_core_fabric_lib.c symbols.
- */
- switch (tl_hba->tl_proto_id) {
- case SCSI_PROTOCOL_SAS:
- return sas_get_fabric_proto_ident(se_tpg);
- case SCSI_PROTOCOL_FCP:
- return fc_get_fabric_proto_ident(se_tpg);
- case SCSI_PROTOCOL_ISCSI:
- return iscsi_get_fabric_proto_ident(se_tpg);
- default:
- pr_err("Unknown tl_proto_id: 0x%02x, using"
- " SAS emulation\n", tl_hba->tl_proto_id);
- break;
- }
-
- return sas_get_fabric_proto_ident(se_tpg);
+ return container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
}
static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
{
- struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
/*
- * Return the passed NAA identifier for the SAS Target Port
+ * Return the passed NAA identifier for the Target Port
*/
- return &tl_tpg->tl_hba->tl_wwn_address[0];
+ return &tl_tpg(se_tpg)->tl_hba->tl_wwn_address[0];
}
static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg)
{
- struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
/*
* This Tag is used when forming SCSI Name identifier in EVPD=1 0x83
* to represent the SCSI Target Port.
*/
- return tl_tpg->tl_tpgt;
-}
-
-static u32 tcm_loop_get_default_depth(struct se_portal_group *se_tpg)
-{
- return 1;
-}
-
-static u32 tcm_loop_get_pr_transport_id(
- struct se_portal_group *se_tpg,
- struct se_node_acl *se_nacl,
- struct t10_pr_registration *pr_reg,
- int *format_code,
- unsigned char *buf)
-{
- struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
- struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
-
- switch (tl_hba->tl_proto_id) {
- case SCSI_PROTOCOL_SAS:
- return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
- format_code, buf);
- case SCSI_PROTOCOL_FCP:
- return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
- format_code, buf);
- case SCSI_PROTOCOL_ISCSI:
- return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
- format_code, buf);
- default:
- pr_err("Unknown tl_proto_id: 0x%02x, using"
- " SAS emulation\n", tl_hba->tl_proto_id);
- break;
- }
-
- return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
- format_code, buf);
-}
-
-static u32 tcm_loop_get_pr_transport_id_len(
- struct se_portal_group *se_tpg,
- struct se_node_acl *se_nacl,
- struct t10_pr_registration *pr_reg,
- int *format_code)
-{
- struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
- struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
-
- switch (tl_hba->tl_proto_id) {
- case SCSI_PROTOCOL_SAS:
- return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
- format_code);
- case SCSI_PROTOCOL_FCP:
- return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
- format_code);
- case SCSI_PROTOCOL_ISCSI:
- return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
- format_code);
- default:
- pr_err("Unknown tl_proto_id: 0x%02x, using"
- " SAS emulation\n", tl_hba->tl_proto_id);
- break;
- }
-
- return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
- format_code);
-}
-
-/*
- * Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above
- * Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations.
- */
-static char *tcm_loop_parse_pr_out_transport_id(
- struct se_portal_group *se_tpg,
- const char *buf,
- u32 *out_tid_len,
- char **port_nexus_ptr)
-{
- struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
- struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
-
- switch (tl_hba->tl_proto_id) {
- case SCSI_PROTOCOL_SAS:
- return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
- port_nexus_ptr);
- case SCSI_PROTOCOL_FCP:
- return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
- port_nexus_ptr);
- case SCSI_PROTOCOL_ISCSI:
- return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
- port_nexus_ptr);
- default:
- pr_err("Unknown tl_proto_id: 0x%02x, using"
- " SAS emulation\n", tl_hba->tl_proto_id);
- break;
- }
-
- return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
- port_nexus_ptr);
+ return tl_tpg(se_tpg)->tl_tpgt;
}
/*
@@ -634,57 +474,11 @@ static int tcm_loop_check_demo_mode(struct se_portal_group *se_tpg)
return 1;
}
-static int tcm_loop_check_demo_mode_cache(struct se_portal_group *se_tpg)
-{
- return 0;
-}
-
-/*
- * Allow I_T Nexus full READ-WRITE access without explict Initiator Node ACLs for
- * local virtual Linux/SCSI LLD passthrough into VM hypervisor guest
- */
-static int tcm_loop_check_demo_mode_write_protect(struct se_portal_group *se_tpg)
-{
- return 0;
-}
-
-/*
- * Because TCM_Loop does not use explict ACLs and MappedLUNs, this will
- * never be called for TCM_Loop by target_core_fabric_configfs.c code.
- * It has been added here as a nop for target_fabric_tf_ops_check()
- */
-static int tcm_loop_check_prod_mode_write_protect(struct se_portal_group *se_tpg)
-{
- return 0;
-}
-
-static struct se_node_acl *tcm_loop_tpg_alloc_fabric_acl(
- struct se_portal_group *se_tpg)
-{
- struct tcm_loop_nacl *tl_nacl;
-
- tl_nacl = kzalloc(sizeof(struct tcm_loop_nacl), GFP_KERNEL);
- if (!tl_nacl) {
- pr_err("Unable to allocate struct tcm_loop_nacl\n");
- return NULL;
- }
-
- return &tl_nacl->se_node_acl;
-}
-
-static void tcm_loop_tpg_release_fabric_acl(
- struct se_portal_group *se_tpg,
- struct se_node_acl *se_nacl)
+static int tcm_loop_check_prot_fabric_only(struct se_portal_group *se_tpg)
{
- struct tcm_loop_nacl *tl_nacl = container_of(se_nacl,
- struct tcm_loop_nacl, se_node_acl);
-
- kfree(tl_nacl);
-}
-
-static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg)
-{
- return 1;
+ struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
+ tl_se_tpg);
+ return tl_tpg->tl_fabric_prot_type;
}
static u32 tcm_loop_sess_get_index(struct se_session *se_sess)
@@ -692,16 +486,6 @@ static u32 tcm_loop_sess_get_index(struct se_session *se_sess)
return 1;
}
-static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl)
-{
- return;
-}
-
-static u32 tcm_loop_get_task_tag(struct se_cmd *se_cmd)
-{
- return 1;
-}
-
static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd)
{
struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
@@ -710,16 +494,6 @@ static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd)
return tl_cmd->sc_cmd_state;
}
-static int tcm_loop_shutdown_session(struct se_session *se_sess)
-{
- return 0;
-}
-
-static void tcm_loop_close_session(struct se_session *se_sess)
-{
- return;
-};
-
static int tcm_loop_write_pending(struct se_cmd *se_cmd)
{
/*
@@ -735,37 +509,15 @@ static int tcm_loop_write_pending(struct se_cmd *se_cmd)
return 0;
}
-static int tcm_loop_write_pending_status(struct se_cmd *se_cmd)
-{
- return 0;
-}
-
-static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
+static int tcm_loop_queue_data_or_status(const char *func,
+ struct se_cmd *se_cmd, u8 scsi_status)
{
struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
struct tcm_loop_cmd, tl_se_cmd);
struct scsi_cmnd *sc = tl_cmd->sc;
- pr_debug("tcm_loop_queue_data_in() called for scsi_cmnd: %p"
- " cdb: 0x%02x\n", sc, sc->cmnd[0]);
-
- sc->result = SAM_STAT_GOOD;
- set_host_byte(sc, DID_OK);
- if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
- (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
- scsi_set_resid(sc, se_cmd->residual_count);
- sc->scsi_done(sc);
- return 0;
-}
-
-static int tcm_loop_queue_status(struct se_cmd *se_cmd)
-{
- struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
- struct tcm_loop_cmd, tl_se_cmd);
- struct scsi_cmnd *sc = tl_cmd->sc;
-
- pr_debug("tcm_loop_queue_status() called for scsi_cmnd: %p"
- " cdb: 0x%02x\n", sc, sc->cmnd[0]);
+ pr_debug("%s() called for scsi_cmnd: %p cdb: 0x%02x\n",
+ func, sc, sc->cmnd[0]);
if (se_cmd->sense_buffer &&
((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
@@ -774,28 +526,39 @@ static int tcm_loop_queue_status(struct se_cmd *se_cmd)
memcpy(sc->sense_buffer, se_cmd->sense_buffer,
SCSI_SENSE_BUFFERSIZE);
sc->result = SAM_STAT_CHECK_CONDITION;
- set_driver_byte(sc, DRIVER_SENSE);
} else
- sc->result = se_cmd->scsi_status;
+ sc->result = scsi_status;
set_host_byte(sc, DID_OK);
if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
(se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
scsi_set_resid(sc, se_cmd->residual_count);
- sc->scsi_done(sc);
return 0;
}
+static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
+{
+ return tcm_loop_queue_data_or_status(__func__, se_cmd, SAM_STAT_GOOD);
+}
+
+static int tcm_loop_queue_status(struct se_cmd *se_cmd)
+{
+ return tcm_loop_queue_data_or_status(__func__,
+ se_cmd, se_cmd->scsi_status);
+}
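The consolidated helper takes a const char *func parameter because __func__ expands to the name of the enclosing function, so each thin wrapper above keeps its original log prefix while sharing one body. A tiny sketch with hypothetical names:

	static void log_caller(const char *func)
	{
		pr_debug("%s() called\n", func);
	}

	static void foo(void) { log_caller(__func__); } /* logs "foo() called" */
	static void bar(void) { log_caller(__func__); } /* logs "bar() called" */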
+
static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
{
- struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
- struct tcm_loop_tmr *tl_tmr = se_tmr->fabric_tmr_ptr;
- /*
- * The SCSI EH thread will be sleeping on se_tmr->tl_tmr_wait, go ahead
- * and wake up the wait_queue_head_t in tcm_loop_device_reset()
- */
- atomic_set(&tl_tmr->tmr_complete, 1);
- wake_up(&tl_tmr->tl_tmr_wait);
+ struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
+ struct tcm_loop_cmd, tl_se_cmd);
+
+ /* Wake up tcm_loop_issue_tmr(). */
+ complete(&tl_cmd->tmr_done);
+}
+
+static void tcm_loop_aborted_task(struct se_cmd *se_cmd)
+{
+ return;
}
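tcm_loop_queue_tm_rsp() now pairs with a wait in tcm_loop_issue_tmr(), which falls outside this hunk; the old atomic_t plus wait_queue_head_t handshake collapses into a single struct completion. A hedged sketch of both sides, with the issuing side reconstructed as an assumption:

	/* issuing side (assumed shape of tcm_loop_issue_tmr()) */
	init_completion(&tl_cmd->tmr_done);
	/* ... submit the TMR to the target core ... */
	wait_for_completion(&tl_cmd->tmr_done);

	/* response side, as in tcm_loop_queue_tm_rsp() above */
	complete(&tl_cmd->tmr_done);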
static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
@@ -824,8 +587,7 @@ static int tcm_loop_port_link(
struct tcm_loop_tpg, tl_se_tpg);
struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
- atomic_inc(&tl_tpg->tl_tpg_port_count);
- smp_mb__after_atomic_inc();
+ atomic_inc_mb(&tl_tpg->tl_tpg_port_count);
/*
* Add Linux/SCSI struct scsi_device by HCTL
*/
@@ -849,8 +611,8 @@ static void tcm_loop_port_unlink(
sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt,
se_lun->unpacked_lun);
if (!sd) {
- pr_err("Unable to locate struct scsi_device for %d:%d:"
- "%d\n", 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
+ pr_err("Unable to locate struct scsi_device for %d:%d:%llu\n",
+ 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
return;
}
/*
@@ -859,70 +621,93 @@ static void tcm_loop_port_unlink(
scsi_remove_device(sd);
scsi_device_put(sd);
- atomic_dec(&tl_tpg->tl_tpg_port_count);
- smp_mb__after_atomic_dec();
+ atomic_dec_mb(&tl_tpg->tl_tpg_port_count);
pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
}
/* End items for tcm_loop_port_cit */
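The atomic_inc_mb()/atomic_dec_mb() helpers used in the port link and unlink paths fold the old open-coded atomic plus smp_mb__after_atomic() pair into one call. Conceptually they behave like the sketch below (an approximation, not the exact definition in include/linux/atomic.h):

	static inline void my_atomic_inc_mb(atomic_t *v)
	{
		/* full barrier semantics around the read-modify-write */
		smp_mb__before_atomic();
		atomic_inc(v);
		smp_mb__after_atomic();
	}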
+static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_show(
+ struct config_item *item, char *page)
+{
+ struct se_portal_group *se_tpg = attrib_to_tpg(item);
+ struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
+ tl_se_tpg);
+
+ return sprintf(page, "%d\n", tl_tpg->tl_fabric_prot_type);
+}
+
+static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_store(
+ struct config_item *item, const char *page, size_t count)
+{
+ struct se_portal_group *se_tpg = attrib_to_tpg(item);
+ struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
+ tl_se_tpg);
+ unsigned long val;
+ int ret = kstrtoul(page, 0, &val);
+
+ if (ret) {
+ pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
+ return ret;
+ }
+ if (val != 0 && val != 1 && val != 3) {
+ pr_err("Invalid qla2xxx fabric_prot_type: %lu\n", val);
+ return -EINVAL;
+ }
+ tl_tpg->tl_fabric_prot_type = val;
+
+ return count;
+}
+
+CONFIGFS_ATTR(tcm_loop_tpg_attrib_, fabric_prot_type);
+
+static struct configfs_attribute *tcm_loop_tpg_attrib_attrs[] = {
+ &tcm_loop_tpg_attrib_attr_fabric_prot_type,
+ NULL,
+};
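CONFIGFS_ATTR() follows a strict naming contract: for prefix tcm_loop_tpg_attrib_ and name fabric_prot_type it expects the _show()/_store() pair defined above and emits a struct configfs_attribute named tcm_loop_tpg_attrib_attr_fabric_prot_type, which the attrs[] table then references. A minimal sketch with a hypothetical demo_ prefix:

	static ssize_t demo_foo_show(struct config_item *item, char *page)
	{
		return sprintf(page, "%d\n", 1);
	}

	static ssize_t demo_foo_store(struct config_item *item,
				      const char *page, size_t count)
	{
		return count;
	}

	CONFIGFS_ATTR(demo_, foo);	/* emits demo_attr_foo */

	static struct configfs_attribute *demo_attrs[] = {
		&demo_attr_foo,
		NULL,
	};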
+
/* Start items for tcm_loop_nexus_cit */
+static int tcm_loop_alloc_sess_cb(struct se_portal_group *se_tpg,
+ struct se_session *se_sess, void *p)
+{
+ struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
+ struct tcm_loop_tpg, tl_se_tpg);
+
+ tl_tpg->tl_nexus = p;
+ return 0;
+}
+
static int tcm_loop_make_nexus(
struct tcm_loop_tpg *tl_tpg,
const char *name)
{
- struct se_portal_group *se_tpg;
struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
struct tcm_loop_nexus *tl_nexus;
- int ret = -ENOMEM;
+ int ret;
- if (tl_tpg->tl_hba->tl_nexus) {
- pr_debug("tl_tpg->tl_hba->tl_nexus already exists\n");
+ if (tl_tpg->tl_nexus) {
+ pr_debug("tl_tpg->tl_nexus already exists\n");
return -EEXIST;
}
- se_tpg = &tl_tpg->tl_se_tpg;
- tl_nexus = kzalloc(sizeof(struct tcm_loop_nexus), GFP_KERNEL);
- if (!tl_nexus) {
- pr_err("Unable to allocate struct tcm_loop_nexus\n");
+ tl_nexus = kzalloc(sizeof(*tl_nexus), GFP_KERNEL);
+ if (!tl_nexus)
return -ENOMEM;
- }
- /*
- * Initialize the struct se_session pointer
- */
- tl_nexus->se_sess = transport_init_session();
+
+ tl_nexus->se_sess = target_setup_session(&tl_tpg->tl_se_tpg, 0, 0,
+ TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
+ name, tl_nexus, tcm_loop_alloc_sess_cb);
if (IS_ERR(tl_nexus->se_sess)) {
ret = PTR_ERR(tl_nexus->se_sess);
- goto out;
- }
- /*
- * Since we are running in 'demo mode' this call with generate a
- * struct se_node_acl for the tcm_loop struct se_portal_group with the SCSI
- * Initiator port name of the passed configfs group 'name'.
- */
- tl_nexus->se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
- se_tpg, (unsigned char *)name);
- if (!tl_nexus->se_sess->se_node_acl) {
- transport_free_session(tl_nexus->se_sess);
- goto out;
+ kfree(tl_nexus);
+ return ret;
}
- /*
- * Now, register the SAS I_T Nexus as active with the call to
- * transport_register_session()
- */
- __transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
- tl_nexus->se_sess, tl_nexus);
- tl_tpg->tl_hba->tl_nexus = tl_nexus;
- pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
- " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
- name);
- return 0;
-out:
- kfree(tl_nexus);
- return ret;
+ pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated %s Initiator Port: %s\n",
+ tcm_loop_dump_proto_id(tl_hba), name);
+ return 0;
}
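target_setup_session() replaces the old four-step init/check-ACL/register sequence, and its callback argument runs after the session and node ACL exist but before transport_register_session() makes the nexus visible, so the fabric can publish its private pointer without a half-initialized window. A sketch of the callback contract, using hypothetical demo_ names:

	static int demo_alloc_sess_cb(struct se_portal_group *se_tpg,
				      struct se_session *se_sess, void *p)
	{
		struct demo_tpg *tpg = container_of(se_tpg, struct demo_tpg,
						    se_tpg);

		tpg->nexus = p;	/* p is the 'private' argument passed below */
		return 0;	/* a non-zero return unwinds session setup */
	}

	/* sess = target_setup_session(&tpg->se_tpg, tag_num, tag_size,
	 *			       TARGET_PROT_NORMAL, initiator_name,
	 *			       nexus, demo_alloc_sess_cb);
	 */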
static int tcm_loop_drop_nexus(
@@ -930,9 +715,8 @@ static int tcm_loop_drop_nexus(
{
struct se_session *se_sess;
struct tcm_loop_nexus *tl_nexus;
- struct tcm_loop_hba *tl_hba = tpg->tl_hba;
- tl_nexus = tpg->tl_hba->tl_nexus;
+ tl_nexus = tpg->tl_nexus;
if (!tl_nexus)
return -ENODEV;
@@ -941,36 +725,34 @@ static int tcm_loop_drop_nexus(
return -ENODEV;
if (atomic_read(&tpg->tl_tpg_port_count)) {
- pr_err("Unable to remove TCM_Loop I_T Nexus with"
- " active TPG port count: %d\n",
- atomic_read(&tpg->tl_tpg_port_count));
+ pr_err("Unable to remove TCM_Loop I_T Nexus with active TPG port count: %d\n",
+ atomic_read(&tpg->tl_tpg_port_count));
return -EPERM;
}
- pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
- " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
- tl_nexus->se_sess->se_node_acl->initiatorname);
+ pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated %s Initiator Port: %s\n",
+ tcm_loop_dump_proto_id(tpg->tl_hba),
+ tl_nexus->se_sess->se_node_acl->initiatorname);
/*
- * Release the SCSI I_T Nexus to the emulated SAS Target Port
+ * Release the SCSI I_T Nexus to the emulated Target Port
*/
- transport_deregister_session(tl_nexus->se_sess);
- tpg->tl_hba->tl_nexus = NULL;
+ target_remove_session(se_sess);
+ tpg->tl_nexus = NULL;
kfree(tl_nexus);
return 0;
}
/* End items for tcm_loop_nexus_cit */
-static ssize_t tcm_loop_tpg_show_nexus(
- struct se_portal_group *se_tpg,
- char *page)
+static ssize_t tcm_loop_tpg_nexus_show(struct config_item *item, char *page)
{
+ struct se_portal_group *se_tpg = to_tpg(item);
struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
struct tcm_loop_tpg, tl_se_tpg);
struct tcm_loop_nexus *tl_nexus;
ssize_t ret;
- tl_nexus = tl_tpg->tl_hba->tl_nexus;
+ tl_nexus = tl_tpg->tl_nexus;
if (!tl_nexus)
return -ENODEV;
@@ -980,11 +762,10 @@ static ssize_t tcm_loop_tpg_show_nexus(
return ret;
}
-static ssize_t tcm_loop_tpg_store_nexus(
- struct se_portal_group *se_tpg,
- const char *page,
- size_t count)
+static ssize_t tcm_loop_tpg_nexus_store(struct config_item *item,
+ const char *page, size_t count)
{
+ struct se_portal_group *se_tpg = to_tpg(item);
struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
struct tcm_loop_tpg, tl_se_tpg);
struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
@@ -1003,8 +784,8 @@ static ssize_t tcm_loop_tpg_store_nexus(
* tcm_loop_make_nexus()
*/
if (strlen(page) >= TL_WWN_ADDR_LEN) {
- pr_err("Emulated NAA Sas Address: %s, exceeds"
- " max: %d\n", page, TL_WWN_ADDR_LEN);
+ pr_err("Emulated NAA Sas Address: %s, exceeds max: %d\n",
+ page, TL_WWN_ADDR_LEN);
return -EINVAL;
}
snprintf(&i_port[0], TL_WWN_ADDR_LEN, "%s", page);
@@ -1012,9 +793,8 @@ static ssize_t tcm_loop_tpg_store_nexus(
ptr = strstr(i_port, "naa.");
if (ptr) {
if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) {
- pr_err("Passed SAS Initiator Port %s does not"
- " match target port protoid: %s\n", i_port,
- tcm_loop_dump_proto_id(tl_hba));
+ pr_err("Passed SAS Initiator Port %s does not match target port protoid: %s\n",
+ i_port, tcm_loop_dump_proto_id(tl_hba));
return -EINVAL;
}
port_ptr = &i_port[0];
@@ -1023,9 +803,8 @@ static ssize_t tcm_loop_tpg_store_nexus(
ptr = strstr(i_port, "fc.");
if (ptr) {
if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) {
- pr_err("Passed FCP Initiator Port %s does not"
- " match target port protoid: %s\n", i_port,
- tcm_loop_dump_proto_id(tl_hba));
+ pr_err("Passed FCP Initiator Port %s does not match target port protoid: %s\n",
+ i_port, tcm_loop_dump_proto_id(tl_hba));
return -EINVAL;
}
port_ptr = &i_port[3]; /* Skip over "fc." */
@@ -1034,16 +813,15 @@ static ssize_t tcm_loop_tpg_store_nexus(
ptr = strstr(i_port, "iqn.");
if (ptr) {
if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) {
- pr_err("Passed iSCSI Initiator Port %s does not"
- " match target port protoid: %s\n", i_port,
- tcm_loop_dump_proto_id(tl_hba));
+ pr_err("Passed iSCSI Initiator Port %s does not match target port protoid: %s\n",
+ i_port, tcm_loop_dump_proto_id(tl_hba));
return -EINVAL;
}
port_ptr = &i_port[0];
goto check_newline;
}
- pr_err("Unable to locate prefix for emulated Initiator Port:"
- " %s\n", i_port);
+ pr_err("Unable to locate prefix for emulated Initiator Port: %s\n",
+ i_port);
return -EINVAL;
/*
* Clear any trailing newline for the NAA WWN
@@ -1059,61 +837,121 @@ check_newline:
return count;
}
-TF_TPG_BASE_ATTR(tcm_loop, nexus, S_IRUGO | S_IWUSR);
+static ssize_t tcm_loop_tpg_transport_status_show(struct config_item *item,
+ char *page)
+{
+ struct se_portal_group *se_tpg = to_tpg(item);
+ struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
+ struct tcm_loop_tpg, tl_se_tpg);
+ const char *status = NULL;
+ ssize_t ret = -EINVAL;
+
+ switch (tl_tpg->tl_transport_status) {
+ case TCM_TRANSPORT_ONLINE:
+ status = "online";
+ break;
+ case TCM_TRANSPORT_OFFLINE:
+ status = "offline";
+ break;
+ default:
+ break;
+ }
+
+ if (status)
+ ret = snprintf(page, PAGE_SIZE, "%s\n", status);
+
+ return ret;
+}
+
+static ssize_t tcm_loop_tpg_transport_status_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_portal_group *se_tpg = to_tpg(item);
+ struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
+ struct tcm_loop_tpg, tl_se_tpg);
+
+ if (!strncmp(page, "online", 6)) {
+ tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
+ return count;
+ }
+ if (!strncmp(page, "offline", 7)) {
+ tl_tpg->tl_transport_status = TCM_TRANSPORT_OFFLINE;
+ if (tl_tpg->tl_nexus) {
+ struct se_session *tl_sess = tl_tpg->tl_nexus->se_sess;
+
+ core_allocate_nexus_loss_ua(tl_sess->se_node_acl);
+ }
+ return count;
+ }
+ return -EINVAL;
+}
+
+static ssize_t tcm_loop_tpg_address_show(struct config_item *item,
+ char *page)
+{
+ struct se_portal_group *se_tpg = to_tpg(item);
+ struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
+ struct tcm_loop_tpg, tl_se_tpg);
+ struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
+
+ if (!tl_hba->sh)
+ return -ENODEV;
+
+ return snprintf(page, PAGE_SIZE, "%d:0:%d\n",
+ tl_hba->sh->host_no, tl_tpg->tl_tpgt);
+}
+
+CONFIGFS_ATTR(tcm_loop_tpg_, nexus);
+CONFIGFS_ATTR(tcm_loop_tpg_, transport_status);
+CONFIGFS_ATTR_RO(tcm_loop_tpg_, address);
static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
- &tcm_loop_tpg_nexus.attr,
+ &tcm_loop_tpg_attr_nexus,
+ &tcm_loop_tpg_attr_transport_status,
+ &tcm_loop_tpg_attr_address,
NULL,
};
/* Start items for tcm_loop_naa_cit */
-struct se_portal_group *tcm_loop_make_naa_tpg(
- struct se_wwn *wwn,
- struct config_group *group,
- const char *name)
+static struct se_portal_group *tcm_loop_make_naa_tpg(struct se_wwn *wwn,
+ const char *name)
{
struct tcm_loop_hba *tl_hba = container_of(wwn,
struct tcm_loop_hba, tl_hba_wwn);
struct tcm_loop_tpg *tl_tpg;
- char *tpgt_str, *end_ptr;
int ret;
- unsigned short int tpgt;
+ unsigned long tpgt;
- tpgt_str = strstr(name, "tpgt_");
- if (!tpgt_str) {
- pr_err("Unable to locate \"tpgt_#\" directory"
- " group\n");
+ if (strstr(name, "tpgt_") != name) {
+ pr_err("Unable to locate \"tpgt_#\" directory group\n");
return ERR_PTR(-EINVAL);
}
- tpgt_str += 5; /* Skip ahead of "tpgt_" */
- tpgt = (unsigned short int) simple_strtoul(tpgt_str, &end_ptr, 0);
+ if (kstrtoul(name+5, 10, &tpgt))
+ return ERR_PTR(-EINVAL);
if (tpgt >= TL_TPGS_PER_HBA) {
- pr_err("Passed tpgt: %hu exceeds TL_TPGS_PER_HBA:"
- " %u\n", tpgt, TL_TPGS_PER_HBA);
+ pr_err("Passed tpgt: %lu exceeds TL_TPGS_PER_HBA: %u\n",
+ tpgt, TL_TPGS_PER_HBA);
return ERR_PTR(-EINVAL);
}
tl_tpg = &tl_hba->tl_hba_tpgs[tpgt];
tl_tpg->tl_hba = tl_hba;
tl_tpg->tl_tpgt = tpgt;
/*
- * Register the tl_tpg as a emulated SAS TCM Target Endpoint
+ * Register the tl_tpg as an emulated TCM Target Endpoint
*/
- ret = core_tpg_register(&tcm_loop_fabric_configfs->tf_ops,
- wwn, &tl_tpg->tl_se_tpg, tl_tpg,
- TRANSPORT_TPG_TYPE_NORMAL);
+ ret = core_tpg_register(wwn, &tl_tpg->tl_se_tpg, tl_hba->tl_proto_id);
if (ret < 0)
return ERR_PTR(-ENOMEM);
- pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s"
- " Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
- config_item_name(&wwn->wwn_group.cg_item), tpgt);
-
+ pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s Target Port %s,t,0x%04lx\n",
+ tcm_loop_dump_proto_id(tl_hba),
+ config_item_name(&wwn->wwn_group.cg_item), tpgt);
return &tl_tpg->tl_se_tpg;
}
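The move from simple_strtoul() to kstrtoul() above is not purely cosmetic: kstrtoul() rejects trailing garbage and overflow instead of silently stopping at the first non-digit character. Illustrative sketch:

	unsigned long v;

	if (kstrtoul("16", 10, &v))	/* succeeds, v == 16 */
		return -EINVAL;

	if (kstrtoul("16abc", 10, &v))	/* fails with -EINVAL, whereas */
		return -EINVAL;		/* simple_strtoul() would yield 16 */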
-void tcm_loop_drop_naa_tpg(
+static void tcm_loop_drop_naa_tpg(
struct se_portal_group *se_tpg)
{
struct se_wwn *wwn = se_tpg->se_tpg_wwn;
@@ -1125,27 +963,27 @@ void tcm_loop_drop_naa_tpg(
tl_hba = tl_tpg->tl_hba;
tpgt = tl_tpg->tl_tpgt;
/*
- * Release the I_T Nexus for the Virtual SAS link if present
+ * Release the I_T Nexus for the Virtual target link if present
*/
tcm_loop_drop_nexus(tl_tpg);
/*
- * Deregister the tl_tpg as a emulated SAS TCM Target Endpoint
+ * Deregister the tl_tpg as an emulated TCM Target Endpoint
*/
core_tpg_deregister(se_tpg);
tl_tpg->tl_hba = NULL;
tl_tpg->tl_tpgt = 0;
- pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s"
- " Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
- config_item_name(&wwn->wwn_group.cg_item), tpgt);
+ pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s Target Port %s,t,0x%04x\n",
+ tcm_loop_dump_proto_id(tl_hba),
+ config_item_name(&wwn->wwn_group.cg_item), tpgt);
}
/* End items for tcm_loop_naa_cit */
/* Start items for tcm_loop_cit */
-struct se_wwn *tcm_loop_make_scsi_hba(
+static struct se_wwn *tcm_loop_make_scsi_hba(
struct target_fabric_configfs *tf,
struct config_group *group,
const char *name)
@@ -1155,11 +993,10 @@ struct se_wwn *tcm_loop_make_scsi_hba(
char *ptr;
int ret, off = 0;
- tl_hba = kzalloc(sizeof(struct tcm_loop_hba), GFP_KERNEL);
- if (!tl_hba) {
- pr_err("Unable to allocate struct tcm_loop_hba\n");
+ tl_hba = kzalloc(sizeof(*tl_hba), GFP_KERNEL);
+ if (!tl_hba)
return ERR_PTR(-ENOMEM);
- }
+
/*
* Determine the emulated Protocol Identifier and Target Port Name
* based on the incoming configfs directory name.
@@ -1177,8 +1014,8 @@ struct se_wwn *tcm_loop_make_scsi_hba(
}
ptr = strstr(name, "iqn.");
if (!ptr) {
- pr_err("Unable to locate prefix for emulated Target "
- "Port: %s\n", name);
+ pr_err("Unable to locate prefix for emulated Target Port: %s\n",
+ name);
ret = -EINVAL;
goto out;
}
@@ -1186,9 +1023,8 @@ struct se_wwn *tcm_loop_make_scsi_hba(
check_len:
if (strlen(name) >= TL_WWN_ADDR_LEN) {
- pr_err("Emulated NAA %s Address: %s, exceeds"
- " max: %d\n", name, tcm_loop_dump_proto_id(tl_hba),
- TL_WWN_ADDR_LEN);
+ pr_err("Emulated NAA %s Address: %s, exceeds max: %d\n",
+ name, tcm_loop_dump_proto_id(tl_hba), TL_WWN_ADDR_LEN);
ret = -EINVAL;
goto out;
}
@@ -1201,29 +1037,27 @@ check_len:
*/
ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt);
if (ret)
- goto out;
+ return ERR_PTR(ret);
sh = tl_hba->sh;
tcm_loop_hba_no_cnt++;
- pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target"
- " %s Address: %s at Linux/SCSI Host ID: %d\n",
- tcm_loop_dump_proto_id(tl_hba), name, sh->host_no);
-
+ pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target %s Address: %s at Linux/SCSI Host ID: %d\n",
+ tcm_loop_dump_proto_id(tl_hba), name, sh->host_no);
return &tl_hba->tl_hba_wwn;
out:
kfree(tl_hba);
return ERR_PTR(ret);
}
-void tcm_loop_drop_scsi_hba(
+static void tcm_loop_drop_scsi_hba(
struct se_wwn *wwn)
{
struct tcm_loop_hba *tl_hba = container_of(wwn,
struct tcm_loop_hba, tl_hba_wwn);
- pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target"
- " SAS Address: %s at Linux/SCSI Host ID: %d\n",
- tl_hba->tl_wwn_address, tl_hba->sh->host_no);
+ pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target %s Address: %s at Linux/SCSI Host ID: %d\n",
+ tcm_loop_dump_proto_id(tl_hba), tl_hba->tl_wwn_address,
+ tl_hba->sh->host_no);
/*
* Call device_unregister() on the original tl_hba->dev.
* tcm_loop_fabric_scsi.c:tcm_loop_release_adapter() will
@@ -1233,166 +1067,67 @@ void tcm_loop_drop_scsi_hba(
}
/* Start items for tcm_loop_cit */
-static ssize_t tcm_loop_wwn_show_attr_version(
- struct target_fabric_configfs *tf,
- char *page)
+static ssize_t tcm_loop_wwn_version_show(struct config_item *item, char *page)
{
return sprintf(page, "TCM Loopback Fabric module %s\n", TCM_LOOP_VERSION);
}
-TF_WWN_ATTR_RO(tcm_loop, version);
+CONFIGFS_ATTR_RO(tcm_loop_wwn_, version);
static struct configfs_attribute *tcm_loop_wwn_attrs[] = {
- &tcm_loop_wwn_version.attr,
+ &tcm_loop_wwn_attr_version,
NULL,
};
/* End items for tcm_loop_cit */
-static int tcm_loop_register_configfs(void)
-{
- struct target_fabric_configfs *fabric;
- int ret;
- /*
- * Set the TCM Loop HBA counter to zero
- */
- tcm_loop_hba_no_cnt = 0;
- /*
- * Register the top level struct config_item_type with TCM core
- */
- fabric = target_fabric_configfs_init(THIS_MODULE, "loopback");
- if (IS_ERR(fabric)) {
- pr_err("tcm_loop_register_configfs() failed!\n");
- return PTR_ERR(fabric);
- }
- /*
- * Setup the fabric API of function pointers used by target_core_mod
- */
- fabric->tf_ops.get_fabric_name = &tcm_loop_get_fabric_name;
- fabric->tf_ops.get_fabric_proto_ident = &tcm_loop_get_fabric_proto_ident;
- fabric->tf_ops.tpg_get_wwn = &tcm_loop_get_endpoint_wwn;
- fabric->tf_ops.tpg_get_tag = &tcm_loop_get_tag;
- fabric->tf_ops.tpg_get_default_depth = &tcm_loop_get_default_depth;
- fabric->tf_ops.tpg_get_pr_transport_id = &tcm_loop_get_pr_transport_id;
- fabric->tf_ops.tpg_get_pr_transport_id_len =
- &tcm_loop_get_pr_transport_id_len;
- fabric->tf_ops.tpg_parse_pr_out_transport_id =
- &tcm_loop_parse_pr_out_transport_id;
- fabric->tf_ops.tpg_check_demo_mode = &tcm_loop_check_demo_mode;
- fabric->tf_ops.tpg_check_demo_mode_cache =
- &tcm_loop_check_demo_mode_cache;
- fabric->tf_ops.tpg_check_demo_mode_write_protect =
- &tcm_loop_check_demo_mode_write_protect;
- fabric->tf_ops.tpg_check_prod_mode_write_protect =
- &tcm_loop_check_prod_mode_write_protect;
- /*
- * The TCM loopback fabric module runs in demo-mode to a local
- * virtual SCSI device, so fabric dependent initator ACLs are
- * not required.
- */
- fabric->tf_ops.tpg_alloc_fabric_acl = &tcm_loop_tpg_alloc_fabric_acl;
- fabric->tf_ops.tpg_release_fabric_acl =
- &tcm_loop_tpg_release_fabric_acl;
- fabric->tf_ops.tpg_get_inst_index = &tcm_loop_get_inst_index;
- /*
- * Used for setting up remaining TCM resources in process context
- */
- fabric->tf_ops.check_stop_free = &tcm_loop_check_stop_free;
- fabric->tf_ops.release_cmd = &tcm_loop_release_cmd;
- fabric->tf_ops.shutdown_session = &tcm_loop_shutdown_session;
- fabric->tf_ops.close_session = &tcm_loop_close_session;
- fabric->tf_ops.sess_get_index = &tcm_loop_sess_get_index;
- fabric->tf_ops.sess_get_initiator_sid = NULL;
- fabric->tf_ops.write_pending = &tcm_loop_write_pending;
- fabric->tf_ops.write_pending_status = &tcm_loop_write_pending_status;
- /*
- * Not used for TCM loopback
- */
- fabric->tf_ops.set_default_node_attributes =
- &tcm_loop_set_default_node_attributes;
- fabric->tf_ops.get_task_tag = &tcm_loop_get_task_tag;
- fabric->tf_ops.get_cmd_state = &tcm_loop_get_cmd_state;
- fabric->tf_ops.queue_data_in = &tcm_loop_queue_data_in;
- fabric->tf_ops.queue_status = &tcm_loop_queue_status;
- fabric->tf_ops.queue_tm_rsp = &tcm_loop_queue_tm_rsp;
-
- /*
- * Setup function pointers for generic logic in target_core_fabric_configfs.c
- */
- fabric->tf_ops.fabric_make_wwn = &tcm_loop_make_scsi_hba;
- fabric->tf_ops.fabric_drop_wwn = &tcm_loop_drop_scsi_hba;
- fabric->tf_ops.fabric_make_tpg = &tcm_loop_make_naa_tpg;
- fabric->tf_ops.fabric_drop_tpg = &tcm_loop_drop_naa_tpg;
- /*
- * fabric_post_link() and fabric_pre_unlink() are used for
- * registration and release of TCM Loop Virtual SCSI LUNs.
- */
- fabric->tf_ops.fabric_post_link = &tcm_loop_port_link;
- fabric->tf_ops.fabric_pre_unlink = &tcm_loop_port_unlink;
- fabric->tf_ops.fabric_make_np = NULL;
- fabric->tf_ops.fabric_drop_np = NULL;
- /*
- * Setup default attribute lists for various fabric->tf_cit_tmpl
- */
- TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_loop_wwn_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_loop_tpg_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
- /*
- * Once fabric->tf_ops has been setup, now register the fabric for
- * use within TCM
- */
- ret = target_fabric_configfs_register(fabric);
- if (ret < 0) {
- pr_err("target_fabric_configfs_register() for"
- " TCM_Loop failed!\n");
- target_fabric_configfs_free(fabric);
- return -1;
- }
- /*
- * Setup our local pointer to *fabric.
- */
- tcm_loop_fabric_configfs = fabric;
- pr_debug("TCM_LOOP[0] - Set fabric ->"
- " tcm_loop_fabric_configfs\n");
- return 0;
-}
-
-static void tcm_loop_deregister_configfs(void)
-{
- if (!tcm_loop_fabric_configfs)
- return;
-
- target_fabric_configfs_deregister(tcm_loop_fabric_configfs);
- tcm_loop_fabric_configfs = NULL;
- pr_debug("TCM_LOOP[0] - Cleared"
- " tcm_loop_fabric_configfs\n");
-}
+static const struct target_core_fabric_ops loop_ops = {
+ .module = THIS_MODULE,
+ .fabric_name = "loopback",
+ .tpg_get_wwn = tcm_loop_get_endpoint_wwn,
+ .tpg_get_tag = tcm_loop_get_tag,
+ .tpg_check_demo_mode = tcm_loop_check_demo_mode,
+ .tpg_check_prot_fabric_only = tcm_loop_check_prot_fabric_only,
+ .check_stop_free = tcm_loop_check_stop_free,
+ .release_cmd = tcm_loop_release_cmd,
+ .sess_get_index = tcm_loop_sess_get_index,
+ .write_pending = tcm_loop_write_pending,
+ .get_cmd_state = tcm_loop_get_cmd_state,
+ .queue_data_in = tcm_loop_queue_data_in,
+ .queue_status = tcm_loop_queue_status,
+ .queue_tm_rsp = tcm_loop_queue_tm_rsp,
+ .aborted_task = tcm_loop_aborted_task,
+ .fabric_make_wwn = tcm_loop_make_scsi_hba,
+ .fabric_drop_wwn = tcm_loop_drop_scsi_hba,
+ .fabric_make_tpg = tcm_loop_make_naa_tpg,
+ .fabric_drop_tpg = tcm_loop_drop_naa_tpg,
+ .fabric_post_link = tcm_loop_port_link,
+ .fabric_pre_unlink = tcm_loop_port_unlink,
+ .tfc_wwn_attrs = tcm_loop_wwn_attrs,
+ .tfc_tpg_base_attrs = tcm_loop_tpg_attrs,
+ .tfc_tpg_attrib_attrs = tcm_loop_tpg_attrib_attrs,
+ .default_submit_type = TARGET_QUEUE_SUBMIT,
+ .direct_submit_supp = 0,
+};
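The const ops table replaces the old runtime registration dance: instead of allocating a target_fabric_configfs, filling tf_ops field by field and freeing it on error, the fabric declares one static template and hands it to the core. A minimal sketch of the model with a hypothetical demo fabric (mandatory callbacks elided):

	static const struct target_core_fabric_ops demo_ops = {
		.module      = THIS_MODULE,
		.fabric_name = "demo",
		/* tpg_get_wwn, tpg_get_tag, queue_*, etc. go here */
	};

	static int __init demo_init(void)
	{
		return target_register_template(&demo_ops);
	}

	static void __exit demo_exit(void)
	{
		target_unregister_template(&demo_ops);
	}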
static int __init tcm_loop_fabric_init(void)
{
int ret = -ENOMEM;
- tcm_loop_workqueue = alloc_workqueue("tcm_loop", 0, 0);
- if (!tcm_loop_workqueue)
- goto out;
-
tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache",
sizeof(struct tcm_loop_cmd),
__alignof__(struct tcm_loop_cmd),
0, NULL);
if (!tcm_loop_cmd_cache) {
- pr_debug("kmem_cache_create() for"
- " tcm_loop_cmd_cache failed\n");
- goto out_destroy_workqueue;
+ pr_debug("kmem_cache_create() for tcm_loop_cmd_cache failed\n");
+ goto out;
}
ret = tcm_loop_alloc_core_bus();
if (ret)
goto out_destroy_cache;
- ret = tcm_loop_register_configfs();
+ ret = target_register_template(&loop_ops);
if (ret)
goto out_release_core_bus;
@@ -1402,18 +1137,15 @@ out_release_core_bus:
tcm_loop_release_core_bus();
out_destroy_cache:
kmem_cache_destroy(tcm_loop_cmd_cache);
-out_destroy_workqueue:
- destroy_workqueue(tcm_loop_workqueue);
out:
return ret;
}
static void __exit tcm_loop_fabric_exit(void)
{
- tcm_loop_deregister_configfs();
+ target_unregister_template(&loop_ops);
tcm_loop_release_core_bus();
kmem_cache_destroy(tcm_loop_cmd_cache);
- destroy_workqueue(tcm_loop_workqueue);
}
MODULE_DESCRIPTION("TCM loopback virtual Linux/SCSI fabric module");
diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h
index dd7a84ee78e1..437663b3905c 100644
--- a/drivers/target/loopback/tcm_loop.h
+++ b/drivers/target/loopback/tcm_loop.h
@@ -1,50 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/types.h>
+#include <linux/device.h>
+#include <target/target_core_base.h> /* struct se_cmd */
+
#define TCM_LOOP_VERSION "v2.1-rc2"
#define TL_WWN_ADDR_LEN 256
#define TL_TPGS_PER_HBA 32
-/*
- * Used in tcm_loop_driver_probe() for struct Scsi_Host->max_cmd_len
- */
-#define TL_SCSI_MAX_CMD_LEN 32
-
struct tcm_loop_cmd {
/* State of Linux/SCSI CDB+Data descriptor */
u32 sc_cmd_state;
+ /* Tagged command queueing */
+ u32 sc_cmd_tag;
/* Pointer to the CDB+Data descriptor from Linux/SCSI subsystem */
struct scsi_cmnd *sc;
/* The TCM I/O descriptor that is accessed via container_of() */
struct se_cmd tl_se_cmd;
- struct work_struct work;
+ struct completion tmr_done;
/* Sense buffer that will be mapped into outgoing status */
unsigned char tl_sense_buf[TRANSPORT_SENSE_BUFFER];
};
-struct tcm_loop_tmr {
- atomic_t tmr_complete;
- wait_queue_head_t tl_tmr_wait;
-};
-
struct tcm_loop_nexus {
- int it_nexus_active;
- /*
- * Pointer to Linux/SCSI HBA from linux/include/scsi_host.h
- */
- struct scsi_host *sh;
/*
* Pointer to TCM session for I_T Nexus
*/
struct se_session *se_sess;
};
-struct tcm_loop_nacl {
- struct se_node_acl se_node_acl;
-};
+#define TCM_TRANSPORT_ONLINE 0
+#define TCM_TRANSPORT_OFFLINE 1
struct tcm_loop_tpg {
unsigned short tl_tpgt;
+ unsigned short tl_transport_status;
+ enum target_prot_type tl_fabric_prot_type;
atomic_t tl_tpg_port_count;
struct se_portal_group tl_se_tpg;
struct tcm_loop_hba *tl_hba;
+ struct tcm_loop_nexus *tl_nexus;
};
struct tcm_loop_hba {
@@ -53,7 +47,6 @@ struct tcm_loop_hba {
struct se_hba_s *se_hba;
struct se_lun *tl_hba_lun;
struct se_port *tl_hba_lun_sep;
- struct tcm_loop_nexus *tl_nexus;
struct device dev;
struct Scsi_Host *sh;
struct tcm_loop_tpg tl_hba_tpgs[TL_TPGS_PER_HBA];
diff --git a/drivers/target/sbp/Kconfig b/drivers/target/sbp/Kconfig
index 1614bc710d4e..53a1c75f5660 100644
--- a/drivers/target/sbp/Kconfig
+++ b/drivers/target/sbp/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
config SBP_TARGET
tristate "FireWire SBP-2 fabric module"
depends on FIREWIRE
diff --git a/drivers/target/sbp/Makefile b/drivers/target/sbp/Makefile
index 27747ad054c4..766f23690013 100644
--- a/drivers/target/sbp/Makefile
+++ b/drivers/target/sbp/Makefile
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_SBP_TARGET) += sbp_target.o
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index e51b09a04d52..b8457477cee9 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -1,21 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* SBP2 target driver (SCSI over IEEE1394 in target mode)
*
* Copyright (C) 2011 Chris Boot <bootc@bootc.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#define KMSG_COMPONENT "sbp_target"
@@ -28,23 +15,18 @@
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
+#include <linux/delay.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
-#include <scsi/scsi.h>
+#include <scsi/scsi_proto.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
-#include <target/target_core_fabric_configfs.h>
-#include <target/target_core_configfs.h>
-#include <target/configfs_macros.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "sbp_target.h"
-/* Local pointer to allocated TCM configfs fabric module */
-static struct target_fabric_configfs *sbp_fabric_configfs;
-
/* FireWire address region for management and command block address handlers */
static const struct fw_address_region sbp_register_region = {
.start = CSR_REGISTER_BASE + 0x10000,
@@ -110,13 +92,13 @@ static struct sbp_session *sbp_session_find_by_guid(
}
static struct sbp_login_descriptor *sbp_login_find_by_lun(
- struct sbp_session *session, struct se_lun *lun)
+ struct sbp_session *session, u32 unpacked_lun)
{
struct sbp_login_descriptor *login, *found = NULL;
spin_lock_bh(&session->lock);
list_for_each_entry(login, &session->login_list, link) {
- if (login->lun == lun)
+ if (login->login_lun == unpacked_lun)
found = login;
}
spin_unlock_bh(&session->lock);
@@ -126,7 +108,7 @@ static struct sbp_login_descriptor *sbp_login_find_by_lun(
static int sbp_login_count_all_by_lun(
struct sbp_tpg *tpg,
- struct se_lun *lun,
+ u32 unpacked_lun,
int exclusive)
{
struct se_session *se_sess;
@@ -140,7 +122,7 @@ static int sbp_login_count_all_by_lun(
spin_lock_bh(&sess->lock);
list_for_each_entry(login, &sess->login_list, link) {
- if (login->lun != lun)
+ if (login->login_lun != unpacked_lun)
continue;
if (!exclusive || login->exclusive)
@@ -176,23 +158,23 @@ static struct sbp_login_descriptor *sbp_login_find_by_id(
return found;
}
-static struct se_lun *sbp_get_lun_from_tpg(struct sbp_tpg *tpg, int lun)
+static u32 sbp_get_lun_from_tpg(struct sbp_tpg *tpg, u32 login_lun, int *err)
{
struct se_portal_group *se_tpg = &tpg->se_tpg;
struct se_lun *se_lun;
- if (lun >= TRANSPORT_MAX_LUNS_PER_TPG)
- return ERR_PTR(-EINVAL);
-
- spin_lock(&se_tpg->tpg_lun_lock);
- se_lun = se_tpg->tpg_lun_list[lun];
-
- if (se_lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
- se_lun = ERR_PTR(-ENODEV);
-
- spin_unlock(&se_tpg->tpg_lun_lock);
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(se_lun, &se_tpg->tpg_lun_hlist, link) {
+ if (se_lun->unpacked_lun == login_lun) {
+ rcu_read_unlock();
+ *err = 0;
+ return login_lun;
+ }
+ }
+ rcu_read_unlock();
- return se_lun;
+ *err = -ENODEV;
+ return login_lun;
}
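The RCU conversion above trades the tpg_lun_lock spinlock for lockless readers: lookups walk tpg_lun_hlist inside an RCU read-side critical section while updaters publish and retire entries with the _rcu list helpers and defer freeing past a grace period. A generic sketch of both sides with a hypothetical object type:

	/* reader: no lock taken */
	rcu_read_lock();
	hlist_for_each_entry_rcu(obj, &head, link) {
		if (obj->key == wanted)
			break;
	}
	rcu_read_unlock();

	/* updater: publish with _rcu helpers, free after a grace period */
	hlist_add_head_rcu(&new_obj->link, &head);
	hlist_del_rcu(&old_obj->link);
	kfree_rcu(old_obj, rcu);	/* assumes a struct rcu_head member */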
static struct sbp_session *sbp_session_create(
@@ -202,45 +184,29 @@ static struct sbp_session *sbp_session_create(
struct sbp_session *sess;
int ret;
char guid_str[17];
- struct se_node_acl *se_nacl;
+
+ snprintf(guid_str, sizeof(guid_str), "%016llx", guid);
sess = kmalloc(sizeof(*sess), GFP_KERNEL);
- if (!sess) {
- pr_err("failed to allocate session descriptor\n");
+ if (!sess)
return ERR_PTR(-ENOMEM);
- }
- sess->se_sess = transport_init_session();
+ spin_lock_init(&sess->lock);
+ INIT_LIST_HEAD(&sess->login_list);
+ INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work);
+ sess->guid = guid;
+
+ sess->se_sess = target_setup_session(&tpg->se_tpg, 128,
+ sizeof(struct sbp_target_request),
+ TARGET_PROT_NORMAL, guid_str,
+ sess, NULL);
if (IS_ERR(sess->se_sess)) {
pr_err("failed to init se_session\n");
-
ret = PTR_ERR(sess->se_sess);
kfree(sess);
return ERR_PTR(ret);
}
- snprintf(guid_str, sizeof(guid_str), "%016llx", guid);
-
- se_nacl = core_tpg_check_initiator_node_acl(&tpg->se_tpg, guid_str);
- if (!se_nacl) {
- pr_warn("Node ACL not found for %s\n", guid_str);
-
- transport_free_session(sess->se_sess);
- kfree(sess);
-
- return ERR_PTR(-EPERM);
- }
-
- sess->se_sess->se_node_acl = se_nacl;
-
- spin_lock_init(&sess->lock);
- INIT_LIST_HEAD(&sess->login_list);
- INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work);
-
- sess->guid = guid;
-
- transport_register_session(&tpg->se_tpg, se_nacl, sess->se_sess, sess);
-
return sess;
}
@@ -256,8 +222,7 @@ static void sbp_session_release(struct sbp_session *sess, bool cancel_work)
if (cancel_work)
cancel_delayed_work_sync(&sess->maint_work);
- transport_deregister_session_configfs(sess->se_sess);
- transport_deregister_session(sess->se_sess);
+ target_remove_session(sess->se_sess);
if (sess->card)
fw_card_put(sess->card);
@@ -296,17 +261,16 @@ static void sbp_management_request_login(
{
struct sbp_tport *tport = agent->tport;
struct sbp_tpg *tpg = tport->tpg;
- struct se_lun *se_lun;
- int ret;
- u64 guid;
struct sbp_session *sess;
struct sbp_login_descriptor *login;
struct sbp_login_response_block *response;
- int login_response_len;
+ u64 guid;
+ u32 unpacked_lun;
+ int login_response_len, ret;
- se_lun = sbp_get_lun_from_tpg(tpg,
- LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)));
- if (IS_ERR(se_lun)) {
+ unpacked_lun = sbp_get_lun_from_tpg(tpg,
+ LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)), &ret);
+ if (ret) {
pr_notice("login to unknown LUN: %d\n",
LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)));
@@ -327,11 +291,11 @@ static void sbp_management_request_login(
}
pr_notice("mgt_agent LOGIN to LUN %d from %016llx\n",
- se_lun->unpacked_lun, guid);
+ unpacked_lun, guid);
sess = sbp_session_find_by_guid(tpg, guid);
if (sess) {
- login = sbp_login_find_by_lun(sess, se_lun);
+ login = sbp_login_find_by_lun(sess, unpacked_lun);
if (login) {
pr_notice("initiator already logged-in\n");
@@ -359,7 +323,7 @@ static void sbp_management_request_login(
* reject with access_denied if any logins present
*/
if (LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc)) &&
- sbp_login_count_all_by_lun(tpg, se_lun, 0)) {
+ sbp_login_count_all_by_lun(tpg, unpacked_lun, 0)) {
pr_warn("refusing exclusive login with other active logins\n");
req->status.status = cpu_to_be32(
@@ -372,7 +336,7 @@ static void sbp_management_request_login(
* check exclusive bit in any existing login descriptor
* reject with access_denied if any exclusive logins present
*/
- if (sbp_login_count_all_by_lun(tpg, se_lun, 1)) {
+ if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 1)) {
pr_warn("refusing login while another exclusive login present\n");
req->status.status = cpu_to_be32(
@@ -385,7 +349,7 @@ static void sbp_management_request_login(
* check we haven't exceeded the number of allowed logins
* reject with resources_unavailable if we have
*/
- if (sbp_login_count_all_by_lun(tpg, se_lun, 0) >=
+ if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 0) >=
tport->max_logins_per_lun) {
pr_warn("max number of logins reached\n");
@@ -441,7 +405,7 @@ static void sbp_management_request_login(
}
login->sess = sess;
- login->lun = se_lun;
+ login->login_lun = unpacked_lun;
login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
login->login_id = atomic_inc_return(&login_id);
@@ -603,7 +567,7 @@ static void sbp_management_request_logout(
}
pr_info("mgt_agent LOGOUT from LUN %d session %d\n",
- login->lun->unpacked_lun, login->login_id);
+ login->login_lun, login->login_id);
if (req->node_addr != login->sess->node_id) {
pr_warn("logout from different node ID\n");
@@ -766,7 +730,7 @@ static int tgt_agent_rw_orb_pointer(struct fw_card *card, int tcode, void *data,
pr_debug("tgt_agent ORB_POINTER write: 0x%llx\n",
agent->orb_pointer);
- queue_work(system_unbound_wq, &agent->work);
+ queue_work(system_dfl_wq, &agent->work);
return RCODE_COMPLETE;
@@ -800,7 +764,7 @@ static int tgt_agent_rw_doorbell(struct fw_card *card, int tcode, void *data,
pr_debug("tgt_agent DOORBELL\n");
- queue_work(system_unbound_wq, &agent->work);
+ queue_work(system_dfl_wq, &agent->work);
return RCODE_COMPLETE;
@@ -915,7 +879,6 @@ static void tgt_agent_process_work(struct work_struct *work)
STATUS_BLOCK_SBP_STATUS(
SBP_STATUS_REQ_TYPE_NOTSUPP));
sbp_send_status(req);
- sbp_free_request(req);
return;
case 3: /* Dummy ORB */
req->status.status |= cpu_to_be32(
@@ -926,7 +889,6 @@ static void tgt_agent_process_work(struct work_struct *work)
STATUS_BLOCK_SBP_STATUS(
SBP_STATUS_DUMMY_ORB_COMPLETE));
sbp_send_status(req);
- sbp_free_request(req);
return;
default:
BUG();
@@ -945,6 +907,26 @@ static inline bool tgt_agent_check_active(struct sbp_target_agent *agent)
return active;
}
+static struct sbp_target_request *sbp_mgt_get_req(struct sbp_session *sess,
+ struct fw_card *card, u64 next_orb)
+{
+ struct se_session *se_sess = sess->se_sess;
+ struct sbp_target_request *req;
+ int tag, cpu;
+
+ tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
+ if (tag < 0)
+ return ERR_PTR(-ENOMEM);
+
+ req = &((struct sbp_target_request *)se_sess->sess_cmd_map)[tag];
+ memset(req, 0, sizeof(*req));
+ req->se_cmd.map_tag = tag;
+ req->se_cmd.map_cpu = cpu;
+ req->se_cmd.tag = next_orb;
+
+ return req;
+}
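sbp_mgt_get_req() draws from the per-session tag pool that target_setup_session() preallocates when given a non-zero tag_num/tag_size (128 requests of sizeof(struct sbp_target_request) in this patch): sess_cmd_map is one flat array, sbitmap_queue_get() hands out a free index, and target_free_tag() returns it. A lifecycle sketch:

	req = sbp_mgt_get_req(sess, sess->card, next_orb);	/* take a tag */
	if (IS_ERR(req))
		return;		/* pool exhausted, no allocation to undo */

	/* ... fill in and submit req->se_cmd ... */

	/* on final release, see sbp_free_request() later in this patch: */
	target_free_tag(se_sess, &req->se_cmd);		/* return the tag */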
+
static void tgt_agent_fetch_work(struct work_struct *work)
{
struct sbp_target_agent *agent =
@@ -956,8 +938,8 @@ static void tgt_agent_fetch_work(struct work_struct *work)
u64 next_orb = agent->orb_pointer;
while (next_orb && tgt_agent_check_active(agent)) {
- req = kzalloc(sizeof(*req), GFP_KERNEL);
- if (!req) {
+ req = sbp_mgt_get_req(sess, sess->card, next_orb);
+ if (IS_ERR(req)) {
spin_lock_bh(&agent->lock);
agent->state = AGENT_STATE_DEAD;
spin_unlock_bh(&agent->lock);
@@ -992,7 +974,6 @@ static void tgt_agent_fetch_work(struct work_struct *work)
spin_unlock_bh(&agent->lock);
sbp_send_status(req);
- sbp_free_request(req);
return;
}
@@ -1009,7 +990,7 @@ static void tgt_agent_fetch_work(struct work_struct *work)
if (tgt_agent_check_active(agent) && !doorbell) {
INIT_WORK(&req->work, tgt_agent_process_work);
- queue_work(system_unbound_wq, &req->work);
+ queue_work(system_dfl_wq, &req->work);
} else {
/* don't process this request, just check next_ORB */
sbp_free_request(req);
@@ -1025,7 +1006,7 @@ static void tgt_agent_fetch_work(struct work_struct *work)
agent->state = AGENT_STATE_SUSPENDED;
spin_unlock_bh(&agent->lock);
- };
+ }
}
static struct sbp_target_agent *sbp_target_agent_register(
@@ -1229,17 +1210,17 @@ static void sbp_handle_command(struct sbp_target_request *req)
goto err;
}
- unpacked_lun = req->login->lun->unpacked_lun;
+ unpacked_lun = req->login->login_lun;
sbp_calc_data_length_direction(req, &data_length, &data_dir);
pr_debug("sbp_handle_command ORB:0x%llx unpacked_lun:%d data_len:%d data_dir:%d\n",
req->orb_pointer, unpacked_lun, data_length, data_dir);
- if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
- req->sense_buf, unpacked_lun, data_length,
- MSG_SIMPLE_TAG, data_dir, 0))
- goto err;
-
+ /* only used for printk until we do TMRs */
+ req->se_cmd.tag = req->orb_pointer;
+ target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
+ req->sense_buf, unpacked_lun, data_length,
+ TCM_SIMPLE_TAG, data_dir, TARGET_SCF_ACK_KREF);
return;
err:
@@ -1249,7 +1230,6 @@ err:
STATUS_BLOCK_LEN(1) |
STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
sbp_send_status(req);
- sbp_free_request(req);
}
/*
@@ -1281,7 +1261,6 @@ static int sbp_rw_data(struct sbp_target_request *req)
pg_size = CMDBLK_ORB_PG_SIZE(be32_to_cpu(req->orb.misc));
if (pg_size) {
pr_err("sbp_run_transaction: page size ignored\n");
- pg_size = 0x100 << pg_size;
}
spin_lock_bh(&sess->lock);
@@ -1348,22 +1327,29 @@ static int sbp_rw_data(struct sbp_target_request *req)
static int sbp_send_status(struct sbp_target_request *req)
{
- int ret, length;
+ int rc, ret = 0, length;
struct sbp_login_descriptor *login = req->login;
length = (((be32_to_cpu(req->status.status) >> 24) & 0x07) + 1) * 4;
- ret = sbp_run_request_transaction(req, TCODE_WRITE_BLOCK_REQUEST,
+ rc = sbp_run_request_transaction(req, TCODE_WRITE_BLOCK_REQUEST,
login->status_fifo_addr, &req->status, length);
- if (ret != RCODE_COMPLETE) {
- pr_debug("sbp_send_status: write failed: 0x%x\n", ret);
- return -EIO;
+ if (rc != RCODE_COMPLETE) {
+ pr_debug("sbp_send_status: write failed: 0x%x\n", rc);
+ ret = -EIO;
+ goto put_ref;
}
pr_debug("sbp_send_status: status write complete for ORB: 0x%llx\n",
req->orb_pointer);
-
- return 0;
+ /*
+ * Drop the extra ACK_KREF reference taken by target_submit_cmd()
+ * ahead of sbp_check_stop_free() -> transport_generic_free_cmd()
+ * final se_cmd->cmd_kref put.
+ */
+put_ref:
+ target_put_sess_cmd(&req->se_cmd);
+ return ret;
}
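The put_ref label balances the extra reference that TARGET_SCF_ACK_KREF asks target_submit_cmd() to take on se_cmd->cmd_kref: the fabric holds that reference until the status block has been written back, then drops it so the final free can proceed. A sketch of the flow:

	/* submission: ACK_KREF takes an extra cmd_kref for the fabric */
	target_submit_cmd(&req->se_cmd, se_sess, cdb, sense, lun, len,
			  TCM_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF);

	/* completion: status written to the initiator, reference dropped */
	target_put_sess_cmd(&req->se_cmd);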
static void sbp_sense_mangle(struct sbp_target_request *req)
@@ -1403,8 +1389,8 @@ static void sbp_sense_mangle(struct sbp_target_request *req)
(sense[0] & 0x80) | /* valid */
((sense[2] & 0xe0) >> 1) | /* mark, eom, ili */
(sense[2] & 0x0f); /* sense_key */
- status[2] = se_cmd->scsi_asc; /* sense_code */
- status[3] = se_cmd->scsi_ascq; /* sense_qualifier */
+ status[2] = 0; /* XXX sense_code */
+ status[3] = 0; /* XXX sense_qualifier */
/* information */
status[4] = sense[3];
@@ -1452,9 +1438,13 @@ static int sbp_send_sense(struct sbp_target_request *req)
static void sbp_free_request(struct sbp_target_request *req)
{
+ struct se_cmd *se_cmd = &req->se_cmd;
+ struct se_session *se_sess = se_cmd->se_sess;
+
kfree(req->pg_tbl);
kfree(req->cmd_buf);
- kfree(req);
+
+ target_free_tag(se_sess, se_cmd);
}
static void sbp_mgt_agent_process(struct work_struct *work)
@@ -1614,7 +1604,6 @@ static void sbp_mgt_agent_rw(struct fw_card *card,
rcode = RCODE_CONFLICT_ERROR;
goto out;
}
-
req = kzalloc(sizeof(*req), GFP_ATOMIC);
if (!req) {
rcode = RCODE_CONFLICT_ERROR;
@@ -1629,7 +1618,7 @@ static void sbp_mgt_agent_rw(struct fw_card *card,
agent->orb_offset = sbp2_pointer_to_addr(ptr);
agent->request = req;
- queue_work(system_unbound_wq, &agent->work);
+ queue_work(system_dfl_wq, &agent->work);
rcode = RCODE_COMPLETE;
} else if (tcode == TCODE_READ_BLOCK_REQUEST) {
addr_to_sbp2_pointer(agent->orb_offset, ptr);
@@ -1684,16 +1673,6 @@ static int sbp_check_true(struct se_portal_group *se_tpg)
return 1;
}
-static int sbp_check_false(struct se_portal_group *se_tpg)
-{
- return 0;
-}
-
-static char *sbp_get_fabric_name(void)
-{
- return "sbp";
-}
-
static char *sbp_get_fabric_wwn(struct se_portal_group *se_tpg)
{
struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
@@ -1708,38 +1687,6 @@ static u16 sbp_get_tag(struct se_portal_group *se_tpg)
return tpg->tport_tpgt;
}
-static u32 sbp_get_default_depth(struct se_portal_group *se_tpg)
-{
- return 1;
-}
-
-static struct se_node_acl *sbp_alloc_fabric_acl(struct se_portal_group *se_tpg)
-{
- struct sbp_nacl *nacl;
-
- nacl = kzalloc(sizeof(struct sbp_nacl), GFP_KERNEL);
- if (!nacl) {
- pr_err("Unable to allocate struct sbp_nacl\n");
- return NULL;
- }
-
- return &nacl->se_node_acl;
-}
-
-static void sbp_release_fabric_acl(
- struct se_portal_group *se_tpg,
- struct se_node_acl *se_nacl)
-{
- struct sbp_nacl *nacl =
- container_of(se_nacl, struct sbp_nacl, se_node_acl);
- kfree(nacl);
-}
-
-static u32 sbp_tpg_get_inst_index(struct se_portal_group *se_tpg)
-{
- return 1;
-}
-
static void sbp_release_cmd(struct se_cmd *se_cmd)
{
struct sbp_target_request *req = container_of(se_cmd,
@@ -1748,21 +1695,6 @@ static void sbp_release_cmd(struct se_cmd *se_cmd)
sbp_free_request(req);
}
-static int sbp_shutdown_session(struct se_session *se_sess)
-{
- return 0;
-}
-
-static void sbp_close_session(struct se_session *se_sess)
-{
- return;
-}
-
-static u32 sbp_sess_get_index(struct se_session *se_sess)
-{
- return 0;
-}
-
static int sbp_write_pending(struct se_cmd *se_cmd)
{
struct sbp_target_request *req = container_of(se_cmd,
@@ -1786,30 +1718,6 @@ static int sbp_write_pending(struct se_cmd *se_cmd)
return 0;
}
-static int sbp_write_pending_status(struct se_cmd *se_cmd)
-{
- return 0;
-}
-
-static void sbp_set_default_node_attrs(struct se_node_acl *nacl)
-{
- return;
-}
-
-static u32 sbp_get_task_tag(struct se_cmd *se_cmd)
-{
- struct sbp_target_request *req = container_of(se_cmd,
- struct sbp_target_request, se_cmd);
-
- /* only used for printk until we do TMRs */
- return (u32)req->orb_pointer;
-}
-
-static int sbp_get_cmd_state(struct se_cmd *se_cmd)
-{
- return 0;
-}
-
static int sbp_queue_data_in(struct se_cmd *se_cmd)
{
struct sbp_target_request *req = container_of(se_cmd,
@@ -1846,115 +1754,36 @@ static void sbp_queue_tm_rsp(struct se_cmd *se_cmd)
{
}
-static int sbp_check_stop_free(struct se_cmd *se_cmd)
-{
- struct sbp_target_request *req = container_of(se_cmd,
- struct sbp_target_request, se_cmd);
-
- transport_generic_free_cmd(&req->se_cmd, 0);
- return 1;
-}
-
-/*
- * Handlers for Serial Bus Protocol 2/3 (SBP-2 / SBP-3)
- */
-static u8 sbp_get_fabric_proto_ident(struct se_portal_group *se_tpg)
-{
- /*
- * Return a IEEE 1394 SCSI Protocol identifier for loopback operations
- * This is defined in section 7.5.1 Table 362 in spc4r17
- */
- return SCSI_PROTOCOL_SBP;
-}
-
-static u32 sbp_get_pr_transport_id(
- struct se_portal_group *se_tpg,
- struct se_node_acl *se_nacl,
- struct t10_pr_registration *pr_reg,
- int *format_code,
- unsigned char *buf)
-{
- int ret;
-
- /*
- * Set PROTOCOL IDENTIFIER to 3h for SBP
- */
- buf[0] = SCSI_PROTOCOL_SBP;
- /*
- * From spc4r17, 7.5.4.4 TransportID for initiator ports using SCSI
- * over IEEE 1394
- */
- ret = hex2bin(&buf[8], se_nacl->initiatorname, 8);
- if (ret < 0)
- pr_debug("sbp transport_id: invalid hex string\n");
-
- /*
- * The IEEE 1394 Transport ID is a hardcoded 24-byte length
- */
- return 24;
-}
-
-static u32 sbp_get_pr_transport_id_len(
- struct se_portal_group *se_tpg,
- struct se_node_acl *se_nacl,
- struct t10_pr_registration *pr_reg,
- int *format_code)
+static void sbp_aborted_task(struct se_cmd *se_cmd)
{
- *format_code = 0;
- /*
- * From spc4r17, 7.5.4.4 TransportID for initiator ports using SCSI
- * over IEEE 1394
- *
- * The SBP Transport ID is a hardcoded 24-byte length
- */
- return 24;
+ return;
}
-/*
- * Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above
- * Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations.
- */
-static char *sbp_parse_pr_out_transport_id(
- struct se_portal_group *se_tpg,
- const char *buf,
- u32 *out_tid_len,
- char **port_nexus_ptr)
+static int sbp_check_stop_free(struct se_cmd *se_cmd)
{
- /*
- * Assume the FORMAT CODE 00b from spc4r17, 7.5.4.4 TransportID
- * for initiator ports using SCSI over SBP Serial SCSI Protocol
- *
- * The TransportID for a IEEE 1394 Initiator Port is of fixed size of
- * 24 bytes, and IEEE 1394 does not contain a I_T nexus identifier,
- * so we return the **port_nexus_ptr set to NULL.
- */
- *port_nexus_ptr = NULL;
- *out_tid_len = 24;
+ struct sbp_target_request *req = container_of(se_cmd,
+ struct sbp_target_request, se_cmd);
- return (char *)&buf[8];
+ return transport_generic_free_cmd(&req->se_cmd, 0);
}
static int sbp_count_se_tpg_luns(struct se_portal_group *tpg)
{
- int i, count = 0;
-
- spin_lock(&tpg->tpg_lun_lock);
- for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
- struct se_lun *se_lun = tpg->tpg_lun_list[i];
-
- if (se_lun->lun_status == TRANSPORT_LUN_STATUS_FREE)
- continue;
+ struct se_lun *lun;
+ int count = 0;
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link)
count++;
- }
- spin_unlock(&tpg->tpg_lun_lock);
+ rcu_read_unlock();
return count;
}
static int sbp_update_unit_directory(struct sbp_tport *tport)
{
- int num_luns, num_entries, idx = 0, mgt_agt_addr, ret, i;
+ struct se_lun *lun;
+ int num_luns, num_entries, idx = 0, mgt_agt_addr, ret;
u32 *data;
if (tport->unit_directory.data) {
@@ -2016,28 +1845,23 @@ static int sbp_update_unit_directory(struct sbp_tport *tport)
/* unit unique ID (leaf is just after LUNs) */
data[idx++] = 0x8d000000 | (num_luns + 1);
- spin_lock(&tport->tpg->se_tpg.tpg_lun_lock);
- for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
- struct se_lun *se_lun = tport->tpg->se_tpg.tpg_lun_list[i];
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(lun, &tport->tpg->se_tpg.tpg_lun_hlist, link) {
struct se_device *dev;
int type;
-
- if (se_lun->lun_status == TRANSPORT_LUN_STATUS_FREE)
- continue;
-
- spin_unlock(&tport->tpg->se_tpg.tpg_lun_lock);
-
- dev = se_lun->lun_se_dev;
+ /*
+ * rcu_dereference_raw protected by se_lun->lun_group symlink
+ * reference to se_device->dev_group.
+ */
+ dev = rcu_dereference_raw(lun->lun_se_dev);
type = dev->transport->get_device_type(dev);
/* logical_unit_number */
data[idx++] = 0x14000000 |
((type << 16) & 0x1f0000) |
- (se_lun->unpacked_lun & 0xffff);
-
- spin_lock(&tport->tpg->se_tpg.tpg_lun_lock);
+ (lun->unpacked_lun & 0xffff);
}
- spin_unlock(&tport->tpg->se_tpg.tpg_lun_lock);
+ rcu_read_unlock();
/* unit unique ID leaf */
data[idx++] = 2 << 16;
@@ -2096,48 +1920,13 @@ static ssize_t sbp_format_wwn(char *buf, size_t len, u64 wwn)
return snprintf(buf, len, "%016llx", wwn);
}
-static struct se_node_acl *sbp_make_nodeacl(
- struct se_portal_group *se_tpg,
- struct config_group *group,
- const char *name)
+static int sbp_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
{
- struct se_node_acl *se_nacl, *se_nacl_new;
- struct sbp_nacl *nacl;
u64 guid = 0;
- u32 nexus_depth = 1;
if (sbp_parse_wwn(name, &guid) < 0)
- return ERR_PTR(-EINVAL);
-
- se_nacl_new = sbp_alloc_fabric_acl(se_tpg);
- if (!se_nacl_new)
- return ERR_PTR(-ENOMEM);
-
- /*
- * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
- * when converting a NodeACL from demo mode -> explict
- */
- se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
- name, nexus_depth);
- if (IS_ERR(se_nacl)) {
- sbp_release_fabric_acl(se_tpg, se_nacl_new);
- return se_nacl;
- }
-
- nacl = container_of(se_nacl, struct sbp_nacl, se_node_acl);
- nacl->guid = guid;
- sbp_format_wwn(nacl->iport_name, SBP_NAMELEN, guid);
-
- return se_nacl;
-}
-
-static void sbp_drop_nodeacl(struct se_node_acl *se_acl)
-{
- struct sbp_nacl *nacl =
- container_of(se_acl, struct sbp_nacl, se_node_acl);
-
- core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
- kfree(nacl);
+ return -EINVAL;
+ return 0;
}
static int sbp_post_link_lun(
@@ -2165,10 +1954,8 @@ static void sbp_pre_unlink_lun(
pr_err("unlink LUN: failed to update unit directory\n");
}
-static struct se_portal_group *sbp_make_tpg(
- struct se_wwn *wwn,
- struct config_group *group,
- const char *name)
+static struct se_portal_group *sbp_make_tpg(struct se_wwn *wwn,
+ const char *name)
{
struct sbp_tport *tport =
container_of(wwn, struct sbp_tport, tport_wwn);
@@ -2188,10 +1975,8 @@ static struct se_portal_group *sbp_make_tpg(
}
tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
- if (!tpg) {
- pr_err("Unable to allocate struct sbp_tpg\n");
+ if (!tpg)
return ERR_PTR(-ENOMEM);
- }
tpg->tport = tport;
tpg->tport_tpgt = tpgt;
@@ -2210,9 +1995,7 @@ static struct se_portal_group *sbp_make_tpg(
goto out_free_tpg;
}
- ret = core_tpg_register(&sbp_fabric_configfs->tf_ops, wwn,
- &tpg->se_tpg, (void *)tpg,
- TRANSPORT_TPG_TYPE_NORMAL);
+ ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SBP);
if (ret < 0)
goto out_unreg_mgt_agt;
@@ -2249,10 +2032,8 @@ static struct se_wwn *sbp_make_tport(
return ERR_PTR(-EINVAL);
tport = kzalloc(sizeof(*tport), GFP_KERNEL);
- if (!tport) {
- pr_err("Unable to allocate struct sbp_tport\n");
+ if (!tport)
return ERR_PTR(-ENOMEM);
- }
tport->guid = guid;
sbp_format_wwn(tport->tport_name, SBP_NAMELEN, guid);
@@ -2268,24 +2049,21 @@ static void sbp_drop_tport(struct se_wwn *wwn)
kfree(tport);
}
-static ssize_t sbp_wwn_show_attr_version(
- struct target_fabric_configfs *tf,
- char *page)
+static ssize_t sbp_wwn_version_show(struct config_item *item, char *page)
{
return sprintf(page, "FireWire SBP fabric module %s\n", SBP_VERSION);
}
-TF_WWN_ATTR_RO(sbp, version);
+CONFIGFS_ATTR_RO(sbp_wwn_, version);
static struct configfs_attribute *sbp_wwn_attrs[] = {
- &sbp_wwn_version.attr,
+ &sbp_wwn_attr_version,
NULL,
};
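
CONFIGFS_ATTR_RO() generates both the attribute object (sbp_wwn_attr_version) and its binding to the _show method, replacing the fabric-local TF_WWN_ATTR_RO() wrapper. Roughly, per include/linux/configfs.h, the macro expands to:

	/* Approximate expansion of CONFIGFS_ATTR_RO(sbp_wwn_, version). */
	static struct configfs_attribute sbp_wwn_attr_version = {
		.ca_owner	= THIS_MODULE,
		.ca_name	= "version",
		.ca_mode	= S_IRUGO,
		.show		= sbp_wwn_version_show,
	};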
-static ssize_t sbp_tpg_show_directory_id(
- struct se_portal_group *se_tpg,
- char *page)
+static ssize_t sbp_tpg_directory_id_show(struct config_item *item, char *page)
{
+ struct se_portal_group *se_tpg = to_tpg(item);
struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
struct sbp_tport *tport = tpg->tport;
@@ -2295,11 +2073,10 @@ static ssize_t sbp_tpg_show_directory_id(
return sprintf(page, "%06x\n", tport->directory_id);
}
-static ssize_t sbp_tpg_store_directory_id(
- struct se_portal_group *se_tpg,
- const char *page,
- size_t count)
+static ssize_t sbp_tpg_directory_id_store(struct config_item *item,
+ const char *page, size_t count)
{
+ struct se_portal_group *se_tpg = to_tpg(item);
struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
struct sbp_tport *tport = tpg->tport;
unsigned long val;
@@ -2323,34 +2100,13 @@ static ssize_t sbp_tpg_store_directory_id(
return count;
}
-static ssize_t sbp_tpg_show_enable(
- struct se_portal_group *se_tpg,
- char *page)
-{
- struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
- struct sbp_tport *tport = tpg->tport;
- return sprintf(page, "%d\n", tport->enable);
-}
-
-static ssize_t sbp_tpg_store_enable(
- struct se_portal_group *se_tpg,
- const char *page,
- size_t count)
+static int sbp_enable_tpg(struct se_portal_group *se_tpg, bool enable)
{
struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
struct sbp_tport *tport = tpg->tport;
- unsigned long val;
int ret;
- if (kstrtoul(page, 0, &val) < 0)
- return -EINVAL;
- if ((val != 0) && (val != 1))
- return -EINVAL;
-
- if (tport->enable == val)
- return count;
-
- if (val) {
+ if (enable) {
if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0) {
pr_err("Cannot enable a target with no LUNs!\n");
return -EINVAL;
@@ -2365,7 +2121,7 @@ static ssize_t sbp_tpg_store_enable(
spin_unlock_bh(&se_tpg->session_lock);
}
- tport->enable = val;
+ tport->enable = enable;
ret = sbp_update_unit_directory(tport);
if (ret < 0) {
@@ -2373,32 +2129,29 @@ static ssize_t sbp_tpg_store_enable(
return ret;
}
- return count;
+ return 0;
}
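
With enable handling hoisted into the core, the fabric no longer parses the attribute string at all; a generic store method (sketched below under illustrative names, not the exact core code) converts the input to bool and dispatches to ->fabric_enable_tpg():

	/*
	 * Sketch of the core-side store that replaces per-fabric enable
	 * attributes; kstrtobool() comes from <linux/kstrtox.h>.
	 */
	static ssize_t demo_tpg_enable_store(struct config_item *item,
			const char *page, size_t count)
	{
		struct se_portal_group *se_tpg = to_tpg(item);
		bool enable;
		int ret;

		ret = kstrtobool(page, &enable);
		if (ret)
			return ret;

		ret = se_tpg->se_tpg_tfo->fabric_enable_tpg(se_tpg, enable);
		if (ret)
			return ret;

		return count;
	}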
-TF_TPG_BASE_ATTR(sbp, directory_id, S_IRUGO | S_IWUSR);
-TF_TPG_BASE_ATTR(sbp, enable, S_IRUGO | S_IWUSR);
+CONFIGFS_ATTR(sbp_tpg_, directory_id);
static struct configfs_attribute *sbp_tpg_base_attrs[] = {
- &sbp_tpg_directory_id.attr,
- &sbp_tpg_enable.attr,
+ &sbp_tpg_attr_directory_id,
NULL,
};
-static ssize_t sbp_tpg_attrib_show_mgt_orb_timeout(
- struct se_portal_group *se_tpg,
+static ssize_t sbp_tpg_attrib_mgt_orb_timeout_show(struct config_item *item,
char *page)
{
+ struct se_portal_group *se_tpg = attrib_to_tpg(item);
struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
struct sbp_tport *tport = tpg->tport;
return sprintf(page, "%d\n", tport->mgt_orb_timeout);
}
-static ssize_t sbp_tpg_attrib_store_mgt_orb_timeout(
- struct se_portal_group *se_tpg,
- const char *page,
- size_t count)
+static ssize_t sbp_tpg_attrib_mgt_orb_timeout_store(struct config_item *item,
+ const char *page, size_t count)
{
+ struct se_portal_group *se_tpg = attrib_to_tpg(item);
struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
struct sbp_tport *tport = tpg->tport;
unsigned long val;
@@ -2421,20 +2174,19 @@ static ssize_t sbp_tpg_attrib_store_mgt_orb_timeout(
return count;
}
-static ssize_t sbp_tpg_attrib_show_max_reconnect_timeout(
- struct se_portal_group *se_tpg,
+static ssize_t sbp_tpg_attrib_max_reconnect_timeout_show(struct config_item *item,
char *page)
{
+ struct se_portal_group *se_tpg = attrib_to_tpg(item);
struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
struct sbp_tport *tport = tpg->tport;
return sprintf(page, "%d\n", tport->max_reconnect_timeout);
}
-static ssize_t sbp_tpg_attrib_store_max_reconnect_timeout(
- struct se_portal_group *se_tpg,
- const char *page,
- size_t count)
+static ssize_t sbp_tpg_attrib_max_reconnect_timeout_store(struct config_item *item,
+ const char *page, size_t count)
{
+ struct se_portal_group *se_tpg = attrib_to_tpg(item);
struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
struct sbp_tport *tport = tpg->tport;
unsigned long val;
@@ -2457,20 +2209,19 @@ static ssize_t sbp_tpg_attrib_store_max_reconnect_timeout(
return count;
}
-static ssize_t sbp_tpg_attrib_show_max_logins_per_lun(
- struct se_portal_group *se_tpg,
+static ssize_t sbp_tpg_attrib_max_logins_per_lun_show(struct config_item *item,
char *page)
{
+ struct se_portal_group *se_tpg = attrib_to_tpg(item);
struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
struct sbp_tport *tport = tpg->tport;
return sprintf(page, "%d\n", tport->max_logins_per_lun);
}
-static ssize_t sbp_tpg_attrib_store_max_logins_per_lun(
- struct se_portal_group *se_tpg,
- const char *page,
- size_t count)
+static ssize_t sbp_tpg_attrib_max_logins_per_lun_store(struct config_item *item,
+ const char *page, size_t count)
{
+ struct se_portal_group *se_tpg = attrib_to_tpg(item);
struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
struct sbp_tport *tport = tpg->tport;
unsigned long val;
@@ -2487,119 +2238,59 @@ static ssize_t sbp_tpg_attrib_store_max_logins_per_lun(
return count;
}
-TF_TPG_ATTRIB_ATTR(sbp, mgt_orb_timeout, S_IRUGO | S_IWUSR);
-TF_TPG_ATTRIB_ATTR(sbp, max_reconnect_timeout, S_IRUGO | S_IWUSR);
-TF_TPG_ATTRIB_ATTR(sbp, max_logins_per_lun, S_IRUGO | S_IWUSR);
+CONFIGFS_ATTR(sbp_tpg_attrib_, mgt_orb_timeout);
+CONFIGFS_ATTR(sbp_tpg_attrib_, max_reconnect_timeout);
+CONFIGFS_ATTR(sbp_tpg_attrib_, max_logins_per_lun);
static struct configfs_attribute *sbp_tpg_attrib_attrs[] = {
- &sbp_tpg_attrib_mgt_orb_timeout.attr,
- &sbp_tpg_attrib_max_reconnect_timeout.attr,
- &sbp_tpg_attrib_max_logins_per_lun.attr,
+ &sbp_tpg_attrib_attr_mgt_orb_timeout,
+ &sbp_tpg_attrib_attr_max_reconnect_timeout,
+ &sbp_tpg_attrib_attr_max_logins_per_lun,
NULL,
};
-static struct target_core_fabric_ops sbp_ops = {
- .get_fabric_name = sbp_get_fabric_name,
- .get_fabric_proto_ident = sbp_get_fabric_proto_ident,
+static const struct target_core_fabric_ops sbp_ops = {
+ .module = THIS_MODULE,
+ .fabric_name = "sbp",
.tpg_get_wwn = sbp_get_fabric_wwn,
.tpg_get_tag = sbp_get_tag,
- .tpg_get_default_depth = sbp_get_default_depth,
- .tpg_get_pr_transport_id = sbp_get_pr_transport_id,
- .tpg_get_pr_transport_id_len = sbp_get_pr_transport_id_len,
- .tpg_parse_pr_out_transport_id = sbp_parse_pr_out_transport_id,
.tpg_check_demo_mode = sbp_check_true,
.tpg_check_demo_mode_cache = sbp_check_true,
- .tpg_check_demo_mode_write_protect = sbp_check_false,
- .tpg_check_prod_mode_write_protect = sbp_check_false,
- .tpg_alloc_fabric_acl = sbp_alloc_fabric_acl,
- .tpg_release_fabric_acl = sbp_release_fabric_acl,
- .tpg_get_inst_index = sbp_tpg_get_inst_index,
.release_cmd = sbp_release_cmd,
- .shutdown_session = sbp_shutdown_session,
- .close_session = sbp_close_session,
- .sess_get_index = sbp_sess_get_index,
.write_pending = sbp_write_pending,
- .write_pending_status = sbp_write_pending_status,
- .set_default_node_attributes = sbp_set_default_node_attrs,
- .get_task_tag = sbp_get_task_tag,
- .get_cmd_state = sbp_get_cmd_state,
.queue_data_in = sbp_queue_data_in,
.queue_status = sbp_queue_status,
.queue_tm_rsp = sbp_queue_tm_rsp,
+ .aborted_task = sbp_aborted_task,
.check_stop_free = sbp_check_stop_free,
.fabric_make_wwn = sbp_make_tport,
.fabric_drop_wwn = sbp_drop_tport,
.fabric_make_tpg = sbp_make_tpg,
+ .fabric_enable_tpg = sbp_enable_tpg,
.fabric_drop_tpg = sbp_drop_tpg,
.fabric_post_link = sbp_post_link_lun,
.fabric_pre_unlink = sbp_pre_unlink_lun,
.fabric_make_np = NULL,
.fabric_drop_np = NULL,
- .fabric_make_nodeacl = sbp_make_nodeacl,
- .fabric_drop_nodeacl = sbp_drop_nodeacl,
-};
-
-static int sbp_register_configfs(void)
-{
- struct target_fabric_configfs *fabric;
- int ret;
-
- fabric = target_fabric_configfs_init(THIS_MODULE, "sbp");
- if (IS_ERR(fabric)) {
- pr_err("target_fabric_configfs_init() failed\n");
- return PTR_ERR(fabric);
- }
+ .fabric_init_nodeacl = sbp_init_nodeacl,
- fabric->tf_ops = sbp_ops;
+ .tfc_wwn_attrs = sbp_wwn_attrs,
+ .tfc_tpg_base_attrs = sbp_tpg_base_attrs,
+ .tfc_tpg_attrib_attrs = sbp_tpg_attrib_attrs,
- /*
- * Setup default attribute lists for various fabric->tf_cit_tmpl
- */
- TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = sbp_wwn_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = sbp_tpg_base_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = sbp_tpg_attrib_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
-
- ret = target_fabric_configfs_register(fabric);
- if (ret < 0) {
- pr_err("target_fabric_configfs_register() failed for SBP\n");
- return ret;
- }
-
- sbp_fabric_configfs = fabric;
-
- return 0;
-};
-
-static void sbp_deregister_configfs(void)
-{
- if (!sbp_fabric_configfs)
- return;
-
- target_fabric_configfs_deregister(sbp_fabric_configfs);
- sbp_fabric_configfs = NULL;
+ .default_submit_type = TARGET_DIRECT_SUBMIT,
+ .direct_submit_supp = 1,
};
static int __init sbp_init(void)
{
- int ret;
-
- ret = sbp_register_configfs();
- if (ret < 0)
- return ret;
-
- return 0;
+ return target_register_template(&sbp_ops);
};
static void __exit sbp_exit(void)
{
- sbp_deregister_configfs();
+ target_unregister_template(&sbp_ops);
};
MODULE_DESCRIPTION("FireWire SBP fabric driver");
diff --git a/drivers/target/sbp/sbp_target.h b/drivers/target/sbp/sbp_target.h
index 6d0d74a2c545..1d101ac86527 100644
--- a/drivers/target/sbp/sbp_target.h
+++ b/drivers/target/sbp/sbp_target.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SBP_BASE_H
#define _SBP_BASE_H
@@ -125,7 +126,7 @@ struct sbp_login_descriptor {
struct sbp_session *sess;
struct list_head link;
- struct se_lun *lun;
+ u32 login_lun;
u64 status_fifo_addr;
int exclusive;
@@ -151,15 +152,6 @@ struct sbp_session {
u64 reconnect_expires;
};
-struct sbp_nacl {
- /* Initiator EUI-64 */
- u64 guid;
- /* ASCII formatted GUID for SBP Initiator port */
- char iport_name[SBP_NAMELEN];
- /* Returned by sbp_make_nodeacl() */
- struct se_node_acl se_node_acl;
-};
-
struct sbp_tpg {
/* Target portal group tag for TCM */
u16 tport_tpgt;
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index cbe48ab41745..10250aca5a81 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -1,50 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* Filename: target_core_alua.c
*
* This file contains SPC-3 compliant asymmetric logical unit assignment (ALUA)
*
- * (c) Copyright 2009-2012 RisingTide Systems LLC.
+ * (c) Copyright 2009-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
******************************************************************************/
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/configfs.h>
+#include <linux/delay.h>
#include <linux/export.h>
+#include <linux/fcntl.h>
#include <linux/file.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
-#include <asm/unaligned.h>
+#include <linux/fs.h>
+#include <scsi/scsi_proto.h>
+#include <linux/unaligned.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
-#include <target/target_core_configfs.h>
#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_ua.h"
-static sense_reason_t core_alua_check_transition(int state, int *primary);
+static sense_reason_t core_alua_check_transition(int state, int valid,
+ int *primary, int explicit);
static int core_alua_set_tg_pt_secondary_state(
- struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
- struct se_port *port, int explict, int offline);
+ struct se_lun *lun, int explicit, int offline);
+
+static char *core_alua_dump_state(int state);
+
+static void __target_attach_tg_pt_gp(struct se_lun *lun,
+ struct t10_alua_tg_pt_gp *tg_pt_gp);
static u16 alua_lu_gps_counter;
static u32 alua_lu_gps_count;
@@ -55,6 +48,86 @@ static LIST_HEAD(lu_gps_list);
struct t10_alua_lu_gp *default_lu_gp;
/*
+ * REPORT REFERRALS
+ *
+ * See sbc3r35 section 5.23
+ */
+sense_reason_t
+target_emulate_report_referrals(struct se_cmd *cmd)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct t10_alua_lba_map *map;
+ struct t10_alua_lba_map_member *map_mem;
+ unsigned char *buf;
+ u32 rd_len = 0, off;
+
+ if (cmd->data_length < 4) {
+ pr_warn("REPORT REFERRALS allocation length %u too"
+ " small\n", cmd->data_length);
+ return TCM_INVALID_CDB_FIELD;
+ }
+
+ buf = transport_kmap_data_sg(cmd);
+ if (!buf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ off = 4;
+ spin_lock(&dev->t10_alua.lba_map_lock);
+ if (list_empty(&dev->t10_alua.lba_map_list)) {
+ spin_unlock(&dev->t10_alua.lba_map_lock);
+ transport_kunmap_data_sg(cmd);
+
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ list_for_each_entry(map, &dev->t10_alua.lba_map_list,
+ lba_map_list) {
+ int desc_num = off + 3;
+ int pg_num;
+
+ off += 4;
+ if (cmd->data_length > off)
+ put_unaligned_be64(map->lba_map_first_lba, &buf[off]);
+ off += 8;
+ if (cmd->data_length > off)
+ put_unaligned_be64(map->lba_map_last_lba, &buf[off]);
+ off += 8;
+ rd_len += 20;
+ pg_num = 0;
+ list_for_each_entry(map_mem, &map->lba_map_mem_list,
+ lba_map_mem_list) {
+ int alua_state = map_mem->lba_map_mem_alua_state;
+ int alua_pg_id = map_mem->lba_map_mem_alua_pg_id;
+
+ if (cmd->data_length > off)
+ buf[off] = alua_state & 0x0f;
+ off += 2;
+ if (cmd->data_length > off)
+ buf[off] = (alua_pg_id >> 8) & 0xff;
+ off++;
+ if (cmd->data_length > off)
+ buf[off] = (alua_pg_id & 0xff);
+ off++;
+ rd_len += 4;
+ pg_num++;
+ }
+ if (cmd->data_length > desc_num)
+ buf[desc_num] = pg_num;
+ }
+ spin_unlock(&dev->t10_alua.lba_map_lock);
+
+ /*
+	 * Set the RETURN DATA LENGTH in the header of the DataIN Payload
+ */
+ put_unaligned_be16(rd_len, &buf[2]);
+
+ transport_kunmap_data_sg(cmd);
+
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
+ return 0;
+}
+
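
For reference, an illustrative walk of the payload built above: a 4-byte header whose bytes 2-3 carry RETURN DATA LENGTH, then one user data segment descriptor per LBA map (a 4-byte descriptor header with the port group count in byte 3, two 8-byte LBAs, then 4 bytes per target port group). This is a sketch of a consumer, not code from the patch:

	#include <linux/unaligned.h>
	#include <linux/printk.h>

	static void demo_walk_referrals(const unsigned char *buf)
	{
		u32 rd_len = get_unaligned_be16(&buf[2]);
		u32 off = 4;

		while (off < rd_len + 4) {
			u8 pg_num = buf[off + 3];
			u64 first = get_unaligned_be64(&buf[off + 4]);
			u64 last = get_unaligned_be64(&buf[off + 12]);

			pr_info("referral %llu-%llu, %u port groups\n",
				first, last, pg_num);
			off += 20 + 4 * pg_num;
		}
	}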
+/*
* REPORT_TARGET_PORT_GROUPS
*
* See spc4r17 section 6.27
@@ -63,9 +136,8 @@ sense_reason_t
target_emulate_report_target_port_groups(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
- struct se_port *port;
struct t10_alua_tg_pt_gp *tg_pt_gp;
- struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+ struct se_lun *lun;
unsigned char *buf;
u32 rd_len = 0, off;
int ext_hdr = (cmd->t_task_cdb[1] & 0x20);
@@ -92,6 +164,9 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
spin_lock(&dev->t10_alua.tg_pt_gps_lock);
list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
tg_pt_gp_list) {
+ /* Skip empty port groups */
+ if (!tg_pt_gp->tg_pt_gp_members)
+ continue;
/*
* Check if the Target port group and Target port descriptor list
* based on tg_pt_gp_members count will fit into the response payload.
@@ -112,22 +187,16 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
/*
* Set the ASYMMETRIC ACCESS State
*/
- buf[off++] |= (atomic_read(
- &tg_pt_gp->tg_pt_gp_alua_access_state) & 0xff);
+ buf[off++] |= tg_pt_gp->tg_pt_gp_alua_access_state & 0xff;
/*
* Set supported ASYMMETRIC ACCESS State bits
*/
- buf[off] = 0x80; /* T_SUP */
- buf[off] |= 0x40; /* O_SUP */
- buf[off] |= 0x8; /* U_SUP */
- buf[off] |= 0x4; /* S_SUP */
- buf[off] |= 0x2; /* AN_SUP */
- buf[off++] |= 0x1; /* AO_SUP */
+ buf[off++] |= tg_pt_gp->tg_pt_gp_alua_supported_states;
/*
* TARGET PORT GROUP
*/
- buf[off++] = ((tg_pt_gp->tg_pt_gp_id >> 8) & 0xff);
- buf[off++] = (tg_pt_gp->tg_pt_gp_id & 0xff);
+ put_unaligned_be16(tg_pt_gp->tg_pt_gp_id, &buf[off]);
+ off += 2;
off++; /* Skip over Reserved */
/*
@@ -145,9 +214,8 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
rd_len += 8;
spin_lock(&tg_pt_gp->tg_pt_gp_lock);
- list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list,
- tg_pt_gp_mem_list) {
- port = tg_pt_gp_mem->tg_pt;
+ list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
+ lun_tg_pt_gp_link) {
/*
* Start Target Port descriptor format
*
@@ -157,8 +225,8 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
/*
* Set RELATIVE TARGET PORT IDENTIFIER
*/
- buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
- buf[off++] = (port->sep_rtpi & 0xff);
+ put_unaligned_be16(lun->lun_tpg->tpg_rtpi, &buf[off]);
+ off += 2;
rd_len += 4;
}
spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
@@ -175,31 +243,27 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
if (ext_hdr != 0) {
buf[4] = 0x10;
/*
- * Set the implict transition time (in seconds) for the application
+ * Set the implicit transition time (in seconds) for the application
* client to use as a base for its transition timeout value.
*
* Use the current tg_pt_gp_mem -> tg_pt_gp membership from the LUN
* this CDB was received upon to determine this value individually
* for the ALUA target port group.
*/
- port = cmd->se_lun->lun_sep;
- tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
- if (tg_pt_gp_mem) {
- spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
- tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
- if (tg_pt_gp)
- buf[5] = tg_pt_gp->tg_pt_gp_implict_trans_secs;
- spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
- }
+ rcu_read_lock();
+ tg_pt_gp = rcu_dereference(cmd->se_lun->lun_tg_pt_gp);
+ if (tg_pt_gp)
+ buf[5] = tg_pt_gp->tg_pt_gp_implicit_trans_secs;
+ rcu_read_unlock();
}
transport_kunmap_data_sg(cmd);
- target_complete_cmd(cmd, GOOD);
+ target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, rd_len + 4);
return 0;
}
/*
- * SET_TARGET_PORT_GROUPS for explict ALUA operation.
+ * SET_TARGET_PORT_GROUPS for explicit ALUA operation.
*
* See spc4r17 section 6.35
*/
@@ -207,20 +271,16 @@ sense_reason_t
target_emulate_set_target_port_groups(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
- struct se_port *port, *l_port = cmd->se_lun->lun_sep;
+ struct se_lun *l_lun = cmd->se_lun;
struct se_node_acl *nacl = cmd->se_sess->se_node_acl;
struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
- struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem;
unsigned char *buf;
unsigned char *ptr;
sense_reason_t rc = TCM_NO_SENSE;
u32 len = 4; /* Skip over RESERVED area in header */
- int alua_access_state, primary = 0;
+ int alua_access_state, primary = 0, valid_states;
u16 tg_pt_id, rtpi;
- if (!l_port)
- return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-
if (cmd->data_length < 4) {
pr_warn("SET TARGET PORT GROUPS parameter list length %u too"
" small\n", cmd->data_length);
@@ -232,31 +292,27 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
/*
- * Determine if explict ALUA via SET_TARGET_PORT_GROUPS is allowed
+ * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed
* for the local tg_pt_gp.
*/
- l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
- if (!l_tg_pt_gp_mem) {
- pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
- rc = TCM_UNSUPPORTED_SCSI_OPCODE;
- goto out;
- }
- spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
- l_tg_pt_gp = l_tg_pt_gp_mem->tg_pt_gp;
+ rcu_read_lock();
+ l_tg_pt_gp = rcu_dereference(l_lun->lun_tg_pt_gp);
if (!l_tg_pt_gp) {
- spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
- pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
+ rcu_read_unlock();
+ pr_err("Unable to access l_lun->tg_pt_gp\n");
rc = TCM_UNSUPPORTED_SCSI_OPCODE;
goto out;
}
- spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
- if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA)) {
+ if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)) {
+ rcu_read_unlock();
pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
- " while TPGS_EXPLICT_ALUA is disabled\n");
+ " while TPGS_EXPLICIT_ALUA is disabled\n");
rc = TCM_UNSUPPORTED_SCSI_OPCODE;
goto out;
}
+ valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
+ rcu_read_unlock();
ptr = &buf[4]; /* Skip over RESERVED area in header */
@@ -268,7 +324,8 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
* the state is a primary or secondary target port asymmetric
* access state.
*/
- rc = core_alua_check_transition(alua_access_state, &primary);
+ rc = core_alua_check_transition(alua_access_state, valid_states,
+ &primary, 1);
if (rc) {
/*
* If the SET TARGET PORT GROUPS attempts to establish
@@ -312,26 +369,26 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
if (tg_pt_id != tg_pt_gp->tg_pt_gp_id)
continue;
- atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
- smp_mb__after_atomic_inc();
+ atomic_inc_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
if (!core_alua_do_port_transition(tg_pt_gp,
- dev, l_port, nacl,
+ dev, l_lun, nacl,
alua_access_state, 1))
found = true;
spin_lock(&dev->t10_alua.tg_pt_gps_lock);
- atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
- smp_mb__after_atomic_dec();
+ atomic_dec_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
break;
}
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
} else {
+ struct se_lun *lun;
+
/*
- * Extact the RELATIVE TARGET PORT IDENTIFIER to identify
- * the Target Port in question for the the incoming
+ * Extract the RELATIVE TARGET PORT IDENTIFIER to identify
+ * the Target Port in question for the incoming
* SET_TARGET_PORT_GROUPS op.
*/
rtpi = get_unaligned_be16(ptr + 2);
@@ -340,17 +397,16 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
* for the struct se_device storage object.
*/
spin_lock(&dev->se_port_lock);
- list_for_each_entry(port, &dev->dev_sep_list,
- sep_list) {
- if (port->sep_rtpi != rtpi)
+ list_for_each_entry(lun, &dev->dev_sep_list,
+ lun_dev_link) {
+ if (lun->lun_tpg->tpg_rtpi != rtpi)
continue;
- tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
-
+ // XXX: racy unlock
spin_unlock(&dev->se_port_lock);
if (!core_alua_set_tg_pt_secondary_state(
- tg_pt_gp_mem, port, 1, 1))
+ lun, 1, 1))
found = true;
spin_lock(&dev->se_port_lock);
@@ -371,15 +427,14 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
out:
transport_kunmap_data_sg(cmd);
if (!rc)
- target_complete_cmd(cmd, GOOD);
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
return rc;
}
-static inline int core_alua_state_nonoptimized(
+static inline void core_alua_state_nonoptimized(
struct se_cmd *cmd,
unsigned char *cdb,
- int nonop_delay_msecs,
- u8 *alua_ascq)
+ int nonop_delay_msecs)
{
/*
* Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked
@@ -388,13 +443,81 @@ static inline int core_alua_state_nonoptimized(
*/
cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED;
cmd->alua_nonop_delay = nonop_delay_msecs;
+}
+
+static inline sense_reason_t core_alua_state_lba_dependent(
+ struct se_cmd *cmd,
+ u16 tg_pt_gp_id)
+{
+ struct se_device *dev = cmd->se_dev;
+ u64 segment_size, segment_mult, sectors, lba;
+
+ /* Only need to check for cdb actually containing LBAs */
+ if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB))
+ return 0;
+
+ spin_lock(&dev->t10_alua.lba_map_lock);
+ segment_size = dev->t10_alua.lba_map_segment_size;
+ segment_mult = dev->t10_alua.lba_map_segment_multiplier;
+ sectors = cmd->data_length / dev->dev_attrib.block_size;
+
+ lba = cmd->t_task_lba;
+ while (lba < cmd->t_task_lba + sectors) {
+ struct t10_alua_lba_map *cur_map = NULL, *map;
+ struct t10_alua_lba_map_member *map_mem;
+
+ list_for_each_entry(map, &dev->t10_alua.lba_map_list,
+ lba_map_list) {
+ u64 start_lba, last_lba;
+ u64 first_lba = map->lba_map_first_lba;
+
+ if (segment_mult) {
+ u64 tmp = lba;
+ start_lba = do_div(tmp, segment_size * segment_mult);
+
+ last_lba = first_lba + segment_size - 1;
+ if (start_lba >= first_lba &&
+ start_lba <= last_lba) {
+ lba += segment_size;
+ cur_map = map;
+ break;
+ }
+ } else {
+ last_lba = map->lba_map_last_lba;
+ if (lba >= first_lba && lba <= last_lba) {
+ lba = last_lba + 1;
+ cur_map = map;
+ break;
+ }
+ }
+ }
+ if (!cur_map) {
+ spin_unlock(&dev->t10_alua.lba_map_lock);
+ return TCM_ALUA_TG_PT_UNAVAILABLE;
+ }
+ list_for_each_entry(map_mem, &cur_map->lba_map_mem_list,
+ lba_map_mem_list) {
+ if (map_mem->lba_map_mem_alua_pg_id != tg_pt_gp_id)
+ continue;
+ switch(map_mem->lba_map_mem_alua_state) {
+ case ALUA_ACCESS_STATE_STANDBY:
+ spin_unlock(&dev->t10_alua.lba_map_lock);
+ return TCM_ALUA_TG_PT_STANDBY;
+ case ALUA_ACCESS_STATE_UNAVAILABLE:
+ spin_unlock(&dev->t10_alua.lba_map_lock);
+ return TCM_ALUA_TG_PT_UNAVAILABLE;
+ default:
+ break;
+ }
+ }
+ }
+ spin_unlock(&dev->t10_alua.lba_map_lock);
return 0;
}
-static inline int core_alua_state_standby(
+static inline sense_reason_t core_alua_state_standby(
struct se_cmd *cmd,
- unsigned char *cdb,
- u8 *alua_ascq)
+ unsigned char *cdb)
{
/*
* Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by
@@ -409,22 +532,28 @@ static inline int core_alua_state_standby(
case REPORT_LUNS:
case RECEIVE_DIAGNOSTIC:
case SEND_DIAGNOSTIC:
+ case READ_CAPACITY:
return 0;
+ case SERVICE_ACTION_IN_16:
+ switch (cdb[1] & 0x1f) {
+ case SAI_READ_CAPACITY_16:
+ return 0;
+ default:
+ return TCM_ALUA_TG_PT_STANDBY;
+ }
case MAINTENANCE_IN:
switch (cdb[1] & 0x1f) {
case MI_REPORT_TARGET_PGS:
return 0;
default:
- *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
- return 1;
+ return TCM_ALUA_TG_PT_STANDBY;
}
case MAINTENANCE_OUT:
switch (cdb[1]) {
case MO_SET_TARGET_PGS:
return 0;
default:
- *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
- return 1;
+ return TCM_ALUA_TG_PT_STANDBY;
}
case REQUEST_SENSE:
case PERSISTENT_RESERVE_IN:
@@ -433,17 +562,15 @@ static inline int core_alua_state_standby(
case WRITE_BUFFER:
return 0;
default:
- *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
- return 1;
+ return TCM_ALUA_TG_PT_STANDBY;
}
return 0;
}
-static inline int core_alua_state_unavailable(
+static inline sense_reason_t core_alua_state_unavailable(
struct se_cmd *cmd,
- unsigned char *cdb,
- u8 *alua_ascq)
+ unsigned char *cdb)
{
/*
* Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by
@@ -458,36 +585,32 @@ static inline int core_alua_state_unavailable(
case MI_REPORT_TARGET_PGS:
return 0;
default:
- *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
- return 1;
+ return TCM_ALUA_TG_PT_UNAVAILABLE;
}
case MAINTENANCE_OUT:
switch (cdb[1]) {
case MO_SET_TARGET_PGS:
return 0;
default:
- *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
- return 1;
+ return TCM_ALUA_TG_PT_UNAVAILABLE;
}
case REQUEST_SENSE:
case READ_BUFFER:
case WRITE_BUFFER:
return 0;
default:
- *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
- return 1;
+ return TCM_ALUA_TG_PT_UNAVAILABLE;
}
return 0;
}
-static inline int core_alua_state_transition(
+static inline sense_reason_t core_alua_state_transition(
struct se_cmd *cmd,
- unsigned char *cdb,
- u8 *alua_ascq)
+ unsigned char *cdb)
{
/*
- * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITIO as defined by
+ * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by
* spc4r17 section 5.9.2.5
*/
switch (cdb[0]) {
@@ -499,25 +622,23 @@ static inline int core_alua_state_transition(
case MI_REPORT_TARGET_PGS:
return 0;
default:
- *alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
- return 1;
+ return TCM_ALUA_STATE_TRANSITION;
}
case REQUEST_SENSE:
case READ_BUFFER:
case WRITE_BUFFER:
return 0;
default:
- *alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
- return 1;
+ return TCM_ALUA_STATE_TRANSITION;
}
return 0;
}
/*
- * return 1: Is used to signal LUN not accecsable, and check condition/not ready
+ * return 1: Is used to signal LUN not accessible, and check condition/not ready
* return 0: Used to signal success
- * reutrn -1: Used to signal failure, and invalid cdb field
+ * return -1: Used to signal failure, and invalid cdb field
*/
sense_reason_t
target_alua_state_check(struct se_cmd *cmd)
@@ -525,112 +646,115 @@ target_alua_state_check(struct se_cmd *cmd)
struct se_device *dev = cmd->se_dev;
unsigned char *cdb = cmd->t_task_cdb;
struct se_lun *lun = cmd->se_lun;
- struct se_port *port = lun->lun_sep;
struct t10_alua_tg_pt_gp *tg_pt_gp;
- struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
int out_alua_state, nonop_delay_msecs;
- u8 alua_ascq;
- int ret;
+ u16 tg_pt_gp_id;
+ sense_reason_t rc = TCM_NO_SENSE;
if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
return 0;
- if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+ if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA)
return 0;
- if (!port)
- return 0;
/*
* First, check for a struct se_port specific secondary ALUA target port
* access state: OFFLINE
*/
- if (atomic_read(&port->sep_tg_pt_secondary_offline)) {
+ if (atomic_read(&lun->lun_tg_pt_secondary_offline)) {
pr_debug("ALUA: Got secondary offline status for local"
" target port\n");
- alua_ascq = ASCQ_04H_ALUA_OFFLINE;
- ret = 1;
- goto out;
+ return TCM_ALUA_OFFLINE;
}
- /*
- * Second, obtain the struct t10_alua_tg_pt_gp_member pointer to the
- * ALUA target port group, to obtain current ALUA access state.
- * Otherwise look for the underlying struct se_device association with
- * a ALUA logical unit group.
- */
- tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
- spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
- tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
- out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
+ rcu_read_lock();
+ tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp);
+ if (!tg_pt_gp) {
+ rcu_read_unlock();
+ return 0;
+ }
+
+ out_alua_state = tg_pt_gp->tg_pt_gp_alua_access_state;
nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
- spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id;
+ rcu_read_unlock();
/*
- * Process ALUA_ACCESS_STATE_ACTIVE_OPTMIZED in a separate conditional
+ * Process ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED in a separate conditional
* statement so the compiler knows explicitly to check this case first.
* For the Optimized ALUA access state case, we want to process the
* incoming fabric cmd ASAP.
*/
- if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTMIZED)
+ if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED)
return 0;
switch (out_alua_state) {
case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
- ret = core_alua_state_nonoptimized(cmd, cdb,
- nonop_delay_msecs, &alua_ascq);
+ core_alua_state_nonoptimized(cmd, cdb, nonop_delay_msecs);
break;
case ALUA_ACCESS_STATE_STANDBY:
- ret = core_alua_state_standby(cmd, cdb, &alua_ascq);
+ rc = core_alua_state_standby(cmd, cdb);
break;
case ALUA_ACCESS_STATE_UNAVAILABLE:
- ret = core_alua_state_unavailable(cmd, cdb, &alua_ascq);
+ rc = core_alua_state_unavailable(cmd, cdb);
break;
case ALUA_ACCESS_STATE_TRANSITION:
- ret = core_alua_state_transition(cmd, cdb, &alua_ascq);
+ rc = core_alua_state_transition(cmd, cdb);
+ break;
+ case ALUA_ACCESS_STATE_LBA_DEPENDENT:
+ rc = core_alua_state_lba_dependent(cmd, tg_pt_gp_id);
break;
/*
* OFFLINE is a secondary ALUA target port group access state, that is
- * handled above with struct se_port->sep_tg_pt_secondary_offline=1
+ * handled above with struct se_lun->lun_tg_pt_secondary_offline=1
*/
case ALUA_ACCESS_STATE_OFFLINE:
default:
pr_err("Unknown ALUA access state: 0x%02x\n",
out_alua_state);
- return TCM_INVALID_CDB_FIELD;
+ rc = TCM_INVALID_CDB_FIELD;
}
-out:
- if (ret > 0) {
- /*
- * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
- * The ALUA additional sense code qualifier (ASCQ) is determined
- * by the ALUA primary or secondary access state..
- */
+ if (rc && rc != TCM_INVALID_CDB_FIELD) {
pr_debug("[%s]: ALUA TG Port not available, "
- "SenseKey: NOT_READY, ASC/ASCQ: "
- "0x04/0x%02x\n",
- cmd->se_tfo->get_fabric_name(), alua_ascq);
-
- cmd->scsi_asc = 0x04;
- cmd->scsi_ascq = alua_ascq;
- return TCM_CHECK_CONDITION_NOT_READY;
+ "SenseKey: NOT_READY, ASC/rc: 0x04/%d\n",
+ cmd->se_tfo->fabric_name, rc);
}
- return 0;
+ return rc;
}
/*
- * Check implict and explict ALUA state change request.
+ * Check implicit and explicit ALUA state change request.
*/
static sense_reason_t
-core_alua_check_transition(int state, int *primary)
+core_alua_check_transition(int state, int valid, int *primary, int explicit)
{
+ /*
+ * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
+ * defined as primary target port asymmetric access states.
+ */
switch (state) {
- case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED:
+ case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
+ if (!(valid & ALUA_AO_SUP))
+ goto not_supported;
+ *primary = 1;
+ break;
case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
+ if (!(valid & ALUA_AN_SUP))
+ goto not_supported;
+ *primary = 1;
+ break;
case ALUA_ACCESS_STATE_STANDBY:
+ if (!(valid & ALUA_S_SUP))
+ goto not_supported;
+ *primary = 1;
+ break;
case ALUA_ACCESS_STATE_UNAVAILABLE:
- /*
- * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
- * defined as primary target port asymmetric access states.
- */
+ if (!(valid & ALUA_U_SUP))
+ goto not_supported;
+ *primary = 1;
+ break;
+ case ALUA_ACCESS_STATE_LBA_DEPENDENT:
+ if (!(valid & ALUA_LBD_SUP))
+ goto not_supported;
*primary = 1;
break;
case ALUA_ACCESS_STATE_OFFLINE:
@@ -638,6 +762,17 @@ core_alua_check_transition(int state, int *primary)
* OFFLINE state is defined as a secondary target port
* asymmetric access state.
*/
+ if (!(valid & ALUA_O_SUP))
+ goto not_supported;
+ *primary = 0;
+ break;
+ case ALUA_ACCESS_STATE_TRANSITION:
+ if (!(valid & ALUA_T_SUP) || explicit)
+ /*
+ * Transitioning is set internally and by tcmu daemon,
+ * and cannot be selected through a STPG.
+ */
+ goto not_supported;
*primary = 0;
break;
default:
@@ -646,21 +781,30 @@ core_alua_check_transition(int state, int *primary)
}
return 0;
+
+not_supported:
+ pr_err("ALUA access state %s not supported",
+ core_alua_dump_state(state));
+ return TCM_INVALID_PARAMETER_LIST;
}
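
valid_states is the group's tg_pt_gp_alua_supported_states mask; its bits (defined in target_core_alua.h) line up with the supported-states byte that target_emulate_report_target_port_groups() now emits directly instead of hardcoding:

	#define ALUA_T_SUP	0x80	/* Transitioning supported */
	#define ALUA_O_SUP	0x40	/* Offline supported */
	#define ALUA_LBD_SUP	0x10	/* LBA Dependent supported */
	#define ALUA_U_SUP	0x08	/* Unavailable supported */
	#define ALUA_S_SUP	0x04	/* Standby supported */
	#define ALUA_AN_SUP	0x02	/* Active/NonOptimized supported */
	#define ALUA_AO_SUP	0x01	/* Active/Optimized supported */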
static char *core_alua_dump_state(int state)
{
switch (state) {
- case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED:
+ case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
return "Active/Optimized";
case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
return "Active/NonOptimized";
+ case ALUA_ACCESS_STATE_LBA_DEPENDENT:
+ return "LBA Dependent";
case ALUA_ACCESS_STATE_STANDBY:
return "Standby";
case ALUA_ACCESS_STATE_UNAVAILABLE:
return "Unavailable";
case ALUA_ACCESS_STATE_OFFLINE:
return "Offline";
+ case ALUA_ACCESS_STATE_TRANSITION:
+ return "Transitioning";
default:
return "Unknown";
}
@@ -673,10 +817,10 @@ char *core_alua_dump_status(int status)
switch (status) {
case ALUA_STATUS_NONE:
return "None";
- case ALUA_STATUS_ALTERED_BY_EXPLICT_STPG:
- return "Altered by Explict STPG";
- case ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA:
- return "Altered by Implict ALUA";
+ case ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG:
+ return "Altered by Explicit STPG";
+ case ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA:
+ return "Altered by Implicit ALUA";
default:
return "Unknown";
}
@@ -693,8 +837,6 @@ int core_alua_check_nonop_delay(
{
if (!(cmd->se_cmd_flags & SCF_ALUA_NON_OPTIMIZED))
return 0;
- if (in_interrupt())
- return 0;
/*
* The ALUA Active/NonOptimized access state delay can be disabled
* via configfs with a value of zero
@@ -708,98 +850,78 @@ int core_alua_check_nonop_delay(
msleep_interruptible(cmd->alua_nonop_delay);
return 0;
}
-EXPORT_SYMBOL(core_alua_check_nonop_delay);
-/*
- * Called with tg_pt_gp->tg_pt_gp_md_mutex or tg_pt_gp_mem->sep_tg_pt_md_mutex
- *
- */
static int core_alua_write_tpg_metadata(
const char *path,
unsigned char *md_buf,
u32 md_buf_len)
{
struct file *file = filp_open(path, O_RDWR | O_CREAT | O_TRUNC, 0600);
+ loff_t pos = 0;
int ret;
if (IS_ERR(file)) {
pr_err("filp_open(%s) for ALUA metadata failed\n", path);
return -ENODEV;
}
- ret = kernel_write(file, md_buf, md_buf_len, 0);
+ ret = kernel_write(file, md_buf, md_buf_len, &pos);
if (ret < 0)
pr_err("Error writing ALUA metadata file: %s\n", path);
fput(file);
- return ret ? -EIO : 0;
+ return (ret < 0) ? -EIO : 0;
}
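
The kernel_write() hunk tracks the interface change where the file position is passed by reference and advanced by the write, which is why the caller now keeps a local loff_t pos = 0. The current prototype, from fs/read_write.c:

	ssize_t kernel_write(struct file *file, const void *buf, size_t count,
			     loff_t *pos);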
-/*
- * Called with tg_pt_gp->tg_pt_gp_md_mutex held
- */
static int core_alua_update_tpg_primary_metadata(
- struct t10_alua_tg_pt_gp *tg_pt_gp,
- int primary_state,
- unsigned char *md_buf)
+ struct t10_alua_tg_pt_gp *tg_pt_gp)
{
+ unsigned char *md_buf;
struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn;
- char path[ALUA_METADATA_PATH_LEN];
- int len;
+ char *path;
+ int len, rc;
- memset(path, 0, ALUA_METADATA_PATH_LEN);
+ lockdep_assert_held(&tg_pt_gp->tg_pt_gp_transition_mutex);
+
+ md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
+ if (!md_buf) {
+ pr_err("Unable to allocate buf for ALUA metadata\n");
+ return -ENOMEM;
+ }
- len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
+ len = snprintf(md_buf, ALUA_MD_BUF_LEN,
"tg_pt_gp_id=%hu\n"
"alua_access_state=0x%02x\n"
"alua_access_status=0x%02x\n",
- tg_pt_gp->tg_pt_gp_id, primary_state,
+ tg_pt_gp->tg_pt_gp_id,
+ tg_pt_gp->tg_pt_gp_alua_access_state,
tg_pt_gp->tg_pt_gp_alua_access_status);
- snprintf(path, ALUA_METADATA_PATH_LEN,
- "/var/target/alua/tpgs_%s/%s", &wwn->unit_serial[0],
- config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
-
- return core_alua_write_tpg_metadata(path, md_buf, len);
+ rc = -ENOMEM;
+ path = kasprintf(GFP_KERNEL, "%s/alua/tpgs_%s/%s", db_root,
+ &wwn->unit_serial[0],
+ config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
+ if (path) {
+ rc = core_alua_write_tpg_metadata(path, md_buf, len);
+ kfree(path);
+ }
+ kfree(md_buf);
+ return rc;
}
-static int core_alua_do_transition_tg_pt(
- struct t10_alua_tg_pt_gp *tg_pt_gp,
- struct se_port *l_port,
- struct se_node_acl *nacl,
- unsigned char *md_buf,
- int new_state,
- int explict)
+static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp)
{
struct se_dev_entry *se_deve;
+ struct se_lun *lun;
struct se_lun_acl *lacl;
- struct se_port *port;
- struct t10_alua_tg_pt_gp_member *mem;
- int old_state = 0;
- /*
- * Save the old primary ALUA access state, and set the current state
- * to ALUA_ACCESS_STATE_TRANSITION.
- */
- old_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
- atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
- ALUA_ACCESS_STATE_TRANSITION);
- tg_pt_gp->tg_pt_gp_alua_access_status = (explict) ?
- ALUA_STATUS_ALTERED_BY_EXPLICT_STPG :
- ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA;
- /*
- * Check for the optional ALUA primary state transition delay
- */
- if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
- msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);
spin_lock(&tg_pt_gp->tg_pt_gp_lock);
- list_for_each_entry(mem, &tg_pt_gp->tg_pt_gp_mem_list,
- tg_pt_gp_mem_list) {
- port = mem->tg_pt;
+ list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
+ lun_tg_pt_gp_link) {
/*
* After an implicit target port asymmetric access state
* change, a device server shall establish a unit attention
* condition for the initiator port associated with every I_T
* nexus with the additional sense code set to ASYMMETRIC
- * ACCESS STATE CHAGED.
+ * ACCESS STATE CHANGED.
*
* After an explicit target port asymmetric access state
* change, a device server shall establish a unit attention
@@ -808,37 +930,96 @@ static int core_alua_do_transition_tg_pt(
* every I_T nexus other than the I_T nexus on which the SET
* TARGET PORT GROUPS command
*/
- atomic_inc(&mem->tg_pt_gp_mem_ref_cnt);
- smp_mb__after_atomic_inc();
+ if (!percpu_ref_tryget_live(&lun->lun_ref))
+ continue;
spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
- spin_lock_bh(&port->sep_alua_lock);
- list_for_each_entry(se_deve, &port->sep_alua_list,
- alua_port_list) {
+ spin_lock(&lun->lun_deve_lock);
+ list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link) {
lacl = se_deve->se_lun_acl;
+
/*
- * se_deve->se_lun_acl pointer may be NULL for a
- * entry created without explict Node+MappedLUN ACLs
+ * spc4r37 p.242:
+ * After an explicit target port asymmetric access
+ * state change, a device server shall establish a
+ * unit attention condition with the additional sense
+ * code set to ASYMMETRIC ACCESS STATE CHANGED for
+ * the initiator port associated with every I_T nexus
+ * other than the I_T nexus on which the SET TARGET
+ * PORT GROUPS command was received.
*/
- if (!lacl)
+ if ((tg_pt_gp->tg_pt_gp_alua_access_status ==
+ ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
+ (tg_pt_gp->tg_pt_gp_alua_lun != NULL) &&
+ (tg_pt_gp->tg_pt_gp_alua_lun == lun))
continue;
- if (explict &&
- (nacl != NULL) && (nacl == lacl->se_lun_nacl) &&
- (l_port != NULL) && (l_port == port))
+ /*
+		 * se_deve->se_lun_acl pointer may be NULL for an
+		 * entry created without explicit Node+MappedLUN ACLs
+ */
+ if (lacl && (tg_pt_gp->tg_pt_gp_alua_nacl != NULL) &&
+ (tg_pt_gp->tg_pt_gp_alua_nacl == lacl->se_lun_nacl))
continue;
- core_scsi3_ua_allocate(lacl->se_lun_nacl,
- se_deve->mapped_lun, 0x2A,
+ core_scsi3_ua_allocate(se_deve, 0x2A,
ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED);
}
- spin_unlock_bh(&port->sep_alua_lock);
+ spin_unlock(&lun->lun_deve_lock);
spin_lock(&tg_pt_gp->tg_pt_gp_lock);
- atomic_dec(&mem->tg_pt_gp_mem_ref_cnt);
- smp_mb__after_atomic_dec();
+ percpu_ref_put(&lun->lun_ref);
}
spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+}
+
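
The UA walk above pins each LUN with percpu_ref_tryget_live() so the group spinlock can be dropped around the per-LUN work, and skips LUNs whose reference is already dying to avoid touching a LUN in shutdown. The generic shape of that pattern, as a sketch using the se_lun field names from this hunk:

	#include <linux/percpu-refcount.h>
	#include <linux/spinlock.h>
	#include <linux/list.h>

	static void demo_walk_luns(struct list_head *head, spinlock_t *lock)
	{
		struct se_lun *lun;

		spin_lock(lock);
		list_for_each_entry(lun, head, lun_tg_pt_gp_link) {
			if (!percpu_ref_tryget_live(&lun->lun_ref))
				continue;	/* LUN already shutting down */
			spin_unlock(lock);

			/* slow per-LUN work runs without the group lock held */

			spin_lock(lock);
			percpu_ref_put(&lun->lun_ref);
		}
		spin_unlock(lock);
	}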
+static int core_alua_do_transition_tg_pt(
+ struct t10_alua_tg_pt_gp *tg_pt_gp,
+ int new_state,
+ int explicit)
+{
+ int prev_state;
+
+ mutex_lock(&tg_pt_gp->tg_pt_gp_transition_mutex);
+ /* Nothing to be done here */
+ if (tg_pt_gp->tg_pt_gp_alua_access_state == new_state) {
+ mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
+ return 0;
+ }
+
+ if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION) {
+ mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
+ return -EAGAIN;
+ }
+
+ /*
+ * Save the old primary ALUA access state, and set the current state
+ * to ALUA_ACCESS_STATE_TRANSITION.
+ */
+ prev_state = tg_pt_gp->tg_pt_gp_alua_access_state;
+ tg_pt_gp->tg_pt_gp_alua_access_state = ALUA_ACCESS_STATE_TRANSITION;
+ tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
+ ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
+ ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
+
+ core_alua_queue_state_change_ua(tg_pt_gp);
+
+ if (new_state == ALUA_ACCESS_STATE_TRANSITION) {
+ mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
+ return 0;
+ }
+
+ /*
+ * Check for the optional ALUA primary state transition delay
+ */
+ if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
+ msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);
+
+ /*
+ * Set the current primary ALUA access state to the requested new state
+ */
+ tg_pt_gp->tg_pt_gp_alua_access_state = new_state;
+
/*
* Update the ALUA metadata buf that has been allocated in
* core_alua_do_port_transition(), this metadata will be written
@@ -852,56 +1033,48 @@ static int core_alua_do_transition_tg_pt(
* struct file does NOT affect the actual ALUA transition.
*/
if (tg_pt_gp->tg_pt_gp_write_metadata) {
- mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex);
- core_alua_update_tpg_primary_metadata(tg_pt_gp,
- new_state, md_buf);
- mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex);
+ core_alua_update_tpg_primary_metadata(tg_pt_gp);
}
- /*
- * Set the current primary ALUA access state to the requested new state
- */
- atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state);
pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
- " from primary access state %s to %s\n", (explict) ? "explict" :
- "implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
- tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state),
+ " from primary access state %s to %s\n", (explicit) ? "explicit" :
+ "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
+ tg_pt_gp->tg_pt_gp_id,
+ core_alua_dump_state(prev_state),
core_alua_dump_state(new_state));
+ core_alua_queue_state_change_ua(tg_pt_gp);
+
+ mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
return 0;
}
int core_alua_do_port_transition(
struct t10_alua_tg_pt_gp *l_tg_pt_gp,
struct se_device *l_dev,
- struct se_port *l_port,
+ struct se_lun *l_lun,
struct se_node_acl *l_nacl,
int new_state,
- int explict)
+ int explicit)
{
struct se_device *dev;
- struct se_port *port;
- struct se_node_acl *nacl;
struct t10_alua_lu_gp *lu_gp;
struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem;
struct t10_alua_tg_pt_gp *tg_pt_gp;
- unsigned char *md_buf;
- int primary;
+ int primary, valid_states, rc = 0;
- if (core_alua_check_transition(new_state, &primary) != 0)
- return -EINVAL;
+ if (l_dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA)
+ return -ENODEV;
- md_buf = kzalloc(l_tg_pt_gp->tg_pt_gp_md_buf_len, GFP_KERNEL);
- if (!md_buf) {
- pr_err("Unable to allocate buf for ALUA metadata\n");
- return -ENOMEM;
- }
+ valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
+ if (core_alua_check_transition(new_state, valid_states, &primary,
+ explicit) != 0)
+ return -EINVAL;
local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem;
spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);
lu_gp = local_lu_gp_mem->lu_gp;
atomic_inc(&lu_gp->lu_gp_ref_cnt);
- smp_mb__after_atomic_inc();
spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock);
/*
* For storage objects that are members of the 'default_lu_gp',
@@ -913,12 +1086,12 @@ int core_alua_do_port_transition(
* core_alua_do_transition_tg_pt() will always return
* success.
*/
- core_alua_do_transition_tg_pt(l_tg_pt_gp, l_port, l_nacl,
- md_buf, new_state, explict);
- atomic_dec(&lu_gp->lu_gp_ref_cnt);
- smp_mb__after_atomic_dec();
- kfree(md_buf);
- return 0;
+ l_tg_pt_gp->tg_pt_gp_alua_lun = l_lun;
+ l_tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
+ rc = core_alua_do_transition_tg_pt(l_tg_pt_gp,
+ new_state, explicit);
+ atomic_dec_mb(&lu_gp->lu_gp_ref_cnt);
+ return rc;
}
/*
* For all other LU groups aside from 'default_lu_gp', walk all of
@@ -930,8 +1103,7 @@ int core_alua_do_port_transition(
lu_gp_mem_list) {
dev = lu_gp_mem->lu_gp_mem_dev;
- atomic_inc(&lu_gp_mem->lu_gp_mem_ref_cnt);
- smp_mb__after_atomic_inc();
+ atomic_inc_mb(&lu_gp_mem->lu_gp_mem_ref_cnt);
spin_unlock(&lu_gp->lu_gp_lock);
spin_lock(&dev->t10_alua.tg_pt_gps_lock);
@@ -943,7 +1115,7 @@ int core_alua_do_port_transition(
continue;
/*
* If the target behavior port asymmetric access state
- * is changed for any target port group accessiable via
+ * is changed for any target port group accessible via
* a logical unit within a LU group, the target port
* behavior group asymmetric access states for the same
* target port group accessible via other logical units
@@ -953,96 +1125,105 @@ int core_alua_do_port_transition(
continue;
if (l_tg_pt_gp == tg_pt_gp) {
- port = l_port;
- nacl = l_nacl;
+ tg_pt_gp->tg_pt_gp_alua_lun = l_lun;
+ tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
} else {
- port = NULL;
- nacl = NULL;
+ tg_pt_gp->tg_pt_gp_alua_lun = NULL;
+ tg_pt_gp->tg_pt_gp_alua_nacl = NULL;
}
- atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
- smp_mb__after_atomic_inc();
+ atomic_inc_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
/*
* core_alua_do_transition_tg_pt() will always return
* success.
*/
- core_alua_do_transition_tg_pt(tg_pt_gp, port,
- nacl, md_buf, new_state, explict);
+ rc = core_alua_do_transition_tg_pt(tg_pt_gp,
+ new_state, explicit);
spin_lock(&dev->t10_alua.tg_pt_gps_lock);
- atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
- smp_mb__after_atomic_dec();
+ atomic_dec_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
+ if (rc)
+ break;
}
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
spin_lock(&lu_gp->lu_gp_lock);
- atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt);
- smp_mb__after_atomic_dec();
+ atomic_dec_mb(&lu_gp_mem->lu_gp_mem_ref_cnt);
}
spin_unlock(&lu_gp->lu_gp_lock);
- pr_debug("Successfully processed LU Group: %s all ALUA TG PT"
- " Group IDs: %hu %s transition to primary state: %s\n",
- config_item_name(&lu_gp->lu_gp_group.cg_item),
- l_tg_pt_gp->tg_pt_gp_id, (explict) ? "explict" : "implict",
- core_alua_dump_state(new_state));
+ if (!rc) {
+ pr_debug("Successfully processed LU Group: %s all ALUA TG PT"
+ " Group IDs: %hu %s transition to primary state: %s\n",
+ config_item_name(&lu_gp->lu_gp_group.cg_item),
+ l_tg_pt_gp->tg_pt_gp_id,
+ (explicit) ? "explicit" : "implicit",
+ core_alua_dump_state(new_state));
+ }
- atomic_dec(&lu_gp->lu_gp_ref_cnt);
- smp_mb__after_atomic_dec();
- kfree(md_buf);
- return 0;
+ atomic_dec_mb(&lu_gp->lu_gp_ref_cnt);
+ return rc;
}
-/*
- * Called with tg_pt_gp_mem->sep_tg_pt_md_mutex held
- */
-static int core_alua_update_tpg_secondary_metadata(
- struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
- struct se_port *port,
- unsigned char *md_buf,
- u32 md_buf_len)
+static int core_alua_update_tpg_secondary_metadata(struct se_lun *lun)
{
- struct se_portal_group *se_tpg = port->sep_tpg;
- char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
- int len;
-
- memset(path, 0, ALUA_METADATA_PATH_LEN);
- memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
+ struct se_portal_group *se_tpg = lun->lun_tpg;
+ unsigned char *md_buf;
+ char *path;
+ int len, rc;
- len = snprintf(wwn, ALUA_SECONDARY_METADATA_WWN_LEN, "%s",
- se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg));
+ mutex_lock(&lun->lun_tg_pt_md_mutex);
- if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL)
- snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu",
- se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
+ md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
+ if (!md_buf) {
+ pr_err("Unable to allocate buf for ALUA metadata\n");
+ rc = -ENOMEM;
+ goto out_unlock;
+ }
- len = snprintf(md_buf, md_buf_len, "alua_tg_pt_offline=%d\n"
+ len = snprintf(md_buf, ALUA_MD_BUF_LEN, "alua_tg_pt_offline=%d\n"
"alua_tg_pt_status=0x%02x\n",
- atomic_read(&port->sep_tg_pt_secondary_offline),
- port->sep_tg_pt_secondary_stat);
-
- snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/%s/%s/lun_%u",
- se_tpg->se_tpg_tfo->get_fabric_name(), wwn,
- port->sep_lun->unpacked_lun);
-
- return core_alua_write_tpg_metadata(path, md_buf, len);
+ atomic_read(&lun->lun_tg_pt_secondary_offline),
+ lun->lun_tg_pt_secondary_stat);
+
+ if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL) {
+ path = kasprintf(GFP_KERNEL, "%s/alua/%s/%s+%hu/lun_%llu",
+ db_root, se_tpg->se_tpg_tfo->fabric_name,
+ se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
+ se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg),
+ lun->unpacked_lun);
+ } else {
+ path = kasprintf(GFP_KERNEL, "%s/alua/%s/%s/lun_%llu",
+ db_root, se_tpg->se_tpg_tfo->fabric_name,
+ se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
+ lun->unpacked_lun);
+ }
+ if (!path) {
+ rc = -ENOMEM;
+ goto out_free;
+ }
+
+ rc = core_alua_write_tpg_metadata(path, md_buf, len);
+ kfree(path);
+out_free:
+ kfree(md_buf);
+out_unlock:
+ mutex_unlock(&lun->lun_tg_pt_md_mutex);
+ return rc;
}
static int core_alua_set_tg_pt_secondary_state(
- struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
- struct se_port *port,
- int explict,
+ struct se_lun *lun,
+ int explicit,
int offline)
{
struct t10_alua_tg_pt_gp *tg_pt_gp;
- unsigned char *md_buf;
- u32 md_buf_len;
int trans_delay_msecs;
- spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
- tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+ rcu_read_lock();
+ tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp);
if (!tg_pt_gp) {
- spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ rcu_read_unlock();
pr_err("Unable to complete secondary state"
" transition\n");
return -EINVAL;
@@ -1050,24 +1231,23 @@ static int core_alua_set_tg_pt_secondary_state(
trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs;
/*
* Set the secondary ALUA target port access state to OFFLINE
- * or release the previously secondary state for struct se_port
+ * or release the previously secondary state for struct se_lun
*/
if (offline)
- atomic_set(&port->sep_tg_pt_secondary_offline, 1);
+ atomic_set(&lun->lun_tg_pt_secondary_offline, 1);
else
- atomic_set(&port->sep_tg_pt_secondary_offline, 0);
+ atomic_set(&lun->lun_tg_pt_secondary_offline, 0);
- md_buf_len = tg_pt_gp->tg_pt_gp_md_buf_len;
- port->sep_tg_pt_secondary_stat = (explict) ?
- ALUA_STATUS_ALTERED_BY_EXPLICT_STPG :
- ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA;
+ lun->lun_tg_pt_secondary_stat = (explicit) ?
+ ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
+ ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
- " to secondary access state: %s\n", (explict) ? "explict" :
- "implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
+ " to secondary access state: %s\n", (explicit) ? "explicit" :
+ "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");
- spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ rcu_read_unlock();
/*
* Do the optional transition delay after we set the secondary
* ALUA access state.
@@ -1078,24 +1258,113 @@ static int core_alua_set_tg_pt_secondary_state(
* See if we need to update the ALUA fabric port metadata for
* secondary state and status
*/
- if (port->sep_tg_pt_secondary_write_md) {
- md_buf = kzalloc(md_buf_len, GFP_KERNEL);
- if (!md_buf) {
- pr_err("Unable to allocate md_buf for"
- " secondary ALUA access metadata\n");
- return -ENOMEM;
+ if (lun->lun_tg_pt_secondary_write_md)
+ core_alua_update_tpg_secondary_metadata(lun);
+
+ return 0;
+}
+
+struct t10_alua_lba_map *
+core_alua_allocate_lba_map(struct list_head *list,
+ u64 first_lba, u64 last_lba)
+{
+ struct t10_alua_lba_map *lba_map;
+
+ lba_map = kmem_cache_zalloc(t10_alua_lba_map_cache, GFP_KERNEL);
+ if (!lba_map) {
+ pr_err("Unable to allocate struct t10_alua_lba_map\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ INIT_LIST_HEAD(&lba_map->lba_map_mem_list);
+ lba_map->lba_map_first_lba = first_lba;
+ lba_map->lba_map_last_lba = last_lba;
+
+ list_add_tail(&lba_map->lba_map_list, list);
+ return lba_map;
+}
+
+int
+core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *lba_map,
+ int pg_id, int state)
+{
+ struct t10_alua_lba_map_member *lba_map_mem;
+
+ list_for_each_entry(lba_map_mem, &lba_map->lba_map_mem_list,
+ lba_map_mem_list) {
+ if (lba_map_mem->lba_map_mem_alua_pg_id == pg_id) {
+ pr_err("Duplicate pg_id %d in lba_map\n", pg_id);
+ return -EINVAL;
}
- mutex_lock(&port->sep_tg_pt_md_mutex);
- core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port,
- md_buf, md_buf_len);
- mutex_unlock(&port->sep_tg_pt_md_mutex);
+ }
- kfree(md_buf);
+ lba_map_mem = kmem_cache_zalloc(t10_alua_lba_map_mem_cache, GFP_KERNEL);
+ if (!lba_map_mem) {
+ pr_err("Unable to allocate struct t10_alua_lba_map_mem\n");
+ return -ENOMEM;
}
+ lba_map_mem->lba_map_mem_alua_state = state;
+ lba_map_mem->lba_map_mem_alua_pg_id = pg_id;
+ list_add_tail(&lba_map_mem->lba_map_mem_list,
+ &lba_map->lba_map_mem_list);
return 0;
}
+void
+core_alua_free_lba_map(struct list_head *lba_list)
+{
+ struct t10_alua_lba_map *lba_map, *lba_map_tmp;
+ struct t10_alua_lba_map_member *lba_map_mem, *lba_map_mem_tmp;
+
+ list_for_each_entry_safe(lba_map, lba_map_tmp, lba_list,
+ lba_map_list) {
+ list_for_each_entry_safe(lba_map_mem, lba_map_mem_tmp,
+ &lba_map->lba_map_mem_list,
+ lba_map_mem_list) {
+ list_del(&lba_map_mem->lba_map_mem_list);
+ kmem_cache_free(t10_alua_lba_map_mem_cache,
+ lba_map_mem);
+ }
+ list_del(&lba_map->lba_map_list);
+ kmem_cache_free(t10_alua_lba_map_cache, lba_map);
+ }
+}
+
+void
+core_alua_set_lba_map(struct se_device *dev, struct list_head *lba_map_list,
+ int segment_size, int segment_mult)
+{
+ struct list_head old_lba_map_list;
+ struct t10_alua_tg_pt_gp *tg_pt_gp;
+ int activate = 0, supported;
+
+ INIT_LIST_HEAD(&old_lba_map_list);
+ spin_lock(&dev->t10_alua.lba_map_lock);
+ dev->t10_alua.lba_map_segment_size = segment_size;
+ dev->t10_alua.lba_map_segment_multiplier = segment_mult;
+ list_splice_init(&dev->t10_alua.lba_map_list, &old_lba_map_list);
+ if (lba_map_list) {
+ list_splice_init(lba_map_list, &dev->t10_alua.lba_map_list);
+ activate = 1;
+ }
+ spin_unlock(&dev->t10_alua.lba_map_lock);
+ spin_lock(&dev->t10_alua.tg_pt_gps_lock);
+ list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
+ tg_pt_gp_list) {
+
+ if (!tg_pt_gp->tg_pt_gp_valid_id)
+ continue;
+ supported = tg_pt_gp->tg_pt_gp_alua_supported_states;
+ if (activate)
+ supported |= ALUA_LBD_SUP;
+ else
+ supported &= ~ALUA_LBD_SUP;
+ tg_pt_gp->tg_pt_gp_alua_supported_states = supported;
+ }
+ spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
+ core_alua_free_lba_map(&old_lba_map_list);
+}
+
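+
For reference, core_alua_set_lba_map() above follows the usual splice-under-lock
pattern: the replacement list is published while lba_map_lock is held, and the
displaced entries are freed only after the lock is dropped, so nothing is freed
under the spinlock. A minimal sketch of the pattern (struct demo and
free_demo_list() are invented stand-ins for t10_alua and
core_alua_free_lba_map()):

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo {
	spinlock_t lock;
	struct list_head items;		/* the currently published list */
};

void free_demo_list(struct list_head *list);	/* frees every entry */

static void demo_set_items(struct demo *d, struct list_head *new_items)
{
	LIST_HEAD(old_items);

	spin_lock(&d->lock);
	list_splice_init(&d->items, &old_items);	/* park the old list */
	if (new_items)					/* publish the new one */
		list_splice_init(new_items, &d->items);
	spin_unlock(&d->lock);

	free_demo_list(&old_items);	/* never free under the spinlock */
}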
struct t10_alua_lu_gp *
core_alua_allocate_lu_gp(const char *name, int def_group)
{
@@ -1229,7 +1498,7 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
* struct se_device is released via core_alua_free_lu_gp_mem().
*
* If the passed lu_gp does NOT match the default_lu_gp, assume
- * we want to re-assocate a given lu_gp_mem with default_lu_gp.
+ * we want to re-associate a given lu_gp_mem with default_lu_gp.
*/
spin_lock(&lu_gp_mem->lu_gp_mem_lock);
if (lu_gp != default_lu_gp)
@@ -1344,25 +1613,31 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
return NULL;
}
INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
- INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_mem_list);
- mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
+ INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_lun_list);
+ mutex_init(&tg_pt_gp->tg_pt_gp_transition_mutex);
spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
tg_pt_gp->tg_pt_gp_dev = dev;
- tg_pt_gp->tg_pt_gp_md_buf_len = ALUA_MD_BUF_LEN;
- atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
- ALUA_ACCESS_STATE_ACTIVE_OPTMIZED);
+ tg_pt_gp->tg_pt_gp_alua_access_state =
+ ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED;
/*
- * Enable both explict and implict ALUA support by default
+ * Enable both explicit and implicit ALUA support by default
*/
tg_pt_gp->tg_pt_gp_alua_access_type =
- TPGS_EXPLICT_ALUA | TPGS_IMPLICT_ALUA;
+ TPGS_EXPLICIT_ALUA | TPGS_IMPLICIT_ALUA;
/*
* Set the default Active/NonOptimized Delay in milliseconds
*/
tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS;
tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS;
- tg_pt_gp->tg_pt_gp_implict_trans_secs = ALUA_DEFAULT_IMPLICT_TRANS_SECS;
+ tg_pt_gp->tg_pt_gp_implicit_trans_secs = ALUA_DEFAULT_IMPLICIT_TRANS_SECS;
+
+ /*
+ * Enable all supported states
+ */
+ tg_pt_gp->tg_pt_gp_alua_supported_states =
+ ALUA_T_SUP | ALUA_O_SUP |
+ ALUA_U_SUP | ALUA_S_SUP | ALUA_AN_SUP | ALUA_AO_SUP;
if (def_group) {
spin_lock(&dev->t10_alua.tg_pt_gps_lock);
@@ -1400,7 +1675,6 @@ int core_alua_set_tg_pt_gp_id(
pr_err("Maximum ALUA alua_tg_pt_gps_count:"
" 0x0000ffff reached\n");
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
- kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
return -ENOSPC;
}
again:
@@ -1430,44 +1704,25 @@ again:
return 0;
}
-struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
- struct se_port *port)
-{
- struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
-
- tg_pt_gp_mem = kmem_cache_zalloc(t10_alua_tg_pt_gp_mem_cache,
- GFP_KERNEL);
- if (!tg_pt_gp_mem) {
- pr_err("Unable to allocate struct t10_alua_tg_pt_gp_member\n");
- return ERR_PTR(-ENOMEM);
- }
- INIT_LIST_HEAD(&tg_pt_gp_mem->tg_pt_gp_mem_list);
- spin_lock_init(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
- atomic_set(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt, 0);
-
- tg_pt_gp_mem->tg_pt = port;
- port->sep_alua_tg_pt_gp_mem = tg_pt_gp_mem;
-
- return tg_pt_gp_mem;
-}
-
void core_alua_free_tg_pt_gp(
struct t10_alua_tg_pt_gp *tg_pt_gp)
{
struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
- struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *tg_pt_gp_mem_tmp;
+ struct se_lun *lun, *next;
/*
* Once we have reached this point, config_item_put() has already
* been called from target_core_alua_drop_tg_pt_gp().
*
* Here we remove *tg_pt_gp from the global list so that
- * no assications *OR* explict ALUA via SET_TARGET_PORT_GROUPS
+ * no associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS
* can be made while we are releasing struct t10_alua_tg_pt_gp.
*/
spin_lock(&dev->t10_alua.tg_pt_gps_lock);
- list_del(&tg_pt_gp->tg_pt_gp_list);
- dev->t10_alua.alua_tg_pt_gps_counter--;
+ if (tg_pt_gp->tg_pt_gp_valid_id) {
+ list_del(&tg_pt_gp->tg_pt_gp_list);
+ dev->t10_alua.alua_tg_pt_gps_count--;
+ }
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
/*
@@ -1484,67 +1739,33 @@ void core_alua_free_tg_pt_gp(
* struct se_port.
*/
spin_lock(&tg_pt_gp->tg_pt_gp_lock);
- list_for_each_entry_safe(tg_pt_gp_mem, tg_pt_gp_mem_tmp,
- &tg_pt_gp->tg_pt_gp_mem_list, tg_pt_gp_mem_list) {
- if (tg_pt_gp_mem->tg_pt_gp_assoc) {
- list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
- tg_pt_gp->tg_pt_gp_members--;
- tg_pt_gp_mem->tg_pt_gp_assoc = 0;
- }
+ list_for_each_entry_safe(lun, next,
+ &tg_pt_gp->tg_pt_gp_lun_list, lun_tg_pt_gp_link) {
+ list_del_init(&lun->lun_tg_pt_gp_link);
+ tg_pt_gp->tg_pt_gp_members--;
+
spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
/*
- * tg_pt_gp_mem is associated with a single
- * se_port->sep_alua_tg_pt_gp_mem, and is released via
- * core_alua_free_tg_pt_gp_mem().
- *
* If the passed tg_pt_gp does NOT match the default_tg_pt_gp,
- * assume we want to re-assocate a given tg_pt_gp_mem with
+ * assume we want to re-associate a given LUN with
* default_tg_pt_gp.
*/
- spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ spin_lock(&lun->lun_tg_pt_gp_lock);
if (tg_pt_gp != dev->t10_alua.default_tg_pt_gp) {
- __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
+ __target_attach_tg_pt_gp(lun,
dev->t10_alua.default_tg_pt_gp);
} else
- tg_pt_gp_mem->tg_pt_gp = NULL;
- spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ rcu_assign_pointer(lun->lun_tg_pt_gp, NULL);
+ spin_unlock(&lun->lun_tg_pt_gp_lock);
spin_lock(&tg_pt_gp->tg_pt_gp_lock);
}
spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
+ synchronize_rcu();
kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
}
-void core_alua_free_tg_pt_gp_mem(struct se_port *port)
-{
- struct t10_alua_tg_pt_gp *tg_pt_gp;
- struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
-
- tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
- if (!tg_pt_gp_mem)
- return;
-
- while (atomic_read(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt))
- cpu_relax();
-
- spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
- tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
- if (tg_pt_gp) {
- spin_lock(&tg_pt_gp->tg_pt_gp_lock);
- if (tg_pt_gp_mem->tg_pt_gp_assoc) {
- list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
- tg_pt_gp->tg_pt_gp_members--;
- tg_pt_gp_mem->tg_pt_gp_assoc = 0;
- }
- spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
- tg_pt_gp_mem->tg_pt_gp = NULL;
- }
- spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
-
- kmem_cache_free(t10_alua_tg_pt_gp_mem_cache, tg_pt_gp_mem);
-}
-
static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
struct se_device *dev, const char *name)
{
@@ -1578,50 +1799,79 @@ static void core_alua_put_tg_pt_gp_from_name(
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
}
-/*
- * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held
- */
-void __core_alua_attach_tg_pt_gp_mem(
- struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
- struct t10_alua_tg_pt_gp *tg_pt_gp)
+static void __target_attach_tg_pt_gp(struct se_lun *lun,
+ struct t10_alua_tg_pt_gp *tg_pt_gp)
{
+ struct se_dev_entry *se_deve;
+
+ assert_spin_locked(&lun->lun_tg_pt_gp_lock);
+
spin_lock(&tg_pt_gp->tg_pt_gp_lock);
- tg_pt_gp_mem->tg_pt_gp = tg_pt_gp;
- tg_pt_gp_mem->tg_pt_gp_assoc = 1;
- list_add_tail(&tg_pt_gp_mem->tg_pt_gp_mem_list,
- &tg_pt_gp->tg_pt_gp_mem_list);
+ rcu_assign_pointer(lun->lun_tg_pt_gp, tg_pt_gp);
+ list_add_tail(&lun->lun_tg_pt_gp_link, &tg_pt_gp->tg_pt_gp_lun_list);
tg_pt_gp->tg_pt_gp_members++;
+ spin_lock(&lun->lun_deve_lock);
+ list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link)
+ core_scsi3_ua_allocate(se_deve, 0x3f,
+ ASCQ_3FH_INQUIRY_DATA_HAS_CHANGED);
+ spin_unlock(&lun->lun_deve_lock);
spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
}
-/*
- * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held
- */
-static void __core_alua_drop_tg_pt_gp_mem(
- struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
- struct t10_alua_tg_pt_gp *tg_pt_gp)
+void target_attach_tg_pt_gp(struct se_lun *lun,
+ struct t10_alua_tg_pt_gp *tg_pt_gp)
+{
+ spin_lock(&lun->lun_tg_pt_gp_lock);
+ __target_attach_tg_pt_gp(lun, tg_pt_gp);
+ spin_unlock(&lun->lun_tg_pt_gp_lock);
+ synchronize_rcu();
+}
+
+static void __target_detach_tg_pt_gp(struct se_lun *lun,
+ struct t10_alua_tg_pt_gp *tg_pt_gp)
{
+ assert_spin_locked(&lun->lun_tg_pt_gp_lock);
+
spin_lock(&tg_pt_gp->tg_pt_gp_lock);
- list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
- tg_pt_gp_mem->tg_pt_gp = NULL;
- tg_pt_gp_mem->tg_pt_gp_assoc = 0;
+ list_del_init(&lun->lun_tg_pt_gp_link);
tg_pt_gp->tg_pt_gp_members--;
spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
}
-ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page)
+void target_detach_tg_pt_gp(struct se_lun *lun)
+{
+ struct t10_alua_tg_pt_gp *tg_pt_gp;
+
+ spin_lock(&lun->lun_tg_pt_gp_lock);
+ tg_pt_gp = rcu_dereference_check(lun->lun_tg_pt_gp,
+ lockdep_is_held(&lun->lun_tg_pt_gp_lock));
+ if (tg_pt_gp) {
+ __target_detach_tg_pt_gp(lun, tg_pt_gp);
+ rcu_assign_pointer(lun->lun_tg_pt_gp, NULL);
+ }
+ spin_unlock(&lun->lun_tg_pt_gp_lock);
+ synchronize_rcu();
+}
+
+static void target_swap_tg_pt_gp(struct se_lun *lun,
+ struct t10_alua_tg_pt_gp *old_tg_pt_gp,
+ struct t10_alua_tg_pt_gp *new_tg_pt_gp)
+{
+ assert_spin_locked(&lun->lun_tg_pt_gp_lock);
+
+ if (old_tg_pt_gp)
+ __target_detach_tg_pt_gp(lun, old_tg_pt_gp);
+ __target_attach_tg_pt_gp(lun, new_tg_pt_gp);
+}
+
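+
The helpers above and core_alua_show_tg_pt_gp_info() below are the two halves
of the RCU discipline that replaces the old tg_pt_gp_mem_lock: writers swap
lun->lun_tg_pt_gp under lun_tg_pt_gp_lock and call synchronize_rcu() before
the old group may go away; readers only touch the pointer inside an RCU
read-side section. Condensed into one sketch (demo_rcu_usage() is invented;
the calls mirror the patch exactly):

static void demo_rcu_usage(struct se_lun *lun,
			   struct t10_alua_tg_pt_gp *new_gp)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp;

	/* reader side, as in core_alua_show_tg_pt_gp_info() */
	rcu_read_lock();
	tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp);
	if (tg_pt_gp) {
		/* use tg_pt_gp; don't sleep, don't cache it past unlock */
	}
	rcu_read_unlock();

	/* writer side, as in target_detach_tg_pt_gp()/target_swap_tg_pt_gp() */
	spin_lock(&lun->lun_tg_pt_gp_lock);
	rcu_assign_pointer(lun->lun_tg_pt_gp, new_gp);	/* or NULL on detach */
	spin_unlock(&lun->lun_tg_pt_gp_lock);
	synchronize_rcu();	/* no reader can still see the old group */
}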
+ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page)
{
struct config_item *tg_pt_ci;
struct t10_alua_tg_pt_gp *tg_pt_gp;
- struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
ssize_t len = 0;
- tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
- if (!tg_pt_gp_mem)
- return len;
-
- spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
- tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+ rcu_read_lock();
+ tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp);
if (tg_pt_gp) {
tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:"
@@ -1629,38 +1879,37 @@ ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page)
"Primary Access Status: %s\nTG Port Secondary Access"
" State: %s\nTG Port Secondary Access Status: %s\n",
config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id,
- core_alua_dump_state(atomic_read(
- &tg_pt_gp->tg_pt_gp_alua_access_state)),
+ core_alua_dump_state(
+ tg_pt_gp->tg_pt_gp_alua_access_state),
core_alua_dump_status(
tg_pt_gp->tg_pt_gp_alua_access_status),
- (atomic_read(&port->sep_tg_pt_secondary_offline)) ?
+ atomic_read(&lun->lun_tg_pt_secondary_offline) ?
"Offline" : "None",
- core_alua_dump_status(port->sep_tg_pt_secondary_stat));
+ core_alua_dump_status(lun->lun_tg_pt_secondary_stat));
}
- spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ rcu_read_unlock();
return len;
}
ssize_t core_alua_store_tg_pt_gp_info(
- struct se_port *port,
+ struct se_lun *lun,
const char *page,
size_t count)
{
- struct se_portal_group *tpg;
- struct se_lun *lun;
- struct se_device *dev = port->sep_lun->lun_se_dev;
+ struct se_portal_group *tpg = lun->lun_tpg;
+ /*
+ * rcu_dereference_raw protected by se_lun->lun_group symlink
+ * reference to se_device->dev_group.
+ */
+ struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL;
- struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
unsigned char buf[TG_PT_GROUP_NAME_BUF];
int move = 0;
- tpg = port->sep_tpg;
- lun = port->sep_lun;
-
- tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
- if (!tg_pt_gp_mem)
- return 0;
+ if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ||
+ (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
+ return -ENODEV;
if (count > TG_PT_GROUP_NAME_BUF) {
pr_err("ALUA Target Port Group alias too large!\n");
@@ -1684,8 +1933,9 @@ ssize_t core_alua_store_tg_pt_gp_info(
return -ENODEV;
}
- spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
- tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+ spin_lock(&lun->lun_tg_pt_gp_lock);
+ tg_pt_gp = rcu_dereference_check(lun->lun_tg_pt_gp,
+ lockdep_is_held(&lun->lun_tg_pt_gp_lock));
if (tg_pt_gp) {
/*
* Clearing an existing tg_pt_gp association, and replacing
@@ -1703,24 +1953,17 @@ ssize_t core_alua_store_tg_pt_gp_info(
&tg_pt_gp->tg_pt_gp_group.cg_item),
tg_pt_gp->tg_pt_gp_id);
- __core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
- __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
+ target_swap_tg_pt_gp(lun, tg_pt_gp,
dev->t10_alua.default_tg_pt_gp);
- spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ spin_unlock(&lun->lun_tg_pt_gp_lock);
- return count;
+ goto sync_rcu;
}
- /*
- * Removing existing association of tg_pt_gp_mem with tg_pt_gp
- */
- __core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
move = 1;
}
- /*
- * Associate tg_pt_gp_mem with tg_pt_gp_new.
- */
- __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp_new);
- spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+
+ target_swap_tg_pt_gp(lun, tg_pt_gp, tg_pt_gp_new);
+ spin_unlock(&lun->lun_tg_pt_gp_lock);
pr_debug("Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
" Target Port Group: alua/%s, ID: %hu\n", (move) ?
"Moving" : "Adding", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
@@ -1730,6 +1973,8 @@ ssize_t core_alua_store_tg_pt_gp_info(
tg_pt_gp_new->tg_pt_gp_id);
core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
+sync_rcu:
+ synchronize_rcu();
return count;
}
@@ -1737,13 +1982,13 @@ ssize_t core_alua_show_access_type(
struct t10_alua_tg_pt_gp *tg_pt_gp,
char *page)
{
- if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA) &&
- (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA))
- return sprintf(page, "Implict and Explict\n");
- else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)
- return sprintf(page, "Implict\n");
- else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA)
- return sprintf(page, "Explict\n");
+ if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA) &&
+ (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA))
+ return sprintf(page, "Implicit and Explicit\n");
+ else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)
+ return sprintf(page, "Implicit\n");
+ else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)
+ return sprintf(page, "Explicit\n");
else
return sprintf(page, "None\n");
}
@@ -1756,10 +2001,10 @@ ssize_t core_alua_store_access_type(
unsigned long tmp;
int ret;
- ret = strict_strtoul(page, 0, &tmp);
+ ret = kstrtoul(page, 0, &tmp);
if (ret < 0) {
pr_err("Unable to extract alua_access_type\n");
- return -EINVAL;
+ return ret;
}
if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) {
pr_err("Illegal value for alua_access_type:"
@@ -1768,11 +2013,11 @@ ssize_t core_alua_store_access_type(
}
if (tmp == 3)
tg_pt_gp->tg_pt_gp_alua_access_type =
- TPGS_IMPLICT_ALUA | TPGS_EXPLICT_ALUA;
+ TPGS_IMPLICIT_ALUA | TPGS_EXPLICIT_ALUA;
else if (tmp == 2)
- tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICT_ALUA;
+ tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICIT_ALUA;
else if (tmp == 1)
- tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICT_ALUA;
+ tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICIT_ALUA;
else
tg_pt_gp->tg_pt_gp_alua_access_type = 0;
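
Every store handler in this run of hunks gets the same two-line treatment:
strict_strtoul() (since removed from the kernel) becomes kstrtoul(), and the
helper's own error code is propagated instead of being flattened to -EINVAL.
The resulting shape, as a sketch (core_alua_store_example and EXAMPLE_MAX are
invented for illustration):

static ssize_t core_alua_store_example(
	struct t10_alua_tg_pt_gp *tg_pt_gp,
	const char *page,
	size_t count)
{
	unsigned long tmp;
	int ret;

	ret = kstrtoul(page, 0, &tmp);	/* base 0: decimal, 0x hex, 0 octal */
	if (ret < 0)
		return ret;		/* -EINVAL or -ERANGE from kstrtoul() */
	if (tmp > EXAMPLE_MAX)		/* range policy stays with the caller */
		return -EINVAL;
	/* ... commit tmp to tg_pt_gp ... */
	return count;
}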
@@ -1794,10 +2039,10 @@ ssize_t core_alua_store_nonop_delay_msecs(
unsigned long tmp;
int ret;
- ret = strict_strtoul(page, 0, &tmp);
+ ret = kstrtoul(page, 0, &tmp);
if (ret < 0) {
pr_err("Unable to extract nonop_delay_msecs\n");
- return -EINVAL;
+ return ret;
}
if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) {
pr_err("Passed nonop_delay_msecs: %lu, exceeds"
@@ -1825,10 +2070,10 @@ ssize_t core_alua_store_trans_delay_msecs(
unsigned long tmp;
int ret;
- ret = strict_strtoul(page, 0, &tmp);
+ ret = kstrtoul(page, 0, &tmp);
if (ret < 0) {
pr_err("Unable to extract trans_delay_msecs\n");
- return -EINVAL;
+ return ret;
}
if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) {
pr_err("Passed trans_delay_msecs: %lu, exceeds"
@@ -1841,14 +2086,14 @@ ssize_t core_alua_store_trans_delay_msecs(
return count;
}
-ssize_t core_alua_show_implict_trans_secs(
+ssize_t core_alua_show_implicit_trans_secs(
struct t10_alua_tg_pt_gp *tg_pt_gp,
char *page)
{
- return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_implict_trans_secs);
+ return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_implicit_trans_secs);
}
-ssize_t core_alua_store_implict_trans_secs(
+ssize_t core_alua_store_implicit_trans_secs(
struct t10_alua_tg_pt_gp *tg_pt_gp,
const char *page,
size_t count)
@@ -1856,18 +2101,18 @@ ssize_t core_alua_store_implict_trans_secs(
unsigned long tmp;
int ret;
- ret = strict_strtoul(page, 0, &tmp);
+ ret = kstrtoul(page, 0, &tmp);
if (ret < 0) {
- pr_err("Unable to extract implict_trans_secs\n");
- return -EINVAL;
+ pr_err("Unable to extract implicit_trans_secs\n");
+ return ret;
}
- if (tmp > ALUA_MAX_IMPLICT_TRANS_SECS) {
- pr_err("Passed implict_trans_secs: %lu, exceeds"
- " ALUA_MAX_IMPLICT_TRANS_SECS: %d\n", tmp,
- ALUA_MAX_IMPLICT_TRANS_SECS);
+ if (tmp > ALUA_MAX_IMPLICIT_TRANS_SECS) {
+ pr_err("Passed implicit_trans_secs: %lu, exceeds"
+ " ALUA_MAX_IMPLICIT_TRANS_SECS: %d\n", tmp,
+ ALUA_MAX_IMPLICIT_TRANS_SECS);
return -EINVAL;
}
- tg_pt_gp->tg_pt_gp_implict_trans_secs = (int)tmp;
+ tg_pt_gp->tg_pt_gp_implicit_trans_secs = (int)tmp;
return count;
}
@@ -1887,10 +2132,10 @@ ssize_t core_alua_store_preferred_bit(
unsigned long tmp;
int ret;
- ret = strict_strtoul(page, 0, &tmp);
+ ret = kstrtoul(page, 0, &tmp);
if (ret < 0) {
pr_err("Unable to extract preferred ALUA value\n");
- return -EINVAL;
+ return ret;
}
if ((tmp != 0) && (tmp != 1)) {
pr_err("Illegal value for preferred ALUA: %lu\n", tmp);
@@ -1903,11 +2148,8 @@ ssize_t core_alua_store_preferred_bit(
ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page)
{
- if (!lun->lun_sep)
- return -ENODEV;
-
return sprintf(page, "%d\n",
- atomic_read(&lun->lun_sep->sep_tg_pt_secondary_offline));
+ atomic_read(&lun->lun_tg_pt_secondary_offline));
}
ssize_t core_alua_store_offline_bit(
@@ -1915,31 +2157,30 @@ ssize_t core_alua_store_offline_bit(
const char *page,
size_t count)
{
- struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
+ /*
+ * rcu_dereference_raw protected by se_lun->lun_group symlink
+ * reference to se_device->dev_group.
+ */
+ struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
unsigned long tmp;
int ret;
- if (!lun->lun_sep)
+ if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ||
+ (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
return -ENODEV;
- ret = strict_strtoul(page, 0, &tmp);
+ ret = kstrtoul(page, 0, &tmp);
if (ret < 0) {
pr_err("Unable to extract alua_tg_pt_offline value\n");
- return -EINVAL;
+ return ret;
}
if ((tmp != 0) && (tmp != 1)) {
pr_err("Illegal value for alua_tg_pt_offline: %lu\n",
tmp);
return -EINVAL;
}
- tg_pt_gp_mem = lun->lun_sep->sep_alua_tg_pt_gp_mem;
- if (!tg_pt_gp_mem) {
- pr_err("Unable to locate *tg_pt_gp_mem\n");
- return -EINVAL;
- }
- ret = core_alua_set_tg_pt_secondary_state(tg_pt_gp_mem,
- lun->lun_sep, 0, (int)tmp);
+ ret = core_alua_set_tg_pt_secondary_state(lun, 0, (int)tmp);
if (ret < 0)
return -EINVAL;
@@ -1950,7 +2191,7 @@ ssize_t core_alua_show_secondary_status(
struct se_lun *lun,
char *page)
{
- return sprintf(page, "%d\n", lun->lun_sep->sep_tg_pt_secondary_stat);
+ return sprintf(page, "%d\n", lun->lun_tg_pt_secondary_stat);
}
ssize_t core_alua_store_secondary_status(
@@ -1961,19 +2202,19 @@ ssize_t core_alua_store_secondary_status(
unsigned long tmp;
int ret;
- ret = strict_strtoul(page, 0, &tmp);
+ ret = kstrtoul(page, 0, &tmp);
if (ret < 0) {
pr_err("Unable to extract alua_tg_pt_status\n");
- return -EINVAL;
+ return ret;
}
if ((tmp != ALUA_STATUS_NONE) &&
- (tmp != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) &&
- (tmp != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) {
+ (tmp != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
+ (tmp != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) {
pr_err("Illegal value for alua_tg_pt_status: %lu\n",
tmp);
return -EINVAL;
}
- lun->lun_sep->sep_tg_pt_secondary_stat = (int)tmp;
+ lun->lun_tg_pt_secondary_stat = (int)tmp;
return count;
}
@@ -1982,8 +2223,7 @@ ssize_t core_alua_show_secondary_write_metadata(
struct se_lun *lun,
char *page)
{
- return sprintf(page, "%d\n",
- lun->lun_sep->sep_tg_pt_secondary_write_md);
+ return sprintf(page, "%d\n", lun->lun_tg_pt_secondary_write_md);
}
ssize_t core_alua_store_secondary_write_metadata(
@@ -1994,24 +2234,25 @@ ssize_t core_alua_store_secondary_write_metadata(
unsigned long tmp;
int ret;
- ret = strict_strtoul(page, 0, &tmp);
+ ret = kstrtoul(page, 0, &tmp);
if (ret < 0) {
pr_err("Unable to extract alua_tg_pt_write_md\n");
- return -EINVAL;
+ return ret;
}
if ((tmp != 0) && (tmp != 1)) {
pr_err("Illegal value for alua_tg_pt_write_md:"
" %lu\n", tmp);
return -EINVAL;
}
- lun->lun_sep->sep_tg_pt_secondary_write_md = (int)tmp;
+ lun->lun_tg_pt_secondary_write_md = (int)tmp;
return count;
}
int core_setup_alua(struct se_device *dev)
{
- if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV &&
+ if (!(dev->transport_flags &
+ TRANSPORT_FLAG_PASSTHROUGH_ALUA) &&
!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
struct t10_alua_lu_gp_member *lu_gp_mem;
diff --git a/drivers/target/target_core_alua.h b/drivers/target/target_core_alua.h
index e539c3e7f4ad..fc9637cce825 100644
--- a/drivers/target/target_core_alua.h
+++ b/drivers/target/target_core_alua.h
@@ -1,35 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef TARGET_CORE_ALUA_H
#define TARGET_CORE_ALUA_H
+#include <target/target_core_base.h>
+
/*
* INQUIRY response data, TPGS Field
*
* from spc4r17 section 6.4.2 Table 135
*/
#define TPGS_NO_ALUA 0x00
-#define TPGS_IMPLICT_ALUA 0x10
-#define TPGS_EXPLICT_ALUA 0x20
+#define TPGS_IMPLICIT_ALUA 0x10
+#define TPGS_EXPLICIT_ALUA 0x20
/*
* ASYMMETRIC ACCESS STATE field
*
- * from spc4r17 section 6.27 Table 245
+ * from spc4r36j section 6.37 Table 307
*/
-#define ALUA_ACCESS_STATE_ACTIVE_OPTMIZED 0x0
+#define ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED 0x0
#define ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED 0x1
#define ALUA_ACCESS_STATE_STANDBY 0x2
#define ALUA_ACCESS_STATE_UNAVAILABLE 0x3
+#define ALUA_ACCESS_STATE_LBA_DEPENDENT 0x4
#define ALUA_ACCESS_STATE_OFFLINE 0xe
#define ALUA_ACCESS_STATE_TRANSITION 0xf
/*
+ * from spc4r36j section 6.37 Table 306
+ */
+#define ALUA_T_SUP 0x80
+#define ALUA_O_SUP 0x40
+#define ALUA_LBD_SUP 0x10
+#define ALUA_U_SUP 0x08
+#define ALUA_S_SUP 0x04
+#define ALUA_AN_SUP 0x02
+#define ALUA_AO_SUP 0x01
+
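+
These flags map one-to-one onto the supported-states byte of the target port
group descriptor returned by REPORT TARGET PORT GROUPS (per the spc4r36j
table cited above). The default mask installed by core_alua_allocate_tg_pt_gp()
in the earlier hunk therefore works out to:

/*
 * ALUA_T_SUP | ALUA_O_SUP | ALUA_U_SUP | ALUA_S_SUP | ALUA_AN_SUP | ALUA_AO_SUP
 *    = 0x80  |    0x40    |    0x08    |    0x04    |    0x02     |    0x01
 *    = 0xcf
 *
 * ALUA_LBD_SUP (0x10) is OR'd in by core_alua_set_lba_map() only while an
 * LBA map is active, giving 0xdf.
 */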
+/*
* REPORT_TARGET_PORT_GROUP STATUS CODE
*
* from spc4r17 section 6.27 Table 246
*/
#define ALUA_STATUS_NONE 0x00
-#define ALUA_STATUS_ALTERED_BY_EXPLICT_STPG 0x01
-#define ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA 0x02
+#define ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG 0x01
+#define ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA 0x02
/*
* From spc4r17, Table D.1: ASC and ASCQ Assignment
@@ -46,39 +61,41 @@
#define ALUA_DEFAULT_NONOP_DELAY_MSECS 100
#define ALUA_MAX_NONOP_DELAY_MSECS 10000 /* 10 seconds */
/*
- * Used for implict and explict ALUA transitional delay, that is disabled
+ * Used for implicit and explicit ALUA transitional delay, that is disabled
* by default, and is intended to be used for debugging client side ALUA code.
*/
#define ALUA_DEFAULT_TRANS_DELAY_MSECS 0
#define ALUA_MAX_TRANS_DELAY_MSECS 30000 /* 30 seconds */
/*
- * Used for the recommended application client implict transition timeout
+ * Used for the recommended application client implicit transition timeout
* in seconds, returned by the REPORT_TARGET_PORT_GROUPS w/ extended header.
*/
-#define ALUA_DEFAULT_IMPLICT_TRANS_SECS 0
-#define ALUA_MAX_IMPLICT_TRANS_SECS 255
-/*
- * Used by core_alua_update_tpg_primary_metadata() and
- * core_alua_update_tpg_secondary_metadata()
- */
-#define ALUA_METADATA_PATH_LEN 512
-/*
- * Used by core_alua_update_tpg_secondary_metadata()
- */
-#define ALUA_SECONDARY_METADATA_WWN_LEN 256
+#define ALUA_DEFAULT_IMPLICIT_TRANS_SECS 0
+#define ALUA_MAX_IMPLICIT_TRANS_SECS 255
+
+/* Used by core_alua_update_tpg_(primary,secondary)_metadata */
+#define ALUA_MD_BUF_LEN 1024
extern struct kmem_cache *t10_alua_lu_gp_cache;
extern struct kmem_cache *t10_alua_lu_gp_mem_cache;
extern struct kmem_cache *t10_alua_tg_pt_gp_cache;
-extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
+extern struct kmem_cache *t10_alua_lba_map_cache;
+extern struct kmem_cache *t10_alua_lba_map_mem_cache;
extern sense_reason_t target_emulate_report_target_port_groups(struct se_cmd *);
extern sense_reason_t target_emulate_set_target_port_groups(struct se_cmd *);
+extern sense_reason_t target_emulate_report_referrals(struct se_cmd *);
extern int core_alua_check_nonop_delay(struct se_cmd *);
extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *,
- struct se_device *, struct se_port *,
+ struct se_device *, struct se_lun *,
struct se_node_acl *, int, int);
extern char *core_alua_dump_status(int);
+extern struct t10_alua_lba_map *core_alua_allocate_lba_map(
+ struct list_head *, u64, u64);
+extern int core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *, int, int);
+extern void core_alua_free_lba_map(struct list_head *);
+extern void core_alua_set_lba_map(struct se_device *, struct list_head *,
+ int, int);
extern struct t10_alua_lu_gp *core_alua_allocate_lu_gp(const char *, int);
extern int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *, u16);
extern void core_alua_free_lu_gp(struct t10_alua_lu_gp *);
@@ -93,14 +110,11 @@ extern void core_alua_drop_lu_gp_dev(struct se_device *);
extern struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
struct se_device *, const char *, int);
extern int core_alua_set_tg_pt_gp_id(struct t10_alua_tg_pt_gp *, u16);
-extern struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
- struct se_port *);
extern void core_alua_free_tg_pt_gp(struct t10_alua_tg_pt_gp *);
-extern void core_alua_free_tg_pt_gp_mem(struct se_port *);
-extern void __core_alua_attach_tg_pt_gp_mem(struct t10_alua_tg_pt_gp_member *,
- struct t10_alua_tg_pt_gp *);
-extern ssize_t core_alua_show_tg_pt_gp_info(struct se_port *, char *);
-extern ssize_t core_alua_store_tg_pt_gp_info(struct se_port *, const char *,
+extern void target_detach_tg_pt_gp(struct se_lun *);
+extern void target_attach_tg_pt_gp(struct se_lun *, struct t10_alua_tg_pt_gp *);
+extern ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *, char *);
+extern ssize_t core_alua_store_tg_pt_gp_info(struct se_lun *, const char *,
size_t);
extern ssize_t core_alua_show_access_type(struct t10_alua_tg_pt_gp *, char *);
extern ssize_t core_alua_store_access_type(struct t10_alua_tg_pt_gp *,
@@ -113,9 +127,9 @@ extern ssize_t core_alua_show_trans_delay_msecs(struct t10_alua_tg_pt_gp *,
char *);
extern ssize_t core_alua_store_trans_delay_msecs(struct t10_alua_tg_pt_gp *,
const char *, size_t);
-extern ssize_t core_alua_show_implict_trans_secs(struct t10_alua_tg_pt_gp *,
+extern ssize_t core_alua_show_implicit_trans_secs(struct t10_alua_tg_pt_gp *,
char *);
-extern ssize_t core_alua_store_implict_trans_secs(struct t10_alua_tg_pt_gp *,
+extern ssize_t core_alua_store_implicit_trans_secs(struct t10_alua_tg_pt_gp *,
const char *, size_t);
extern ssize_t core_alua_show_preferred_bit(struct t10_alua_tg_pt_gp *,
char *);
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index e4d22933efaf..f7868b41c5e6 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -1,25 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* Filename: target_core_configfs.c
*
* This file contains ConfigFS logic for the Generic Target Engine project.
*
- * (c) Copyright 2008-2012 RisingTide Systems LLC.
+ * (c) Copyright 2008-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
* based on configfs Copyright (C) 2005 Oracle. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
****************************************************************************/
+#include <linux/kstrtox.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <generated/utsrelease.h>
@@ -40,30 +33,49 @@
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
-#include <target/target_core_fabric_configfs.h>
-#include <target/target_core_configfs.h>
-#include <target/configfs_macros.h>
#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_rd.h"
+#include "target_core_xcopy.h"
+
+#define TB_CIT_SETUP(_name, _item_ops, _group_ops, _attrs) \
+static void target_core_setup_##_name##_cit(struct target_backend *tb) \
+{ \
+ struct config_item_type *cit = &tb->tb_##_name##_cit; \
+ \
+ cit->ct_item_ops = _item_ops; \
+ cit->ct_group_ops = _group_ops; \
+ cit->ct_attrs = _attrs; \
+ cit->ct_owner = tb->ops->owner; \
+ pr_debug("Setup generic %s\n", __stringify(_name)); \
+}
+
+#define TB_CIT_SETUP_DRV(_name, _item_ops, _group_ops) \
+static void target_core_setup_##_name##_cit(struct target_backend *tb) \
+{ \
+ struct config_item_type *cit = &tb->tb_##_name##_cit; \
+ \
+ cit->ct_item_ops = _item_ops; \
+ cit->ct_group_ops = _group_ops; \
+ cit->ct_attrs = tb->ops->tb_##_name##_attrs; \
+ cit->ct_owner = tb->ops->owner; \
+ pr_debug("Setup generic %s\n", __stringify(_name)); \
+}
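
Each of these macros stamps out one target_core_setup_<name>_cit() helper.
For example, an instantiation such as TB_CIT_SETUP_DRV(dev_attrib, &item_ops,
NULL) (illustrative; the real instantiations appear further down the file)
expands to:

static void target_core_setup_dev_attrib_cit(struct target_backend *tb)
{
	struct config_item_type *cit = &tb->tb_dev_attrib_cit;

	cit->ct_item_ops = &item_ops;
	cit->ct_group_ops = NULL;
	cit->ct_attrs = tb->ops->tb_dev_attrib_attrs;
	cit->ct_owner = tb->ops->owner;
	pr_debug("Setup generic %s\n", "dev_attrib");
}

The _DRV variant differs from TB_CIT_SETUP only in sourcing ct_attrs from the
backend's ops table rather than from a caller-supplied attribute array.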
extern struct t10_alua_lu_gp *default_lu_gp;
static LIST_HEAD(g_tf_list);
static DEFINE_MUTEX(g_tf_lock);
-struct target_core_configfs_attribute {
- struct configfs_attribute attr;
- ssize_t (*show)(void *, char *);
- ssize_t (*store)(void *, const char *, size_t);
-};
-
static struct config_group target_core_hbagroup;
static struct config_group alua_group;
static struct config_group alua_lu_gps_group;
+static unsigned int target_devices;
+static DEFINE_MUTEX(target_devices_lock);
+
static inline struct se_hba *
item_to_hba(struct config_item *item)
{
@@ -73,24 +85,75 @@ item_to_hba(struct config_item *item)
/*
* Attributes for /sys/kernel/config/target/
*/
-static ssize_t target_core_attr_show(struct config_item *item,
- struct configfs_attribute *attr,
- char *page)
+static ssize_t target_core_item_version_show(struct config_item *item,
+ char *page)
{
return sprintf(page, "Target Engine Core ConfigFS Infrastructure %s"
- " on %s/%s on "UTS_RELEASE"\n", TARGET_CORE_CONFIGFS_VERSION,
+ " on %s/%s on "UTS_RELEASE"\n", TARGET_CORE_VERSION,
utsname()->sysname, utsname()->machine);
}
-static struct configfs_item_operations target_core_fabric_item_ops = {
- .show_attribute = target_core_attr_show,
-};
+CONFIGFS_ATTR_RO(target_core_item_, version);
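
CONFIGFS_ATTR_RO(target_core_item_, version) replaces the hand-rolled
attribute definition removed below; per the configfs header at the time of
this conversion it expands to roughly the following (a paraphrase, not copied
from the tree):

static struct configfs_attribute target_core_item_attr_version = {
	.ca_owner	= THIS_MODULE,
	.ca_name	= "version",
	.ca_mode	= S_IRUGO,
	.show		= target_core_item_version_show,
};

i.e. the same struct the old code declared by hand, plus a per-attribute
->show() hook, which is what lets the subsystem-wide show_attribute()
dispatcher go away.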
-static struct configfs_attribute target_core_item_attr_version = {
- .ca_owner = THIS_MODULE,
- .ca_name = "version",
- .ca_mode = S_IRUGO,
-};
+char db_root[DB_ROOT_LEN] = DB_ROOT_DEFAULT;
+static char db_root_stage[DB_ROOT_LEN];
+
+static ssize_t target_core_item_dbroot_show(struct config_item *item,
+ char *page)
+{
+ return sprintf(page, "%s\n", db_root);
+}
+
+static ssize_t target_core_item_dbroot_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ ssize_t read_bytes;
+ struct file *fp;
+ ssize_t r = -EINVAL;
+
+ mutex_lock(&target_devices_lock);
+ if (target_devices) {
+ pr_err("db_root: cannot be changed because it's in use\n");
+ goto unlock;
+ }
+
+ if (count > (DB_ROOT_LEN - 1)) {
+ pr_err("db_root: count %d exceeds DB_ROOT_LEN-1: %u\n",
+ (int)count, DB_ROOT_LEN - 1);
+ goto unlock;
+ }
+
+ read_bytes = scnprintf(db_root_stage, DB_ROOT_LEN, "%s", page);
+ if (!read_bytes)
+ goto unlock;
+
+ if (db_root_stage[read_bytes - 1] == '\n')
+ db_root_stage[read_bytes - 1] = '\0';
+
+ /* validate new db root before accepting it */
+ fp = filp_open(db_root_stage, O_RDONLY, 0);
+ if (IS_ERR(fp)) {
+ pr_err("db_root: cannot open: %s\n", db_root_stage);
+ goto unlock;
+ }
+ if (!S_ISDIR(file_inode(fp)->i_mode)) {
+ filp_close(fp, NULL);
+ pr_err("db_root: not a directory: %s\n", db_root_stage);
+ goto unlock;
+ }
+ filp_close(fp, NULL);
+
+ strscpy(db_root, db_root_stage);
+ pr_debug("Target_Core_ConfigFS: db_root set to %s\n", db_root);
+
+ r = read_bytes;
+
+unlock:
+ mutex_unlock(&target_devices_lock);
+ return r;
+}
+
+CONFIGFS_ATTR(target_core_item_, dbroot);
static struct target_fabric_configfs *target_core_get_fabric(
const char *name)
@@ -102,7 +165,10 @@ static struct target_fabric_configfs *target_core_get_fabric(
mutex_lock(&g_tf_lock);
list_for_each_entry(tf, &g_tf_list, tf_list) {
- if (!strcmp(tf->tf_name, name)) {
+ const char *cmp_name = tf->tf_ops->fabric_alias;
+ if (!cmp_name)
+ cmp_name = tf->tf_ops->fabric_name;
+ if (!strcmp(cmp_name, name)) {
atomic_inc(&tf->tf_access_cnt);
mutex_unlock(&g_tf_lock);
return tf;
@@ -125,78 +191,76 @@ static struct config_group *target_core_register_fabric(
pr_debug("Target_Core_ConfigFS: REGISTER -> group: %p name:"
" %s\n", group, name);
- /*
- * Below are some hardcoded request_module() calls to automatically
- * local fabric modules when the following is called:
- *
- * mkdir -p /sys/kernel/config/target/$MODULE_NAME
- *
- * Note that this does not limit which TCM fabric module can be
- * registered, but simply provids auto loading logic for modules with
- * mkdir(2) system calls with known TCM fabric modules.
- */
- if (!strncmp(name, "iscsi", 5)) {
+
+ tf = target_core_get_fabric(name);
+ if (!tf) {
+ pr_debug("target_core_register_fabric() trying autoload for %s\n",
+ name);
+
/*
- * Automatically load the LIO Target fabric module when the
- * following is called:
+ * Below are some hardcoded request_module() calls to automatically
+ * load fabric modules when the following is called:
*
- * mkdir -p $CONFIGFS/target/iscsi
- */
- ret = request_module("iscsi_target_mod");
- if (ret < 0) {
- pr_err("request_module() failed for"
- " iscsi_target_mod.ko: %d\n", ret);
- return ERR_PTR(-EINVAL);
- }
- } else if (!strncmp(name, "loopback", 8)) {
- /*
- * Automatically load the tcm_loop fabric module when the
- * following is called:
+ * mkdir -p /sys/kernel/config/target/$MODULE_NAME
*
- * mkdir -p $CONFIGFS/target/loopback
+ * Note that this does not limit which TCM fabric module can be
+ * registered, but simply provides auto loading logic for modules with
+ * mkdir(2) system calls with known TCM fabric modules.
*/
- ret = request_module("tcm_loop");
- if (ret < 0) {
- pr_err("request_module() failed for"
- " tcm_loop.ko: %d\n", ret);
- return ERR_PTR(-EINVAL);
+
+ if (!strncmp(name, "iscsi", 5)) {
+ /*
+ * Automatically load the LIO Target fabric module when the
+ * following is called:
+ *
+ * mkdir -p $CONFIGFS/target/iscsi
+ */
+ ret = request_module("iscsi_target_mod");
+ if (ret < 0) {
+ pr_debug("request_module() failed for"
+ " iscsi_target_mod.ko: %d\n", ret);
+ return ERR_PTR(-EINVAL);
+ }
+ } else if (!strncmp(name, "loopback", 8)) {
+ /*
+ * Automatically load the tcm_loop fabric module when the
+ * following is called:
+ *
+ * mkdir -p $CONFIGFS/target/loopback
+ */
+ ret = request_module("tcm_loop");
+ if (ret < 0) {
+ pr_debug("request_module() failed for"
+ " tcm_loop.ko: %d\n", ret);
+ return ERR_PTR(-EINVAL);
+ }
}
+
+ tf = target_core_get_fabric(name);
}
- tf = target_core_get_fabric(name);
if (!tf) {
- pr_err("target_core_get_fabric() failed for %s\n",
- name);
+ pr_debug("target_core_get_fabric() failed for %s\n",
+ name);
return ERR_PTR(-EINVAL);
}
pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:"
- " %s\n", tf->tf_name);
+ " %s\n", tf->tf_ops->fabric_name);
/*
* On a successful target_core_get_fabric() lookup, the returned
* struct target_fabric_configfs *tf will contain a usage reference.
*/
pr_debug("Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n",
- &TF_CIT_TMPL(tf)->tfc_wwn_cit);
+ &tf->tf_wwn_cit);
- tf->tf_group.default_groups = tf->tf_default_groups;
- tf->tf_group.default_groups[0] = &tf->tf_disc_group;
- tf->tf_group.default_groups[1] = NULL;
+ config_group_init_type_name(&tf->tf_group, name, &tf->tf_wwn_cit);
- config_group_init_type_name(&tf->tf_group, name,
- &TF_CIT_TMPL(tf)->tfc_wwn_cit);
config_group_init_type_name(&tf->tf_disc_group, "discovery_auth",
- &TF_CIT_TMPL(tf)->tfc_discovery_cit);
-
- pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric:"
- " %s\n", tf->tf_group.cg_item.ci_name);
- /*
- * Setup tf_ops.tf_subsys pointer for usage with configfs_depend_item()
- */
- tf->tf_ops.tf_subsys = tf->tf_subsys;
- tf->tf_fabric = &tf->tf_group.cg_item;
- pr_debug("Target_Core_ConfigFS: REGISTER -> Set tf->tf_fabric"
- " for %s\n", name);
+ &tf->tf_discovery_cit);
+ configfs_add_default_group(&tf->tf_disc_group, &tf->tf_group);
+ pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric: %s\n",
+ config_item_name(&tf->tf_group.cg_item));
return &tf->tf_group;
}
@@ -209,30 +273,18 @@ static void target_core_deregister_fabric(
{
struct target_fabric_configfs *tf = container_of(
to_config_group(item), struct target_fabric_configfs, tf_group);
- struct config_group *tf_group;
- struct config_item *df_item;
- int i;
pr_debug("Target_Core_ConfigFS: DEREGISTER -> Looking up %s in"
" tf list\n", config_item_name(item));
pr_debug("Target_Core_ConfigFS: DEREGISTER -> located fabric:"
- " %s\n", tf->tf_name);
+ " %s\n", tf->tf_ops->fabric_name);
atomic_dec(&tf->tf_access_cnt);
- pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing"
- " tf->tf_fabric for %s\n", tf->tf_name);
- tf->tf_fabric = NULL;
-
pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing ci"
" %s\n", config_item_name(item));
- tf_group = &tf->tf_group;
- for (i = 0; tf_group->default_groups[i]; i++) {
- df_item = &tf_group->default_groups[i]->cg_item;
- tf_group->default_groups[i] = NULL;
- config_item_put(df_item);
- }
+ configfs_remove_default_groups(&tf->tf_group);
config_item_put(item);
}
@@ -246,14 +298,14 @@ static struct configfs_group_operations target_core_fabric_group_ops = {
*/
static struct configfs_attribute *target_core_fabric_item_attrs[] = {
&target_core_item_attr_version,
+ &target_core_item_attr_dbroot,
NULL,
};
/*
* Provides Fabrics Groups and Item Attributes for /sys/kernel/config/target/
*/
-static struct config_item_type target_core_fabrics_item = {
- .ct_item_ops = &target_core_fabric_item_ops,
+static const struct config_item_type target_core_fabrics_item = {
.ct_group_ops = &target_core_fabric_group_ops,
.ct_attrs = target_core_fabric_item_attrs,
.ct_owner = THIS_MODULE,
@@ -268,96 +320,61 @@ static struct configfs_subsystem target_core_fabrics = {
},
};
-static struct configfs_subsystem *target_core_subsystem[] = {
- &target_core_fabrics,
- NULL,
-};
+int target_depend_item(struct config_item *item)
+{
+ return configfs_depend_item(&target_core_fabrics, item);
+}
+EXPORT_SYMBOL(target_depend_item);
+
+void target_undepend_item(struct config_item *item)
+{
+ return configfs_undepend_item(item);
+}
+EXPORT_SYMBOL(target_undepend_item);
/*##############################################################################
// Start functions called by external Target Fabrics Modules
//############################################################################*/
-
-/*
- * First function called by fabric modules to:
- *
- * 1) Allocate a struct target_fabric_configfs and save the *fabric_cit pointer.
- * 2) Add struct target_fabric_configfs to g_tf_list
- * 3) Return struct target_fabric_configfs to fabric module to be passed
- * into target_fabric_configfs_register().
- */
-struct target_fabric_configfs *target_fabric_configfs_init(
- struct module *fabric_mod,
- const char *name)
+static int target_disable_feature(struct se_portal_group *se_tpg)
{
- struct target_fabric_configfs *tf;
-
- if (!(name)) {
- pr_err("Unable to locate passed fabric name\n");
- return ERR_PTR(-EINVAL);
- }
- if (strlen(name) >= TARGET_FABRIC_NAME_SIZE) {
- pr_err("Passed name: %s exceeds TARGET_FABRIC"
- "_NAME_SIZE\n", name);
- return ERR_PTR(-EINVAL);
- }
-
- tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL);
- if (!tf)
- return ERR_PTR(-ENOMEM);
-
- INIT_LIST_HEAD(&tf->tf_list);
- atomic_set(&tf->tf_access_cnt, 0);
- /*
- * Setup the default generic struct config_item_type's (cits) in
- * struct target_fabric_configfs->tf_cit_tmpl
- */
- tf->tf_module = fabric_mod;
- target_fabric_setup_cits(tf);
-
- tf->tf_subsys = target_core_subsystem[0];
- snprintf(tf->tf_name, TARGET_FABRIC_NAME_SIZE, "%s", name);
-
- mutex_lock(&g_tf_lock);
- list_add_tail(&tf->tf_list, &g_tf_list);
- mutex_unlock(&g_tf_lock);
+ return 0;
+}
- pr_debug("<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>"
- ">>>>>>>>>>>>>>\n");
- pr_debug("Initialized struct target_fabric_configfs: %p for"
- " %s\n", tf, tf->tf_name);
- return tf;
+static u32 target_default_get_inst_index(struct se_portal_group *se_tpg)
+{
+ return 1;
}
-EXPORT_SYMBOL(target_fabric_configfs_init);
-/*
- * Called by fabric plugins after FAILED target_fabric_configfs_register() call.
- */
-void target_fabric_configfs_free(
- struct target_fabric_configfs *tf)
+static u32 target_default_sess_get_index(struct se_session *se_sess)
{
- mutex_lock(&g_tf_lock);
- list_del(&tf->tf_list);
- mutex_unlock(&g_tf_lock);
+ return 0;
+}
- kfree(tf);
+static void target_set_default_node_attributes(struct se_node_acl *se_acl)
+{
}
-EXPORT_SYMBOL(target_fabric_configfs_free);
-/*
- * Perform a sanity check of the passed tf->tf_ops before completing
- * TCM fabric module registration.
- */
-static int target_fabric_tf_ops_check(
- struct target_fabric_configfs *tf)
+static int target_default_get_cmd_state(struct se_cmd *se_cmd)
{
- struct target_core_fabric_ops *tfo = &tf->tf_ops;
+ return 0;
+}
- if (!tfo->get_fabric_name) {
- pr_err("Missing tfo->get_fabric_name()\n");
+static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo)
+{
+ if (tfo->fabric_alias) {
+ if (strlen(tfo->fabric_alias) >= TARGET_FABRIC_NAME_SIZE) {
+ pr_err("Passed alias: %s exceeds "
+ "TARGET_FABRIC_NAME_SIZE\n", tfo->fabric_alias);
+ return -EINVAL;
+ }
+ }
+ if (!tfo->fabric_name) {
+ pr_err("Missing tfo->fabric_name\n");
return -EINVAL;
}
- if (!tfo->get_fabric_proto_ident) {
- pr_err("Missing tfo->get_fabric_proto_ident()\n");
+ if (strlen(tfo->fabric_name) >= TARGET_FABRIC_NAME_SIZE) {
+ pr_err("Passed name: %s exceeds "
+ "TARGET_FABRIC_NAME_SIZE\n", tfo->fabric_name);
return -EINVAL;
}
if (!tfo->tpg_get_wwn) {
@@ -368,397 +385,1282 @@ static int target_fabric_tf_ops_check(
pr_err("Missing tfo->tpg_get_tag()\n");
return -EINVAL;
}
- if (!tfo->tpg_get_default_depth) {
- pr_err("Missing tfo->tpg_get_default_depth()\n");
+ if (!tfo->release_cmd) {
+ pr_err("Missing tfo->release_cmd()\n");
return -EINVAL;
}
- if (!tfo->tpg_get_pr_transport_id) {
- pr_err("Missing tfo->tpg_get_pr_transport_id()\n");
+ if (!tfo->write_pending) {
+ pr_err("Missing tfo->write_pending()\n");
return -EINVAL;
}
- if (!tfo->tpg_get_pr_transport_id_len) {
- pr_err("Missing tfo->tpg_get_pr_transport_id_len()\n");
+ if (!tfo->queue_data_in) {
+ pr_err("Missing tfo->queue_data_in()\n");
return -EINVAL;
}
- if (!tfo->tpg_check_demo_mode) {
- pr_err("Missing tfo->tpg_check_demo_mode()\n");
+ if (!tfo->queue_status) {
+ pr_err("Missing tfo->queue_status()\n");
return -EINVAL;
}
- if (!tfo->tpg_check_demo_mode_cache) {
- pr_err("Missing tfo->tpg_check_demo_mode_cache()\n");
+ if (!tfo->queue_tm_rsp) {
+ pr_err("Missing tfo->queue_tm_rsp()\n");
return -EINVAL;
}
- if (!tfo->tpg_check_demo_mode_write_protect) {
- pr_err("Missing tfo->tpg_check_demo_mode_write_protect()\n");
+ if (!tfo->aborted_task) {
+ pr_err("Missing tfo->aborted_task()\n");
return -EINVAL;
}
- if (!tfo->tpg_check_prod_mode_write_protect) {
- pr_err("Missing tfo->tpg_check_prod_mode_write_protect()\n");
+ if (!tfo->check_stop_free) {
+ pr_err("Missing tfo->check_stop_free()\n");
return -EINVAL;
}
- if (!tfo->tpg_alloc_fabric_acl) {
- pr_err("Missing tfo->tpg_alloc_fabric_acl()\n");
+ /*
+ * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn(),
+ * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in
+ * target_core_fabric_configfs.c WWN+TPG group context code.
+ */
+ if (!tfo->fabric_make_wwn) {
+ pr_err("Missing tfo->fabric_make_wwn()\n");
return -EINVAL;
}
- if (!tfo->tpg_release_fabric_acl) {
- pr_err("Missing tfo->tpg_release_fabric_acl()\n");
+ if (!tfo->fabric_drop_wwn) {
+ pr_err("Missing tfo->fabric_drop_wwn()\n");
return -EINVAL;
}
- if (!tfo->tpg_get_inst_index) {
- pr_err("Missing tfo->tpg_get_inst_index()\n");
+ if (!tfo->fabric_make_tpg) {
+ pr_err("Missing tfo->fabric_make_tpg()\n");
return -EINVAL;
}
- if (!tfo->release_cmd) {
- pr_err("Missing tfo->release_cmd()\n");
+ if (!tfo->fabric_drop_tpg) {
+ pr_err("Missing tfo->fabric_drop_tpg()\n");
return -EINVAL;
}
- if (!tfo->shutdown_session) {
- pr_err("Missing tfo->shutdown_session()\n");
- return -EINVAL;
+
+ return 0;
+}
+
+static void target_set_default_ops(struct target_core_fabric_ops *tfo)
+{
+ if (!tfo->tpg_check_demo_mode)
+ tfo->tpg_check_demo_mode = target_disable_feature;
+
+ if (!tfo->tpg_check_demo_mode_cache)
+ tfo->tpg_check_demo_mode_cache = target_disable_feature;
+
+ if (!tfo->tpg_check_demo_mode_write_protect)
+ tfo->tpg_check_demo_mode_write_protect = target_disable_feature;
+
+ if (!tfo->tpg_check_prod_mode_write_protect)
+ tfo->tpg_check_prod_mode_write_protect = target_disable_feature;
+
+ if (!tfo->tpg_get_inst_index)
+ tfo->tpg_get_inst_index = target_default_get_inst_index;
+
+ if (!tfo->sess_get_index)
+ tfo->sess_get_index = target_default_sess_get_index;
+
+ if (!tfo->set_default_node_attributes)
+ tfo->set_default_node_attributes = target_set_default_node_attributes;
+
+ if (!tfo->get_cmd_state)
+ tfo->get_cmd_state = target_default_get_cmd_state;
+}
+
+int target_register_template(const struct target_core_fabric_ops *fo)
+{
+ struct target_core_fabric_ops *tfo;
+ struct target_fabric_configfs *tf;
+ int ret;
+
+ ret = target_fabric_tf_ops_check(fo);
+ if (ret)
+ return ret;
+
+ tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL);
+ if (!tf) {
+ pr_err("%s: could not allocate memory!\n", __func__);
+ return -ENOMEM;
+ }
+ tfo = kzalloc(sizeof(struct target_core_fabric_ops), GFP_KERNEL);
+ if (!tfo) {
+ kfree(tf);
+ pr_err("%s: could not allocate memory!\n", __func__);
+ return -ENOMEM;
+ }
+ memcpy(tfo, fo, sizeof(*tfo));
+ target_set_default_ops(tfo);
+
+ INIT_LIST_HEAD(&tf->tf_list);
+ atomic_set(&tf->tf_access_cnt, 0);
+ tf->tf_ops = tfo;
+ target_fabric_setup_cits(tf);
+
+ mutex_lock(&g_tf_lock);
+ list_add_tail(&tf->tf_list, &g_tf_list);
+ mutex_unlock(&g_tf_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(target_register_template);
+
+void target_unregister_template(const struct target_core_fabric_ops *fo)
+{
+ struct target_fabric_configfs *t;
+
+ mutex_lock(&g_tf_lock);
+ list_for_each_entry(t, &g_tf_list, tf_list) {
+ if (!strcmp(t->tf_ops->fabric_name, fo->fabric_name)) {
+ BUG_ON(atomic_read(&t->tf_access_cnt));
+ list_del(&t->tf_list);
+ mutex_unlock(&g_tf_lock);
+ /*
+ * Wait for any outstanding fabric se_deve_entry->rcu_head
+ * callbacks to complete post kfree_rcu(), before allowing
+ * fabric driver unload of TFO->module to proceed.
+ */
+ rcu_barrier();
+ kfree(t->tf_tpg_base_cit.ct_attrs);
+ kfree(t->tf_ops);
+ kfree(t);
+ return;
+ }
+ }
+ mutex_unlock(&g_tf_lock);
+}
+EXPORT_SYMBOL(target_unregister_template);
+
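+
Together, target_register_template() and target_unregister_template() replace
the old target_fabric_configfs_init()/register()/free() sequence: a fabric
module hands in one const ops table, target_fabric_tf_ops_check() rejects it
if a mandatory callback is missing, and target_set_default_ops() fills in the
optional ones. A hypothetical module would register like this (every demo_*
symbol is invented for illustration):

static const struct target_core_fabric_ops demo_ops = {
	.module			= THIS_MODULE,	/* owner, per the fabric API */
	.fabric_name		= "demo",
	/* mandatory per target_fabric_tf_ops_check(): */
	.tpg_get_wwn		= demo_get_wwn,
	.tpg_get_tag		= demo_get_tag,
	.check_stop_free	= demo_check_stop_free,
	.release_cmd		= demo_release_cmd,
	.write_pending		= demo_write_pending,
	.queue_data_in		= demo_queue_data_in,
	.queue_status		= demo_queue_status,
	.queue_tm_rsp		= demo_queue_tm_rsp,
	.aborted_task		= demo_aborted_task,
	.fabric_make_wwn	= demo_make_wwn,
	.fabric_drop_wwn	= demo_drop_wwn,
	.fabric_make_tpg	= demo_make_tpg,
	.fabric_drop_tpg	= demo_drop_tpg,
	/*
	 * tpg_check_demo_mode(), sess_get_index(), get_cmd_state() etc.
	 * may be omitted; target_set_default_ops() supplies defaults.
	 */
};

static int __init demo_init(void)
{
	return target_register_template(&demo_ops);
}
module_init(demo_init);

static void __exit demo_exit(void)
{
	target_unregister_template(&demo_ops);
}
module_exit(demo_exit);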
+/*##############################################################################
+// Stop functions called by external Target Fabrics Modules
+//############################################################################*/
+
+static inline struct se_dev_attrib *to_attrib(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct se_dev_attrib,
+ da_group);
+}
+
+/* Start functions for struct config_item_type tb_dev_attrib_cit */
+#define DEF_CONFIGFS_ATTRIB_SHOW(_name) \
+static ssize_t _name##_show(struct config_item *item, char *page) \
+{ \
+ return snprintf(page, PAGE_SIZE, "%u\n", to_attrib(item)->_name); \
+}
+
+DEF_CONFIGFS_ATTRIB_SHOW(emulate_model_alias);
+DEF_CONFIGFS_ATTRIB_SHOW(emulate_dpo);
+DEF_CONFIGFS_ATTRIB_SHOW(emulate_fua_write);
+DEF_CONFIGFS_ATTRIB_SHOW(emulate_fua_read);
+DEF_CONFIGFS_ATTRIB_SHOW(emulate_write_cache);
+DEF_CONFIGFS_ATTRIB_SHOW(emulate_ua_intlck_ctrl);
+DEF_CONFIGFS_ATTRIB_SHOW(emulate_tas);
+DEF_CONFIGFS_ATTRIB_SHOW(emulate_tpu);
+DEF_CONFIGFS_ATTRIB_SHOW(emulate_tpws);
+DEF_CONFIGFS_ATTRIB_SHOW(emulate_caw);
+DEF_CONFIGFS_ATTRIB_SHOW(emulate_3pc);
+DEF_CONFIGFS_ATTRIB_SHOW(emulate_pr);
+DEF_CONFIGFS_ATTRIB_SHOW(pi_prot_type);
+DEF_CONFIGFS_ATTRIB_SHOW(hw_pi_prot_type);
+DEF_CONFIGFS_ATTRIB_SHOW(pi_prot_verify);
+DEF_CONFIGFS_ATTRIB_SHOW(enforce_pr_isids);
+DEF_CONFIGFS_ATTRIB_SHOW(is_nonrot);
+DEF_CONFIGFS_ATTRIB_SHOW(emulate_rest_reord);
+DEF_CONFIGFS_ATTRIB_SHOW(force_pr_aptpl);
+DEF_CONFIGFS_ATTRIB_SHOW(hw_block_size);
+DEF_CONFIGFS_ATTRIB_SHOW(block_size);
+DEF_CONFIGFS_ATTRIB_SHOW(hw_max_sectors);
+DEF_CONFIGFS_ATTRIB_SHOW(optimal_sectors);
+DEF_CONFIGFS_ATTRIB_SHOW(hw_queue_depth);
+DEF_CONFIGFS_ATTRIB_SHOW(queue_depth);
+DEF_CONFIGFS_ATTRIB_SHOW(max_unmap_lba_count);
+DEF_CONFIGFS_ATTRIB_SHOW(max_unmap_block_desc_count);
+DEF_CONFIGFS_ATTRIB_SHOW(unmap_granularity);
+DEF_CONFIGFS_ATTRIB_SHOW(unmap_granularity_alignment);
+DEF_CONFIGFS_ATTRIB_SHOW(unmap_zeroes_data);
+DEF_CONFIGFS_ATTRIB_SHOW(max_write_same_len);
+DEF_CONFIGFS_ATTRIB_SHOW(emulate_rsoc);
+DEF_CONFIGFS_ATTRIB_SHOW(submit_type);
+DEF_CONFIGFS_ATTRIB_SHOW(atomic_max_len);
+DEF_CONFIGFS_ATTRIB_SHOW(atomic_alignment);
+DEF_CONFIGFS_ATTRIB_SHOW(atomic_granularity);
+DEF_CONFIGFS_ATTRIB_SHOW(atomic_max_with_boundary);
+DEF_CONFIGFS_ATTRIB_SHOW(atomic_max_boundary);
+
+#define DEF_CONFIGFS_ATTRIB_STORE_U32(_name) \
+static ssize_t _name##_store(struct config_item *item, const char *page,\
+ size_t count) \
+{ \
+ struct se_dev_attrib *da = to_attrib(item); \
+ u32 val; \
+ int ret; \
+ \
+ ret = kstrtou32(page, 0, &val); \
+ if (ret < 0) \
+ return ret; \
+ da->_name = val; \
+ return count; \
+}
+
+DEF_CONFIGFS_ATTRIB_STORE_U32(max_unmap_lba_count);
+DEF_CONFIGFS_ATTRIB_STORE_U32(max_unmap_block_desc_count);
+DEF_CONFIGFS_ATTRIB_STORE_U32(unmap_granularity);
+DEF_CONFIGFS_ATTRIB_STORE_U32(unmap_granularity_alignment);
+DEF_CONFIGFS_ATTRIB_STORE_U32(max_write_same_len);
+
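+
Each DEF_CONFIGFS_ATTRIB_STORE_U32() line above expands mechanically; for
example DEF_CONFIGFS_ATTRIB_STORE_U32(unmap_granularity) produces:

static ssize_t unmap_granularity_store(struct config_item *item,
				       const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	u32 val;
	int ret;

	ret = kstrtou32(page, 0, &val);
	if (ret < 0)
		return ret;
	da->unmap_granularity = val;
	return count;
}

which pairs with the unmap_granularity_show() generated by
DEF_CONFIGFS_ATTRIB_SHOW() earlier.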
+#define DEF_CONFIGFS_ATTRIB_STORE_BOOL(_name) \
+static ssize_t _name##_store(struct config_item *item, const char *page, \
+ size_t count) \
+{ \
+ struct se_dev_attrib *da = to_attrib(item); \
+ bool flag; \
+ int ret; \
+ \
+ ret = kstrtobool(page, &flag); \
+ if (ret < 0) \
+ return ret; \
+ da->_name = flag; \
+ return count; \
+}
+
+DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_fua_write);
+DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_caw);
+DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_3pc);
+DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_pr);
+DEF_CONFIGFS_ATTRIB_STORE_BOOL(enforce_pr_isids);
+DEF_CONFIGFS_ATTRIB_STORE_BOOL(is_nonrot);
+
+#define DEF_CONFIGFS_ATTRIB_STORE_STUB(_name) \
+static ssize_t _name##_store(struct config_item *item, const char *page,\
+ size_t count) \
+{ \
+ printk_once(KERN_WARNING \
+ "ignoring deprecated %s attribute\n", \
+ __stringify(_name)); \
+ return count; \
+}
+
+DEF_CONFIGFS_ATTRIB_STORE_STUB(emulate_dpo);
+DEF_CONFIGFS_ATTRIB_STORE_STUB(emulate_fua_read);
+
+static void dev_set_t10_wwn_model_alias(struct se_device *dev)
+{
+ const char *configname;
+
+ configname = config_item_name(&dev->dev_group.cg_item);
+ if (strlen(configname) >= INQUIRY_MODEL_LEN) {
+ pr_warn("dev[%p]: Backstore name '%s' is too long for "
+ "INQUIRY_MODEL, truncating to 15 characters\n", dev,
+ configname);
}
- if (!tfo->close_session) {
- pr_err("Missing tfo->close_session()\n");
+ /*
+ * XXX We can't use sizeof(dev->t10_wwn.model) (INQUIRY_MODEL_LEN + 1)
+ * here without potentially breaking existing setups, so continue to
+ * truncate one byte shorter than what can be carried in INQUIRY.
+ */
+ strscpy(dev->t10_wwn.model, configname, INQUIRY_MODEL_LEN);
+}
+
+static ssize_t emulate_model_alias_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_dev_attrib *da = to_attrib(item);
+ struct se_device *dev = da->da_dev;
+ bool flag;
+ int ret;
+
+ if (dev->export_count) {
+ pr_err("dev[%p]: Unable to change model alias"
+ " while export_count is %d\n",
+ dev, dev->export_count);
return -EINVAL;
}
- if (!tfo->sess_get_index) {
- pr_err("Missing tfo->sess_get_index()\n");
+
+ ret = kstrtobool(page, &flag);
+ if (ret < 0)
+ return ret;
+
+ BUILD_BUG_ON(sizeof(dev->t10_wwn.model) != INQUIRY_MODEL_LEN + 1);
+ if (flag)
+ dev_set_t10_wwn_model_alias(dev);
+ else
+ strscpy(dev->t10_wwn.model, dev->transport->inquiry_prod);
+ da->emulate_model_alias = flag;
+ return count;
+}
+
+static ssize_t emulate_write_cache_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_dev_attrib *da = to_attrib(item);
+ bool flag;
+ int ret;
+
+ ret = kstrtobool(page, &flag);
+ if (ret < 0)
+ return ret;
+
+ if (flag && da->da_dev->transport->get_write_cache) {
+ pr_err("emulate_write_cache not supported for this device\n");
return -EINVAL;
}
- if (!tfo->write_pending) {
- pr_err("Missing tfo->write_pending()\n");
+
+ da->emulate_write_cache = flag;
+ pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
+ da->da_dev, flag);
+ return count;
+}
+
+static ssize_t emulate_ua_intlck_ctrl_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_dev_attrib *da = to_attrib(item);
+ u32 val;
+ int ret;
+
+ ret = kstrtou32(page, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val != TARGET_UA_INTLCK_CTRL_CLEAR
+ && val != TARGET_UA_INTLCK_CTRL_NO_CLEAR
+ && val != TARGET_UA_INTLCK_CTRL_ESTABLISH_UA) {
+ pr_err("Illegal value %d\n", val);
return -EINVAL;
}
- if (!tfo->write_pending_status) {
- pr_err("Missing tfo->write_pending_status()\n");
+
+ if (da->da_dev->export_count) {
+ pr_err("dev[%p]: Unable to change SE Device"
+ " UA_INTRLCK_CTRL while export_count is %d\n",
+ da->da_dev, da->da_dev->export_count);
return -EINVAL;
}
- if (!tfo->set_default_node_attributes) {
- pr_err("Missing tfo->set_default_node_attributes()\n");
+ da->emulate_ua_intlck_ctrl = val;
+ pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
+ da->da_dev, val);
+ return count;
+}
+
+static ssize_t emulate_tas_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_dev_attrib *da = to_attrib(item);
+ bool flag;
+ int ret;
+
+ ret = kstrtobool(page, &flag);
+ if (ret < 0)
+ return ret;
+
+ if (da->da_dev->export_count) {
+ pr_err("dev[%p]: Unable to change SE Device TAS while"
+ " export_count is %d\n",
+ da->da_dev, da->da_dev->export_count);
return -EINVAL;
}
- if (!tfo->get_task_tag) {
- pr_err("Missing tfo->get_task_tag()\n");
+ da->emulate_tas = flag;
+ pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
+ da->da_dev, flag ? "Enabled" : "Disabled");
+
+ return count;
+}
+
+static int target_try_configure_unmap(struct se_device *dev,
+ const char *config_opt)
+{
+ if (!dev->transport->configure_unmap) {
+ pr_err("Generic Block Discard not supported\n");
+ return -ENOSYS;
+ }
+
+ if (!target_dev_configured(dev)) {
+ pr_err("Generic Block Discard setup for %s requires device to be configured\n",
+ config_opt);
+ return -ENODEV;
+ }
+
+ if (!dev->transport->configure_unmap(dev)) {
+ pr_err("Generic Block Discard setup for %s failed\n",
+ config_opt);
+ return -ENOSYS;
+ }
+
+ return 0;
+}
+
+static ssize_t emulate_tpu_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_dev_attrib *da = to_attrib(item);
+ struct se_device *dev = da->da_dev;
+ bool flag;
+ int ret;
+
+ ret = kstrtobool(page, &flag);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * We expect this value to be non-zero when generic Block Layer
+ * Discard support is detected by iblock_create_virtdevice().
+ */
+ if (flag && !da->max_unmap_block_desc_count) {
+ ret = target_try_configure_unmap(dev, "emulate_tpu");
+ if (ret)
+ return ret;
+ }
+
+ da->emulate_tpu = flag;
+ pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
+ da->da_dev, flag);
+ return count;
+}
+
+static ssize_t emulate_tpws_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_dev_attrib *da = to_attrib(item);
+ struct se_device *dev = da->da_dev;
+ bool flag;
+ int ret;
+
+ ret = kstrtobool(page, &flag);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * We expect this value to be non-zero when generic Block Layer
+ * Discard support is detected by iblock_create_virtdevice().
+ */
+ if (flag && !da->max_unmap_block_desc_count) {
+ ret = target_try_configure_unmap(dev, "emulate_tpws");
+ if (ret)
+ return ret;
+ }
+
+ da->emulate_tpws = flag;
+ pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
+ da->da_dev, flag);
+ return count;
+}
+
+static ssize_t pi_prot_type_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_dev_attrib *da = to_attrib(item);
+ int old_prot = da->pi_prot_type, ret;
+ struct se_device *dev = da->da_dev;
+ u32 flag;
+
+ ret = kstrtou32(page, 0, &flag);
+ if (ret < 0)
+ return ret;
+
+ if (flag != 0 && flag != 1 && flag != 2 && flag != 3) {
+		pr_err("Illegal value %u for pi_prot_type\n", flag);
return -EINVAL;
}
- if (!tfo->get_cmd_state) {
- pr_err("Missing tfo->get_cmd_state()\n");
+ if (flag == 2) {
+ pr_err("DIF TYPE2 protection currently not supported\n");
+ return -ENOSYS;
+ }
+ if (da->hw_pi_prot_type) {
+ pr_warn("DIF protection enabled on underlying hardware,"
+ " ignoring\n");
+ return count;
+ }
+ if (!dev->transport->init_prot || !dev->transport->free_prot) {
+		/* 0 is the only allowed value for non-supporting backends */
+ if (flag == 0)
+ return count;
+
+ pr_err("DIF protection not supported by backend: %s\n",
+ dev->transport->name);
+ return -ENOSYS;
+ }
+ if (!target_dev_configured(dev)) {
+ pr_err("DIF protection requires device to be configured\n");
+ return -ENODEV;
+ }
+ if (dev->export_count) {
+ pr_err("dev[%p]: Unable to change SE Device PROT type while"
+ " export_count is %d\n", dev, dev->export_count);
return -EINVAL;
}
- if (!tfo->queue_data_in) {
- pr_err("Missing tfo->queue_data_in()\n");
+
+ da->pi_prot_type = flag;
+
+ if (flag && !old_prot) {
+ ret = dev->transport->init_prot(dev);
+ if (ret) {
+ da->pi_prot_type = old_prot;
+ da->pi_prot_verify = (bool) da->pi_prot_type;
+ return ret;
+ }
+
+ } else if (!flag && old_prot) {
+ dev->transport->free_prot(dev);
+ }
+
+ da->pi_prot_verify = (bool) da->pi_prot_type;
+ pr_debug("dev[%p]: SE Device Protection Type: %d\n", dev, flag);
+ return count;
+}
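+
+/*
+ * pi_prot_type accepts the T10 DIF types 0 (disabled), 1 and 3; type 2 is
+ * rejected above with -ENOSYS.  Toggling between zero and non-zero moves
+ * backend protection through init_prot()/free_prot().  Illustrative usage
+ * (hypothetical path, assuming a DIF-capable backend):
+ *
+ *	echo 1 > $TARGET/$HBA/$STORAGE_OBJECT/attrib/pi_prot_type
+ *	echo 0 > $TARGET/$HBA/$STORAGE_OBJECT/attrib/pi_prot_type
+ */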
+
+/* always zero, but attr needs to remain RW to avoid userspace breakage */
+static ssize_t pi_prot_format_show(struct config_item *item, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "0\n");
+}
+
+static ssize_t pi_prot_format_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_dev_attrib *da = to_attrib(item);
+ struct se_device *dev = da->da_dev;
+ bool flag;
+ int ret;
+
+ ret = kstrtobool(page, &flag);
+ if (ret < 0)
+ return ret;
+
+ if (!flag)
+ return count;
+
+ if (!dev->transport->format_prot) {
+ pr_err("DIF protection format not supported by backend %s\n",
+ dev->transport->name);
+ return -ENOSYS;
+ }
+ if (!target_dev_configured(dev)) {
+ pr_err("DIF protection format requires device to be configured\n");
+ return -ENODEV;
+ }
+ if (dev->export_count) {
+ pr_err("dev[%p]: Unable to format SE Device PROT type while"
+ " export_count is %d\n", dev, dev->export_count);
return -EINVAL;
}
- if (!tfo->queue_status) {
- pr_err("Missing tfo->queue_status()\n");
+
+ ret = dev->transport->format_prot(dev);
+ if (ret)
+ return ret;
+
+ pr_debug("dev[%p]: SE Device Protection Format complete\n", dev);
+ return count;
+}
+
+static ssize_t pi_prot_verify_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_dev_attrib *da = to_attrib(item);
+ bool flag;
+ int ret;
+
+ ret = kstrtobool(page, &flag);
+ if (ret < 0)
+ return ret;
+
+ if (!flag) {
+ da->pi_prot_verify = flag;
+ return count;
+ }
+ if (da->hw_pi_prot_type) {
+ pr_warn("DIF protection enabled on underlying hardware,"
+ " ignoring\n");
+ return count;
+ }
+ if (!da->pi_prot_type) {
+ pr_warn("DIF protection not supported by backend, ignoring\n");
+ return count;
+ }
+ da->pi_prot_verify = flag;
+
+ return count;
+}
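+
+/*
+ * Note the asymmetry above: clearing pi_prot_verify always succeeds,
+ * while setting it is silently ignored (with a warning) when hardware
+ * DIF is active or when pi_prot_type has not been enabled first.
+ */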
+
+static ssize_t force_pr_aptpl_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_dev_attrib *da = to_attrib(item);
+ bool flag;
+ int ret;
+
+ ret = kstrtobool(page, &flag);
+ if (ret < 0)
+ return ret;
+ if (da->da_dev->export_count) {
+ pr_err("dev[%p]: Unable to set force_pr_aptpl while"
+ " export_count is %d\n",
+ da->da_dev, da->da_dev->export_count);
return -EINVAL;
}
- if (!tfo->queue_tm_rsp) {
- pr_err("Missing tfo->queue_tm_rsp()\n");
+
+ da->force_pr_aptpl = flag;
+ pr_debug("dev[%p]: SE Device force_pr_aptpl: %d\n", da->da_dev, flag);
+ return count;
+}
+
+static ssize_t emulate_rest_reord_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_dev_attrib *da = to_attrib(item);
+ bool flag;
+ int ret;
+
+ ret = kstrtobool(page, &flag);
+ if (ret < 0)
+ return ret;
+
+	if (flag) {
+		pr_err("dev[%p]: SE Device emulation of restricted"
+			" reordering not implemented\n", da->da_dev);
+ return -ENOSYS;
+ }
+ da->emulate_rest_reord = flag;
+ pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n",
+ da->da_dev, flag);
+ return count;
+}
+
+static ssize_t unmap_zeroes_data_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_dev_attrib *da = to_attrib(item);
+ struct se_device *dev = da->da_dev;
+ bool flag;
+ int ret;
+
+ ret = kstrtobool(page, &flag);
+ if (ret < 0)
+ return ret;
+
+ if (da->da_dev->export_count) {
+ pr_err("dev[%p]: Unable to change SE Device"
+ " unmap_zeroes_data while export_count is %d\n",
+ da->da_dev, da->da_dev->export_count);
return -EINVAL;
}
/*
- * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn()
- * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in
- * target_core_fabric_configfs.c WWN+TPG group context code.
+ * We expect this value to be non-zero when generic Block Layer
+	 * Discard support is detected in iblock_configure_device().
*/
- if (!tfo->fabric_make_wwn) {
- pr_err("Missing tfo->fabric_make_wwn()\n");
+ if (flag && !da->max_unmap_block_desc_count) {
+ ret = target_try_configure_unmap(dev, "unmap_zeroes_data");
+ if (ret)
+ return ret;
+ }
+ da->unmap_zeroes_data = flag;
+ pr_debug("dev[%p]: SE Device Thin Provisioning LBPRZ bit: %d\n",
+ da->da_dev, flag);
+ return count;
+}
+
+/*
+ * Note, this can only be called on unexported SE Device Object.
+ */
+static ssize_t queue_depth_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_dev_attrib *da = to_attrib(item);
+ struct se_device *dev = da->da_dev;
+ u32 val;
+ int ret;
+
+ ret = kstrtou32(page, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ if (dev->export_count) {
+ pr_err("dev[%p]: Unable to change SE Device TCQ while"
+ " export_count is %d\n",
+ dev, dev->export_count);
return -EINVAL;
}
- if (!tfo->fabric_drop_wwn) {
- pr_err("Missing tfo->fabric_drop_wwn()\n");
+ if (!val) {
+ pr_err("dev[%p]: Illegal ZERO value for queue_depth\n", dev);
return -EINVAL;
}
- if (!tfo->fabric_make_tpg) {
- pr_err("Missing tfo->fabric_make_tpg()\n");
+
+ if (val > dev->dev_attrib.queue_depth) {
+ if (val > dev->dev_attrib.hw_queue_depth) {
+ pr_err("dev[%p]: Passed queue_depth:"
+ " %u exceeds TCM/SE_Device MAX"
+ " TCQ: %u\n", dev, val,
+ dev->dev_attrib.hw_queue_depth);
+ return -EINVAL;
+ }
+ }
+ da->queue_depth = dev->queue_depth = val;
+ pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n", dev, val);
+ return count;
+}
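+
+/*
+ * Illustrative usage (hypothetical value); a zero depth is rejected, as
+ * is any value above the backend's reported hw_queue_depth:
+ *
+ *	echo 64 > $TARGET/$HBA/$STORAGE_OBJECT/attrib/queue_depth
+ */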
+
+static ssize_t optimal_sectors_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_dev_attrib *da = to_attrib(item);
+ u32 val;
+ int ret;
+
+ ret = kstrtou32(page, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ if (da->da_dev->export_count) {
+ pr_err("dev[%p]: Unable to change SE Device"
+ " optimal_sectors while export_count is %d\n",
+ da->da_dev, da->da_dev->export_count);
return -EINVAL;
}
- if (!tfo->fabric_drop_tpg) {
- pr_err("Missing tfo->fabric_drop_tpg()\n");
+ if (val > da->hw_max_sectors) {
+ pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
+ " greater than hw_max_sectors: %u\n",
+ da->da_dev, val, da->hw_max_sectors);
return -EINVAL;
}
- return 0;
+ da->optimal_sectors = val;
+ pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
+ da->da_dev, val);
+ return count;
}
-/*
- * Called 2nd from fabric module with returned parameter of
- * struct target_fabric_configfs * from target_fabric_configfs_init().
- *
- * Upon a successful registration, the new fabric's struct config_item is
- * return. Also, a pointer to this struct is set in the passed
- * struct target_fabric_configfs.
- */
-int target_fabric_configfs_register(
- struct target_fabric_configfs *tf)
+static ssize_t block_size_store(struct config_item *item,
+ const char *page, size_t count)
{
+ struct se_dev_attrib *da = to_attrib(item);
+ u32 val;
int ret;
- if (!tf) {
- pr_err("Unable to locate target_fabric_configfs"
- " pointer\n");
+ ret = kstrtou32(page, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ if (da->da_dev->export_count) {
+ pr_err("dev[%p]: Unable to change SE Device block_size"
+ " while export_count is %d\n",
+ da->da_dev, da->da_dev->export_count);
return -EINVAL;
}
- if (!tf->tf_subsys) {
- pr_err("Unable to target struct config_subsystem"
- " pointer\n");
+
+ if (val != 512 && val != 1024 && val != 2048 && val != 4096) {
+		pr_err("dev[%p]: Illegal value for block_size: %u"
+ " for SE device, must be 512, 1024, 2048 or 4096\n",
+ da->da_dev, val);
return -EINVAL;
}
- ret = target_fabric_tf_ops_check(tf);
+
+ da->block_size = val;
+
+ pr_debug("dev[%p]: SE Device block_size changed to %u\n",
+ da->da_dev, val);
+ return count;
+}
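+
+/*
+ * Illustrative usage; only 512, 1024, 2048 and 4096 byte logical blocks
+ * are accepted, and only while no fabric exports exist:
+ *
+ *	echo 4096 > $TARGET/$HBA/$STORAGE_OBJECT/attrib/block_size
+ */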
+
+static ssize_t alua_support_show(struct config_item *item, char *page)
+{
+ struct se_dev_attrib *da = to_attrib(item);
+ u8 flags = da->da_dev->transport_flags;
+
+ return snprintf(page, PAGE_SIZE, "%d\n",
+ flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ? 0 : 1);
+}
+
+static ssize_t alua_support_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_dev_attrib *da = to_attrib(item);
+ struct se_device *dev = da->da_dev;
+ bool flag, oldflag;
+ int ret;
+
+ ret = kstrtobool(page, &flag);
if (ret < 0)
return ret;
- pr_debug("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>"
- ">>>>>>>>>>\n");
- return 0;
+ oldflag = !(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA);
+ if (flag == oldflag)
+ return count;
+
+ if (!(dev->transport->transport_flags_changeable &
+ TRANSPORT_FLAG_PASSTHROUGH_ALUA)) {
+ pr_err("dev[%p]: Unable to change SE Device alua_support:"
+ " alua_support has fixed value\n", dev);
+ return -ENOSYS;
+ }
+
+ if (flag)
+ dev->transport_flags &= ~TRANSPORT_FLAG_PASSTHROUGH_ALUA;
+ else
+ dev->transport_flags |= TRANSPORT_FLAG_PASSTHROUGH_ALUA;
+ return count;
}
-EXPORT_SYMBOL(target_fabric_configfs_register);
-void target_fabric_configfs_deregister(
- struct target_fabric_configfs *tf)
+static ssize_t pgr_support_show(struct config_item *item, char *page)
{
- struct configfs_subsystem *su;
+ struct se_dev_attrib *da = to_attrib(item);
+ u8 flags = da->da_dev->transport_flags;
- if (!tf) {
- pr_err("Unable to locate passed target_fabric_"
- "configfs\n");
- return;
- }
- su = tf->tf_subsys;
- if (!su) {
- pr_err("Unable to locate passed tf->tf_subsys"
- " pointer\n");
- return;
- }
- pr_debug("<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>>>"
- ">>>>>>>>>>>>\n");
- mutex_lock(&g_tf_lock);
- if (atomic_read(&tf->tf_access_cnt)) {
- mutex_unlock(&g_tf_lock);
- pr_err("Non zero tf->tf_access_cnt for fabric %s\n",
- tf->tf_name);
- BUG();
+ return snprintf(page, PAGE_SIZE, "%d\n",
+ flags & TRANSPORT_FLAG_PASSTHROUGH_PGR ? 0 : 1);
+}
+
+static ssize_t pgr_support_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_dev_attrib *da = to_attrib(item);
+ struct se_device *dev = da->da_dev;
+ bool flag, oldflag;
+ int ret;
+
+ ret = kstrtobool(page, &flag);
+ if (ret < 0)
+ return ret;
+
+ oldflag = !(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR);
+ if (flag == oldflag)
+ return count;
+
+ if (!(dev->transport->transport_flags_changeable &
+ TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
+ pr_err("dev[%p]: Unable to change SE Device pgr_support:"
+ " pgr_support has fixed value\n", dev);
+ return -ENOSYS;
}
- list_del(&tf->tf_list);
- mutex_unlock(&g_tf_lock);
- pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing tf:"
- " %s\n", tf->tf_name);
- tf->tf_module = NULL;
- tf->tf_subsys = NULL;
- kfree(tf);
+ if (flag)
+ dev->transport_flags &= ~TRANSPORT_FLAG_PASSTHROUGH_PGR;
+ else
+ dev->transport_flags |= TRANSPORT_FLAG_PASSTHROUGH_PGR;
+ return count;
+}
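+
+/*
+ * alua_support and pgr_support expose the inverse of the passthrough
+ * transport flags: writing 1 clears TRANSPORT_FLAG_PASSTHROUGH_{ALUA,PGR}
+ * (enabling in-core emulation) and writing 0 sets it.  A change is only
+ * permitted for backends that mark the corresponding flag in
+ * transport_flags_changeable; otherwise the stores above return -ENOSYS.
+ */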
+
+static ssize_t emulate_rsoc_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_dev_attrib *da = to_attrib(item);
+ bool flag;
+ int ret;
+
+ ret = kstrtobool(page, &flag);
+ if (ret < 0)
+ return ret;
- pr_debug("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>>>>>>"
- ">>>>>\n");
+ da->emulate_rsoc = flag;
+ pr_debug("dev[%p]: SE Device REPORT_SUPPORTED_OPERATION_CODES_EMULATION flag: %d\n",
+ da->da_dev, flag);
+ return count;
}
-EXPORT_SYMBOL(target_fabric_configfs_deregister);
-/*##############################################################################
-// Stop functions called by external Target Fabrics Modules
-//############################################################################*/
+static ssize_t submit_type_store(struct config_item *item, const char *page,
+ size_t count)
+{
+ struct se_dev_attrib *da = to_attrib(item);
+ int ret;
+ u8 val;
+
+ ret = kstrtou8(page, 0, &val);
+ if (ret < 0)
+ return ret;
-/* Start functions for struct config_item_type target_core_dev_attrib_cit */
+ if (val > TARGET_QUEUE_SUBMIT)
+ return -EINVAL;
-#define DEF_DEV_ATTRIB_SHOW(_name) \
-static ssize_t target_core_dev_show_attr_##_name( \
- struct se_dev_attrib *da, \
- char *page) \
-{ \
- return snprintf(page, PAGE_SIZE, "%u\n", \
- (u32)da->da_dev->dev_attrib._name); \
+ da->submit_type = val;
+ return count;
}
-#define DEF_DEV_ATTRIB_STORE(_name) \
-static ssize_t target_core_dev_store_attr_##_name( \
- struct se_dev_attrib *da, \
- const char *page, \
- size_t count) \
-{ \
- unsigned long val; \
- int ret; \
- \
- ret = strict_strtoul(page, 0, &val); \
- if (ret < 0) { \
- pr_err("strict_strtoul() failed with" \
- " ret: %d\n", ret); \
- return -EINVAL; \
- } \
- ret = se_dev_set_##_name(da->da_dev, (u32)val); \
- \
- return (!ret) ? count : -EINVAL; \
+CONFIGFS_ATTR(, emulate_model_alias);
+CONFIGFS_ATTR(, emulate_dpo);
+CONFIGFS_ATTR(, emulate_fua_write);
+CONFIGFS_ATTR(, emulate_fua_read);
+CONFIGFS_ATTR(, emulate_write_cache);
+CONFIGFS_ATTR(, emulate_ua_intlck_ctrl);
+CONFIGFS_ATTR(, emulate_tas);
+CONFIGFS_ATTR(, emulate_tpu);
+CONFIGFS_ATTR(, emulate_tpws);
+CONFIGFS_ATTR(, emulate_caw);
+CONFIGFS_ATTR(, emulate_3pc);
+CONFIGFS_ATTR(, emulate_pr);
+CONFIGFS_ATTR(, emulate_rsoc);
+CONFIGFS_ATTR(, pi_prot_type);
+CONFIGFS_ATTR_RO(, hw_pi_prot_type);
+CONFIGFS_ATTR(, pi_prot_format);
+CONFIGFS_ATTR(, pi_prot_verify);
+CONFIGFS_ATTR(, enforce_pr_isids);
+CONFIGFS_ATTR(, is_nonrot);
+CONFIGFS_ATTR(, emulate_rest_reord);
+CONFIGFS_ATTR(, force_pr_aptpl);
+CONFIGFS_ATTR_RO(, hw_block_size);
+CONFIGFS_ATTR(, block_size);
+CONFIGFS_ATTR_RO(, hw_max_sectors);
+CONFIGFS_ATTR(, optimal_sectors);
+CONFIGFS_ATTR_RO(, hw_queue_depth);
+CONFIGFS_ATTR(, queue_depth);
+CONFIGFS_ATTR(, max_unmap_lba_count);
+CONFIGFS_ATTR(, max_unmap_block_desc_count);
+CONFIGFS_ATTR(, unmap_granularity);
+CONFIGFS_ATTR(, unmap_granularity_alignment);
+CONFIGFS_ATTR(, unmap_zeroes_data);
+CONFIGFS_ATTR(, max_write_same_len);
+CONFIGFS_ATTR(, alua_support);
+CONFIGFS_ATTR(, pgr_support);
+CONFIGFS_ATTR(, submit_type);
+CONFIGFS_ATTR_RO(, atomic_max_len);
+CONFIGFS_ATTR_RO(, atomic_alignment);
+CONFIGFS_ATTR_RO(, atomic_granularity);
+CONFIGFS_ATTR_RO(, atomic_max_with_boundary);
+CONFIGFS_ATTR_RO(, atomic_max_boundary);
+
+/*
+ * dev_attrib attributes for devices using the target core SBC/SPC
+ * interpreter. Any backend using spc_parse_cdb should be using
+ * these.
+ */
+struct configfs_attribute *sbc_attrib_attrs[] = {
+ &attr_emulate_model_alias,
+ &attr_emulate_dpo,
+ &attr_emulate_fua_write,
+ &attr_emulate_fua_read,
+ &attr_emulate_write_cache,
+ &attr_emulate_ua_intlck_ctrl,
+ &attr_emulate_tas,
+ &attr_emulate_tpu,
+ &attr_emulate_tpws,
+ &attr_emulate_caw,
+ &attr_emulate_3pc,
+ &attr_emulate_pr,
+ &attr_pi_prot_type,
+ &attr_hw_pi_prot_type,
+ &attr_pi_prot_format,
+ &attr_pi_prot_verify,
+ &attr_enforce_pr_isids,
+ &attr_is_nonrot,
+ &attr_emulate_rest_reord,
+ &attr_force_pr_aptpl,
+ &attr_hw_block_size,
+ &attr_block_size,
+ &attr_hw_max_sectors,
+ &attr_optimal_sectors,
+ &attr_hw_queue_depth,
+ &attr_queue_depth,
+ &attr_max_unmap_lba_count,
+ &attr_max_unmap_block_desc_count,
+ &attr_unmap_granularity,
+ &attr_unmap_granularity_alignment,
+ &attr_unmap_zeroes_data,
+ &attr_max_write_same_len,
+ &attr_alua_support,
+ &attr_pgr_support,
+ &attr_emulate_rsoc,
+ &attr_submit_type,
+ &attr_atomic_alignment,
+ &attr_atomic_max_len,
+ &attr_atomic_granularity,
+ &attr_atomic_max_with_boundary,
+ &attr_atomic_max_boundary,
+ NULL,
+};
+EXPORT_SYMBOL(sbc_attrib_attrs);
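+
+/*
+ * Backends wire this array up through their target_backend_ops; a
+ * hypothetical minimal sketch (not part of this change):
+ *
+ *	static const struct target_backend_ops foo_ops = {
+ *		.name			= "foo",
+ *		.tb_dev_attrib_attrs	= sbc_attrib_attrs,
+ *	};
+ */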
+
+/*
+ * Minimal dev_attrib attributes for devices passing through CDBs.
+ * In this case we only provide a few read-only attributes for
+ * backwards compatibility.
+ */
+struct configfs_attribute *passthrough_attrib_attrs[] = {
+ &attr_hw_pi_prot_type,
+ &attr_hw_block_size,
+ &attr_hw_max_sectors,
+ &attr_hw_queue_depth,
+ &attr_emulate_pr,
+ &attr_alua_support,
+ &attr_pgr_support,
+ &attr_submit_type,
+ NULL,
+};
+EXPORT_SYMBOL(passthrough_attrib_attrs);
+
+/*
+ * pr related dev_attrib attributes for devices passing through CDBs,
+ * but allowing in core pr emulation.
+ */
+struct configfs_attribute *passthrough_pr_attrib_attrs[] = {
+ &attr_enforce_pr_isids,
+ &attr_force_pr_aptpl,
+ NULL,
+};
+EXPORT_SYMBOL(passthrough_pr_attrib_attrs);
+
+TB_CIT_SETUP_DRV(dev_attrib, NULL, NULL);
+TB_CIT_SETUP_DRV(dev_action, NULL, NULL);
+
+/* End functions for struct config_item_type tb_dev_attrib_cit */
+
+/* Start functions for struct config_item_type tb_dev_wwn_cit */
+
+static struct t10_wwn *to_t10_wwn(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct t10_wwn, t10_wwn_group);
}
-#define DEF_DEV_ATTRIB(_name) \
-DEF_DEV_ATTRIB_SHOW(_name); \
-DEF_DEV_ATTRIB_STORE(_name);
+static ssize_t target_check_inquiry_data(char *buf)
+{
+ size_t len;
+ int i;
-#define DEF_DEV_ATTRIB_RO(_name) \
-DEF_DEV_ATTRIB_SHOW(_name);
+ len = strlen(buf);
-CONFIGFS_EATTR_STRUCT(target_core_dev_attrib, se_dev_attrib);
-#define SE_DEV_ATTR(_name, _mode) \
-static struct target_core_dev_attrib_attribute \
- target_core_dev_attrib_##_name = \
- __CONFIGFS_EATTR(_name, _mode, \
- target_core_dev_show_attr_##_name, \
- target_core_dev_store_attr_##_name);
+ /*
+ * SPC 4.3.1:
+ * ASCII data fields shall contain only ASCII printable characters
+ * (i.e., code values 20h to 7Eh) and may be terminated with one or
+ * more ASCII null (00h) characters.
+ */
+ for (i = 0; i < len; i++) {
+ if (buf[i] < 0x20 || buf[i] > 0x7E) {
+ pr_err("Emulated T10 Inquiry Data contains non-ASCII-printable characters\n");
+ return -EINVAL;
+ }
+ }
-#define SE_DEV_ATTR_RO(_name); \
-static struct target_core_dev_attrib_attribute \
- target_core_dev_attrib_##_name = \
- __CONFIGFS_EATTR_RO(_name, \
- target_core_dev_show_attr_##_name);
+ return len;
+}
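+
+/*
+ * Shared by the vendor_id, product_id and revision stores below to
+ * enforce the SPC rule quoted above; returns the validated length, or
+ * -EINVAL if any non-printable character is found.
+ */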
-DEF_DEV_ATTRIB(emulate_model_alias);
-SE_DEV_ATTR(emulate_model_alias, S_IRUGO | S_IWUSR);
+/*
+ * STANDARD and VPD page 0x83 T10 Vendor Identification
+ */
+static ssize_t target_wwn_vendor_id_show(struct config_item *item,
+ char *page)
+{
+ return sprintf(page, "%s\n", &to_t10_wwn(item)->vendor[0]);
+}
-DEF_DEV_ATTRIB(emulate_dpo);
-SE_DEV_ATTR(emulate_dpo, S_IRUGO | S_IWUSR);
+static ssize_t target_wwn_vendor_id_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct t10_wwn *t10_wwn = to_t10_wwn(item);
+ struct se_device *dev = t10_wwn->t10_dev;
+ /* +2 to allow for a trailing (stripped) '\n' and null-terminator */
+ unsigned char buf[INQUIRY_VENDOR_LEN + 2];
+ char *stripped = NULL;
+ ssize_t len;
+ ssize_t ret;
+
+ len = strscpy(buf, page);
+ if (len > 0) {
+ /* Strip any newline added from userspace. */
+ stripped = strstrip(buf);
+ len = strlen(stripped);
+ }
+ if (len < 0 || len > INQUIRY_VENDOR_LEN) {
+ pr_err("Emulated T10 Vendor Identification exceeds"
+ " INQUIRY_VENDOR_LEN: " __stringify(INQUIRY_VENDOR_LEN)
+ "\n");
+ return -EOVERFLOW;
+ }
-DEF_DEV_ATTRIB(emulate_fua_write);
-SE_DEV_ATTR(emulate_fua_write, S_IRUGO | S_IWUSR);
+ ret = target_check_inquiry_data(stripped);
-DEF_DEV_ATTRIB(emulate_fua_read);
-SE_DEV_ATTR(emulate_fua_read, S_IRUGO | S_IWUSR);
+ if (ret < 0)
+ return ret;
-DEF_DEV_ATTRIB(emulate_write_cache);
-SE_DEV_ATTR(emulate_write_cache, S_IRUGO | S_IWUSR);
+ /*
+ * Check to see if any active exports exist. If they do exist, fail
+ * here as changing this information on the fly (underneath the
+ * initiator side OS dependent multipath code) could cause negative
+ * effects.
+ */
+ if (dev->export_count) {
+ pr_err("Unable to set T10 Vendor Identification while"
+ " active %d exports exist\n", dev->export_count);
+ return -EINVAL;
+ }
-DEF_DEV_ATTRIB(emulate_ua_intlck_ctrl);
-SE_DEV_ATTR(emulate_ua_intlck_ctrl, S_IRUGO | S_IWUSR);
+ BUILD_BUG_ON(sizeof(dev->t10_wwn.vendor) != INQUIRY_VENDOR_LEN + 1);
+ strscpy(dev->t10_wwn.vendor, stripped);
-DEF_DEV_ATTRIB(emulate_tas);
-SE_DEV_ATTR(emulate_tas, S_IRUGO | S_IWUSR);
+ pr_debug("Target_Core_ConfigFS: Set emulated T10 Vendor Identification:"
+ " %s\n", dev->t10_wwn.vendor);
-DEF_DEV_ATTRIB(emulate_tpu);
-SE_DEV_ATTR(emulate_tpu, S_IRUGO | S_IWUSR);
+ return count;
+}
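+
+/*
+ * Illustrative usage, mirroring the wwn/vpd_unit_serial example later in
+ * this file; the value is limited to INQUIRY_VENDOR_LEN printable ASCII
+ * characters and may only change while no exports are active:
+ *
+ *	echo LIO-ORG > $TARGET/$HBA/$STORAGE_OBJECT/wwn/vendor_id
+ */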
-DEF_DEV_ATTRIB(emulate_tpws);
-SE_DEV_ATTR(emulate_tpws, S_IRUGO | S_IWUSR);
+static ssize_t target_wwn_product_id_show(struct config_item *item,
+ char *page)
+{
+ return sprintf(page, "%s\n", &to_t10_wwn(item)->model[0]);
+}
-DEF_DEV_ATTRIB(enforce_pr_isids);
-SE_DEV_ATTR(enforce_pr_isids, S_IRUGO | S_IWUSR);
+static ssize_t target_wwn_product_id_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct t10_wwn *t10_wwn = to_t10_wwn(item);
+ struct se_device *dev = t10_wwn->t10_dev;
+ /* +2 to allow for a trailing (stripped) '\n' and null-terminator */
+ unsigned char buf[INQUIRY_MODEL_LEN + 2];
+ char *stripped = NULL;
+ ssize_t len;
+ ssize_t ret;
+
+ len = strscpy(buf, page);
+ if (len > 0) {
+ /* Strip any newline added from userspace. */
+ stripped = strstrip(buf);
+ len = strlen(stripped);
+ }
+ if (len < 0 || len > INQUIRY_MODEL_LEN) {
+		pr_err("Emulated T10 Model exceeds INQUIRY_MODEL_LEN: "
+ __stringify(INQUIRY_MODEL_LEN)
+ "\n");
+ return -EOVERFLOW;
+ }
-DEF_DEV_ATTRIB(is_nonrot);
-SE_DEV_ATTR(is_nonrot, S_IRUGO | S_IWUSR);
+ ret = target_check_inquiry_data(stripped);
-DEF_DEV_ATTRIB(emulate_rest_reord);
-SE_DEV_ATTR(emulate_rest_reord, S_IRUGO | S_IWUSR);
+ if (ret < 0)
+ return ret;
-DEF_DEV_ATTRIB_RO(hw_block_size);
-SE_DEV_ATTR_RO(hw_block_size);
+ /*
+ * Check to see if any active exports exist. If they do exist, fail
+ * here as changing this information on the fly (underneath the
+ * initiator side OS dependent multipath code) could cause negative
+ * effects.
+ */
+ if (dev->export_count) {
+ pr_err("Unable to set T10 Model while active %d exports exist\n",
+ dev->export_count);
+ return -EINVAL;
+ }
-DEF_DEV_ATTRIB(block_size);
-SE_DEV_ATTR(block_size, S_IRUGO | S_IWUSR);
+ BUILD_BUG_ON(sizeof(dev->t10_wwn.model) != INQUIRY_MODEL_LEN + 1);
+ strscpy(dev->t10_wwn.model, stripped);
-DEF_DEV_ATTRIB_RO(hw_max_sectors);
-SE_DEV_ATTR_RO(hw_max_sectors);
+ pr_debug("Target_Core_ConfigFS: Set emulated T10 Model Identification: %s\n",
+ dev->t10_wwn.model);
-DEF_DEV_ATTRIB(fabric_max_sectors);
-SE_DEV_ATTR(fabric_max_sectors, S_IRUGO | S_IWUSR);
+ return count;
+}
-DEF_DEV_ATTRIB(optimal_sectors);
-SE_DEV_ATTR(optimal_sectors, S_IRUGO | S_IWUSR);
+static ssize_t target_wwn_revision_show(struct config_item *item,
+ char *page)
+{
+ return sprintf(page, "%s\n", &to_t10_wwn(item)->revision[0]);
+}
-DEF_DEV_ATTRIB_RO(hw_queue_depth);
-SE_DEV_ATTR_RO(hw_queue_depth);
+static ssize_t target_wwn_revision_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct t10_wwn *t10_wwn = to_t10_wwn(item);
+ struct se_device *dev = t10_wwn->t10_dev;
+ /* +2 to allow for a trailing (stripped) '\n' and null-terminator */
+ unsigned char buf[INQUIRY_REVISION_LEN + 2];
+ char *stripped = NULL;
+ ssize_t len;
+ ssize_t ret;
+
+ len = strscpy(buf, page);
+ if (len > 0) {
+ /* Strip any newline added from userspace. */
+ stripped = strstrip(buf);
+ len = strlen(stripped);
+ }
+ if (len < 0 || len > INQUIRY_REVISION_LEN) {
+ pr_err("Emulated T10 Revision exceeds INQUIRY_REVISION_LEN: "
+ __stringify(INQUIRY_REVISION_LEN)
+ "\n");
+ return -EOVERFLOW;
+ }
-DEF_DEV_ATTRIB(queue_depth);
-SE_DEV_ATTR(queue_depth, S_IRUGO | S_IWUSR);
+ ret = target_check_inquiry_data(stripped);
-DEF_DEV_ATTRIB(max_unmap_lba_count);
-SE_DEV_ATTR(max_unmap_lba_count, S_IRUGO | S_IWUSR);
+ if (ret < 0)
+ return ret;
-DEF_DEV_ATTRIB(max_unmap_block_desc_count);
-SE_DEV_ATTR(max_unmap_block_desc_count, S_IRUGO | S_IWUSR);
+ /*
+ * Check to see if any active exports exist. If they do exist, fail
+ * here as changing this information on the fly (underneath the
+ * initiator side OS dependent multipath code) could cause negative
+ * effects.
+ */
+ if (dev->export_count) {
+ pr_err("Unable to set T10 Revision while active %d exports exist\n",
+ dev->export_count);
+ return -EINVAL;
+ }
-DEF_DEV_ATTRIB(unmap_granularity);
-SE_DEV_ATTR(unmap_granularity, S_IRUGO | S_IWUSR);
+ BUILD_BUG_ON(sizeof(dev->t10_wwn.revision) != INQUIRY_REVISION_LEN + 1);
+ strscpy(dev->t10_wwn.revision, stripped);
-DEF_DEV_ATTRIB(unmap_granularity_alignment);
-SE_DEV_ATTR(unmap_granularity_alignment, S_IRUGO | S_IWUSR);
+ pr_debug("Target_Core_ConfigFS: Set emulated T10 Revision: %s\n",
+ dev->t10_wwn.revision);
-DEF_DEV_ATTRIB(max_write_same_len);
-SE_DEV_ATTR(max_write_same_len, S_IRUGO | S_IWUSR);
+ return count;
+}
-CONFIGFS_EATTR_OPS(target_core_dev_attrib, se_dev_attrib, da_group);
+static ssize_t
+target_wwn_company_id_show(struct config_item *item,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%#08x\n",
+ to_t10_wwn(item)->company_id);
+}
-static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
- &target_core_dev_attrib_emulate_model_alias.attr,
- &target_core_dev_attrib_emulate_dpo.attr,
- &target_core_dev_attrib_emulate_fua_write.attr,
- &target_core_dev_attrib_emulate_fua_read.attr,
- &target_core_dev_attrib_emulate_write_cache.attr,
- &target_core_dev_attrib_emulate_ua_intlck_ctrl.attr,
- &target_core_dev_attrib_emulate_tas.attr,
- &target_core_dev_attrib_emulate_tpu.attr,
- &target_core_dev_attrib_emulate_tpws.attr,
- &target_core_dev_attrib_enforce_pr_isids.attr,
- &target_core_dev_attrib_is_nonrot.attr,
- &target_core_dev_attrib_emulate_rest_reord.attr,
- &target_core_dev_attrib_hw_block_size.attr,
- &target_core_dev_attrib_block_size.attr,
- &target_core_dev_attrib_hw_max_sectors.attr,
- &target_core_dev_attrib_fabric_max_sectors.attr,
- &target_core_dev_attrib_optimal_sectors.attr,
- &target_core_dev_attrib_hw_queue_depth.attr,
- &target_core_dev_attrib_queue_depth.attr,
- &target_core_dev_attrib_max_unmap_lba_count.attr,
- &target_core_dev_attrib_max_unmap_block_desc_count.attr,
- &target_core_dev_attrib_unmap_granularity.attr,
- &target_core_dev_attrib_unmap_granularity_alignment.attr,
- &target_core_dev_attrib_max_write_same_len.attr,
- NULL,
-};
+static ssize_t
+target_wwn_company_id_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct t10_wwn *t10_wwn = to_t10_wwn(item);
+ struct se_device *dev = t10_wwn->t10_dev;
+ u32 val;
+ int ret;
-static struct configfs_item_operations target_core_dev_attrib_ops = {
- .show_attribute = target_core_dev_attrib_attr_show,
- .store_attribute = target_core_dev_attrib_attr_store,
-};
+ /*
+ * The IEEE COMPANY_ID field should contain a 24-bit canonical
+ * form OUI assigned by the IEEE.
+ */
+ ret = kstrtou32(page, 0, &val);
+ if (ret < 0)
+ return ret;
-static struct config_item_type target_core_dev_attrib_cit = {
- .ct_item_ops = &target_core_dev_attrib_ops,
- .ct_attrs = target_core_dev_attrib_attrs,
- .ct_owner = THIS_MODULE,
-};
+ if (val >= 0x1000000)
+ return -EOVERFLOW;
-/* End functions for struct config_item_type target_core_dev_attrib_cit */
+ /*
+ * Check to see if any active exports exist. If they do exist, fail
+ * here as changing this information on the fly (underneath the
+ * initiator side OS dependent multipath code) could cause negative
+ * effects.
+ */
+ if (dev->export_count) {
+ pr_err("Unable to set Company ID while %u exports exist\n",
+ dev->export_count);
+ return -EINVAL;
+ }
-/* Start functions for struct config_item_type target_core_dev_wwn_cit */
+ t10_wwn->company_id = val;
-CONFIGFS_EATTR_STRUCT(target_core_dev_wwn, t10_wwn);
-#define SE_DEV_WWN_ATTR(_name, _mode) \
-static struct target_core_dev_wwn_attribute target_core_dev_wwn_##_name = \
- __CONFIGFS_EATTR(_name, _mode, \
- target_core_dev_wwn_show_attr_##_name, \
- target_core_dev_wwn_store_attr_##_name);
+ pr_debug("Target_Core_ConfigFS: Set IEEE Company ID: %#08x\n",
+ t10_wwn->company_id);
-#define SE_DEV_WWN_ATTR_RO(_name); \
-do { \
- static struct target_core_dev_wwn_attribute \
- target_core_dev_wwn_##_name = \
- __CONFIGFS_EATTR_RO(_name, \
- target_core_dev_wwn_show_attr_##_name); \
-} while (0);
+ return count;
+}
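+
+/*
+ * Illustrative usage; the value must fit in a 24-bit IEEE OUI and is
+ * parsed with kstrtou32() base 0, so decimal and 0x-prefixed hex both
+ * work (hypothetical value):
+ *
+ *	echo 0x001405 > $TARGET/$HBA/$STORAGE_OBJECT/wwn/company_id
+ */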
/*
* VPD page 0x80 Unit serial
*/
-static ssize_t target_core_dev_wwn_show_attr_vpd_unit_serial(
- struct t10_wwn *t10_wwn,
- char *page)
+static ssize_t target_wwn_vpd_unit_serial_show(struct config_item *item,
+ char *page)
{
return sprintf(page, "T10 VPD Unit Serial Number: %s\n",
- &t10_wwn->unit_serial[0]);
+ &to_t10_wwn(item)->unit_serial[0]);
}
-static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
- struct t10_wwn *t10_wwn,
- const char *page,
- size_t count)
+static ssize_t target_wwn_vpd_unit_serial_store(struct config_item *item,
+ const char *page, size_t count)
{
+ struct t10_wwn *t10_wwn = to_t10_wwn(item);
struct se_device *dev = t10_wwn->t10_dev;
- unsigned char buf[INQUIRY_VPD_SERIAL_LEN];
+ unsigned char buf[INQUIRY_VPD_SERIAL_LEN] = { };
/*
* If Linux/SCSI subsystem_api_t plugin got a VPD Unit Serial
@@ -800,7 +1702,6 @@ static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
* Also, strip any newline added from the userspace
* echo $UUID > $TARGET/$HBA/$STORAGE_OBJECT/wwn/vpd_unit_serial
*/
- memset(buf, 0, INQUIRY_VPD_SERIAL_LEN);
snprintf(buf, INQUIRY_VPD_SERIAL_LEN, "%s", page);
snprintf(dev->t10_wwn.unit_serial, INQUIRY_VPD_SERIAL_LEN,
"%s", strstrip(buf));
@@ -812,21 +1713,17 @@ static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
return count;
}
-SE_DEV_WWN_ATTR(vpd_unit_serial, S_IRUGO | S_IWUSR);
-
/*
* VPD page 0x83 Protocol Identifier
*/
-static ssize_t target_core_dev_wwn_show_attr_vpd_protocol_identifier(
- struct t10_wwn *t10_wwn,
- char *page)
+static ssize_t target_wwn_vpd_protocol_identifier_show(struct config_item *item,
+ char *page)
{
+ struct t10_wwn *t10_wwn = to_t10_wwn(item);
struct t10_vpd *vpd;
- unsigned char buf[VPD_TMP_BUF_SIZE];
+ unsigned char buf[VPD_TMP_BUF_SIZE] = { };
ssize_t len = 0;
- memset(buf, 0, VPD_TMP_BUF_SIZE);
-
spin_lock(&t10_wwn->t10_vpd_lock);
list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) {
if (!vpd->protocol_identifier_set)
@@ -844,25 +1741,15 @@ static ssize_t target_core_dev_wwn_show_attr_vpd_protocol_identifier(
return len;
}
-static ssize_t target_core_dev_wwn_store_attr_vpd_protocol_identifier(
- struct t10_wwn *t10_wwn,
- const char *page,
- size_t count)
-{
- return -ENOSYS;
-}
-
-SE_DEV_WWN_ATTR(vpd_protocol_identifier, S_IRUGO | S_IWUSR);
-
/*
* Generic wrapper for dumping VPD identifiers by association.
*/
#define DEF_DEV_WWN_ASSOC_SHOW(_name, _assoc) \
-static ssize_t target_core_dev_wwn_show_attr_##_name( \
- struct t10_wwn *t10_wwn, \
- char *page) \
+static ssize_t target_wwn_##_name##_show(struct config_item *item, \
+ char *page) \
{ \
- struct t10_vpd *vpd; \
+ struct t10_wwn *t10_wwn = to_t10_wwn(item); \
+ struct t10_vpd *vpd; \
unsigned char buf[VPD_TMP_BUF_SIZE]; \
ssize_t len = 0; \
\
@@ -894,97 +1781,54 @@ static ssize_t target_core_dev_wwn_show_attr_##_name( \
return len; \
}
-/*
- * VPD page 0x83 Association: Logical Unit
- */
+/* VPD page 0x83 Association: Logical Unit */
DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_logical_unit, 0x00);
-
-static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_logical_unit(
- struct t10_wwn *t10_wwn,
- const char *page,
- size_t count)
-{
- return -ENOSYS;
-}
-
-SE_DEV_WWN_ATTR(vpd_assoc_logical_unit, S_IRUGO | S_IWUSR);
-
-/*
- * VPD page 0x83 Association: Target Port
- */
+/* VPD page 0x83 Association: Target Port */
DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_target_port, 0x10);
-
-static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_target_port(
- struct t10_wwn *t10_wwn,
- const char *page,
- size_t count)
-{
- return -ENOSYS;
-}
-
-SE_DEV_WWN_ATTR(vpd_assoc_target_port, S_IRUGO | S_IWUSR);
-
-/*
- * VPD page 0x83 Association: SCSI Target Device
- */
+/* VPD page 0x83 Association: SCSI Target Device */
DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_scsi_target_device, 0x20);
-static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_scsi_target_device(
- struct t10_wwn *t10_wwn,
- const char *page,
- size_t count)
-{
- return -ENOSYS;
-}
-
-SE_DEV_WWN_ATTR(vpd_assoc_scsi_target_device, S_IRUGO | S_IWUSR);
-
-CONFIGFS_EATTR_OPS(target_core_dev_wwn, t10_wwn, t10_wwn_group);
+CONFIGFS_ATTR(target_wwn_, vendor_id);
+CONFIGFS_ATTR(target_wwn_, product_id);
+CONFIGFS_ATTR(target_wwn_, revision);
+CONFIGFS_ATTR(target_wwn_, company_id);
+CONFIGFS_ATTR(target_wwn_, vpd_unit_serial);
+CONFIGFS_ATTR_RO(target_wwn_, vpd_protocol_identifier);
+CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_logical_unit);
+CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_target_port);
+CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_scsi_target_device);
static struct configfs_attribute *target_core_dev_wwn_attrs[] = {
- &target_core_dev_wwn_vpd_unit_serial.attr,
- &target_core_dev_wwn_vpd_protocol_identifier.attr,
- &target_core_dev_wwn_vpd_assoc_logical_unit.attr,
- &target_core_dev_wwn_vpd_assoc_target_port.attr,
- &target_core_dev_wwn_vpd_assoc_scsi_target_device.attr,
+ &target_wwn_attr_vendor_id,
+ &target_wwn_attr_product_id,
+ &target_wwn_attr_revision,
+ &target_wwn_attr_company_id,
+ &target_wwn_attr_vpd_unit_serial,
+ &target_wwn_attr_vpd_protocol_identifier,
+ &target_wwn_attr_vpd_assoc_logical_unit,
+ &target_wwn_attr_vpd_assoc_target_port,
+ &target_wwn_attr_vpd_assoc_scsi_target_device,
NULL,
};
-static struct configfs_item_operations target_core_dev_wwn_ops = {
- .show_attribute = target_core_dev_wwn_attr_show,
- .store_attribute = target_core_dev_wwn_attr_store,
-};
-
-static struct config_item_type target_core_dev_wwn_cit = {
- .ct_item_ops = &target_core_dev_wwn_ops,
- .ct_attrs = target_core_dev_wwn_attrs,
- .ct_owner = THIS_MODULE,
-};
+TB_CIT_SETUP(dev_wwn, NULL, NULL, target_core_dev_wwn_attrs);
-/* End functions for struct config_item_type target_core_dev_wwn_cit */
+/* End functions for struct config_item_type tb_dev_wwn_cit */
-/* Start functions for struct config_item_type target_core_dev_pr_cit */
+/* Start functions for struct config_item_type tb_dev_pr_cit */
-CONFIGFS_EATTR_STRUCT(target_core_dev_pr, se_device);
-#define SE_DEV_PR_ATTR(_name, _mode) \
-static struct target_core_dev_pr_attribute target_core_dev_pr_##_name = \
- __CONFIGFS_EATTR(_name, _mode, \
- target_core_dev_pr_show_attr_##_name, \
- target_core_dev_pr_store_attr_##_name);
-
-#define SE_DEV_PR_ATTR_RO(_name); \
-static struct target_core_dev_pr_attribute target_core_dev_pr_##_name = \
- __CONFIGFS_EATTR_RO(_name, \
- target_core_dev_pr_show_attr_##_name);
+static struct se_device *pr_to_dev(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct se_device,
+ dev_pr_group);
+}
static ssize_t target_core_dev_pr_show_spc3_res(struct se_device *dev,
char *page)
{
struct se_node_acl *se_nacl;
struct t10_pr_registration *pr_reg;
- char i_buf[PR_REG_ISID_ID_LEN];
-
- memset(i_buf, 0, PR_REG_ISID_ID_LEN);
+ char i_buf[PR_REG_ISID_ID_LEN] = { };
pr_reg = dev->dev_pr_res_holder;
if (!pr_reg)
@@ -994,21 +1838,22 @@ static ssize_t target_core_dev_pr_show_spc3_res(struct se_device *dev,
core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
return sprintf(page, "SPC-3 Reservation: %s Initiator: %s%s\n",
- se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
+ se_nacl->se_tpg->se_tpg_tfo->fabric_name,
se_nacl->initiatorname, i_buf);
}
static ssize_t target_core_dev_pr_show_spc2_res(struct se_device *dev,
char *page)
{
+ struct se_session *sess = dev->reservation_holder;
struct se_node_acl *se_nacl;
ssize_t len;
- se_nacl = dev->dev_reserved_node_acl;
- if (se_nacl) {
+ if (sess) {
+ se_nacl = sess->se_node_acl;
len = sprintf(page,
"SPC-2 Reservation: %s Initiator: %s\n",
- se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
+ se_nacl->se_tpg->se_tpg_tfo->fabric_name,
se_nacl->initiatorname);
} else {
len = sprintf(page, "No SPC-2 Reservation holder\n");
@@ -1016,12 +1861,15 @@ static ssize_t target_core_dev_pr_show_spc2_res(struct se_device *dev,
return len;
}
-static ssize_t target_core_dev_pr_show_attr_res_holder(struct se_device *dev,
- char *page)
+static ssize_t target_pr_res_holder_show(struct config_item *item, char *page)
{
+ struct se_device *dev = pr_to_dev(item);
int ret;
- if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+ if (!dev->dev_attrib.emulate_pr)
+ return sprintf(page, "SPC_RESERVATIONS_DISABLED\n");
+
+ if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
return sprintf(page, "Passthrough\n");
spin_lock(&dev->dev_reservation_lock);
@@ -1033,11 +1881,10 @@ static ssize_t target_core_dev_pr_show_attr_res_holder(struct se_device *dev,
return ret;
}
-SE_DEV_PR_ATTR_RO(res_holder);
-
-static ssize_t target_core_dev_pr_show_attr_res_pr_all_tgt_pts(
- struct se_device *dev, char *page)
+static ssize_t target_pr_res_pr_all_tgt_pts_show(struct config_item *item,
+ char *page)
{
+ struct se_device *dev = pr_to_dev(item);
ssize_t len = 0;
spin_lock(&dev->dev_reservation_lock);
@@ -1055,27 +1902,21 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_all_tgt_pts(
return len;
}
-SE_DEV_PR_ATTR_RO(res_pr_all_tgt_pts);
-
-static ssize_t target_core_dev_pr_show_attr_res_pr_generation(
- struct se_device *dev, char *page)
+static ssize_t target_pr_res_pr_generation_show(struct config_item *item,
+ char *page)
{
- return sprintf(page, "0x%08x\n", dev->t10_pr.pr_generation);
+ return sprintf(page, "0x%08x\n", pr_to_dev(item)->t10_pr.pr_generation);
}
-SE_DEV_PR_ATTR_RO(res_pr_generation);
-/*
- * res_pr_holder_tg_port
- */
-static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port(
- struct se_device *dev, char *page)
+static ssize_t target_pr_res_pr_holder_tg_port_show(struct config_item *item,
+ char *page)
{
+ struct se_device *dev = pr_to_dev(item);
struct se_node_acl *se_nacl;
- struct se_lun *lun;
struct se_portal_group *se_tpg;
struct t10_pr_registration *pr_reg;
- struct target_core_fabric_ops *tfo;
+ const struct target_core_fabric_ops *tfo;
ssize_t len = 0;
spin_lock(&dev->dev_reservation_lock);
@@ -1087,29 +1928,28 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port(
se_nacl = pr_reg->pr_reg_nacl;
se_tpg = se_nacl->se_tpg;
- lun = pr_reg->pr_reg_tg_pt_lun;
tfo = se_tpg->se_tpg_tfo;
len += sprintf(page+len, "SPC-3 Reservation: %s"
- " Target Node Endpoint: %s\n", tfo->get_fabric_name(),
+ " Target Node Endpoint: %s\n", tfo->fabric_name,
tfo->tpg_get_wwn(se_tpg));
len += sprintf(page+len, "SPC-3 Reservation: Relative Port"
" Identifier Tag: %hu %s Portal Group Tag: %hu"
- " %s Logical Unit: %u\n", lun->lun_sep->sep_rtpi,
- tfo->get_fabric_name(), tfo->tpg_get_tag(se_tpg),
- tfo->get_fabric_name(), lun->unpacked_lun);
+ " %s Logical Unit: %llu\n", pr_reg->tg_pt_sep_rtpi,
+ tfo->fabric_name, tfo->tpg_get_tag(se_tpg),
+ tfo->fabric_name, pr_reg->pr_aptpl_target_lun);
out_unlock:
spin_unlock(&dev->dev_reservation_lock);
return len;
}
-SE_DEV_PR_ATTR_RO(res_pr_holder_tg_port);
-static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
- struct se_device *dev, char *page)
+static ssize_t target_pr_res_pr_registered_i_pts_show(struct config_item *item,
+ char *page)
{
- struct target_core_fabric_ops *tfo;
+ struct se_device *dev = pr_to_dev(item);
+ const struct target_core_fabric_ops *tfo;
struct t10_pr_registration *pr_reg;
unsigned char buf[384];
char i_buf[PR_REG_ISID_ID_LEN];
@@ -1128,7 +1968,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
core_pr_dump_initiator_port(pr_reg, i_buf,
PR_REG_ISID_ID_LEN);
sprintf(buf, "%s Node: %s%s Key: 0x%016Lx PRgen: 0x%08x\n",
- tfo->get_fabric_name(),
+ tfo->fabric_name,
pr_reg->pr_reg_nacl->initiatorname, i_buf, pr_reg->pr_res_key,
pr_reg->pr_res_generation);
@@ -1146,11 +1986,9 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
return len;
}
-SE_DEV_PR_ATTR_RO(res_pr_registered_i_pts);
-
-static ssize_t target_core_dev_pr_show_attr_res_pr_type(
- struct se_device *dev, char *page)
+static ssize_t target_pr_res_pr_type_show(struct config_item *item, char *page)
{
+ struct se_device *dev = pr_to_dev(item);
struct t10_pr_registration *pr_reg;
ssize_t len = 0;
@@ -1167,40 +2005,40 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_type(
return len;
}
-SE_DEV_PR_ATTR_RO(res_pr_type);
-
-static ssize_t target_core_dev_pr_show_attr_res_type(
- struct se_device *dev, char *page)
+static ssize_t target_pr_res_type_show(struct config_item *item, char *page)
{
- if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+ struct se_device *dev = pr_to_dev(item);
+
+ if (!dev->dev_attrib.emulate_pr)
+ return sprintf(page, "SPC_RESERVATIONS_DISABLED\n");
+ if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
return sprintf(page, "SPC_PASSTHROUGH\n");
- else if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
+ if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
return sprintf(page, "SPC2_RESERVATIONS\n");
- else
- return sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n");
-}
-SE_DEV_PR_ATTR_RO(res_type);
+ return sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n");
+}
-static ssize_t target_core_dev_pr_show_attr_res_aptpl_active(
- struct se_device *dev, char *page)
+static ssize_t target_pr_res_aptpl_active_show(struct config_item *item,
+ char *page)
{
- if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+ struct se_device *dev = pr_to_dev(item);
+
+ if (!dev->dev_attrib.emulate_pr ||
+ (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
return 0;
return sprintf(page, "APTPL Bit Status: %s\n",
(dev->t10_pr.pr_aptpl_active) ? "Activated" : "Disabled");
}
-SE_DEV_PR_ATTR_RO(res_aptpl_active);
-
-/*
- * res_aptpl_metadata
- */
-static ssize_t target_core_dev_pr_show_attr_res_aptpl_metadata(
- struct se_device *dev, char *page)
+static ssize_t target_pr_res_aptpl_metadata_show(struct config_item *item,
+ char *page)
{
- if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+ struct se_device *dev = pr_to_dev(item);
+
+ if (!dev->dev_attrib.emulate_pr ||
+ (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
return 0;
 	return sprintf(page, "Ready to process PR APTPL metadata...\n");
@@ -1222,35 +2060,35 @@ static match_table_t tokens = {
{Opt_res_type, "res_type=%d"},
{Opt_res_scope, "res_scope=%d"},
{Opt_res_all_tg_pt, "res_all_tg_pt=%d"},
- {Opt_mapped_lun, "mapped_lun=%d"},
+ {Opt_mapped_lun, "mapped_lun=%u"},
{Opt_target_fabric, "target_fabric=%s"},
{Opt_target_node, "target_node=%s"},
{Opt_tpgt, "tpgt=%d"},
{Opt_port_rtpi, "port_rtpi=%d"},
- {Opt_target_lun, "target_lun=%d"},
+ {Opt_target_lun, "target_lun=%u"},
{Opt_err, NULL}
};
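+
+/*
+ * Illustrative res_aptpl_metadata input assembled from the tokens above;
+ * all values are hypothetical and the exact separators are handled by
+ * the parser below:
+ *
+ *	initiator_fabric=iSCSI,initiator_node=iqn.1994-05.com.example:client,
+ *	initiator_sid=10,sa_res_key=0x1234,res_holder=1,res_type=3,
+ *	mapped_lun=0,target_fabric=iSCSI,
+ *	target_node=iqn.2003-01.org.example:tgt,tpgt=1,target_lun=0
+ */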
-static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
- struct se_device *dev,
- const char *page,
- size_t count)
+static ssize_t target_pr_res_aptpl_metadata_store(struct config_item *item,
+ const char *page, size_t count)
{
+ struct se_device *dev = pr_to_dev(item);
unsigned char *i_fabric = NULL, *i_port = NULL, *isid = NULL;
unsigned char *t_fabric = NULL, *t_port = NULL;
- char *orig, *ptr, *arg_p, *opts;
+ char *orig, *ptr, *opts;
substring_t args[MAX_OPT_ARGS];
unsigned long long tmp_ll;
u64 sa_res_key = 0;
- u32 mapped_lun = 0, target_lun = 0;
+ u64 mapped_lun = 0, target_lun = 0;
int ret = -1, res_holder = 0, all_tg_pt = 0, arg, token;
- u16 port_rpti = 0, tpgt = 0;
- u8 type = 0, scope;
+ u16 tpgt = 0;
+ u8 type = 0;
- if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
- return 0;
+ if (!dev->dev_attrib.emulate_pr ||
+ (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
+ return count;
if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
- return 0;
+ return count;
if (dev->export_count) {
pr_debug("Unable to process APTPL metadata while"
@@ -1270,14 +2108,14 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
token = match_token(ptr, tokens, args);
switch (token) {
case Opt_initiator_fabric:
- i_fabric = match_strdup(&args[0]);
+ i_fabric = match_strdup(args);
if (!i_fabric) {
ret = -ENOMEM;
goto out;
}
break;
case Opt_initiator_node:
- i_port = match_strdup(&args[0]);
+ i_port = match_strdup(args);
if (!i_port) {
ret = -ENOMEM;
goto out;
@@ -1291,7 +2129,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
}
break;
case Opt_initiator_sid:
- isid = match_strdup(&args[0]);
+ isid = match_strdup(args);
if (!isid) {
ret = -ENOMEM;
goto out;
@@ -1305,15 +2143,9 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
}
break;
case Opt_sa_res_key:
- arg_p = match_strdup(&args[0]);
- if (!arg_p) {
- ret = -ENOMEM;
- goto out;
- }
- ret = strict_strtoull(arg_p, 0, &tmp_ll);
+ ret = match_u64(args, &tmp_ll);
if (ret < 0) {
- pr_err("strict_strtoull() failed for"
- " sa_res_key=\n");
+				pr_err("match_u64() failed for sa_res_key=\n");
goto out;
}
sa_res_key = (u64)tmp_ll;
@@ -1322,37 +2154,46 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
* PR APTPL Metadata for Reservation
*/
case Opt_res_holder:
- match_int(args, &arg);
+ ret = match_int(args, &arg);
+ if (ret)
+ goto out;
res_holder = arg;
break;
case Opt_res_type:
- match_int(args, &arg);
+ ret = match_int(args, &arg);
+ if (ret)
+ goto out;
type = (u8)arg;
break;
case Opt_res_scope:
- match_int(args, &arg);
- scope = (u8)arg;
+ ret = match_int(args, &arg);
+ if (ret)
+ goto out;
break;
case Opt_res_all_tg_pt:
- match_int(args, &arg);
+ ret = match_int(args, &arg);
+ if (ret)
+ goto out;
all_tg_pt = (int)arg;
break;
case Opt_mapped_lun:
- match_int(args, &arg);
- mapped_lun = (u32)arg;
+ ret = match_u64(args, &tmp_ll);
+ if (ret)
+ goto out;
+ mapped_lun = (u64)tmp_ll;
break;
/*
* PR APTPL Metadata for Target Port
*/
case Opt_target_fabric:
- t_fabric = match_strdup(&args[0]);
+ t_fabric = match_strdup(args);
if (!t_fabric) {
ret = -ENOMEM;
goto out;
}
break;
case Opt_target_node:
- t_port = match_strdup(&args[0]);
+ t_port = match_strdup(args);
if (!t_port) {
ret = -ENOMEM;
goto out;
@@ -1366,16 +2207,21 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
}
break;
case Opt_tpgt:
- match_int(args, &arg);
+ ret = match_int(args, &arg);
+ if (ret)
+ goto out;
tpgt = (u16)arg;
break;
case Opt_port_rtpi:
- match_int(args, &arg);
- port_rpti = (u16)arg;
+ ret = match_int(args, &arg);
+ if (ret)
+ goto out;
break;
case Opt_target_lun:
- match_int(args, &arg);
- target_lun = (u32)arg;
+ ret = match_u64(args, &tmp_ll);
+ if (ret)
+ goto out;
+ target_lun = (u64)tmp_ll;
break;
default:
break;
@@ -1408,81 +2254,65 @@ out:
return (ret == 0) ? count : ret;
}
-SE_DEV_PR_ATTR(res_aptpl_metadata, S_IRUGO | S_IWUSR);
-CONFIGFS_EATTR_OPS(target_core_dev_pr, se_device, dev_pr_group);
+CONFIGFS_ATTR_RO(target_pr_, res_holder);
+CONFIGFS_ATTR_RO(target_pr_, res_pr_all_tgt_pts);
+CONFIGFS_ATTR_RO(target_pr_, res_pr_generation);
+CONFIGFS_ATTR_RO(target_pr_, res_pr_holder_tg_port);
+CONFIGFS_ATTR_RO(target_pr_, res_pr_registered_i_pts);
+CONFIGFS_ATTR_RO(target_pr_, res_pr_type);
+CONFIGFS_ATTR_RO(target_pr_, res_type);
+CONFIGFS_ATTR_RO(target_pr_, res_aptpl_active);
+CONFIGFS_ATTR(target_pr_, res_aptpl_metadata);
static struct configfs_attribute *target_core_dev_pr_attrs[] = {
- &target_core_dev_pr_res_holder.attr,
- &target_core_dev_pr_res_pr_all_tgt_pts.attr,
- &target_core_dev_pr_res_pr_generation.attr,
- &target_core_dev_pr_res_pr_holder_tg_port.attr,
- &target_core_dev_pr_res_pr_registered_i_pts.attr,
- &target_core_dev_pr_res_pr_type.attr,
- &target_core_dev_pr_res_type.attr,
- &target_core_dev_pr_res_aptpl_active.attr,
- &target_core_dev_pr_res_aptpl_metadata.attr,
+ &target_pr_attr_res_holder,
+ &target_pr_attr_res_pr_all_tgt_pts,
+ &target_pr_attr_res_pr_generation,
+ &target_pr_attr_res_pr_holder_tg_port,
+ &target_pr_attr_res_pr_registered_i_pts,
+ &target_pr_attr_res_pr_type,
+ &target_pr_attr_res_type,
+ &target_pr_attr_res_aptpl_active,
+ &target_pr_attr_res_aptpl_metadata,
NULL,
};
-static struct configfs_item_operations target_core_dev_pr_ops = {
- .show_attribute = target_core_dev_pr_attr_show,
- .store_attribute = target_core_dev_pr_attr_store,
-};
+TB_CIT_SETUP(dev_pr, NULL, NULL, target_core_dev_pr_attrs);
-static struct config_item_type target_core_dev_pr_cit = {
- .ct_item_ops = &target_core_dev_pr_ops,
- .ct_attrs = target_core_dev_pr_attrs,
- .ct_owner = THIS_MODULE,
-};
+/* End functions for struct config_item_type tb_dev_pr_cit */
-/* End functions for struct config_item_type target_core_dev_pr_cit */
+/* Start functions for struct config_item_type tb_dev_cit */
-/* Start functions for struct config_item_type target_core_dev_cit */
+static inline struct se_device *to_device(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct se_device, dev_group);
+}
-static ssize_t target_core_show_dev_info(void *p, char *page)
+static ssize_t target_dev_info_show(struct config_item *item, char *page)
{
- struct se_device *dev = p;
- struct se_subsystem_api *t = dev->transport;
+ struct se_device *dev = to_device(item);
int bl = 0;
ssize_t read_bytes = 0;
transport_dump_dev_state(dev, page, &bl);
read_bytes += bl;
- read_bytes += t->show_configfs_dev_params(dev, page+read_bytes);
+ read_bytes += dev->transport->show_configfs_dev_params(dev,
+ page+read_bytes);
return read_bytes;
}
-static struct target_core_configfs_attribute target_core_attr_dev_info = {
- .attr = { .ca_owner = THIS_MODULE,
- .ca_name = "info",
- .ca_mode = S_IRUGO },
- .show = target_core_show_dev_info,
- .store = NULL,
-};
-
-static ssize_t target_core_store_dev_control(
- void *p,
- const char *page,
- size_t count)
+static ssize_t target_dev_control_store(struct config_item *item,
+ const char *page, size_t count)
{
- struct se_device *dev = p;
- struct se_subsystem_api *t = dev->transport;
+ struct se_device *dev = to_device(item);
- return t->set_configfs_dev_params(dev, page, count);
+ return dev->transport->set_configfs_dev_params(dev, page, count);
}
-static struct target_core_configfs_attribute target_core_attr_dev_control = {
- .attr = { .ca_owner = THIS_MODULE,
- .ca_name = "control",
- .ca_mode = S_IWUSR },
- .show = NULL,
- .store = target_core_store_dev_control,
-};
-
-static ssize_t target_core_show_dev_alias(void *p, char *page)
+static ssize_t target_dev_alias_show(struct config_item *item, char *page)
{
- struct se_device *dev = p;
+ struct se_device *dev = to_device(item);
if (!(dev->dev_flags & DF_USING_ALIAS))
return 0;
@@ -1490,12 +2320,10 @@ static ssize_t target_core_show_dev_alias(void *p, char *page)
return snprintf(page, PAGE_SIZE, "%s\n", dev->dev_alias);
}
-static ssize_t target_core_store_dev_alias(
- void *p,
- const char *page,
- size_t count)
+static ssize_t target_dev_alias_store(struct config_item *item,
+ const char *page, size_t count)
{
- struct se_device *dev = p;
+ struct se_device *dev = to_device(item);
struct se_hba *hba = dev->se_hba;
ssize_t read_bytes;
@@ -1522,17 +2350,9 @@ static ssize_t target_core_store_dev_alias(
return read_bytes;
}
-static struct target_core_configfs_attribute target_core_attr_dev_alias = {
- .attr = { .ca_owner = THIS_MODULE,
- .ca_name = "alias",
- .ca_mode = S_IRUGO | S_IWUSR },
- .show = target_core_show_dev_alias,
- .store = target_core_store_dev_alias,
-};
-
-static ssize_t target_core_show_dev_udev_path(void *p, char *page)
+static ssize_t target_dev_udev_path_show(struct config_item *item, char *page)
{
- struct se_device *dev = p;
+ struct se_device *dev = to_device(item);
if (!(dev->dev_flags & DF_USING_UDEV_PATH))
return 0;
@@ -1540,12 +2360,10 @@ static ssize_t target_core_show_dev_udev_path(void *p, char *page)
return snprintf(page, PAGE_SIZE, "%s\n", dev->udev_path);
}
-static ssize_t target_core_store_dev_udev_path(
- void *p,
- const char *page,
- size_t count)
+static ssize_t target_dev_udev_path_store(struct config_item *item,
+ const char *page, size_t count)
{
- struct se_device *dev = p;
+ struct se_device *dev = to_device(item);
struct se_hba *hba = dev->se_hba;
ssize_t read_bytes;
@@ -1573,27 +2391,17 @@ static ssize_t target_core_store_dev_udev_path(
return read_bytes;
}
-static struct target_core_configfs_attribute target_core_attr_dev_udev_path = {
- .attr = { .ca_owner = THIS_MODULE,
- .ca_name = "udev_path",
- .ca_mode = S_IRUGO | S_IWUSR },
- .show = target_core_show_dev_udev_path,
- .store = target_core_store_dev_udev_path,
-};
-
-static ssize_t target_core_show_dev_enable(void *p, char *page)
+static ssize_t target_dev_enable_show(struct config_item *item, char *page)
{
- struct se_device *dev = p;
+ struct se_device *dev = to_device(item);
- return snprintf(page, PAGE_SIZE, "%d\n", !!(dev->dev_flags & DF_CONFIGURED));
+ return snprintf(page, PAGE_SIZE, "%d\n", target_dev_configured(dev));
}
-static ssize_t target_core_store_dev_enable(
- void *p,
- const char *page,
- size_t count)
+static ssize_t target_dev_enable_store(struct config_item *item,
+ const char *page, size_t count)
{
- struct se_device *dev = p;
+ struct se_device *dev = to_device(item);
char *ptr;
int ret;
@@ -1610,17 +2418,9 @@ static ssize_t target_core_store_dev_enable(
return count;
}
-static struct target_core_configfs_attribute target_core_attr_dev_enable = {
- .attr = { .ca_owner = THIS_MODULE,
- .ca_name = "enable",
- .ca_mode = S_IRUGO | S_IWUSR },
- .show = target_core_show_dev_enable,
- .store = target_core_store_dev_enable,
-};
-
-static ssize_t target_core_show_alua_lu_gp(void *p, char *page)
+static ssize_t target_dev_alua_lu_gp_show(struct config_item *item, char *page)
{
- struct se_device *dev = p;
+ struct se_device *dev = to_device(item);
struct config_item *lu_ci;
struct t10_alua_lu_gp *lu_gp;
struct t10_alua_lu_gp_member *lu_gp_mem;
@@ -1642,27 +2442,24 @@ static ssize_t target_core_show_alua_lu_gp(void *p, char *page)
return len;
}
-static ssize_t target_core_store_alua_lu_gp(
- void *p,
- const char *page,
- size_t count)
+static ssize_t target_dev_alua_lu_gp_store(struct config_item *item,
+ const char *page, size_t count)
{
- struct se_device *dev = p;
+ struct se_device *dev = to_device(item);
struct se_hba *hba = dev->se_hba;
struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL;
struct t10_alua_lu_gp_member *lu_gp_mem;
- unsigned char buf[LU_GROUP_NAME_BUF];
+ unsigned char buf[LU_GROUP_NAME_BUF] = { };
int move = 0;
lu_gp_mem = dev->dev_alua_lu_gp_mem;
if (!lu_gp_mem)
- return 0;
+ return count;
if (count > LU_GROUP_NAME_BUF) {
pr_err("ALUA LU Group Alias too large!\n");
return -EINVAL;
}
- memset(buf, 0, LU_GROUP_NAME_BUF);
memcpy(buf, page, count);
/*
* Any ALUA logical unit alias besides "NULL" means we will be
@@ -1724,123 +2521,236 @@ static ssize_t target_core_store_alua_lu_gp(
return count;
}
-static struct target_core_configfs_attribute target_core_attr_dev_alua_lu_gp = {
- .attr = { .ca_owner = THIS_MODULE,
- .ca_name = "alua_lu_gp",
- .ca_mode = S_IRUGO | S_IWUSR },
- .show = target_core_show_alua_lu_gp,
- .store = target_core_store_alua_lu_gp,
-};
-
-static struct configfs_attribute *lio_core_dev_attrs[] = {
- &target_core_attr_dev_info.attr,
- &target_core_attr_dev_control.attr,
- &target_core_attr_dev_alias.attr,
- &target_core_attr_dev_udev_path.attr,
- &target_core_attr_dev_enable.attr,
- &target_core_attr_dev_alua_lu_gp.attr,
- NULL,
-};
-
-static void target_core_dev_release(struct config_item *item)
+static ssize_t target_dev_lba_map_show(struct config_item *item, char *page)
{
- struct config_group *dev_cg = to_config_group(item);
- struct se_device *dev =
- container_of(dev_cg, struct se_device, dev_group);
-
- kfree(dev_cg->default_groups);
- target_free_device(dev);
+ struct se_device *dev = to_device(item);
+ struct t10_alua_lba_map *map;
+ struct t10_alua_lba_map_member *mem;
+ char *b = page;
+ int bl = 0;
+ char state;
+
+ spin_lock(&dev->t10_alua.lba_map_lock);
+ if (!list_empty(&dev->t10_alua.lba_map_list))
+ bl += sprintf(b + bl, "%u %u\n",
+ dev->t10_alua.lba_map_segment_size,
+ dev->t10_alua.lba_map_segment_multiplier);
+ list_for_each_entry(map, &dev->t10_alua.lba_map_list, lba_map_list) {
+ bl += sprintf(b + bl, "%llu %llu",
+ map->lba_map_first_lba, map->lba_map_last_lba);
+ list_for_each_entry(mem, &map->lba_map_mem_list,
+ lba_map_mem_list) {
+ switch (mem->lba_map_mem_alua_state) {
+ case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
+ state = 'O';
+ break;
+ case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
+ state = 'A';
+ break;
+ case ALUA_ACCESS_STATE_STANDBY:
+ state = 'S';
+ break;
+ case ALUA_ACCESS_STATE_UNAVAILABLE:
+ state = 'U';
+ break;
+ default:
+ state = '.';
+ break;
+ }
+ bl += sprintf(b + bl, " %d:%c",
+ mem->lba_map_mem_alua_pg_id, state);
+ }
+ bl += sprintf(b + bl, "\n");
+ }
+ spin_unlock(&dev->t10_alua.lba_map_lock);
+ return bl;
}
-static ssize_t target_core_dev_show(struct config_item *item,
- struct configfs_attribute *attr,
- char *page)
+static ssize_t target_dev_lba_map_store(struct config_item *item,
+ const char *page, size_t count)
{
- struct config_group *dev_cg = to_config_group(item);
- struct se_device *dev =
- container_of(dev_cg, struct se_device, dev_group);
- struct target_core_configfs_attribute *tc_attr = container_of(
- attr, struct target_core_configfs_attribute, attr);
+ struct se_device *dev = to_device(item);
+ struct t10_alua_lba_map *lba_map = NULL;
+ struct list_head lba_list;
+ char *map_entries, *orig, *ptr;
+ char state;
+ int pg_num = -1, pg;
+ int ret = 0, num = 0, pg_id, alua_state;
+ unsigned long start_lba = -1, end_lba = -1;
+ unsigned long segment_size = -1, segment_mult = -1;
- if (!tc_attr->show)
- return -EINVAL;
+ orig = map_entries = kstrdup(page, GFP_KERNEL);
+ if (!map_entries)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&lba_list);
+ while ((ptr = strsep(&map_entries, "\n")) != NULL) {
+ if (!*ptr)
+ continue;
+
+ if (num == 0) {
+ if (sscanf(ptr, "%lu %lu\n",
+ &segment_size, &segment_mult) != 2) {
+ pr_err("Invalid line %d\n", num);
+ ret = -EINVAL;
+ break;
+ }
+ num++;
+ continue;
+ }
+ if (sscanf(ptr, "%lu %lu", &start_lba, &end_lba) != 2) {
+ pr_err("Invalid line %d\n", num);
+ ret = -EINVAL;
+ break;
+ }
+ ptr = strchr(ptr, ' ');
+ if (!ptr) {
+ pr_err("Invalid line %d, missing end lba\n", num);
+ ret = -EINVAL;
+ break;
+ }
+ ptr++;
+ ptr = strchr(ptr, ' ');
+ if (!ptr) {
+ pr_err("Invalid line %d, missing state definitions\n",
+ num);
+ ret = -EINVAL;
+ break;
+ }
+ ptr++;
+ lba_map = core_alua_allocate_lba_map(&lba_list,
+ start_lba, end_lba);
+ if (IS_ERR(lba_map)) {
+ ret = PTR_ERR(lba_map);
+ break;
+ }
+ pg = 0;
+ while (sscanf(ptr, "%d:%c", &pg_id, &state) == 2) {
+ switch (state) {
+ case 'O':
+ alua_state = ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED;
+ break;
+ case 'A':
+ alua_state = ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED;
+ break;
+ case 'S':
+ alua_state = ALUA_ACCESS_STATE_STANDBY;
+ break;
+ case 'U':
+ alua_state = ALUA_ACCESS_STATE_UNAVAILABLE;
+ break;
+ default:
+ pr_err("Invalid ALUA state '%c'\n", state);
+ ret = -EINVAL;
+ goto out;
+ }
- return tc_attr->show(dev, page);
+ ret = core_alua_allocate_lba_map_mem(lba_map,
+ pg_id, alua_state);
+ if (ret) {
+ pr_err("Invalid target descriptor %d:%c "
+ "at line %d\n",
+ pg_id, state, num);
+ break;
+ }
+ pg++;
+ ptr = strchr(ptr, ' ');
+ if (ptr)
+ ptr++;
+ else
+ break;
+ }
+ if (pg_num == -1)
+ pg_num = pg;
+ else if (pg != pg_num) {
+ pr_err("Only %d from %d port groups definitions "
+ "at line %d\n", pg, pg_num, num);
+ ret = -EINVAL;
+ break;
+ }
+ num++;
+ }
+out:
+ if (ret) {
+ core_alua_free_lba_map(&lba_list);
+ count = ret;
+ } else
+ core_alua_set_lba_map(dev, &lba_list,
+ segment_size, segment_mult);
+ kfree(orig);
+ return count;
}
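+
+/*
+ * A minimal usage sketch of the map format parsed above, assuming two
+ * target port groups with IDs 0 and 1: the first line carries
+ * "segment_size segment_multiplier", and every following line carries
+ * "first_lba last_lba" plus one "pg_id:state" descriptor per port group,
+ * with state O/A/S/U decoded as in the switch above. A write to
+ * core/$HBA/$DEV/lba_map could then look like:
+ *
+ *   4096 1024
+ *   0 1023 0:O 1:A
+ *   1024 2047 0:A 1:O
+ *
+ * Each range must list the same number of descriptors, or the store
+ * fails with -EINVAL.
+ */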
-static ssize_t target_core_dev_store(struct config_item *item,
- struct configfs_attribute *attr,
- const char *page, size_t count)
+CONFIGFS_ATTR_RO(target_dev_, info);
+CONFIGFS_ATTR_WO(target_dev_, control);
+CONFIGFS_ATTR(target_dev_, alias);
+CONFIGFS_ATTR(target_dev_, udev_path);
+CONFIGFS_ATTR(target_dev_, enable);
+CONFIGFS_ATTR(target_dev_, alua_lu_gp);
+CONFIGFS_ATTR(target_dev_, lba_map);
+
+static struct configfs_attribute *target_core_dev_attrs[] = {
+ &target_dev_attr_info,
+ &target_dev_attr_control,
+ &target_dev_attr_alias,
+ &target_dev_attr_udev_path,
+ &target_dev_attr_enable,
+ &target_dev_attr_alua_lu_gp,
+ &target_dev_attr_lba_map,
+ NULL,
+};
+
+static void target_core_dev_release(struct config_item *item)
{
struct config_group *dev_cg = to_config_group(item);
struct se_device *dev =
container_of(dev_cg, struct se_device, dev_group);
- struct target_core_configfs_attribute *tc_attr = container_of(
- attr, struct target_core_configfs_attribute, attr);
-
- if (!tc_attr->store)
- return -EINVAL;
- return tc_attr->store(dev, page, count);
+ target_free_device(dev);
}
-static struct configfs_item_operations target_core_dev_item_ops = {
+/*
+ * Used in target_core_fabric_configfs.c to verify a valid se_device
+ * symlink within target_fabric_port_link().
+ */
+struct configfs_item_operations target_core_dev_item_ops = {
.release = target_core_dev_release,
- .show_attribute = target_core_dev_show,
- .store_attribute = target_core_dev_store,
};
-static struct config_item_type target_core_dev_cit = {
- .ct_item_ops = &target_core_dev_item_ops,
- .ct_attrs = lio_core_dev_attrs,
- .ct_owner = THIS_MODULE,
-};
+TB_CIT_SETUP(dev, &target_core_dev_item_ops, NULL, target_core_dev_attrs);
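+
+/*
+ * A sketch of what TB_CIT_SETUP() generates here (field names assumed
+ * from its definition earlier in this file): a target_core_setup_dev_cit()
+ * helper, called from target_setup_backend_cits() below, that fills the
+ * per-backend dev config_item_type roughly as:
+ *
+ *   cit->ct_item_ops = &target_core_dev_item_ops;
+ *   cit->ct_group_ops = NULL;
+ *   cit->ct_attrs = target_core_dev_attrs;
+ *   cit->ct_owner = tb->ops->owner;
+ */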
-/* End functions for struct config_item_type target_core_dev_cit */
+/* End functions for struct config_item_type tb_dev_cit */
/* Start functions for struct config_item_type target_core_alua_lu_gp_cit */
-CONFIGFS_EATTR_STRUCT(target_core_alua_lu_gp, t10_alua_lu_gp);
-#define SE_DEV_ALUA_LU_ATTR(_name, _mode) \
-static struct target_core_alua_lu_gp_attribute \
- target_core_alua_lu_gp_##_name = \
- __CONFIGFS_EATTR(_name, _mode, \
- target_core_alua_lu_gp_show_attr_##_name, \
- target_core_alua_lu_gp_store_attr_##_name);
-
-#define SE_DEV_ALUA_LU_ATTR_RO(_name) \
-static struct target_core_alua_lu_gp_attribute \
- target_core_alua_lu_gp_##_name = \
- __CONFIGFS_EATTR_RO(_name, \
- target_core_alua_lu_gp_show_attr_##_name);
+static inline struct t10_alua_lu_gp *to_lu_gp(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct t10_alua_lu_gp,
+ lu_gp_group);
+}
-/*
- * lu_gp_id
- */
-static ssize_t target_core_alua_lu_gp_show_attr_lu_gp_id(
- struct t10_alua_lu_gp *lu_gp,
- char *page)
+static ssize_t target_lu_gp_lu_gp_id_show(struct config_item *item, char *page)
{
+ struct t10_alua_lu_gp *lu_gp = to_lu_gp(item);
+
if (!lu_gp->lu_gp_valid_id)
return 0;
-
return sprintf(page, "%hu\n", lu_gp->lu_gp_id);
}
-static ssize_t target_core_alua_lu_gp_store_attr_lu_gp_id(
- struct t10_alua_lu_gp *lu_gp,
- const char *page,
- size_t count)
+static ssize_t target_lu_gp_lu_gp_id_store(struct config_item *item,
+ const char *page, size_t count)
{
+ struct t10_alua_lu_gp *lu_gp = to_lu_gp(item);
struct config_group *alua_lu_gp_cg = &lu_gp->lu_gp_group;
unsigned long lu_gp_id;
int ret;
- ret = strict_strtoul(page, 0, &lu_gp_id);
+ ret = kstrtoul(page, 0, &lu_gp_id);
if (ret < 0) {
- pr_err("strict_strtoul() returned %d for"
+ pr_err("kstrtoul() returned %d for"
" lu_gp_id\n", ret);
- return -EINVAL;
+ return ret;
}
if (lu_gp_id > 0x0000ffff) {
pr_err("ALUA lu_gp_id: %lu exceeds maximum:"
@@ -1860,53 +2770,35 @@ static ssize_t target_core_alua_lu_gp_store_attr_lu_gp_id(
return count;
}
-SE_DEV_ALUA_LU_ATTR(lu_gp_id, S_IRUGO | S_IWUSR);
-
-/*
- * members
- */
-static ssize_t target_core_alua_lu_gp_show_attr_members(
- struct t10_alua_lu_gp *lu_gp,
- char *page)
+static ssize_t target_lu_gp_members_show(struct config_item *item, char *page)
{
- struct se_device *dev;
- struct se_hba *hba;
+ struct t10_alua_lu_gp *lu_gp = to_lu_gp(item);
struct t10_alua_lu_gp_member *lu_gp_mem;
- ssize_t len = 0, cur_len;
- unsigned char buf[LU_GROUP_NAME_BUF];
-
- memset(buf, 0, LU_GROUP_NAME_BUF);
+ const char *const end = page + PAGE_SIZE;
+ char *cur = page;
spin_lock(&lu_gp->lu_gp_lock);
list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
- dev = lu_gp_mem->lu_gp_mem_dev;
- hba = dev->se_hba;
+ struct se_device *dev = lu_gp_mem->lu_gp_mem_dev;
+ struct se_hba *hba = dev->se_hba;
- cur_len = snprintf(buf, LU_GROUP_NAME_BUF, "%s/%s\n",
+ cur += scnprintf(cur, end - cur, "%s/%s\n",
config_item_name(&hba->hba_group.cg_item),
config_item_name(&dev->dev_group.cg_item));
- cur_len++; /* Extra byte for NULL terminator */
-
- if ((cur_len + len) > PAGE_SIZE) {
- pr_warn("Ran out of lu_gp_show_attr"
- "_members buffer\n");
+ if (WARN_ON_ONCE(cur >= end))
break;
- }
- memcpy(page+len, buf, cur_len);
- len += cur_len;
}
spin_unlock(&lu_gp->lu_gp_lock);
- return len;
+ return cur - page;
}
-SE_DEV_ALUA_LU_ATTR_RO(members);
-
-CONFIGFS_EATTR_OPS(target_core_alua_lu_gp, t10_alua_lu_gp, lu_gp_group);
+CONFIGFS_ATTR(target_lu_gp_, lu_gp_id);
+CONFIGFS_ATTR_RO(target_lu_gp_, members);
static struct configfs_attribute *target_core_alua_lu_gp_attrs[] = {
- &target_core_alua_lu_gp_lu_gp_id.attr,
- &target_core_alua_lu_gp_members.attr,
+ &target_lu_gp_attr_lu_gp_id,
+ &target_lu_gp_attr_members,
NULL,
};
@@ -1920,11 +2812,9 @@ static void target_core_alua_lu_gp_release(struct config_item *item)
static struct configfs_item_operations target_core_alua_lu_gp_ops = {
.release = target_core_alua_lu_gp_release,
- .show_attribute = target_core_alua_lu_gp_attr_show,
- .store_attribute = target_core_alua_lu_gp_attr_store,
};
-static struct config_item_type target_core_alua_lu_gp_cit = {
+static const struct config_item_type target_core_alua_lu_gp_cit = {
.ct_item_ops = &target_core_alua_lu_gp_ops,
.ct_attrs = target_core_alua_lu_gp_attrs,
.ct_owner = THIS_MODULE,
@@ -1982,7 +2872,7 @@ static struct configfs_group_operations target_core_alua_lu_gps_group_ops = {
.drop_item = &target_core_alua_drop_lu_gp,
};
-static struct config_item_type target_core_alua_lu_gps_cit = {
+static const struct config_item_type target_core_alua_lu_gps_cit = {
.ct_item_ops = NULL,
.ct_group_ops = &target_core_alua_lu_gps_group_ops,
.ct_owner = THIS_MODULE,
@@ -1992,57 +2882,55 @@ static struct config_item_type target_core_alua_lu_gps_cit = {
/* Start functions for struct config_item_type target_core_alua_tg_pt_gp_cit */
-CONFIGFS_EATTR_STRUCT(target_core_alua_tg_pt_gp, t10_alua_tg_pt_gp);
-#define SE_DEV_ALUA_TG_PT_ATTR(_name, _mode) \
-static struct target_core_alua_tg_pt_gp_attribute \
- target_core_alua_tg_pt_gp_##_name = \
- __CONFIGFS_EATTR(_name, _mode, \
- target_core_alua_tg_pt_gp_show_attr_##_name, \
- target_core_alua_tg_pt_gp_store_attr_##_name);
-
-#define SE_DEV_ALUA_TG_PT_ATTR_RO(_name) \
-static struct target_core_alua_tg_pt_gp_attribute \
- target_core_alua_tg_pt_gp_##_name = \
- __CONFIGFS_EATTR_RO(_name, \
- target_core_alua_tg_pt_gp_show_attr_##_name);
+static inline struct t10_alua_tg_pt_gp *to_tg_pt_gp(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct t10_alua_tg_pt_gp,
+ tg_pt_gp_group);
+}
-/*
- * alua_access_state
- */
-static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_state(
- struct t10_alua_tg_pt_gp *tg_pt_gp,
- char *page)
+static ssize_t target_tg_pt_gp_alua_access_state_show(struct config_item *item,
+ char *page)
{
return sprintf(page, "%d\n",
- atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state));
+ to_tg_pt_gp(item)->tg_pt_gp_alua_access_state);
}
-static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(
- struct t10_alua_tg_pt_gp *tg_pt_gp,
- const char *page,
- size_t count)
+static ssize_t target_tg_pt_gp_alua_access_state_store(struct config_item *item,
+ const char *page, size_t count)
{
+ struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
unsigned long tmp;
int new_state, ret;
if (!tg_pt_gp->tg_pt_gp_valid_id) {
- pr_err("Unable to do implict ALUA on non valid"
- " tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id);
+ pr_err("Unable to do implicit ALUA on invalid tg_pt_gp ID\n");
return -EINVAL;
}
+ if (!target_dev_configured(dev)) {
+ pr_err("Unable to set alua_access_state while device is"
+ " not configured\n");
+ return -ENODEV;
+ }
- ret = strict_strtoul(page, 0, &tmp);
+ ret = kstrtoul(page, 0, &tmp);
if (ret < 0) {
pr_err("Unable to extract new ALUA access state from"
" %s\n", page);
- return -EINVAL;
+ return ret;
}
new_state = (int)tmp;
- if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)) {
- pr_err("Unable to process implict configfs ALUA"
- " transition while TPGS_IMPLICT_ALUA is disabled\n");
+ if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)) {
+ pr_err("Unable to process implicit configfs ALUA"
+ " transition while TPGS_IMPLICIT_ALUA is disabled\n");
+ return -EINVAL;
+ }
+ if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA &&
+ new_state == ALUA_ACCESS_STATE_LBA_DEPENDENT) {
+ /* LBA DEPENDENT is only allowed with implicit ALUA */
+ pr_err("Unable to process implicit configfs ALUA transition"
+ " while explicit ALUA management is enabled\n");
return -EINVAL;
}
@@ -2051,45 +2939,37 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(
return (!ret) ? count : -EINVAL;
}
-SE_DEV_ALUA_TG_PT_ATTR(alua_access_state, S_IRUGO | S_IWUSR);
-
-/*
- * alua_access_status
- */
-static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_status(
- struct t10_alua_tg_pt_gp *tg_pt_gp,
- char *page)
+static ssize_t target_tg_pt_gp_alua_access_status_show(struct config_item *item,
+ char *page)
{
+ struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
return sprintf(page, "%s\n",
core_alua_dump_status(tg_pt_gp->tg_pt_gp_alua_access_status));
}
-static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_status(
- struct t10_alua_tg_pt_gp *tg_pt_gp,
- const char *page,
- size_t count)
+static ssize_t target_tg_pt_gp_alua_access_status_store(
+ struct config_item *item, const char *page, size_t count)
{
+ struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
unsigned long tmp;
int new_status, ret;
if (!tg_pt_gp->tg_pt_gp_valid_id) {
- pr_err("Unable to do set ALUA access status on non"
- " valid tg_pt_gp ID: %hu\n",
- tg_pt_gp->tg_pt_gp_valid_id);
+ pr_err("Unable to set ALUA access status on invalid tg_pt_gp ID\n");
return -EINVAL;
}
- ret = strict_strtoul(page, 0, &tmp);
+ ret = kstrtoul(page, 0, &tmp);
if (ret < 0) {
pr_err("Unable to extract new ALUA access status"
" from %s\n", page);
- return -EINVAL;
+ return ret;
}
new_status = (int)tmp;
if ((new_status != ALUA_STATUS_NONE) &&
- (new_status != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) &&
- (new_status != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) {
+ (new_status != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
+ (new_status != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) {
pr_err("Illegal ALUA access status: 0x%02x\n",
new_status);
return -EINVAL;
@@ -2099,50 +2979,82 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_status(
return count;
}
-SE_DEV_ALUA_TG_PT_ATTR(alua_access_status, S_IRUGO | S_IWUSR);
-
-/*
- * alua_access_type
- */
-static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_type(
- struct t10_alua_tg_pt_gp *tg_pt_gp,
- char *page)
+static ssize_t target_tg_pt_gp_alua_access_type_show(struct config_item *item,
+ char *page)
{
- return core_alua_show_access_type(tg_pt_gp, page);
+ return core_alua_show_access_type(to_tg_pt_gp(item), page);
}
-static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_type(
- struct t10_alua_tg_pt_gp *tg_pt_gp,
- const char *page,
- size_t count)
+static ssize_t target_tg_pt_gp_alua_access_type_store(struct config_item *item,
+ const char *page, size_t count)
{
- return core_alua_store_access_type(tg_pt_gp, page, count);
+ return core_alua_store_access_type(to_tg_pt_gp(item), page, count);
}
-SE_DEV_ALUA_TG_PT_ATTR(alua_access_type, S_IRUGO | S_IWUSR);
+#define ALUA_SUPPORTED_STATE_ATTR(_name, _bit) \
+static ssize_t target_tg_pt_gp_alua_support_##_name##_show( \
+ struct config_item *item, char *p) \
+{ \
+ struct t10_alua_tg_pt_gp *t = to_tg_pt_gp(item); \
+ return sprintf(p, "%d\n", \
+ !!(t->tg_pt_gp_alua_supported_states & _bit)); \
+} \
+ \
+static ssize_t target_tg_pt_gp_alua_support_##_name##_store( \
+ struct config_item *item, const char *p, size_t c) \
+{ \
+ struct t10_alua_tg_pt_gp *t = to_tg_pt_gp(item); \
+ unsigned long tmp; \
+ int ret; \
+ \
+ if (!t->tg_pt_gp_valid_id) { \
+ pr_err("Unable to set " #_name " ALUA state on invalid tg_pt_gp ID\n"); \
+ return -EINVAL; \
+ } \
+ \
+ ret = kstrtoul(p, 0, &tmp); \
+ if (ret < 0) { \
+ pr_err("Invalid value '%s', must be '0' or '1'\n", p); \
+ return -EINVAL; \
+ } \
+ if (tmp > 1) { \
+ pr_err("Invalid value '%ld', must be '0' or '1'\n", tmp); \
+ return -EINVAL; \
+ } \
+ if (tmp) \
+ t->tg_pt_gp_alua_supported_states |= _bit; \
+ else \
+ t->tg_pt_gp_alua_supported_states &= ~_bit; \
+ \
+ return c; \
+}
-/*
- * alua_write_metadata
- */
-static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_write_metadata(
- struct t10_alua_tg_pt_gp *tg_pt_gp,
- char *page)
+ALUA_SUPPORTED_STATE_ATTR(transitioning, ALUA_T_SUP);
+ALUA_SUPPORTED_STATE_ATTR(offline, ALUA_O_SUP);
+ALUA_SUPPORTED_STATE_ATTR(lba_dependent, ALUA_LBD_SUP);
+ALUA_SUPPORTED_STATE_ATTR(unavailable, ALUA_U_SUP);
+ALUA_SUPPORTED_STATE_ATTR(standby, ALUA_S_SUP);
+ALUA_SUPPORTED_STATE_ATTR(active_optimized, ALUA_AO_SUP);
+ALUA_SUPPORTED_STATE_ATTR(active_nonoptimized, ALUA_AN_SUP);
+
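+/*
+ * For illustration: ALUA_SUPPORTED_STATE_ATTR(standby, ALUA_S_SUP) above
+ * expands to a target_tg_pt_gp_alua_support_standby_show()/_store() pair
+ * that reads and toggles the ALUA_S_SUP bit in
+ * tg_pt_gp_alua_supported_states. With an illustrative path, writing
+ *
+ *   echo 1 > core/$HBA/$DEV/alua/$TG_PT_GP/alua_support_standby
+ *
+ * sets the bit; only '0' and '1' are accepted, and the group must have
+ * a valid tg_pt_gp_id first.
+ */
+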
+static ssize_t target_tg_pt_gp_alua_write_metadata_show(
+ struct config_item *item, char *page)
{
- return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_write_metadata);
+ return sprintf(page, "%d\n",
+ to_tg_pt_gp(item)->tg_pt_gp_write_metadata);
}
-static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_write_metadata(
- struct t10_alua_tg_pt_gp *tg_pt_gp,
- const char *page,
- size_t count)
+static ssize_t target_tg_pt_gp_alua_write_metadata_store(
+ struct config_item *item, const char *page, size_t count)
{
+ struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
unsigned long tmp;
int ret;
- ret = strict_strtoul(page, 0, &tmp);
+ ret = kstrtoul(page, 0, &tmp);
if (ret < 0) {
pr_err("Unable to extract alua_write_metadata\n");
- return -EINVAL;
+ return ret;
}
if ((tmp != 0) && (tmp != 1)) {
@@ -2155,123 +3067,84 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_write_metadata(
return count;
}
-SE_DEV_ALUA_TG_PT_ATTR(alua_write_metadata, S_IRUGO | S_IWUSR);
-
-
-
-/*
- * nonop_delay_msecs
- */
-static ssize_t target_core_alua_tg_pt_gp_show_attr_nonop_delay_msecs(
- struct t10_alua_tg_pt_gp *tg_pt_gp,
- char *page)
+static ssize_t target_tg_pt_gp_nonop_delay_msecs_show(struct config_item *item,
+ char *page)
{
- return core_alua_show_nonop_delay_msecs(tg_pt_gp, page);
-
+ return core_alua_show_nonop_delay_msecs(to_tg_pt_gp(item), page);
}
-static ssize_t target_core_alua_tg_pt_gp_store_attr_nonop_delay_msecs(
- struct t10_alua_tg_pt_gp *tg_pt_gp,
- const char *page,
- size_t count)
+static ssize_t target_tg_pt_gp_nonop_delay_msecs_store(struct config_item *item,
+ const char *page, size_t count)
{
- return core_alua_store_nonop_delay_msecs(tg_pt_gp, page, count);
+ return core_alua_store_nonop_delay_msecs(to_tg_pt_gp(item), page,
+ count);
}
-SE_DEV_ALUA_TG_PT_ATTR(nonop_delay_msecs, S_IRUGO | S_IWUSR);
-
-/*
- * trans_delay_msecs
- */
-static ssize_t target_core_alua_tg_pt_gp_show_attr_trans_delay_msecs(
- struct t10_alua_tg_pt_gp *tg_pt_gp,
- char *page)
+static ssize_t target_tg_pt_gp_trans_delay_msecs_show(struct config_item *item,
+ char *page)
{
- return core_alua_show_trans_delay_msecs(tg_pt_gp, page);
+ return core_alua_show_trans_delay_msecs(to_tg_pt_gp(item), page);
}
-static ssize_t target_core_alua_tg_pt_gp_store_attr_trans_delay_msecs(
- struct t10_alua_tg_pt_gp *tg_pt_gp,
- const char *page,
- size_t count)
+static ssize_t target_tg_pt_gp_trans_delay_msecs_store(struct config_item *item,
+ const char *page, size_t count)
{
- return core_alua_store_trans_delay_msecs(tg_pt_gp, page, count);
+ return core_alua_store_trans_delay_msecs(to_tg_pt_gp(item), page,
+ count);
}
-SE_DEV_ALUA_TG_PT_ATTR(trans_delay_msecs, S_IRUGO | S_IWUSR);
-
-/*
- * implict_trans_secs
- */
-static ssize_t target_core_alua_tg_pt_gp_show_attr_implict_trans_secs(
- struct t10_alua_tg_pt_gp *tg_pt_gp,
- char *page)
+static ssize_t target_tg_pt_gp_implicit_trans_secs_show(
+ struct config_item *item, char *page)
{
- return core_alua_show_implict_trans_secs(tg_pt_gp, page);
+ return core_alua_show_implicit_trans_secs(to_tg_pt_gp(item), page);
}
-static ssize_t target_core_alua_tg_pt_gp_store_attr_implict_trans_secs(
- struct t10_alua_tg_pt_gp *tg_pt_gp,
- const char *page,
- size_t count)
+static ssize_t target_tg_pt_gp_implicit_trans_secs_store(
+ struct config_item *item, const char *page, size_t count)
{
- return core_alua_store_implict_trans_secs(tg_pt_gp, page, count);
+ return core_alua_store_implicit_trans_secs(to_tg_pt_gp(item), page,
+ count);
}
-SE_DEV_ALUA_TG_PT_ATTR(implict_trans_secs, S_IRUGO | S_IWUSR);
-
-/*
- * preferred
- */
-
-static ssize_t target_core_alua_tg_pt_gp_show_attr_preferred(
- struct t10_alua_tg_pt_gp *tg_pt_gp,
- char *page)
+static ssize_t target_tg_pt_gp_preferred_show(struct config_item *item,
+ char *page)
{
- return core_alua_show_preferred_bit(tg_pt_gp, page);
+ return core_alua_show_preferred_bit(to_tg_pt_gp(item), page);
}
-static ssize_t target_core_alua_tg_pt_gp_store_attr_preferred(
- struct t10_alua_tg_pt_gp *tg_pt_gp,
- const char *page,
- size_t count)
+static ssize_t target_tg_pt_gp_preferred_store(struct config_item *item,
+ const char *page, size_t count)
{
- return core_alua_store_preferred_bit(tg_pt_gp, page, count);
+ return core_alua_store_preferred_bit(to_tg_pt_gp(item), page, count);
}
-SE_DEV_ALUA_TG_PT_ATTR(preferred, S_IRUGO | S_IWUSR);
-
-/*
- * tg_pt_gp_id
- */
-static ssize_t target_core_alua_tg_pt_gp_show_attr_tg_pt_gp_id(
- struct t10_alua_tg_pt_gp *tg_pt_gp,
- char *page)
+static ssize_t target_tg_pt_gp_tg_pt_gp_id_show(struct config_item *item,
+ char *page)
{
+ struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
+
if (!tg_pt_gp->tg_pt_gp_valid_id)
return 0;
-
return sprintf(page, "%hu\n", tg_pt_gp->tg_pt_gp_id);
}
-static ssize_t target_core_alua_tg_pt_gp_store_attr_tg_pt_gp_id(
- struct t10_alua_tg_pt_gp *tg_pt_gp,
- const char *page,
- size_t count)
+static ssize_t target_tg_pt_gp_tg_pt_gp_id_store(struct config_item *item,
+ const char *page, size_t count)
{
+ struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
struct config_group *alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
unsigned long tg_pt_gp_id;
int ret;
- ret = strict_strtoul(page, 0, &tg_pt_gp_id);
+ ret = kstrtoul(page, 0, &tg_pt_gp_id);
if (ret < 0) {
- pr_err("strict_strtoul() returned %d for"
- " tg_pt_gp_id\n", ret);
- return -EINVAL;
+ pr_err("ALUA tg_pt_gp_id: invalid value '%s' for tg_pt_gp_id\n",
+ page);
+ return ret;
}
if (tg_pt_gp_id > 0x0000ffff) {
- pr_err("ALUA tg_pt_gp_id: %lu exceeds maximum:"
- " 0x0000ffff\n", tg_pt_gp_id);
+ pr_err("ALUA tg_pt_gp_id: %lu exceeds maximum: 0x0000ffff\n",
+ tg_pt_gp_id);
return -EINVAL;
}
@@ -2287,33 +3160,21 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_tg_pt_gp_id(
return count;
}
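+
+/*
+ * A minimal usage sketch (group name illustrative): a target port group
+ * created under core/$HBA/$DEV/alua/ is activated by writing its ID,
+ * which the bound check above caps at 0x0000ffff:
+ *
+ *   mkdir core/$HBA/$DEV/alua/some_tg_pt_gp
+ *   echo 16 > core/$HBA/$DEV/alua/some_tg_pt_gp/tg_pt_gp_id
+ */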
-SE_DEV_ALUA_TG_PT_ATTR(tg_pt_gp_id, S_IRUGO | S_IWUSR);
-
-/*
- * members
- */
-static ssize_t target_core_alua_tg_pt_gp_show_attr_members(
- struct t10_alua_tg_pt_gp *tg_pt_gp,
- char *page)
+static ssize_t target_tg_pt_gp_members_show(struct config_item *item,
+ char *page)
{
- struct se_port *port;
- struct se_portal_group *tpg;
+ struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
struct se_lun *lun;
- struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
ssize_t len = 0, cur_len;
- unsigned char buf[TG_PT_GROUP_NAME_BUF];
-
- memset(buf, 0, TG_PT_GROUP_NAME_BUF);
+ unsigned char buf[TG_PT_GROUP_NAME_BUF] = { };
spin_lock(&tg_pt_gp->tg_pt_gp_lock);
- list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list,
- tg_pt_gp_mem_list) {
- port = tg_pt_gp_mem->tg_pt;
- tpg = port->sep_tpg;
- lun = port->sep_lun;
+ list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
+ lun_tg_pt_gp_link) {
+ struct se_portal_group *tpg = lun->lun_tpg;
cur_len = snprintf(buf, TG_PT_GROUP_NAME_BUF, "%s/%s/tpgt_%hu"
- "/%s\n", tpg->se_tpg_tfo->get_fabric_name(),
+ "/%s\n", tpg->se_tpg_tfo->fabric_name,
tpg->se_tpg_tfo->tpg_get_wwn(tpg),
tpg->se_tpg_tfo->tpg_get_tag(tpg),
config_item_name(&lun->lun_group.cg_item));
@@ -2332,22 +3193,42 @@ static ssize_t target_core_alua_tg_pt_gp_show_attr_members(
return len;
}
-SE_DEV_ALUA_TG_PT_ATTR_RO(members);
-
-CONFIGFS_EATTR_OPS(target_core_alua_tg_pt_gp, t10_alua_tg_pt_gp,
- tg_pt_gp_group);
+CONFIGFS_ATTR(target_tg_pt_gp_, alua_access_state);
+CONFIGFS_ATTR(target_tg_pt_gp_, alua_access_status);
+CONFIGFS_ATTR(target_tg_pt_gp_, alua_access_type);
+CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_transitioning);
+CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_offline);
+CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_lba_dependent);
+CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_unavailable);
+CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_standby);
+CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_active_optimized);
+CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_active_nonoptimized);
+CONFIGFS_ATTR(target_tg_pt_gp_, alua_write_metadata);
+CONFIGFS_ATTR(target_tg_pt_gp_, nonop_delay_msecs);
+CONFIGFS_ATTR(target_tg_pt_gp_, trans_delay_msecs);
+CONFIGFS_ATTR(target_tg_pt_gp_, implicit_trans_secs);
+CONFIGFS_ATTR(target_tg_pt_gp_, preferred);
+CONFIGFS_ATTR(target_tg_pt_gp_, tg_pt_gp_id);
+CONFIGFS_ATTR_RO(target_tg_pt_gp_, members);
static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = {
- &target_core_alua_tg_pt_gp_alua_access_state.attr,
- &target_core_alua_tg_pt_gp_alua_access_status.attr,
- &target_core_alua_tg_pt_gp_alua_access_type.attr,
- &target_core_alua_tg_pt_gp_alua_write_metadata.attr,
- &target_core_alua_tg_pt_gp_nonop_delay_msecs.attr,
- &target_core_alua_tg_pt_gp_trans_delay_msecs.attr,
- &target_core_alua_tg_pt_gp_implict_trans_secs.attr,
- &target_core_alua_tg_pt_gp_preferred.attr,
- &target_core_alua_tg_pt_gp_tg_pt_gp_id.attr,
- &target_core_alua_tg_pt_gp_members.attr,
+ &target_tg_pt_gp_attr_alua_access_state,
+ &target_tg_pt_gp_attr_alua_access_status,
+ &target_tg_pt_gp_attr_alua_access_type,
+ &target_tg_pt_gp_attr_alua_support_transitioning,
+ &target_tg_pt_gp_attr_alua_support_offline,
+ &target_tg_pt_gp_attr_alua_support_lba_dependent,
+ &target_tg_pt_gp_attr_alua_support_unavailable,
+ &target_tg_pt_gp_attr_alua_support_standby,
+ &target_tg_pt_gp_attr_alua_support_active_nonoptimized,
+ &target_tg_pt_gp_attr_alua_support_active_optimized,
+ &target_tg_pt_gp_attr_alua_write_metadata,
+ &target_tg_pt_gp_attr_nonop_delay_msecs,
+ &target_tg_pt_gp_attr_trans_delay_msecs,
+ &target_tg_pt_gp_attr_implicit_trans_secs,
+ &target_tg_pt_gp_attr_preferred,
+ &target_tg_pt_gp_attr_tg_pt_gp_id,
+ &target_tg_pt_gp_attr_members,
NULL,
};
@@ -2361,11 +3242,9 @@ static void target_core_alua_tg_pt_gp_release(struct config_item *item)
static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = {
.release = target_core_alua_tg_pt_gp_release,
- .show_attribute = target_core_alua_tg_pt_gp_attr_show,
- .store_attribute = target_core_alua_tg_pt_gp_attr_store,
};
-static struct config_item_type target_core_alua_tg_pt_gp_cit = {
+static const struct config_item_type target_core_alua_tg_pt_gp_cit = {
.ct_item_ops = &target_core_alua_tg_pt_gp_ops,
.ct_attrs = target_core_alua_tg_pt_gp_attrs,
.ct_owner = THIS_MODULE,
@@ -2373,7 +3252,7 @@ static struct config_item_type target_core_alua_tg_pt_gp_cit = {
/* End functions for struct config_item_type target_core_alua_tg_pt_gp_cit */
-/* Start functions for struct config_item_type target_core_alua_tg_pt_gps_cit */
+/* Start functions for struct config_item_type tb_alua_tg_pt_gps_cit */
static struct config_group *target_core_alua_create_tg_pt_gp(
struct config_group *group,
@@ -2424,12 +3303,9 @@ static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = {
.drop_item = &target_core_alua_drop_tg_pt_gp,
};
-static struct config_item_type target_core_alua_tg_pt_gps_cit = {
- .ct_group_ops = &target_core_alua_tg_pt_gps_group_ops,
- .ct_owner = THIS_MODULE,
-};
+TB_CIT_SETUP(dev_alua_tg_pt_gps, NULL, &target_core_alua_tg_pt_gps_group_ops, NULL);
-/* End functions for struct config_item_type target_core_alua_tg_pt_gps_cit */
+/* End functions for struct config_item_type tb_alua_tg_pt_gps_cit */
/* Start functions for struct config_item_type target_core_alua_cit */
@@ -2439,7 +3315,7 @@ static struct config_item_type target_core_alua_tg_pt_gps_cit = {
* core/alua/lu_gps and core/alua/tg_pt_gps that are attached to
* target_core_alua_cit in target_core_init_configfs() below.
*/
-static struct config_item_type target_core_alua_cit = {
+static const struct config_item_type target_core_alua_cit = {
.ct_item_ops = NULL,
.ct_attrs = NULL,
.ct_owner = THIS_MODULE,
@@ -2447,7 +3323,7 @@ static struct config_item_type target_core_alua_cit = {
/* End functions for struct config_item_type target_core_alua_cit */
-/* Start functions for struct config_item_type target_core_stat_cit */
+/* Start functions for struct config_item_type tb_dev_stat_cit */
static struct config_group *target_core_stat_mkdir(
struct config_group *group,
@@ -2468,12 +3344,9 @@ static struct configfs_group_operations target_core_stat_group_ops = {
.drop_item = &target_core_stat_rmdir,
};
-static struct config_item_type target_core_stat_cit = {
- .ct_group_ops = &target_core_stat_group_ops,
- .ct_owner = THIS_MODULE,
-};
+TB_CIT_SETUP(dev_stat, NULL, &target_core_stat_group_ops, NULL);
-/* End functions for struct config_item_type target_core_stat_cit */
+/* End functions for struct config_item_type tb_dev_stat_cit */
/* Start functions for struct config_item_type target_core_hba_cit */
@@ -2482,93 +3355,74 @@ static struct config_group *target_core_make_subdev(
const char *name)
{
struct t10_alua_tg_pt_gp *tg_pt_gp;
- struct se_subsystem_api *t;
struct config_item *hba_ci = &group->cg_item;
struct se_hba *hba = item_to_hba(hba_ci);
+ struct target_backend *tb = hba->backend;
struct se_device *dev;
- struct config_group *dev_cg = NULL, *tg_pt_gp_cg = NULL;
- struct config_group *dev_stat_grp = NULL;
int errno = -ENOMEM, ret;
ret = mutex_lock_interruptible(&hba->hba_access_mutex);
if (ret)
return ERR_PTR(ret);
- /*
- * Locate the struct se_subsystem_api from parent's struct se_hba.
- */
- t = hba->transport;
dev = target_alloc_device(hba, name);
if (!dev)
goto out_unlock;
- dev_cg = &dev->dev_group;
+ config_group_init_type_name(&dev->dev_group, name, &tb->tb_dev_cit);
- dev_cg->default_groups = kmalloc(sizeof(struct config_group *) * 6,
- GFP_KERNEL);
- if (!dev_cg->default_groups)
- goto out_free_device;
+ config_group_init_type_name(&dev->dev_action_group, "action",
+ &tb->tb_dev_action_cit);
+ configfs_add_default_group(&dev->dev_action_group, &dev->dev_group);
- config_group_init_type_name(dev_cg, name, &target_core_dev_cit);
config_group_init_type_name(&dev->dev_attrib.da_group, "attrib",
- &target_core_dev_attrib_cit);
+ &tb->tb_dev_attrib_cit);
+ configfs_add_default_group(&dev->dev_attrib.da_group, &dev->dev_group);
+
config_group_init_type_name(&dev->dev_pr_group, "pr",
- &target_core_dev_pr_cit);
+ &tb->tb_dev_pr_cit);
+ configfs_add_default_group(&dev->dev_pr_group, &dev->dev_group);
+
config_group_init_type_name(&dev->t10_wwn.t10_wwn_group, "wwn",
- &target_core_dev_wwn_cit);
+ &tb->tb_dev_wwn_cit);
+ configfs_add_default_group(&dev->t10_wwn.t10_wwn_group,
+ &dev->dev_group);
+
config_group_init_type_name(&dev->t10_alua.alua_tg_pt_gps_group,
- "alua", &target_core_alua_tg_pt_gps_cit);
+ "alua", &tb->tb_dev_alua_tg_pt_gps_cit);
+ configfs_add_default_group(&dev->t10_alua.alua_tg_pt_gps_group,
+ &dev->dev_group);
+
config_group_init_type_name(&dev->dev_stat_grps.stat_group,
- "statistics", &target_core_stat_cit);
-
- dev_cg->default_groups[0] = &dev->dev_attrib.da_group;
- dev_cg->default_groups[1] = &dev->dev_pr_group;
- dev_cg->default_groups[2] = &dev->t10_wwn.t10_wwn_group;
- dev_cg->default_groups[3] = &dev->t10_alua.alua_tg_pt_gps_group;
- dev_cg->default_groups[4] = &dev->dev_stat_grps.stat_group;
- dev_cg->default_groups[5] = NULL;
+ "statistics", &tb->tb_dev_stat_cit);
+ configfs_add_default_group(&dev->dev_stat_grps.stat_group,
+ &dev->dev_group);
+
/*
* Add core/$HBA/$DEV/alua/default_tg_pt_gp
*/
tg_pt_gp = core_alua_allocate_tg_pt_gp(dev, "default_tg_pt_gp", 1);
if (!tg_pt_gp)
- goto out_free_dev_cg_default_groups;
+ goto out_free_device;
dev->t10_alua.default_tg_pt_gp = tg_pt_gp;
- tg_pt_gp_cg = &dev->t10_alua.alua_tg_pt_gps_group;
- tg_pt_gp_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
- GFP_KERNEL);
- if (!tg_pt_gp_cg->default_groups) {
- pr_err("Unable to allocate tg_pt_gp_cg->"
- "default_groups\n");
- goto out_free_tg_pt_gp;
- }
-
config_group_init_type_name(&tg_pt_gp->tg_pt_gp_group,
"default_tg_pt_gp", &target_core_alua_tg_pt_gp_cit);
- tg_pt_gp_cg->default_groups[0] = &tg_pt_gp->tg_pt_gp_group;
- tg_pt_gp_cg->default_groups[1] = NULL;
+ configfs_add_default_group(&tg_pt_gp->tg_pt_gp_group,
+ &dev->t10_alua.alua_tg_pt_gps_group);
+
/*
* Add core/$HBA/$DEV/statistics/ default groups
*/
- dev_stat_grp = &dev->dev_stat_grps.stat_group;
- dev_stat_grp->default_groups = kmalloc(sizeof(struct config_group *) * 4,
- GFP_KERNEL);
- if (!dev_stat_grp->default_groups) {
- pr_err("Unable to allocate dev_stat_grp->default_groups\n");
- goto out_free_tg_pt_gp_cg_default_groups;
- }
target_stat_setup_dev_default_groups(dev);
+ mutex_lock(&target_devices_lock);
+ target_devices++;
+ mutex_unlock(&target_devices_lock);
+
mutex_unlock(&hba->hba_access_mutex);
- return dev_cg;
+ return &dev->dev_group;
-out_free_tg_pt_gp_cg_default_groups:
- kfree(tg_pt_gp_cg->default_groups);
-out_free_tg_pt_gp:
- core_alua_free_tg_pt_gp(tg_pt_gp);
-out_free_dev_cg_default_groups:
- kfree(dev_cg->default_groups);
out_free_device:
target_free_device(dev);
out_unlock:
@@ -2584,44 +3438,31 @@ static void target_core_drop_subdev(
struct se_device *dev =
container_of(dev_cg, struct se_device, dev_group);
struct se_hba *hba;
- struct config_item *df_item;
- struct config_group *tg_pt_gp_cg, *dev_stat_grp;
- int i;
hba = item_to_hba(&dev->se_hba->hba_group.cg_item);
mutex_lock(&hba->hba_access_mutex);
- dev_stat_grp = &dev->dev_stat_grps.stat_group;
- for (i = 0; dev_stat_grp->default_groups[i]; i++) {
- df_item = &dev_stat_grp->default_groups[i]->cg_item;
- dev_stat_grp->default_groups[i] = NULL;
- config_item_put(df_item);
- }
- kfree(dev_stat_grp->default_groups);
+ configfs_remove_default_groups(&dev->dev_stat_grps.stat_group);
+ configfs_remove_default_groups(&dev->t10_alua.alua_tg_pt_gps_group);
- tg_pt_gp_cg = &dev->t10_alua.alua_tg_pt_gps_group;
- for (i = 0; tg_pt_gp_cg->default_groups[i]; i++) {
- df_item = &tg_pt_gp_cg->default_groups[i]->cg_item;
- tg_pt_gp_cg->default_groups[i] = NULL;
- config_item_put(df_item);
- }
- kfree(tg_pt_gp_cg->default_groups);
/*
* core_alua_free_tg_pt_gp() is called from ->default_tg_pt_gp
* directly from target_core_alua_tg_pt_gp_release().
*/
dev->t10_alua.default_tg_pt_gp = NULL;
- for (i = 0; dev_cg->default_groups[i]; i++) {
- df_item = &dev_cg->default_groups[i]->cg_item;
- dev_cg->default_groups[i] = NULL;
- config_item_put(df_item);
- }
+ configfs_remove_default_groups(dev_cg);
+
/*
* se_dev is released from target_core_dev_item_ops->release()
*/
config_item_put(item);
+
+ mutex_lock(&target_devices_lock);
+ target_devices--;
+ mutex_unlock(&target_devices_lock);
+
mutex_unlock(&hba->hba_access_mutex);
}
@@ -2630,34 +3471,24 @@ static struct configfs_group_operations target_core_hba_group_ops = {
.drop_item = target_core_drop_subdev,
};
-CONFIGFS_EATTR_STRUCT(target_core_hba, se_hba);
-#define SE_HBA_ATTR(_name, _mode) \
-static struct target_core_hba_attribute \
- target_core_hba_##_name = \
- __CONFIGFS_EATTR(_name, _mode, \
- target_core_hba_show_attr_##_name, \
- target_core_hba_store_attr_##_name);
-#define SE_HBA_ATTR_RO(_name) \
-static struct target_core_hba_attribute \
- target_core_hba_##_name = \
- __CONFIGFS_EATTR_RO(_name, \
- target_core_hba_show_attr_##_name);
+static inline struct se_hba *to_hba(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct se_hba, hba_group);
+}
-static ssize_t target_core_hba_show_attr_hba_info(
- struct se_hba *hba,
- char *page)
+static ssize_t target_hba_info_show(struct config_item *item, char *page)
{
+ struct se_hba *hba = to_hba(item);
+
return sprintf(page, "HBA Index: %d plugin: %s version: %s\n",
- hba->hba_id, hba->transport->name,
- TARGET_CORE_CONFIGFS_VERSION);
+ hba->hba_id, hba->backend->ops->name,
+ TARGET_CORE_VERSION);
}
-SE_HBA_ATTR_RO(hba_info);
-
-static ssize_t target_core_hba_show_attr_hba_mode(struct se_hba *hba,
- char *page)
+static ssize_t target_hba_mode_show(struct config_item *item, char *page)
{
+ struct se_hba *hba = to_hba(item);
int hba_mode = 0;
if (hba->hba_flags & HBA_FLAGS_PSCSI_MODE)
@@ -2666,20 +3497,20 @@ static ssize_t target_core_hba_show_attr_hba_mode(struct se_hba *hba,
return sprintf(page, "%d\n", hba_mode);
}
-static ssize_t target_core_hba_store_attr_hba_mode(struct se_hba *hba,
- const char *page, size_t count)
+static ssize_t target_hba_mode_store(struct config_item *item,
+ const char *page, size_t count)
{
- struct se_subsystem_api *transport = hba->transport;
+ struct se_hba *hba = to_hba(item);
unsigned long mode_flag;
int ret;
- if (transport->pmode_enable_hba == NULL)
+ if (hba->backend->ops->pmode_enable_hba == NULL)
return -EINVAL;
- ret = strict_strtoul(page, 0, &mode_flag);
+ ret = kstrtoul(page, 0, &mode_flag);
if (ret < 0) {
pr_err("Unable to extract hba mode flag: %d\n", ret);
- return -EINVAL;
+ return ret;
}
if (hba->dev_count) {
@@ -2687,7 +3518,7 @@ static ssize_t target_core_hba_store_attr_hba_mode(struct se_hba *hba,
return -EINVAL;
}
- ret = transport->pmode_enable_hba(hba, mode_flag);
+ ret = hba->backend->ops->pmode_enable_hba(hba, mode_flag);
if (ret < 0)
return -EINVAL;
if (ret > 0)
@@ -2698,9 +3529,8 @@ static ssize_t target_core_hba_store_attr_hba_mode(struct se_hba *hba,
return count;
}
-SE_HBA_ATTR(hba_mode, S_IRUGO | S_IWUSR);
-
-CONFIGFS_EATTR_OPS(target_core_hba, se_hba, hba_group);
+CONFIGFS_ATTR_RO(target_, hba_info);
+CONFIGFS_ATTR(target_, hba_mode);
static void target_core_hba_release(struct config_item *item)
{
@@ -2710,18 +3540,16 @@ static void target_core_hba_release(struct config_item *item)
}
static struct configfs_attribute *target_core_hba_attrs[] = {
- &target_core_hba_hba_info.attr,
- &target_core_hba_hba_mode.attr,
+ &target_attr_hba_info,
+ &target_attr_hba_mode,
NULL,
};
static struct configfs_item_operations target_core_hba_item_ops = {
.release = target_core_hba_release,
- .show_attribute = target_core_hba_attr_show,
- .store_attribute = target_core_hba_attr_store,
};
-static struct config_item_type target_core_hba_cit = {
+static const struct config_item_type target_core_hba_cit = {
.ct_item_ops = &target_core_hba_item_ops,
.ct_group_ops = &target_core_hba_group_ops,
.ct_attrs = target_core_hba_attrs,
@@ -2734,11 +3562,10 @@ static struct config_group *target_core_call_addhbatotarget(
{
char *se_plugin_str, *str, *str2;
struct se_hba *hba;
- char buf[TARGET_CORE_NAME_MAX_LEN];
+ char buf[TARGET_CORE_NAME_MAX_LEN] = { };
unsigned long plugin_dep_id = 0;
int ret;
- memset(buf, 0, TARGET_CORE_NAME_MAX_LEN);
if (strlen(name) >= TARGET_CORE_NAME_MAX_LEN) {
pr_err("Passed *name strlen(): %d exceeds"
" TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name),
@@ -2767,11 +3594,11 @@ static struct config_group *target_core_call_addhbatotarget(
str++; /* Skip to start of plugin dependent ID */
}
- ret = strict_strtoul(str, 0, &plugin_dep_id);
+ ret = kstrtoul(str, 0, &plugin_dep_id);
if (ret < 0) {
- pr_err("strict_strtoul() returned %d for"
+ pr_err("kstrtoul() returned %d for"
" plugin_dep_id\n", ret);
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(ret);
}
/*
* Load up TCM subsystem plugins if they have not already been loaded.
@@ -2804,7 +3631,7 @@ static struct configfs_group_operations target_core_group_ops = {
.drop_item = target_core_call_delhbafromtarget,
};
-static struct config_item_type target_core_cit = {
+static const struct config_item_type target_core_cit = {
.ct_item_ops = NULL,
.ct_group_ops = &target_core_group_ops,
.ct_attrs = NULL,
@@ -2813,11 +3640,41 @@ static struct config_item_type target_core_cit = {
/* Stop functions for struct config_item_type target_core_hba_cit */
+void target_setup_backend_cits(struct target_backend *tb)
+{
+ target_core_setup_dev_cit(tb);
+ target_core_setup_dev_action_cit(tb);
+ target_core_setup_dev_attrib_cit(tb);
+ target_core_setup_dev_pr_cit(tb);
+ target_core_setup_dev_wwn_cit(tb);
+ target_core_setup_dev_alua_tg_pt_gps_cit(tb);
+ target_core_setup_dev_stat_cit(tb);
+}
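+
+/*
+ * A usage note, assuming the caller is the backend registration path
+ * (transport_backend_register()): each registered struct target_backend
+ * gets its own set of generic dev/action/attrib/pr/wwn/alua/stat
+ * config_item_types, built by the TB_CIT_SETUP()-generated helpers
+ * invoked above around that backend's ops.
+ */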
+
+static void target_init_dbroot(void)
+{
+ struct file *fp;
+
+ snprintf(db_root_stage, DB_ROOT_LEN, DB_ROOT_PREFERRED);
+ fp = filp_open(db_root_stage, O_RDONLY, 0);
+ if (IS_ERR(fp)) {
+ pr_err("db_root: cannot open: %s\n", db_root_stage);
+ return;
+ }
+ if (!S_ISDIR(file_inode(fp)->i_mode)) {
+ filp_close(fp, NULL);
+ pr_err("db_root: not a valid directory: %s\n", db_root_stage);
+ return;
+ }
+ filp_close(fp, NULL);
+
+ strscpy(db_root, db_root_stage);
+ pr_debug("Target_Core_ConfigFS: db_root set to %s\n", db_root);
+}
+
static int __init target_core_init_configfs(void)
{
- struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL;
- struct config_group *lu_gp_cg = NULL;
- struct configfs_subsystem *subsys;
+ struct configfs_subsystem *subsys = &target_core_fabrics;
struct t10_alua_lu_gp *lu_gp;
int ret;
@@ -2825,7 +3682,6 @@ static int __init target_core_init_configfs(void)
" Engine: %s on %s/%s on "UTS_RELEASE"\n",
TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine);
- subsys = target_core_subsystem[0];
config_group_init(&subsys->su_group);
mutex_init(&subsys->su_mutex);
@@ -2836,51 +3692,24 @@ static int __init target_core_init_configfs(void)
* Create $CONFIGFS/target/core default group for HBA <-> Storage Object
* and ALUA Logical Unit Group and Target Port Group infrastructure.
*/
- target_cg = &subsys->su_group;
- target_cg->default_groups = kmalloc(sizeof(struct config_group) * 2,
- GFP_KERNEL);
- if (!target_cg->default_groups) {
- pr_err("Unable to allocate target_cg->default_groups\n");
- ret = -ENOMEM;
- goto out_global;
- }
+ config_group_init_type_name(&target_core_hbagroup, "core",
+ &target_core_cit);
+ configfs_add_default_group(&target_core_hbagroup, &subsys->su_group);
- config_group_init_type_name(&target_core_hbagroup,
- "core", &target_core_cit);
- target_cg->default_groups[0] = &target_core_hbagroup;
- target_cg->default_groups[1] = NULL;
/*
* Create ALUA infrastructure under /sys/kernel/config/target/core/alua/
*/
- hba_cg = &target_core_hbagroup;
- hba_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
- GFP_KERNEL);
- if (!hba_cg->default_groups) {
- pr_err("Unable to allocate hba_cg->default_groups\n");
- ret = -ENOMEM;
- goto out_global;
- }
- config_group_init_type_name(&alua_group,
- "alua", &target_core_alua_cit);
- hba_cg->default_groups[0] = &alua_group;
- hba_cg->default_groups[1] = NULL;
+ config_group_init_type_name(&alua_group, "alua", &target_core_alua_cit);
+ configfs_add_default_group(&alua_group, &target_core_hbagroup);
+
/*
* Add ALUA Logical Unit Group and Target Port Group ConfigFS
* groups under /sys/kernel/config/target/core/alua/
*/
- alua_cg = &alua_group;
- alua_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
- GFP_KERNEL);
- if (!alua_cg->default_groups) {
- pr_err("Unable to allocate alua_cg->default_groups\n");
- ret = -ENOMEM;
- goto out_global;
- }
+ config_group_init_type_name(&alua_lu_gps_group, "lu_gps",
+ &target_core_alua_lu_gps_cit);
+ configfs_add_default_group(&alua_lu_gps_group, &alua_group);
- config_group_init_type_name(&alua_lu_gps_group,
- "lu_gps", &target_core_alua_lu_gps_cit);
- alua_cg->default_groups[0] = &alua_lu_gps_group;
- alua_cg->default_groups[1] = NULL;
/*
* Add core/alua/lu_gps/default_lu_gp
*/
@@ -2890,20 +3719,12 @@ static int __init target_core_init_configfs(void)
goto out_global;
}
- lu_gp_cg = &alua_lu_gps_group;
- lu_gp_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
- GFP_KERNEL);
- if (!lu_gp_cg->default_groups) {
- pr_err("Unable to allocate lu_gp_cg->default_groups\n");
- ret = -ENOMEM;
- goto out_global;
- }
-
config_group_init_type_name(&lu_gp->lu_gp_group, "default_lu_gp",
&target_core_alua_lu_gp_cit);
- lu_gp_cg->default_groups[0] = &lu_gp->lu_gp_group;
- lu_gp_cg->default_groups[1] = NULL;
+ configfs_add_default_group(&lu_gp->lu_gp_group, &alua_lu_gps_group);
+
default_lu_gp = lu_gp;
+
/*
* Register the target_core_mod subsystem with configfs.
*/
@@ -2914,7 +3735,7 @@ static int __init target_core_init_configfs(void)
goto out_global;
}
pr_debug("TARGET_CORE[0]: Initialized ConfigFS Fabric"
- " Infrastructure: "TARGET_CORE_CONFIGFS_VERSION" on %s/%s"
+ " Infrastructure: "TARGET_CORE_VERSION" on %s/%s"
" on "UTS_RELEASE"\n", utsname()->sysname, utsname()->machine);
/*
* Register built-in RAMDISK subsystem logic for virtual LUN 0
@@ -2927,9 +3748,17 @@ static int __init target_core_init_configfs(void)
if (ret < 0)
goto out;
+ ret = target_xcopy_setup_pt();
+ if (ret < 0)
+ goto out;
+
+ scoped_with_kernel_creds()
+ target_init_dbroot();
+
return 0;
out:
+ target_xcopy_release_pt();
configfs_unregister_subsystem(subsys);
core_dev_release_virtual_lun0();
rd_module_exit();
@@ -2938,58 +3767,21 @@ out_global:
core_alua_free_lu_gp(default_lu_gp);
default_lu_gp = NULL;
}
- if (lu_gp_cg)
- kfree(lu_gp_cg->default_groups);
- if (alua_cg)
- kfree(alua_cg->default_groups);
- if (hba_cg)
- kfree(hba_cg->default_groups);
- kfree(target_cg->default_groups);
release_se_kmem_caches();
return ret;
}
static void __exit target_core_exit_configfs(void)
{
- struct configfs_subsystem *subsys;
- struct config_group *hba_cg, *alua_cg, *lu_gp_cg;
- struct config_item *item;
- int i;
+ configfs_remove_default_groups(&alua_lu_gps_group);
+ configfs_remove_default_groups(&alua_group);
+ configfs_remove_default_groups(&target_core_hbagroup);
- subsys = target_core_subsystem[0];
-
- lu_gp_cg = &alua_lu_gps_group;
- for (i = 0; lu_gp_cg->default_groups[i]; i++) {
- item = &lu_gp_cg->default_groups[i]->cg_item;
- lu_gp_cg->default_groups[i] = NULL;
- config_item_put(item);
- }
- kfree(lu_gp_cg->default_groups);
- lu_gp_cg->default_groups = NULL;
-
- alua_cg = &alua_group;
- for (i = 0; alua_cg->default_groups[i]; i++) {
- item = &alua_cg->default_groups[i]->cg_item;
- alua_cg->default_groups[i] = NULL;
- config_item_put(item);
- }
- kfree(alua_cg->default_groups);
- alua_cg->default_groups = NULL;
-
- hba_cg = &target_core_hbagroup;
- for (i = 0; hba_cg->default_groups[i]; i++) {
- item = &hba_cg->default_groups[i]->cg_item;
- hba_cg->default_groups[i] = NULL;
- config_item_put(item);
- }
- kfree(hba_cg->default_groups);
- hba_cg->default_groups = NULL;
/*
* We expect subsys->su_group.default_groups to be released
* by the configfs subsystem provider logic.
*/
- configfs_unregister_subsystem(subsys);
- kfree(subsys->su_group.default_groups);
+ configfs_unregister_subsystem(&target_core_fabrics);
core_alua_free_lu_gp(default_lu_gp);
default_lu_gp = NULL;
@@ -2999,6 +3791,7 @@ static void __exit target_core_exit_configfs(void)
core_dev_release_virtual_lun0();
rd_module_exit();
+ target_xcopy_release_pt();
release_se_kmem_caches();
}
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 8f4142fe5f19..8ccb8541db1c 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -1,27 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* Filename: target_core_device.c (based on iscsi_target_device.c)
*
* This file contains the TCM Virtual Device and Disk Transport
* agnostic related functions.
*
- * (c) Copyright 2003-2012 RisingTide Systems LLC.
+ * (c) Copyright 2003-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
******************************************************************************/
#include <linux/net.h>
@@ -33,10 +20,12 @@
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
+#include <linux/t10-pi.h>
+#include <linux/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_device.h>
+#include <scsi/scsi_common.h>
+#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
@@ -47,50 +36,58 @@
#include "target_core_pr.h"
#include "target_core_ua.h"
+static DEFINE_MUTEX(device_mutex);
+static DEFINE_IDR(devices_idr);
+
static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;
sense_reason_t
-transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
+transport_lookup_cmd_lun(struct se_cmd *se_cmd)
{
struct se_lun *se_lun = NULL;
struct se_session *se_sess = se_cmd->se_sess;
- struct se_device *dev;
- unsigned long flags;
-
- if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
- return TCM_NON_EXISTENT_LUN;
+ struct se_node_acl *nacl = se_sess->se_node_acl;
+ struct se_dev_entry *deve;
+ sense_reason_t ret = TCM_NO_SENSE;
- spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
- se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
- if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
- struct se_dev_entry *deve = se_cmd->se_deve;
+ rcu_read_lock();
+ deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
+ if (deve) {
+ this_cpu_inc(deve->stats->total_cmds);
- deve->total_cmds++;
+ if (se_cmd->data_direction == DMA_TO_DEVICE)
+ this_cpu_add(deve->stats->write_bytes,
+ se_cmd->data_length);
+ else if (se_cmd->data_direction == DMA_FROM_DEVICE)
+ this_cpu_add(deve->stats->read_bytes,
+ se_cmd->data_length);
if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
- (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
+ deve->lun_access_ro) {
pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
- " Access for 0x%08x\n",
- se_cmd->se_tfo->get_fabric_name(),
- unpacked_lun);
- spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
+ " Access for 0x%08llx\n",
+ se_cmd->se_tfo->fabric_name,
+ se_cmd->orig_fe_lun);
+ rcu_read_unlock();
return TCM_WRITE_PROTECTED;
}
- if (se_cmd->data_direction == DMA_TO_DEVICE)
- deve->write_bytes += se_cmd->data_length;
- else if (se_cmd->data_direction == DMA_FROM_DEVICE)
- deve->read_bytes += se_cmd->data_length;
-
se_lun = deve->se_lun;
- se_cmd->se_lun = deve->se_lun;
+
+ if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
+ se_lun = NULL;
+ goto out_unlock;
+ }
+
+ se_cmd->se_lun = se_lun;
se_cmd->pr_res_key = deve->pr_res_key;
- se_cmd->orig_fe_lun = unpacked_lun;
se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
+ se_cmd->lun_ref_active = true;
}
- spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
+out_unlock:
+ rcu_read_unlock();
if (!se_lun) {
/*
@@ -98,13 +95,15 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
* REPORT_LUNS, et al to be returned when no active
* MappedLUN=0 exists for this Initiator Port.
*/
- if (unpacked_lun != 0) {
+ if (se_cmd->orig_fe_lun != 0) {
pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
- " Access for 0x%08x\n",
- se_cmd->se_tfo->get_fabric_name(),
- unpacked_lun);
+ " Access for 0x%08llx from %s\n",
+ se_cmd->se_tfo->fabric_name,
+ se_cmd->orig_fe_lun,
+ nacl->initiatorname);
return TCM_NON_EXISTENT_LUN;
}
+
/*
* Force WRITE PROTECT for virtual LUN 0
*/
@@ -112,80 +111,93 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
(se_cmd->data_direction != DMA_NONE))
return TCM_WRITE_PROTECTED;
- se_lun = &se_sess->se_tpg->tpg_virt_lun0;
- se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
- se_cmd->orig_fe_lun = 0;
+ se_lun = se_sess->se_tpg->tpg_virt_lun0;
+ if (!percpu_ref_tryget_live(&se_lun->lun_ref))
+ return TCM_NON_EXISTENT_LUN;
+
+ se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0;
se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
+ se_cmd->lun_ref_active = true;
}
+ /*
+ * The RCU reference is protected by the percpu se_lun->lun_ref taken
+ * above, which must drop to zero (including the initial reference)
+ * before this se_lun pointer can be freed via kfree_rcu() by the final
+ * se_lun->lun_group put in
+ * target_core_fabric_configfs.c:target_fabric_port_release().
+ */
+ se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
+ this_cpu_inc(se_cmd->se_dev->stats->total_cmds);
- /* Directly associate cmd with se_dev */
- se_cmd->se_dev = se_lun->lun_se_dev;
-
- /* TODO: get rid of this and use atomics for stats */
- dev = se_lun->lun_se_dev;
- spin_lock_irqsave(&dev->stats_lock, flags);
- dev->num_cmds++;
if (se_cmd->data_direction == DMA_TO_DEVICE)
- dev->write_bytes += se_cmd->data_length;
+ this_cpu_add(se_cmd->se_dev->stats->write_bytes,
+ se_cmd->data_length);
else if (se_cmd->data_direction == DMA_FROM_DEVICE)
- dev->read_bytes += se_cmd->data_length;
- spin_unlock_irqrestore(&dev->stats_lock, flags);
-
- spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
- list_add_tail(&se_cmd->se_lun_node, &se_lun->lun_cmd_list);
- spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);
+ this_cpu_add(se_cmd->se_dev->stats->read_bytes,
+ se_cmd->data_length);
- return 0;
+ return ret;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);
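+
+/*
+ * The lockless lookup pattern used above, in sketch form: the deve is
+ * found under rcu_read_lock(), the LUN is pinned with
+ * percpu_ref_tryget_live(&se_lun->lun_ref) before leaving the RCU
+ * section, and lun_ref_active tells command completion to drop the
+ * reference later:
+ *
+ *   rcu_read_lock();
+ *   deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
+ *   if (deve && percpu_ref_tryget_live(&deve->se_lun->lun_ref)) {
+ *           se_cmd->se_lun = deve->se_lun;
+ *           se_cmd->lun_ref_active = true;
+ *   }
+ *   rcu_read_unlock();
+ */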
-int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
+int transport_lookup_tmr_lun(struct se_cmd *se_cmd)
{
struct se_dev_entry *deve;
struct se_lun *se_lun = NULL;
struct se_session *se_sess = se_cmd->se_sess;
+ struct se_node_acl *nacl = se_sess->se_node_acl;
struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
- unsigned long flags;
- if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
- return -ENODEV;
+ rcu_read_lock();
+ deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
+ if (deve) {
+ se_lun = deve->se_lun;
- spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
- se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
- deve = se_cmd->se_deve;
+ if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
+ se_lun = NULL;
+ goto out_unlock;
+ }
- if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
- se_tmr->tmr_lun = deve->se_lun;
- se_cmd->se_lun = deve->se_lun;
- se_lun = deve->se_lun;
+ se_cmd->se_lun = se_lun;
se_cmd->pr_res_key = deve->pr_res_key;
- se_cmd->orig_fe_lun = unpacked_lun;
+ se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
+ se_cmd->lun_ref_active = true;
}
- spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
+out_unlock:
+ rcu_read_unlock();
if (!se_lun) {
pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
- " Access for 0x%08x\n",
- se_cmd->se_tfo->get_fabric_name(),
- unpacked_lun);
+ " Access for 0x%08llx for %s\n",
+ se_cmd->se_tfo->fabric_name,
+ se_cmd->orig_fe_lun,
+ nacl->initiatorname);
return -ENODEV;
}
-
- /* Directly associate cmd with se_dev */
- se_cmd->se_dev = se_lun->lun_se_dev;
- se_tmr->tmr_dev = se_lun->lun_se_dev;
-
- spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
- list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
- spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);
+ se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
+ se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);
return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);
+bool target_lun_is_rdonly(struct se_cmd *cmd)
+{
+ struct se_session *se_sess = cmd->se_sess;
+ struct se_dev_entry *deve;
+ bool ret;
+
+ rcu_read_lock();
+ deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun);
+ ret = deve && deve->lun_access_ro;
+ rcu_read_unlock();
+
+ return ret;
+}
+EXPORT_SYMBOL(target_lun_is_rdonly);
+
/*
* This function is called from core_scsi3_emulate_pro_register_and_move()
- * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count
+ * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_kref
* when a matching rtpi is found.
*/
struct se_dev_entry *core_get_se_deve_from_rtpi(
@@ -194,232 +206,268 @@ struct se_dev_entry *core_get_se_deve_from_rtpi(
{
struct se_dev_entry *deve;
struct se_lun *lun;
- struct se_port *port;
struct se_portal_group *tpg = nacl->se_tpg;
- u32 i;
-
- spin_lock_irq(&nacl->device_list_lock);
- for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
- deve = nacl->device_list[i];
-
- if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
- continue;
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
lun = deve->se_lun;
if (!lun) {
pr_err("%s device entries device pointer is"
" NULL, but Initiator has access.\n",
- tpg->se_tpg_tfo->get_fabric_name());
- continue;
- }
- port = lun->lun_sep;
- if (!port) {
- pr_err("%s device entries device pointer is"
- " NULL, but Initiator has access.\n",
- tpg->se_tpg_tfo->get_fabric_name());
+ tpg->se_tpg_tfo->fabric_name);
continue;
}
- if (port->sep_rtpi != rtpi)
+ if (lun->lun_tpg->tpg_rtpi != rtpi)
continue;
- atomic_inc(&deve->pr_ref_count);
- smp_mb__after_atomic_inc();
- spin_unlock_irq(&nacl->device_list_lock);
+ kref_get(&deve->pr_kref);
+ rcu_read_unlock();
return deve;
}
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_unlock();
return NULL;
}
-int core_free_device_list_for_node(
+void core_free_device_list_for_node(
struct se_node_acl *nacl,
struct se_portal_group *tpg)
{
struct se_dev_entry *deve;
- struct se_lun *lun;
- u32 i;
- if (!nacl->device_list)
- return 0;
+ mutex_lock(&nacl->lun_entry_mutex);
+ hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
+ core_disable_device_list_for_node(deve->se_lun, deve, nacl, tpg);
+ mutex_unlock(&nacl->lun_entry_mutex);
+}
- spin_lock_irq(&nacl->device_list_lock);
- for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
- deve = nacl->device_list[i];
+void core_update_device_list_access(
+ u64 mapped_lun,
+ bool lun_access_ro,
+ struct se_node_acl *nacl)
+{
+ struct se_dev_entry *deve;
- if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
- continue;
+ mutex_lock(&nacl->lun_entry_mutex);
+ deve = target_nacl_find_deve(nacl, mapped_lun);
+ if (deve)
+ deve->lun_access_ro = lun_access_ro;
+ mutex_unlock(&nacl->lun_entry_mutex);
+}
- if (!deve->se_lun) {
- pr_err("%s device entries device pointer is"
- " NULL, but Initiator has access.\n",
- tpg->se_tpg_tfo->get_fabric_name());
- continue;
- }
- lun = deve->se_lun;
+/*
+ * Called with rcu_read_lock or nacl->lun_entry_mutex held.
+ */
+struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl, u64 mapped_lun)
+{
+ struct se_dev_entry *deve;
- spin_unlock_irq(&nacl->device_list_lock);
- core_disable_device_list_for_node(lun, NULL, deve->mapped_lun,
- TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);
- spin_lock_irq(&nacl->device_list_lock);
- }
- spin_unlock_irq(&nacl->device_list_lock);
+ hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
+ if (deve->mapped_lun == mapped_lun)
+ return deve;
- array_free(nacl->device_list, TRANSPORT_MAX_LUNS_PER_TPG);
- nacl->device_list = NULL;
+ return NULL;
+}
+EXPORT_SYMBOL(target_nacl_find_deve);
- return 0;
+void target_pr_kref_release(struct kref *kref)
+{
+ struct se_dev_entry *deve = container_of(kref, struct se_dev_entry,
+ pr_kref);
+ complete(&deve->pr_comp);
}
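
target_pr_kref_release() completes a waiter instead of freeing, so teardown paths can block until every outstanding PR reference is dropped. The pattern in isolation, with hypothetical names:

struct pr_obj {
	struct kref kref;
	struct completion done;
};

static void pr_obj_init(struct pr_obj *p)
{
	kref_init(&p->kref);		/* refcount starts at 1 */
	init_completion(&p->done);
}

static void pr_obj_release(struct kref *kref)
{
	struct pr_obj *p = container_of(kref, struct pr_obj, kref);

	complete(&p->done);		/* wake the teardown path; no free here */
}

static void pr_obj_teardown(struct pr_obj *p)
{
	kref_put(&p->kref, pr_obj_release);
	wait_for_completion(&p->done);	/* all holders are gone */
	kfree(p);
}
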
-void core_update_device_list_access(
- u32 mapped_lun,
- u32 lun_access,
- struct se_node_acl *nacl)
+/*
+ * Establish UA condition on SCSI device - all LUNs
+ */
+void target_dev_ua_allocate(struct se_device *dev, u8 asc, u8 ascq)
{
- struct se_dev_entry *deve;
+ struct se_dev_entry *se_deve;
+ struct se_lun *lun;
- spin_lock_irq(&nacl->device_list_lock);
- deve = nacl->device_list[mapped_lun];
- if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
- deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
- deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
- } else {
- deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
- deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
+ spin_lock(&dev->se_port_lock);
+ list_for_each_entry(lun, &dev->dev_sep_list, lun_dev_link) {
+
+ spin_lock(&lun->lun_deve_lock);
+ list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link)
+ core_scsi3_ua_allocate(se_deve, asc, ascq);
+ spin_unlock(&lun->lun_deve_lock);
}
- spin_unlock_irq(&nacl->device_list_lock);
+ spin_unlock(&dev->se_port_lock);
+}
+
+static void
+target_luns_data_has_changed(struct se_node_acl *nacl, struct se_dev_entry *new,
+ bool skip_new)
+{
+ struct se_dev_entry *tmp;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(tmp, &nacl->lun_entry_hlist, link) {
+ if (skip_new && tmp == new)
+ continue;
+ core_scsi3_ua_allocate(tmp, 0x3F,
+ ASCQ_3FH_REPORTED_LUNS_DATA_HAS_CHANGED);
+ }
+ rcu_read_unlock();
}
-/* core_enable_device_list_for_node():
- *
- *
- */
int core_enable_device_list_for_node(
struct se_lun *lun,
struct se_lun_acl *lun_acl,
- u32 mapped_lun,
- u32 lun_access,
+ u64 mapped_lun,
+ bool lun_access_ro,
struct se_node_acl *nacl,
struct se_portal_group *tpg)
{
- struct se_port *port = lun->lun_sep;
- struct se_dev_entry *deve;
+ struct se_dev_entry *orig, *new;
+ int ret = 0;
- spin_lock_irq(&nacl->device_list_lock);
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (!new) {
+ pr_err("Unable to allocate se_dev_entry memory\n");
+ return -ENOMEM;
+ }
- deve = nacl->device_list[mapped_lun];
+ new->stats = alloc_percpu(struct se_dev_entry_io_stats);
+ if (!new->stats) {
+ ret = -ENOMEM;
+ goto free_deve;
+ }
- /*
- * Check if the call is handling demo mode -> explict LUN ACL
- * transition. This transition must be for the same struct se_lun
- * + mapped_lun that was setup in demo mode..
- */
- if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
- if (deve->se_lun_acl != NULL) {
- pr_err("struct se_dev_entry->se_lun_acl"
- " already set for demo mode -> explict"
- " LUN ACL transition\n");
- spin_unlock_irq(&nacl->device_list_lock);
- return -EINVAL;
- }
- if (deve->se_lun != lun) {
- pr_err("struct se_dev_entry->se_lun does"
- " match passed struct se_lun for demo mode"
- " -> explict LUN ACL transition\n");
- spin_unlock_irq(&nacl->device_list_lock);
- return -EINVAL;
+ spin_lock_init(&new->ua_lock);
+ INIT_LIST_HEAD(&new->ua_list);
+ INIT_LIST_HEAD(&new->lun_link);
+
+ new->mapped_lun = mapped_lun;
+ kref_init(&new->pr_kref);
+ init_completion(&new->pr_comp);
+
+ new->lun_access_ro = lun_access_ro;
+ new->creation_time = get_jiffies_64();
+ new->attach_count++;
+
+ mutex_lock(&nacl->lun_entry_mutex);
+ orig = target_nacl_find_deve(nacl, mapped_lun);
+ if (orig && orig->se_lun) {
+ struct se_lun *orig_lun = orig->se_lun;
+
+ if (orig_lun != lun) {
+ pr_err("Existing orig->se_lun doesn't match new lun"
+ " for dynamic -> explicit NodeACL conversion:"
+ " %s\n", nacl->initiatorname);
+ mutex_unlock(&nacl->lun_entry_mutex);
+ ret = -EINVAL;
+ goto free_stats;
}
- deve->se_lun_acl = lun_acl;
-
- if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
- deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
- deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
- } else {
- deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
- deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
+ if (orig->se_lun_acl != NULL) {
+ pr_warn_ratelimited("Detected existing explicit"
+ " se_lun_acl->se_lun_group reference for %s"
+ " mapped_lun: %llu, failing\n",
+ nacl->initiatorname, mapped_lun);
+ mutex_unlock(&nacl->lun_entry_mutex);
+ ret = -EINVAL;
+ goto free_stats;
}
- spin_unlock_irq(&nacl->device_list_lock);
- return 0;
- }
+ new->se_lun = lun;
+ new->se_lun_acl = lun_acl;
+ hlist_del_rcu(&orig->link);
+ hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
+ mutex_unlock(&nacl->lun_entry_mutex);
+
+ spin_lock(&lun->lun_deve_lock);
+ list_del(&orig->lun_link);
+ list_add_tail(&new->lun_link, &lun->lun_deve_list);
+ spin_unlock(&lun->lun_deve_lock);
+
+ kref_put(&orig->pr_kref, target_pr_kref_release);
+ wait_for_completion(&orig->pr_comp);
- deve->se_lun = lun;
- deve->se_lun_acl = lun_acl;
- deve->mapped_lun = mapped_lun;
- deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;
-
- if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
- deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
- deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
- } else {
- deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
- deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
+ target_luns_data_has_changed(nacl, new, true);
+ kfree_rcu(orig, rcu_head);
+ return 0;
}
- deve->creation_time = get_jiffies_64();
- deve->attach_count++;
- spin_unlock_irq(&nacl->device_list_lock);
+ new->se_lun = lun;
+ new->se_lun_acl = lun_acl;
+ hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
+ mutex_unlock(&nacl->lun_entry_mutex);
- spin_lock_bh(&port->sep_alua_lock);
- list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
- spin_unlock_bh(&port->sep_alua_lock);
+ spin_lock(&lun->lun_deve_lock);
+ list_add_tail(&new->lun_link, &lun->lun_deve_list);
+ spin_unlock(&lun->lun_deve_lock);
+ target_luns_data_has_changed(nacl, new, true);
return 0;
+
+free_stats:
+ free_percpu(new->stats);
+free_deve:
+ kfree(new);
+ return ret;
}
-/* core_disable_device_list_for_node():
- *
- *
- */
-int core_disable_device_list_for_node(
+static void target_free_dev_entry(struct rcu_head *head)
+{
+ struct se_dev_entry *deve = container_of(head, struct se_dev_entry,
+ rcu_head);
+ free_percpu(deve->stats);
+ kfree(deve);
+}
+
+void core_disable_device_list_for_node(
struct se_lun *lun,
- struct se_lun_acl *lun_acl,
- u32 mapped_lun,
- u32 lun_access,
+ struct se_dev_entry *orig,
struct se_node_acl *nacl,
struct se_portal_group *tpg)
{
- struct se_port *port = lun->lun_sep;
- struct se_dev_entry *deve = nacl->device_list[mapped_lun];
+ /*
+ * rcu_dereference_raw protected by se_lun->lun_group symlink
+ * reference to se_device->dev_group.
+ */
+ struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
+
+ lockdep_assert_held(&nacl->lun_entry_mutex);
/*
* If the MappedLUN entry is being disabled, the entry in
- * port->sep_alua_list must be removed now before clearing the
+ * lun->lun_deve_list must be removed now before clearing the
* struct se_dev_entry pointers below as logic in
* core_alua_do_transition_tg_pt() depends on these being present.
*
* deve->se_lun_acl will be NULL for demo-mode created LUNs
* that have not been explicitly converted to MappedLUNs ->
- * struct se_lun_acl, but we remove deve->alua_port_list from
- * port->sep_alua_list. This also means that active UAs and
+ * struct se_lun_acl, but we remove deve->lun_link from
+ * lun->lun_deve_list. This also means that active UAs and
* NodeACL context specific PR metadata for demo-mode
* MappedLUN *deve will be released below..
*/
- spin_lock_bh(&port->sep_alua_lock);
- list_del(&deve->alua_port_list);
- spin_unlock_bh(&port->sep_alua_lock);
+ spin_lock(&lun->lun_deve_lock);
+ list_del(&orig->lun_link);
+ spin_unlock(&lun->lun_deve_lock);
/*
- * Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE
- * PR operation to complete.
+ * Disable struct se_dev_entry LUN ACL mapping
*/
- while (atomic_read(&deve->pr_ref_count) != 0)
- cpu_relax();
+ core_scsi3_ua_release_all(orig);
- spin_lock_irq(&nacl->device_list_lock);
+ hlist_del_rcu(&orig->link);
+ clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
+ orig->lun_access_ro = false;
+ orig->creation_time = 0;
+ orig->attach_count--;
/*
- * Disable struct se_dev_entry LUN ACL mapping
+ * Before firing off the RCU callback, wait for any in-process SPEC_I_PT=1
+ * or REGISTER_AND_MOVE PR operation to complete.
*/
- core_scsi3_ua_release_all(deve);
- deve->se_lun = NULL;
- deve->se_lun_acl = NULL;
- deve->lun_flags = 0;
- deve->creation_time = 0;
- deve->attach_count--;
- spin_unlock_irq(&nacl->device_list_lock);
-
- core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);
- return 0;
+ kref_put(&orig->pr_kref, target_pr_kref_release);
+ wait_for_completion(&orig->pr_comp);
+
+ call_rcu(&orig->rcu_head, target_free_dev_entry);
+
+ core_scsi3_free_pr_reg_from_nacl(dev, nacl);
+ target_luns_data_has_changed(nacl, NULL, false);
}
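
target_free_dev_entry() uses call_rcu() with a custom callback because the entry owns per-CPU memory that a plain kfree_rcu() could not release. Reduced to its essentials (names hypothetical):

struct entry {
	u64 __percpu *stats;
	struct rcu_head rcu_head;
};

static void entry_free_rcu(struct rcu_head *head)
{
	struct entry *e = container_of(head, struct entry, rcu_head);

	free_percpu(e->stats);	/* extra cleanup kfree_rcu() cannot do */
	kfree(e);
}

static void entry_unpublish(struct entry *e)
{
	/* Entry must already be unlinked from all RCU-visible lists. */
	call_rcu(&e->rcu_head, entry_free_rcu);
}
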
/* core_clear_lun_from_tpg():
@@ -430,191 +478,20 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
struct se_node_acl *nacl;
struct se_dev_entry *deve;
- u32 i;
- spin_lock_irq(&tpg->acl_node_lock);
+ mutex_lock(&tpg->acl_node_mutex);
list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
- spin_unlock_irq(&tpg->acl_node_lock);
- spin_lock_irq(&nacl->device_list_lock);
- for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
- deve = nacl->device_list[i];
+ mutex_lock(&nacl->lun_entry_mutex);
+ hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
if (lun != deve->se_lun)
continue;
- spin_unlock_irq(&nacl->device_list_lock);
- core_disable_device_list_for_node(lun, NULL,
- deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS,
- nacl, tpg);
-
- spin_lock_irq(&nacl->device_list_lock);
+ core_disable_device_list_for_node(lun, deve, nacl, tpg);
}
- spin_unlock_irq(&nacl->device_list_lock);
-
- spin_lock_irq(&tpg->acl_node_lock);
+ mutex_unlock(&nacl->lun_entry_mutex);
}
- spin_unlock_irq(&tpg->acl_node_lock);
-}
-
-static struct se_port *core_alloc_port(struct se_device *dev)
-{
- struct se_port *port, *port_tmp;
-
- port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
- if (!port) {
- pr_err("Unable to allocate struct se_port\n");
- return ERR_PTR(-ENOMEM);
- }
- INIT_LIST_HEAD(&port->sep_alua_list);
- INIT_LIST_HEAD(&port->sep_list);
- atomic_set(&port->sep_tg_pt_secondary_offline, 0);
- spin_lock_init(&port->sep_alua_lock);
- mutex_init(&port->sep_tg_pt_md_mutex);
-
- spin_lock(&dev->se_port_lock);
- if (dev->dev_port_count == 0x0000ffff) {
- pr_warn("Reached dev->dev_port_count =="
- " 0x0000ffff\n");
- spin_unlock(&dev->se_port_lock);
- return ERR_PTR(-ENOSPC);
- }
-again:
- /*
- * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
- * Here is the table from spc4r17 section 7.7.3.8.
- *
- * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
- *
- * Code Description
- * 0h Reserved
- * 1h Relative port 1, historically known as port A
- * 2h Relative port 2, historically known as port B
- * 3h to FFFFh Relative port 3 through 65 535
- */
- port->sep_rtpi = dev->dev_rpti_counter++;
- if (!port->sep_rtpi)
- goto again;
-
- list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
- /*
- * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
- * for 16-bit wrap..
- */
- if (port->sep_rtpi == port_tmp->sep_rtpi)
- goto again;
- }
- spin_unlock(&dev->se_port_lock);
-
- return port;
-}
-
-static void core_export_port(
- struct se_device *dev,
- struct se_portal_group *tpg,
- struct se_port *port,
- struct se_lun *lun)
-{
- struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;
-
- spin_lock(&dev->se_port_lock);
- spin_lock(&lun->lun_sep_lock);
- port->sep_tpg = tpg;
- port->sep_lun = lun;
- lun->lun_sep = port;
- spin_unlock(&lun->lun_sep_lock);
-
- list_add_tail(&port->sep_list, &dev->dev_sep_list);
- spin_unlock(&dev->se_port_lock);
-
- if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV &&
- !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
- tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
- if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
- pr_err("Unable to allocate t10_alua_tg_pt"
- "_gp_member_t\n");
- return;
- }
- spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
- __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
- dev->t10_alua.default_tg_pt_gp);
- spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
- pr_debug("%s/%s: Adding to default ALUA Target Port"
- " Group: alua/default_tg_pt_gp\n",
- dev->transport->name, tpg->se_tpg_tfo->get_fabric_name());
- }
-
- dev->dev_port_count++;
- port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
-}
-
-/*
- * Called with struct se_device->se_port_lock spinlock held.
- */
-static void core_release_port(struct se_device *dev, struct se_port *port)
- __releases(&dev->se_port_lock) __acquires(&dev->se_port_lock)
-{
- /*
- * Wait for any port reference for PR ALL_TG_PT=1 operation
- * to complete in __core_scsi3_alloc_registration()
- */
- spin_unlock(&dev->se_port_lock);
- if (atomic_read(&port->sep_tg_pt_ref_cnt))
- cpu_relax();
- spin_lock(&dev->se_port_lock);
-
- core_alua_free_tg_pt_gp_mem(port);
-
- list_del(&port->sep_list);
- dev->dev_port_count--;
- kfree(port);
-}
-
-int core_dev_export(
- struct se_device *dev,
- struct se_portal_group *tpg,
- struct se_lun *lun)
-{
- struct se_hba *hba = dev->se_hba;
- struct se_port *port;
-
- port = core_alloc_port(dev);
- if (IS_ERR(port))
- return PTR_ERR(port);
-
- lun->lun_se_dev = dev;
-
- spin_lock(&hba->device_lock);
- dev->export_count++;
- spin_unlock(&hba->device_lock);
-
- core_export_port(dev, tpg, port, lun);
- return 0;
-}
-
-void core_dev_unexport(
- struct se_device *dev,
- struct se_portal_group *tpg,
- struct se_lun *lun)
-{
- struct se_hba *hba = dev->se_hba;
- struct se_port *port = lun->lun_sep;
-
- spin_lock(&lun->lun_sep_lock);
- if (lun->lun_se_dev == NULL) {
- spin_unlock(&lun->lun_sep_lock);
- return;
- }
- spin_unlock(&lun->lun_sep_lock);
-
- spin_lock(&dev->se_port_lock);
- core_release_port(dev, port);
- spin_unlock(&dev->se_port_lock);
-
- spin_lock(&hba->device_lock);
- dev->export_count--;
- spin_unlock(&hba->device_lock);
-
- lun->lun_se_dev = NULL;
+ mutex_unlock(&tpg->acl_node_mutex);
}
static void se_release_vpd_for_dev(struct se_device *dev)
@@ -648,579 +525,69 @@ static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
return aligned_max_sectors;
}
-int se_dev_set_max_unmap_lba_count(
- struct se_device *dev,
- u32 max_unmap_lba_count)
-{
- dev->dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
- pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n",
- dev, dev->dev_attrib.max_unmap_lba_count);
- return 0;
-}
-
-int se_dev_set_max_unmap_block_desc_count(
- struct se_device *dev,
- u32 max_unmap_block_desc_count)
-{
- dev->dev_attrib.max_unmap_block_desc_count =
- max_unmap_block_desc_count;
- pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n",
- dev, dev->dev_attrib.max_unmap_block_desc_count);
- return 0;
-}
-
-int se_dev_set_unmap_granularity(
- struct se_device *dev,
- u32 unmap_granularity)
-{
- dev->dev_attrib.unmap_granularity = unmap_granularity;
- pr_debug("dev[%p]: Set unmap_granularity: %u\n",
- dev, dev->dev_attrib.unmap_granularity);
- return 0;
-}
-
-int se_dev_set_unmap_granularity_alignment(
- struct se_device *dev,
- u32 unmap_granularity_alignment)
-{
- dev->dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
- pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n",
- dev, dev->dev_attrib.unmap_granularity_alignment);
- return 0;
-}
-
-int se_dev_set_max_write_same_len(
- struct se_device *dev,
- u32 max_write_same_len)
-{
- dev->dev_attrib.max_write_same_len = max_write_same_len;
- pr_debug("dev[%p]: Set max_write_same_len: %u\n",
- dev, dev->dev_attrib.max_write_same_len);
- return 0;
-}
-
-static void dev_set_t10_wwn_model_alias(struct se_device *dev)
-{
- const char *configname;
-
- configname = config_item_name(&dev->dev_group.cg_item);
- if (strlen(configname) >= 16) {
- pr_warn("dev[%p]: Backstore name '%s' is too long for "
- "INQUIRY_MODEL, truncating to 16 bytes\n", dev,
- configname);
- }
- snprintf(&dev->t10_wwn.model[0], 16, "%s", configname);
-}
-
-int se_dev_set_emulate_model_alias(struct se_device *dev, int flag)
-{
- if (dev->export_count) {
- pr_err("dev[%p]: Unable to change model alias"
- " while export_count is %d\n",
- dev, dev->export_count);
- return -EINVAL;
- }
-
- if (flag != 0 && flag != 1) {
- pr_err("Illegal value %d\n", flag);
- return -EINVAL;
- }
-
- if (flag) {
- dev_set_t10_wwn_model_alias(dev);
- } else {
- strncpy(&dev->t10_wwn.model[0],
- dev->transport->inquiry_prod, 16);
- }
- dev->dev_attrib.emulate_model_alias = flag;
-
- return 0;
-}
-
-int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
-{
- if (flag != 0 && flag != 1) {
- pr_err("Illegal value %d\n", flag);
- return -EINVAL;
- }
-
- if (flag) {
- pr_err("dpo_emulated not supported\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
-{
- if (flag != 0 && flag != 1) {
- pr_err("Illegal value %d\n", flag);
- return -EINVAL;
- }
-
- if (flag &&
- dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
- pr_err("emulate_fua_write not supported for pSCSI\n");
- return -EINVAL;
- }
- dev->dev_attrib.emulate_fua_write = flag;
- pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
- dev, dev->dev_attrib.emulate_fua_write);
- return 0;
-}
-
-int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
-{
- if (flag != 0 && flag != 1) {
- pr_err("Illegal value %d\n", flag);
- return -EINVAL;
- }
-
- if (flag) {
- pr_err("ua read emulated not supported\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
-{
- if (flag != 0 && flag != 1) {
- pr_err("Illegal value %d\n", flag);
- return -EINVAL;
- }
- if (flag &&
- dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
- pr_err("emulate_write_cache not supported for pSCSI\n");
- return -EINVAL;
- }
- if (dev->transport->get_write_cache) {
- pr_warn("emulate_write_cache cannot be changed when underlying"
- " HW reports WriteCacheEnabled, ignoring request\n");
- return 0;
- }
-
- dev->dev_attrib.emulate_write_cache = flag;
- pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
- dev, dev->dev_attrib.emulate_write_cache);
- return 0;
-}
-
-int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
-{
- if ((flag != 0) && (flag != 1) && (flag != 2)) {
- pr_err("Illegal value %d\n", flag);
- return -EINVAL;
- }
-
- if (dev->export_count) {
- pr_err("dev[%p]: Unable to change SE Device"
- " UA_INTRLCK_CTRL while export_count is %d\n",
- dev, dev->export_count);
- return -EINVAL;
- }
- dev->dev_attrib.emulate_ua_intlck_ctrl = flag;
- pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
- dev, dev->dev_attrib.emulate_ua_intlck_ctrl);
-
- return 0;
-}
-
-int se_dev_set_emulate_tas(struct se_device *dev, int flag)
-{
- if ((flag != 0) && (flag != 1)) {
- pr_err("Illegal value %d\n", flag);
- return -EINVAL;
- }
-
- if (dev->export_count) {
- pr_err("dev[%p]: Unable to change SE Device TAS while"
- " export_count is %d\n",
- dev, dev->export_count);
- return -EINVAL;
- }
- dev->dev_attrib.emulate_tas = flag;
- pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
- dev, (dev->dev_attrib.emulate_tas) ? "Enabled" : "Disabled");
-
- return 0;
-}
-
-int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
-{
- if ((flag != 0) && (flag != 1)) {
- pr_err("Illegal value %d\n", flag);
- return -EINVAL;
- }
- /*
- * We expect this value to be non-zero when generic Block Layer
- * Discard supported is detected iblock_create_virtdevice().
- */
- if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
- pr_err("Generic Block Discard not supported\n");
- return -ENOSYS;
- }
-
- dev->dev_attrib.emulate_tpu = flag;
- pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
- dev, flag);
- return 0;
-}
-
-int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
-{
- if ((flag != 0) && (flag != 1)) {
- pr_err("Illegal value %d\n", flag);
- return -EINVAL;
- }
- /*
- * We expect this value to be non-zero when generic Block Layer
- * Discard supported is detected iblock_create_virtdevice().
- */
- if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
- pr_err("Generic Block Discard not supported\n");
- return -ENOSYS;
- }
-
- dev->dev_attrib.emulate_tpws = flag;
- pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
- dev, flag);
- return 0;
-}
-
-int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
-{
- if ((flag != 0) && (flag != 1)) {
- pr_err("Illegal value %d\n", flag);
- return -EINVAL;
- }
- dev->dev_attrib.enforce_pr_isids = flag;
- pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
- (dev->dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
- return 0;
-}
-
-int se_dev_set_is_nonrot(struct se_device *dev, int flag)
-{
- if ((flag != 0) && (flag != 1)) {
- printk(KERN_ERR "Illegal value %d\n", flag);
- return -EINVAL;
- }
- dev->dev_attrib.is_nonrot = flag;
- pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n",
- dev, flag);
- return 0;
-}
-
-int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
-{
- if (flag != 0) {
- printk(KERN_ERR "dev[%p]: SE Device emulatation of restricted"
- " reordering not implemented\n", dev);
- return -ENOSYS;
- }
- dev->dev_attrib.emulate_rest_reord = flag;
- pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag);
- return 0;
-}
-
-/*
- * Note, this can only be called on unexported SE Device Object.
- */
-int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
-{
- if (dev->export_count) {
- pr_err("dev[%p]: Unable to change SE Device TCQ while"
- " export_count is %d\n",
- dev, dev->export_count);
- return -EINVAL;
- }
- if (!queue_depth) {
- pr_err("dev[%p]: Illegal ZERO value for queue"
- "_depth\n", dev);
- return -EINVAL;
- }
-
- if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
- if (queue_depth > dev->dev_attrib.hw_queue_depth) {
- pr_err("dev[%p]: Passed queue_depth: %u"
- " exceeds TCM/SE_Device TCQ: %u\n",
- dev, queue_depth,
- dev->dev_attrib.hw_queue_depth);
- return -EINVAL;
- }
- } else {
- if (queue_depth > dev->dev_attrib.queue_depth) {
- if (queue_depth > dev->dev_attrib.hw_queue_depth) {
- pr_err("dev[%p]: Passed queue_depth:"
- " %u exceeds TCM/SE_Device MAX"
- " TCQ: %u\n", dev, queue_depth,
- dev->dev_attrib.hw_queue_depth);
- return -EINVAL;
- }
- }
- }
-
- dev->dev_attrib.queue_depth = dev->queue_depth = queue_depth;
- pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
- dev, queue_depth);
- return 0;
-}
-
-int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
-{
- int block_size = dev->dev_attrib.block_size;
-
- if (dev->export_count) {
- pr_err("dev[%p]: Unable to change SE Device"
- " fabric_max_sectors while export_count is %d\n",
- dev, dev->export_count);
- return -EINVAL;
- }
- if (!fabric_max_sectors) {
- pr_err("dev[%p]: Illegal ZERO value for"
- " fabric_max_sectors\n", dev);
- return -EINVAL;
- }
- if (fabric_max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
- pr_err("dev[%p]: Passed fabric_max_sectors: %u less than"
- " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, fabric_max_sectors,
- DA_STATUS_MAX_SECTORS_MIN);
- return -EINVAL;
- }
- if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
- if (fabric_max_sectors > dev->dev_attrib.hw_max_sectors) {
- pr_err("dev[%p]: Passed fabric_max_sectors: %u"
- " greater than TCM/SE_Device max_sectors:"
- " %u\n", dev, fabric_max_sectors,
- dev->dev_attrib.hw_max_sectors);
- return -EINVAL;
- }
- } else {
- if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
- pr_err("dev[%p]: Passed fabric_max_sectors: %u"
- " greater than DA_STATUS_MAX_SECTORS_MAX:"
- " %u\n", dev, fabric_max_sectors,
- DA_STATUS_MAX_SECTORS_MAX);
- return -EINVAL;
- }
- }
- /*
- * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
- */
- if (!block_size) {
- block_size = 512;
- pr_warn("Defaulting to 512 for zero block_size\n");
- }
- fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors,
- block_size);
-
- dev->dev_attrib.fabric_max_sectors = fabric_max_sectors;
- pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
- dev, fabric_max_sectors);
- return 0;
-}
-
-int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
-{
- if (dev->export_count) {
- pr_err("dev[%p]: Unable to change SE Device"
- " optimal_sectors while export_count is %d\n",
- dev, dev->export_count);
- return -EINVAL;
- }
- if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
- pr_err("dev[%p]: Passed optimal_sectors cannot be"
- " changed for TCM/pSCSI\n", dev);
- return -EINVAL;
- }
- if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) {
- pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
- " greater than fabric_max_sectors: %u\n", dev,
- optimal_sectors, dev->dev_attrib.fabric_max_sectors);
- return -EINVAL;
- }
-
- dev->dev_attrib.optimal_sectors = optimal_sectors;
- pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
- dev, optimal_sectors);
- return 0;
-}
-
-int se_dev_set_block_size(struct se_device *dev, u32 block_size)
-{
- if (dev->export_count) {
- pr_err("dev[%p]: Unable to change SE Device block_size"
- " while export_count is %d\n",
- dev, dev->export_count);
- return -EINVAL;
- }
-
- if ((block_size != 512) &&
- (block_size != 1024) &&
- (block_size != 2048) &&
- (block_size != 4096)) {
- pr_err("dev[%p]: Illegal value for block_device: %u"
- " for SE device, must be 512, 1024, 2048 or 4096\n",
- dev, block_size);
- return -EINVAL;
- }
-
- if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
- pr_err("dev[%p]: Not allowed to change block_size for"
- " Physical Device, use for Linux/SCSI to change"
- " block_size for underlying hardware\n", dev);
- return -EINVAL;
- }
-
- dev->dev_attrib.block_size = block_size;
- pr_debug("dev[%p]: SE Device block_size changed to %u\n",
- dev, block_size);
- return 0;
-}
-
-struct se_lun *core_dev_add_lun(
+int core_dev_add_lun(
struct se_portal_group *tpg,
struct se_device *dev,
- u32 lun)
+ struct se_lun *lun)
{
- struct se_lun *lun_p;
int rc;
- lun_p = core_tpg_pre_addlun(tpg, lun);
- if (IS_ERR(lun_p))
- return lun_p;
-
- rc = core_tpg_post_addlun(tpg, lun_p,
- TRANSPORT_LUNFLAGS_READ_WRITE, dev);
+ rc = core_tpg_add_lun(tpg, lun, false, dev);
if (rc < 0)
- return ERR_PTR(rc);
+ return rc;
- pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
- " CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
- tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun,
- tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
+ pr_debug("%s_TPG[%u]_LUN[%llu] - Activated %s Logical Unit from"
+ " CORE HBA: %u\n", tpg->se_tpg_tfo->fabric_name,
+ tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
+ tpg->se_tpg_tfo->fabric_name, dev->se_hba->hba_id);
/*
* Update LUN maps for dynamically added initiators when
* generate_node_acl is enabled.
*/
if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
struct se_node_acl *acl;
- spin_lock_irq(&tpg->acl_node_lock);
+
+ mutex_lock(&tpg->acl_node_mutex);
list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
if (acl->dynamic_node_acl &&
(!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
- spin_unlock_irq(&tpg->acl_node_lock);
- core_tpg_add_node_to_devs(acl, tpg);
- spin_lock_irq(&tpg->acl_node_lock);
+ core_tpg_add_node_to_devs(acl, tpg, lun);
}
}
- spin_unlock_irq(&tpg->acl_node_lock);
+ mutex_unlock(&tpg->acl_node_mutex);
}
- return lun_p;
+ return 0;
}
/* core_dev_del_lun():
*
*
*/
-int core_dev_del_lun(
+void core_dev_del_lun(
struct se_portal_group *tpg,
- u32 unpacked_lun)
-{
- struct se_lun *lun;
-
- lun = core_tpg_pre_dellun(tpg, unpacked_lun);
- if (IS_ERR(lun))
- return PTR_ERR(lun);
-
- core_tpg_post_dellun(tpg, lun);
-
- pr_debug("%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
- " device object\n", tpg->se_tpg_tfo->get_fabric_name(),
- tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun,
- tpg->se_tpg_tfo->get_fabric_name());
-
- return 0;
-}
-
-struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun)
-{
- struct se_lun *lun;
-
- spin_lock(&tpg->tpg_lun_lock);
- if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
- pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
- "_PER_TPG-1: %u for Target Portal Group: %hu\n",
- tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
- TRANSPORT_MAX_LUNS_PER_TPG-1,
- tpg->se_tpg_tfo->tpg_get_tag(tpg));
- spin_unlock(&tpg->tpg_lun_lock);
- return NULL;
- }
- lun = tpg->tpg_lun_list[unpacked_lun];
-
- if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
- pr_err("%s Logical Unit Number: %u is not free on"
- " Target Portal Group: %hu, ignoring request.\n",
- tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
- tpg->se_tpg_tfo->tpg_get_tag(tpg));
- spin_unlock(&tpg->tpg_lun_lock);
- return NULL;
- }
- spin_unlock(&tpg->tpg_lun_lock);
-
- return lun;
-}
-
-/* core_dev_get_lun():
- *
- *
- */
-static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun)
+ struct se_lun *lun)
{
- struct se_lun *lun;
-
- spin_lock(&tpg->tpg_lun_lock);
- if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
- pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
- "_TPG-1: %u for Target Portal Group: %hu\n",
- tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
- TRANSPORT_MAX_LUNS_PER_TPG-1,
- tpg->se_tpg_tfo->tpg_get_tag(tpg));
- spin_unlock(&tpg->tpg_lun_lock);
- return NULL;
- }
- lun = tpg->tpg_lun_list[unpacked_lun];
-
- if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
- pr_err("%s Logical Unit Number: %u is not active on"
- " Target Portal Group: %hu, ignoring request.\n",
- tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
- tpg->se_tpg_tfo->tpg_get_tag(tpg));
- spin_unlock(&tpg->tpg_lun_lock);
- return NULL;
- }
- spin_unlock(&tpg->tpg_lun_lock);
+ pr_debug("%s_TPG[%u]_LUN[%llu] - Deactivating %s Logical Unit from"
+ " device object\n", tpg->se_tpg_tfo->fabric_name,
+ tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
+ tpg->se_tpg_tfo->fabric_name);
- return lun;
+ core_tpg_remove_lun(tpg, lun);
}
struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
struct se_portal_group *tpg,
struct se_node_acl *nacl,
- u32 mapped_lun,
+ u64 mapped_lun,
int *ret)
{
struct se_lun_acl *lacl;
if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
pr_err("%s InitiatorName exceeds maximum size.\n",
- tpg->se_tpg_tfo->get_fabric_name());
+ tpg->se_tpg_tfo->fabric_name);
*ret = -EOVERFLOW;
return NULL;
}
@@ -1231,11 +598,8 @@ struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
return NULL;
}
- INIT_LIST_HEAD(&lacl->lacl_list);
lacl->mapped_lun = mapped_lun;
lacl->se_lun_nacl = nacl;
- snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s",
- nacl->initiatorname);
return lacl;
}
@@ -1243,85 +607,65 @@ struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
int core_dev_add_initiator_node_lun_acl(
struct se_portal_group *tpg,
struct se_lun_acl *lacl,
- u32 unpacked_lun,
- u32 lun_access)
+ struct se_lun *lun,
+ bool lun_access_ro)
{
- struct se_lun *lun;
- struct se_node_acl *nacl;
-
- lun = core_dev_get_lun(tpg, unpacked_lun);
- if (!lun) {
- pr_err("%s Logical Unit Number: %u is not active on"
- " Target Portal Group: %hu, ignoring request.\n",
- tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
- tpg->se_tpg_tfo->tpg_get_tag(tpg));
- return -EINVAL;
- }
+ struct se_node_acl *nacl = lacl->se_lun_nacl;
+ /*
+ * rcu_dereference_raw protected by se_lun->lun_group symlink
+ * reference to se_device->dev_group.
+ */
+ struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
- nacl = lacl->se_lun_nacl;
if (!nacl)
return -EINVAL;
- if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
- (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
- lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
+ if (lun->lun_access_ro)
+ lun_access_ro = true;
lacl->se_lun = lun;
if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
- lun_access, nacl, tpg) < 0)
+ lun_access_ro, nacl, tpg) < 0)
return -EINVAL;
- spin_lock(&lun->lun_acl_lock);
- list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
- atomic_inc(&lun->lun_acl_count);
- smp_mb__after_atomic_inc();
- spin_unlock(&lun->lun_acl_lock);
-
- pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for "
- " InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
- tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
- (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
- lacl->initiatorname);
+ pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for "
+ " InitiatorNode: %s\n", tpg->se_tpg_tfo->fabric_name,
+ tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
+ lun_access_ro ? "RO" : "RW",
+ nacl->initiatorname);
/*
* Check to see if there are any existing persistent reservation APTPL
* pre-registrations that need to be enabled for this LUN ACL..
*/
- core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl);
+ core_scsi3_check_aptpl_registration(dev, tpg, lun, nacl,
+ lacl->mapped_lun);
return 0;
}
-/* core_dev_del_initiator_node_lun_acl():
- *
- *
- */
int core_dev_del_initiator_node_lun_acl(
- struct se_portal_group *tpg,
struct se_lun *lun,
struct se_lun_acl *lacl)
{
+ struct se_portal_group *tpg = lun->lun_tpg;
struct se_node_acl *nacl;
+ struct se_dev_entry *deve;
nacl = lacl->se_lun_nacl;
if (!nacl)
return -EINVAL;
- spin_lock(&lun->lun_acl_lock);
- list_del(&lacl->lacl_list);
- atomic_dec(&lun->lun_acl_count);
- smp_mb__after_atomic_dec();
- spin_unlock(&lun->lun_acl_lock);
-
- core_disable_device_list_for_node(lun, NULL, lacl->mapped_lun,
- TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);
-
- lacl->se_lun = NULL;
+ mutex_lock(&nacl->lun_entry_mutex);
+ deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+ if (deve)
+ core_disable_device_list_for_node(lun, deve, nacl, tpg);
+ mutex_unlock(&nacl->lun_entry_mutex);
- pr_debug("%s_TPG[%hu]_LUN[%u] - Removed ACL for"
- " InitiatorNode: %s Mapped LUN: %u\n",
- tpg->se_tpg_tfo->get_fabric_name(),
+ pr_debug("%s_TPG[%hu]_LUN[%llu] - Removed ACL for"
+ " InitiatorNode: %s Mapped LUN: %llu\n",
+ tpg->se_tpg_tfo->fabric_name,
tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
- lacl->initiatorname, lacl->mapped_lun);
+ nacl->initiatorname, lacl->mapped_lun);
return 0;
}
@@ -1331,10 +675,10 @@ void core_dev_free_initiator_node_lun_acl(
struct se_lun_acl *lacl)
{
pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
- " Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
+ " Mapped LUN: %llu\n", tpg->se_tpg_tfo->fabric_name,
tpg->se_tpg_tfo->tpg_get_tag(tpg),
- tpg->se_tpg_tfo->get_fabric_name(),
- lacl->initiatorname, lacl->mapped_lun);
+ tpg->se_tpg_tfo->fabric_name,
+ lacl->se_lun_nacl->initiatorname, lacl->mapped_lun);
kfree(lacl);
}
@@ -1342,65 +686,82 @@ void core_dev_free_initiator_node_lun_acl(
static void scsi_dump_inquiry(struct se_device *dev)
{
struct t10_wwn *wwn = &dev->t10_wwn;
- char buf[17];
- int i, device_type;
+ int device_type = dev->transport->get_device_type(dev);
+
/*
* Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
*/
- for (i = 0; i < 8; i++)
- if (wwn->vendor[i] >= 0x20)
- buf[i] = wwn->vendor[i];
- else
- buf[i] = ' ';
- buf[i] = '\0';
- pr_debug(" Vendor: %s\n", buf);
-
- for (i = 0; i < 16; i++)
- if (wwn->model[i] >= 0x20)
- buf[i] = wwn->model[i];
- else
- buf[i] = ' ';
- buf[i] = '\0';
- pr_debug(" Model: %s\n", buf);
-
- for (i = 0; i < 4; i++)
- if (wwn->revision[i] >= 0x20)
- buf[i] = wwn->revision[i];
- else
- buf[i] = ' ';
- buf[i] = '\0';
- pr_debug(" Revision: %s\n", buf);
-
- device_type = dev->transport->get_device_type(dev);
+ pr_debug(" Vendor: %-" __stringify(INQUIRY_VENDOR_LEN) "s\n",
+ wwn->vendor);
+ pr_debug(" Model: %-" __stringify(INQUIRY_MODEL_LEN) "s\n",
+ wwn->model);
+ pr_debug(" Revision: %-" __stringify(INQUIRY_REVISION_LEN) "s\n",
+ wwn->revision);
pr_debug(" Type: %s ", scsi_device_type(device_type));
}
+static void target_non_ordered_release(struct percpu_ref *ref)
+{
+ struct se_device *dev = container_of(ref, struct se_device,
+ non_ordered);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->delayed_cmd_lock, flags);
+ if (!list_empty(&dev->delayed_cmd_list))
+ schedule_work(&dev->delayed_cmd_work);
+ spin_unlock_irqrestore(&dev->delayed_cmd_lock, flags);
+}
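
A percpu_ref release callback runs in atomic context once the count hits zero, which is why target_non_ordered_release() only schedules work. A sketch with hypothetical my_* names; PERCPU_REF_ALLOW_REINIT permits reviving the ref later with percpu_ref_reinit():

static struct percpu_ref my_ref;
static struct work_struct my_work;

static void my_release(struct percpu_ref *ref)
{
	/* Atomic context: defer anything that might sleep. */
	schedule_work(&my_work);
}

static int my_setup(void)
{
	return percpu_ref_init(&my_ref, my_release,
			       PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
}
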
+
struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
struct se_device *dev;
+ struct se_lun *xcopy_lun;
+ int i;
- dev = hba->transport->alloc_device(hba, name);
+ dev = hba->backend->ops->alloc_device(hba, name);
if (!dev)
return NULL;
- dev->dev_link_magic = SE_DEV_LINK_MAGIC;
+ dev->stats = alloc_percpu(struct se_dev_io_stats);
+ if (!dev->stats)
+ goto free_device;
+
+ dev->queues = kcalloc(nr_cpu_ids, sizeof(*dev->queues), GFP_KERNEL);
+ if (!dev->queues)
+ goto free_stats;
+
+ dev->queue_cnt = nr_cpu_ids;
+ for (i = 0; i < dev->queue_cnt; i++) {
+ struct se_device_queue *q;
+
+ q = &dev->queues[i];
+ INIT_LIST_HEAD(&q->state_list);
+ spin_lock_init(&q->lock);
+
+ init_llist_head(&q->sq.cmd_list);
+ INIT_WORK(&q->sq.work, target_queued_submit_work);
+ }
+
+ if (percpu_ref_init(&dev->non_ordered, target_non_ordered_release,
+ PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
+ goto free_queues;
+
dev->se_hba = hba;
- dev->transport = hba->transport;
+ dev->transport = hba->backend->ops;
+ dev->transport_flags = dev->transport->transport_flags_default;
+ dev->prot_length = sizeof(struct t10_pi_tuple);
+ dev->hba_index = hba->hba_index;
- INIT_LIST_HEAD(&dev->dev_list);
INIT_LIST_HEAD(&dev->dev_sep_list);
INIT_LIST_HEAD(&dev->dev_tmr_list);
INIT_LIST_HEAD(&dev->delayed_cmd_list);
- INIT_LIST_HEAD(&dev->state_list);
INIT_LIST_HEAD(&dev->qf_cmd_list);
- spin_lock_init(&dev->stats_lock);
- spin_lock_init(&dev->execute_task_lock);
spin_lock_init(&dev->delayed_cmd_lock);
spin_lock_init(&dev->dev_reservation_lock);
spin_lock_init(&dev->se_port_lock);
spin_lock_init(&dev->se_tmr_lock);
spin_lock_init(&dev->qf_cmd_lock);
- atomic_set(&dev->dev_ordered_id, 0);
+ sema_init(&dev->caw_sem, 1);
INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
INIT_LIST_HEAD(&dev->t10_pr.registration_list);
@@ -1409,21 +770,37 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
+ INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
+ spin_lock_init(&dev->t10_alua.lba_map_lock);
+
+ INIT_WORK(&dev->delayed_cmd_work, target_do_delayed_work);
+ mutex_init(&dev->lun_reset_mutex);
dev->t10_wwn.t10_dev = dev;
+ /*
+ * Use OpenFabrics IEEE Company ID: 00 14 05
+ */
+ dev->t10_wwn.company_id = 0x001405;
+
dev->t10_alua.t10_dev = dev;
dev->dev_attrib.da_dev = dev;
dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
- dev->dev_attrib.emulate_dpo = DA_EMULATE_DPO;
- dev->dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
- dev->dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
+ dev->dev_attrib.emulate_dpo = 1;
+ dev->dev_attrib.emulate_fua_write = 1;
+ dev->dev_attrib.emulate_fua_read = 1;
dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
- dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
+ dev->dev_attrib.emulate_ua_intlck_ctrl = TARGET_UA_INTLCK_CTRL_CLEAR;
dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
+ dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
+ dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
+ dev->dev_attrib.emulate_pr = DA_EMULATE_PR;
+ dev->dev_attrib.emulate_rsoc = DA_EMULATE_RSOC;
+ dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
+ dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
dev->dev_attrib.is_nonrot = DA_IS_NONROT;
dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
@@ -1432,28 +809,189 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
dev->dev_attrib.unmap_granularity_alignment =
DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
+ dev->dev_attrib.unmap_zeroes_data =
+ DA_UNMAP_ZEROES_DATA_DEFAULT;
dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
- dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
- dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;
+ dev->dev_attrib.submit_type = TARGET_FABRIC_DEFAULT_SUBMIT;
+
+ /* Skip allocating lun_stats since we can't export them. */
+ xcopy_lun = &dev->xcopy_lun;
+ rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
+ init_completion(&xcopy_lun->lun_shutdown_comp);
+ INIT_LIST_HEAD(&xcopy_lun->lun_deve_list);
+ INIT_LIST_HEAD(&xcopy_lun->lun_dev_link);
+ mutex_init(&xcopy_lun->lun_tg_pt_md_mutex);
+ xcopy_lun->lun_tpg = &xcopy_pt_tpg;
+
+ /* Preload the default INQUIRY const values */
+ strscpy(dev->t10_wwn.vendor, "LIO-ORG", sizeof(dev->t10_wwn.vendor));
+ strscpy(dev->t10_wwn.model, dev->transport->inquiry_prod,
+ sizeof(dev->t10_wwn.model));
+ strscpy(dev->t10_wwn.revision, dev->transport->inquiry_rev,
+ sizeof(dev->t10_wwn.revision));
return dev;
+
+free_queues:
+ kfree(dev->queues);
+free_stats:
+ free_percpu(dev->stats);
+free_device:
+ hba->backend->ops->free_device(dev);
+ return NULL;
+}
+
+void target_configure_write_atomic_from_bdev(struct se_dev_attrib *attrib,
+ struct block_device *bdev)
+{
+ struct request_queue *q = bdev_get_queue(bdev);
+ int block_size = bdev_logical_block_size(bdev);
+
+ if (!bdev_can_atomic_write(bdev))
+ return;
+
+ attrib->atomic_max_len = queue_atomic_write_max_bytes(q) / block_size;
+ attrib->atomic_granularity = attrib->atomic_alignment =
+ queue_atomic_write_unit_min_bytes(q) / block_size;
+ attrib->atomic_max_with_boundary = 0;
+ attrib->atomic_max_boundary = 0;
+}
+EXPORT_SYMBOL_GPL(target_configure_write_atomic_from_bdev);
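
A worked example of the conversions above, assuming a 4096-byte logical block size: if queue_atomic_write_max_bytes() reports 65536, atomic_max_len becomes 65536 / 4096 = 16 blocks, and a queue_atomic_write_unit_min_bytes() of 4096 yields atomic_granularity = atomic_alignment = 1 block.
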
+
+/*
+ * Check if the underlying struct block_device supports discard and, if so,
+ * configure the UNMAP parameters.
+ */
+bool target_configure_unmap_from_bdev(struct se_dev_attrib *attrib,
+ struct block_device *bdev)
+{
+ int block_size = bdev_logical_block_size(bdev);
+
+ if (!bdev_max_discard_sectors(bdev))
+ return false;
+
+ attrib->max_unmap_lba_count =
+ bdev_max_discard_sectors(bdev) >> (ilog2(block_size) - 9);
+ /*
+ * Currently hardcoded to 1 in Linux/SCSI code.
+ */
+ attrib->max_unmap_block_desc_count = 1;
+ attrib->unmap_granularity = bdev_discard_granularity(bdev) / block_size;
+ attrib->unmap_granularity_alignment =
+ bdev_discard_alignment(bdev) / block_size;
+ return true;
+}
+EXPORT_SYMBOL(target_configure_unmap_from_bdev);
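
A worked example for the discard conversion above: bdev_max_discard_sectors() counts 512-byte sectors, so with a 4096-byte logical block size the shift is ilog2(4096) - 9 = 3, and a device reporting 8192 discard sectors advertises max_unmap_lba_count = 8192 >> 3 = 1024 logical blocks.
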
+
+/*
+ * Convert from the block size advertised to the initiator to the 512-byte
+ * units unconditionally used by the Linux block layer.
+ */
+sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
+{
+ switch (dev->dev_attrib.block_size) {
+ case 4096:
+ return lb << 3;
+ case 2048:
+ return lb << 2;
+ case 1024:
+ return lb << 1;
+ default:
+ return lb;
+ }
+}
+EXPORT_SYMBOL(target_to_linux_sector);
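
For example, with a 4096-byte advertised block size, target_to_linux_sector(dev, 10) returns 10 << 3 = 80, since each logical block spans 4096 / 512 = 8 of the block layer's 512-byte sectors.
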
+
+struct devices_idr_iter {
+ int (*fn)(struct se_device *dev, void *data);
+ void *data;
+};
+
+static int target_devices_idr_iter(int id, void *p, void *data)
+ __must_hold(&device_mutex)
+{
+ struct devices_idr_iter *iter = data;
+ struct se_device *dev = p;
+ struct config_item *item;
+ int ret;
+
+ /*
+ * We add the device early to the idr, so it can be used
+ * by backend modules during configuration. We do not want
+ * to allow other callers to access partially setup devices,
+ * so we skip them here.
+ */
+ if (!target_dev_configured(dev))
+ return 0;
+
+ item = config_item_get_unless_zero(&dev->dev_group.cg_item);
+ if (!item)
+ return 0;
+ mutex_unlock(&device_mutex);
+
+ ret = iter->fn(dev, iter->data);
+ config_item_put(item);
+
+ mutex_lock(&device_mutex);
+ return ret;
+}
+
+/**
+ * target_for_each_device - iterate over configured devices
+ * @fn: iterator function
+ * @data: pointer to data that will be passed to fn
+ *
+ * fn must return 0 to continue looping over devices. A non-zero return
+ * breaks out of the loop, and that value is returned to the caller.
+ */
+int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
+ void *data)
+{
+ struct devices_idr_iter iter = { .fn = fn, .data = data };
+ int ret;
+
+ mutex_lock(&device_mutex);
+ ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter);
+ mutex_unlock(&device_mutex);
+ return ret;
}
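
target_devices_idr_iter() follows the pin-unlock-call-relock idiom so the callback can sleep without holding device_mutex. A stripped-down sketch (struct obj and its get/put helpers are hypothetical; config_item_get_unless_zero() plays the "get unless zero" role in the real code):

static DEFINE_MUTEX(obj_mutex);
static DEFINE_IDR(obj_idr);

struct obj_iter_ctx {
	int (*fn)(struct obj *o);
};

static int obj_iter(int id, void *p, void *data)
{
	struct obj_iter_ctx *ctx = data;
	struct obj *o = p;
	int ret;

	if (!obj_get_unless_zero(o))	/* skip dying objects */
		return 0;

	mutex_unlock(&obj_mutex);	/* callback may sleep */
	ret = ctx->fn(o);
	obj_put(o);
	mutex_lock(&obj_mutex);		/* idr_for_each() expects it held */

	return ret;			/* non-zero stops the walk */
}

static int for_each_obj(int (*fn)(struct obj *o))
{
	struct obj_iter_ctx ctx = { .fn = fn };
	int ret;

	mutex_lock(&obj_mutex);
	ret = idr_for_each(&obj_idr, obj_iter, &ctx);
	mutex_unlock(&obj_mutex);
	return ret;
}
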
int target_configure_device(struct se_device *dev)
{
struct se_hba *hba = dev->se_hba;
- int ret;
+ int ret, id;
- if (dev->dev_flags & DF_CONFIGURED) {
+ if (target_dev_configured(dev)) {
pr_err("se_dev->se_dev_ptr already set for storage"
" object\n");
return -EEXIST;
}
+ /*
+ * Add the device to the idr early so modules like tcmu can
+ * use it during their configuration.
+ */
+ mutex_lock(&device_mutex);
+ /*
+ * Use cyclic allocation to try to avoid collisions with devices
+ * that were recently removed.
+ */
+ id = idr_alloc_cyclic(&devices_idr, dev, 0, INT_MAX, GFP_KERNEL);
+ mutex_unlock(&device_mutex);
+ if (id < 0) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ dev->dev_index = id;
+
ret = dev->transport->configure_device(dev);
if (ret)
- goto out;
- dev->dev_flags |= DF_CONFIGURED;
+ goto out_free_index;
+
+ if (dev->transport->configure_unmap &&
+ dev->transport->configure_unmap(dev)) {
+ pr_debug("Discard support available, but disabled by default.\n");
+ }
/*
* XXX: there is not much point to have two different values here..
@@ -1467,53 +1005,35 @@ int target_configure_device(struct se_device *dev)
dev->dev_attrib.hw_max_sectors =
se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
dev->dev_attrib.hw_block_size);
+ dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;
- dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
dev->creation_time = get_jiffies_64();
ret = core_setup_alua(dev);
if (ret)
- goto out;
-
- /*
- * Startup the struct se_device processing thread
- */
- dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
- dev->transport->name);
- if (!dev->tmr_wq) {
- pr_err("Unable to create tmr workqueue for %s\n",
- dev->transport->name);
- ret = -ENOMEM;
- goto out_free_alua;
- }
+ goto out_destroy_device;
/*
* Setup work_queue for QUEUE_FULL
*/
INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
- /*
- * Preload the initial INQUIRY const values if we are doing
- * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
- * passthrough because this is being provided by the backend LLD.
- */
- if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
- strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8);
- strncpy(&dev->t10_wwn.model[0],
- dev->transport->inquiry_prod, 16);
- strncpy(&dev->t10_wwn.revision[0],
- dev->transport->inquiry_rev, 4);
- }
-
scsi_dump_inquiry(dev);
spin_lock(&hba->device_lock);
hba->dev_count++;
spin_unlock(&hba->device_lock);
+
+ dev->dev_flags |= DF_CONFIGURED;
+
return 0;
-out_free_alua:
- core_alua_free_lu_gp_mem(dev);
+out_destroy_device:
+ dev->transport->destroy_device(dev);
+out_free_index:
+ mutex_lock(&device_mutex);
+ idr_remove(&devices_idr, dev->dev_index);
+ mutex_unlock(&device_mutex);
out:
se_release_vpd_for_dev(dev);
return ret;
@@ -1525,8 +1045,15 @@ void target_free_device(struct se_device *dev)
WARN_ON(!list_empty(&dev->dev_sep_list));
- if (dev->dev_flags & DF_CONFIGURED) {
- destroy_workqueue(dev->tmr_wq);
+ percpu_ref_exit(&dev->non_ordered);
+ cancel_work_sync(&dev->delayed_cmd_work);
+
+ if (target_dev_configured(dev)) {
+ dev->transport->destroy_device(dev);
+
+ mutex_lock(&device_mutex);
+ idr_remove(&devices_idr, dev->dev_index);
+ mutex_unlock(&device_mutex);
spin_lock(&hba->device_lock);
hba->dev_count--;
@@ -1534,9 +1061,15 @@ void target_free_device(struct se_device *dev)
}
core_alua_free_lu_gp_mem(dev);
+ core_alua_set_lba_map(dev, NULL, 0, 0);
core_scsi3_free_all_registrations(dev);
se_release_vpd_for_dev(dev);
+ if (dev->transport->free_prot)
+ dev->transport->free_prot(dev);
+
+ kfree(dev->queues);
+ free_percpu(dev->stats);
dev->transport->free_device(dev);
}
@@ -1544,7 +1077,7 @@ int core_dev_setup_virtual_lun0(void)
{
struct se_hba *hba;
struct se_device *dev;
- char buf[] = "rd_pages=8,rd_nullio=1";
+ char buf[] = "rd_pages=8,rd_nullio=1,rd_dummy=1";
int ret;
hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
@@ -1557,7 +1090,7 @@ int core_dev_setup_virtual_lun0(void)
goto out_free_hba;
}
- hba->transport->set_configfs_dev_params(dev, buf, sizeof(buf));
+ hba->backend->ops->set_configfs_dev_params(dev, buf, sizeof(buf));
ret = target_configure_device(dev);
if (ret)
@@ -1586,3 +1119,105 @@ void core_dev_release_virtual_lun0(void)
target_free_device(g_lun0_dev);
core_delete_hba(hba);
}
+
+/*
+ * Common CDB parsing for kernel and user passthrough.
+ */
+sense_reason_t
+passthrough_parse_cdb(struct se_cmd *cmd,
+ sense_reason_t (*exec_cmd)(struct se_cmd *cmd))
+{
+ unsigned char *cdb = cmd->t_task_cdb;
+ struct se_device *dev = cmd->se_dev;
+ unsigned int size;
+
+ /*
+ * For REPORT LUNS we always need to emulate the response; everything
+ * else is passed through.
+ */
+ if (cdb[0] == REPORT_LUNS) {
+ cmd->execute_cmd = spc_emulate_report_luns;
+ return TCM_NO_SENSE;
+ }
+
+ /*
+ * With emulate_pr disabled, all reservation requests should fail,
+ * regardless of whether or not TRANSPORT_FLAG_PASSTHROUGH_PGR is set.
+ */
+ if (!dev->dev_attrib.emulate_pr &&
+ ((cdb[0] == PERSISTENT_RESERVE_IN) ||
+ (cdb[0] == PERSISTENT_RESERVE_OUT) ||
+ (cdb[0] == RELEASE_6 || cdb[0] == RELEASE_10) ||
+ (cdb[0] == RESERVE_6 || cdb[0] == RESERVE_10))) {
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ /*
+ * For PERSISTENT RESERVE IN/OUT, RELEASE, and RESERVE we need to
+ * emulate the response, since tcmu does not have the information
+ * required to process these commands.
+ */
+ if (!(dev->transport_flags &
+ TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
+ if (cdb[0] == PERSISTENT_RESERVE_IN) {
+ cmd->execute_cmd = target_scsi3_emulate_pr_in;
+ size = get_unaligned_be16(&cdb[7]);
+ return target_cmd_size_check(cmd, size);
+ }
+ if (cdb[0] == PERSISTENT_RESERVE_OUT) {
+ cmd->execute_cmd = target_scsi3_emulate_pr_out;
+ size = get_unaligned_be32(&cdb[5]);
+ return target_cmd_size_check(cmd, size);
+ }
+
+ if (cdb[0] == RELEASE_6 || cdb[0] == RELEASE_10) {
+ cmd->execute_cmd = target_scsi2_reservation_release;
+ if (cdb[0] == RELEASE_10)
+ size = get_unaligned_be16(&cdb[7]);
+ else
+ size = cmd->data_length;
+ return target_cmd_size_check(cmd, size);
+ }
+ if (cdb[0] == RESERVE_6 || cdb[0] == RESERVE_10) {
+ cmd->execute_cmd = target_scsi2_reservation_reserve;
+ if (cdb[0] == RESERVE_10)
+ size = get_unaligned_be16(&cdb[7]);
+ else
+ size = cmd->data_length;
+ return target_cmd_size_check(cmd, size);
+ }
+ }
+
+ /* Set DATA_CDB flag for ops that should have it */
+ switch (cdb[0]) {
+ case READ_6:
+ case READ_10:
+ case READ_12:
+ case READ_16:
+ case WRITE_6:
+ case WRITE_10:
+ case WRITE_12:
+ case WRITE_16:
+ case WRITE_VERIFY:
+ case WRITE_VERIFY_12:
+ case WRITE_VERIFY_16:
+ case COMPARE_AND_WRITE:
+ case XDWRITEREAD_10:
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+ break;
+ case VARIABLE_LENGTH_CMD:
+ switch (get_unaligned_be16(&cdb[8])) {
+ case READ_32:
+ case WRITE_32:
+ case WRITE_VERIFY_32:
+ case XDWRITEREAD_32:
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+ break;
+ }
+ }
+
+ cmd->execute_cmd = exec_cmd;
+
+ return TCM_NO_SENSE;
+}
+EXPORT_SYMBOL(passthrough_parse_cdb);
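
The PERSISTENT RESERVE branches above pull allocation lengths out of the CDB with the unaligned big-endian helpers. A compact sketch of the same parsing, with offsets taken from the code (get_unaligned_be16/32() are the real helpers):

static u32 pr_cdb_length(const unsigned char *cdb)
{
	switch (cdb[0]) {
	case PERSISTENT_RESERVE_IN:	/* allocation length: bytes 7..8 */
		return get_unaligned_be16(&cdb[7]);
	case PERSISTENT_RESERVE_OUT:	/* parameter list length: bytes 5..8 */
		return get_unaligned_be32(&cdb[5]);
	default:
		return 0;
	}
}
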
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index eb56eb129563..13159928e365 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -1,24 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* Filename: target_core_fabric_configfs.c
*
* This file contains generic fabric module configfs infrastructure for
* TCM v4.x code
*
- * (c) Copyright 2010-2012 RisingTide Systems LLC.
+ * (c) Copyright 2010-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@linux-iscsi.org>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
****************************************************************************/
+#include <linux/kstrtox.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/utsname.h>
@@ -34,10 +27,8 @@
#include <linux/configfs.h>
#include <target/target_core_base.h>
+#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
-#include <target/target_core_fabric_configfs.h>
-#include <target/target_core_configfs.h>
-#include <target/configfs_macros.h>
#include "target_core_internal.h"
#include "target_core_alua.h"
@@ -46,16 +37,30 @@
#define TF_CIT_SETUP(_name, _item_ops, _group_ops, _attrs) \
static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) \
{ \
- struct target_fabric_configfs_template *tfc = &tf->tf_cit_tmpl; \
- struct config_item_type *cit = &tfc->tfc_##_name##_cit; \
+ struct config_item_type *cit = &tf->tf_##_name##_cit; \
\
cit->ct_item_ops = _item_ops; \
cit->ct_group_ops = _group_ops; \
cit->ct_attrs = _attrs; \
- cit->ct_owner = tf->tf_module; \
+ cit->ct_owner = tf->tf_ops->module; \
pr_debug("Setup generic %s\n", __stringify(_name)); \
}
+#define TF_CIT_SETUP_DRV(_name, _item_ops, _group_ops) \
+static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) \
+{ \
+ struct config_item_type *cit = &tf->tf_##_name##_cit; \
+ struct configfs_attribute **attrs = tf->tf_ops->tfc_##_name##_attrs; \
+ \
+ cit->ct_item_ops = _item_ops; \
+ cit->ct_group_ops = _group_ops; \
+ cit->ct_attrs = attrs; \
+ cit->ct_owner = tf->tf_ops->module; \
+ pr_debug("Setup generic %s\n", __stringify(_name)); \
+}
+
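Hand-expanding the new macro shows what changed versus TF_CIT_SETUP: the attribute table is no longer a macro argument but is pulled from the fabric driver's registered ops. For TF_CIT_SETUP_DRV(tpg_nacl_attrib, NULL, NULL) used further down, the expansion is (pr_debug() elided):

	static void target_fabric_setup_tpg_nacl_attrib_cit(struct target_fabric_configfs *tf)
	{
		struct config_item_type *cit = &tf->tf_tpg_nacl_attrib_cit;
		/* the attribute table now comes from the fabric driver */
		struct configfs_attribute **attrs = tf->tf_ops->tfc_tpg_nacl_attrib_attrs;

		cit->ct_item_ops = NULL;
		cit->ct_group_ops = NULL;
		cit->ct_attrs = attrs;
		cit->ct_owner = tf->tf_ops->module;
	}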
+static struct configfs_item_operations target_fabric_port_item_ops;
+
/* Start of tfc_tpg_mappedlun_cit */
static int target_fabric_mappedlun_link(
@@ -63,28 +68,33 @@ static int target_fabric_mappedlun_link(
struct config_item *lun_ci)
{
struct se_dev_entry *deve;
- struct se_lun *lun = container_of(to_config_group(lun_ci),
- struct se_lun, lun_group);
+ struct se_lun *lun;
struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci),
struct se_lun_acl, se_lun_group);
struct se_portal_group *se_tpg;
struct config_item *nacl_ci, *tpg_ci, *tpg_ci_s, *wwn_ci, *wwn_ci_s;
- int ret = 0, lun_access;
+ bool lun_access_ro;
- if (lun->lun_link_magic != SE_LUN_LINK_MAGIC) {
- pr_err("Bad lun->lun_link_magic, not a valid lun_ci pointer:"
- " %p to struct lun: %p\n", lun_ci, lun);
+ if (!lun_ci->ci_type ||
+ lun_ci->ci_type->ct_item_ops != &target_fabric_port_item_ops) {
+ pr_err("Bad lun_ci, not a valid lun_ci pointer: %p\n", lun_ci);
return -EFAULT;
}
+ lun = container_of(to_config_group(lun_ci), struct se_lun, lun_group);
+
/*
* Ensure that the source port exists
*/
- if (!lun->lun_sep || !lun->lun_sep->sep_tpg) {
- pr_err("Source se_lun->lun_sep or lun->lun_sep->sep"
- "_tpg does not exist\n");
+ if (!lun->lun_se_dev) {
+ pr_err("Source se_lun->lun_se_dev does not exist\n");
return -EINVAL;
}
- se_tpg = lun->lun_sep->sep_tpg;
+ if (lun->lun_shutdown) {
+ pr_err("Unable to create mappedlun symlink because"
+ " lun->lun_shutdown=true\n");
+ return -EINVAL;
+ }
+ se_tpg = lun->lun_tpg;
nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item;
tpg_ci = &nacl_ci->ci_group->cg_item;
@@ -107,112 +117,98 @@ static int target_fabric_mappedlun_link(
}
/*
* If this struct se_node_acl was dynamically generated with
- * tpg_1/attrib/generate_node_acls=1, use the existing deve->lun_flags,
- * which be will write protected (READ-ONLY) when
+ * tpg_1/attrib/generate_node_acls=1, use the existing
+ * deve->lun_access_ro value, which will be true when
* tpg_1/attrib/demo_mode_write_protect=1
*/
- spin_lock_irq(&lacl->se_lun_nacl->device_list_lock);
- deve = lacl->se_lun_nacl->device_list[lacl->mapped_lun];
- if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)
- lun_access = deve->lun_flags;
+ rcu_read_lock();
+ deve = target_nacl_find_deve(lacl->se_lun_nacl, lacl->mapped_lun);
+ if (deve)
+ lun_access_ro = deve->lun_access_ro;
else
- lun_access =
+ lun_access_ro =
(se_tpg->se_tpg_tfo->tpg_check_prod_mode_write_protect(
- se_tpg)) ? TRANSPORT_LUNFLAGS_READ_ONLY :
- TRANSPORT_LUNFLAGS_READ_WRITE;
- spin_unlock_irq(&lacl->se_lun_nacl->device_list_lock);
+ se_tpg)) ? true : false;
+ rcu_read_unlock();
/*
	 * Determine the actual mapped LUN value the user wants.
*
* This value is what the SCSI Initiator actually sees the
- * iscsi/$IQN/$TPGT/lun/lun_* as on their SCSI Initiator Ports.
+ * $FABRIC/$WWPN/$TPGT/lun/lun_* as on their SCSI Initiator Ports.
*/
- ret = core_dev_add_initiator_node_lun_acl(se_tpg, lacl,
- lun->unpacked_lun, lun_access);
-
- return (ret < 0) ? -EINVAL : 0;
+ return core_dev_add_initiator_node_lun_acl(se_tpg, lacl, lun, lun_access_ro);
}
-static int target_fabric_mappedlun_unlink(
+static void target_fabric_mappedlun_unlink(
struct config_item *lun_acl_ci,
struct config_item *lun_ci)
{
- struct se_lun *lun;
struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci),
struct se_lun_acl, se_lun_group);
- struct se_node_acl *nacl = lacl->se_lun_nacl;
- struct se_dev_entry *deve = nacl->device_list[lacl->mapped_lun];
- struct se_portal_group *se_tpg;
- /*
- * Determine if the underlying MappedLUN has already been released..
- */
- if (!deve->se_lun)
- return 0;
-
- lun = container_of(to_config_group(lun_ci), struct se_lun, lun_group);
- se_tpg = lun->lun_sep->sep_tpg;
+ struct se_lun *lun = container_of(to_config_group(lun_ci),
+ struct se_lun, lun_group);
- core_dev_del_initiator_node_lun_acl(se_tpg, lun, lacl);
- return 0;
+ core_dev_del_initiator_node_lun_acl(lun, lacl);
}
-CONFIGFS_EATTR_STRUCT(target_fabric_mappedlun, se_lun_acl);
-#define TCM_MAPPEDLUN_ATTR(_name, _mode) \
-static struct target_fabric_mappedlun_attribute target_fabric_mappedlun_##_name = \
- __CONFIGFS_EATTR(_name, _mode, \
- target_fabric_mappedlun_show_##_name, \
- target_fabric_mappedlun_store_##_name);
+static struct se_lun_acl *item_to_lun_acl(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct se_lun_acl,
+ se_lun_group);
+}
-static ssize_t target_fabric_mappedlun_show_write_protect(
- struct se_lun_acl *lacl,
- char *page)
+static ssize_t target_fabric_mappedlun_write_protect_show(
+ struct config_item *item, char *page)
{
+ struct se_lun_acl *lacl = item_to_lun_acl(item);
struct se_node_acl *se_nacl = lacl->se_lun_nacl;
struct se_dev_entry *deve;
- ssize_t len;
+ ssize_t len = 0;
- spin_lock_irq(&se_nacl->device_list_lock);
- deve = se_nacl->device_list[lacl->mapped_lun];
- len = sprintf(page, "%d\n",
- (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) ?
- 1 : 0);
- spin_unlock_irq(&se_nacl->device_list_lock);
+ rcu_read_lock();
+ deve = target_nacl_find_deve(se_nacl, lacl->mapped_lun);
+ if (deve) {
+ len = sprintf(page, "%d\n", deve->lun_access_ro);
+ }
+ rcu_read_unlock();
return len;
}
-static ssize_t target_fabric_mappedlun_store_write_protect(
- struct se_lun_acl *lacl,
- const char *page,
- size_t count)
+static ssize_t target_fabric_mappedlun_write_protect_store(
+ struct config_item *item, const char *page, size_t count)
{
+ struct se_lun_acl *lacl = item_to_lun_acl(item);
struct se_node_acl *se_nacl = lacl->se_lun_nacl;
struct se_portal_group *se_tpg = se_nacl->se_tpg;
- unsigned long op;
+ unsigned long wp;
+ int ret;
- if (strict_strtoul(page, 0, &op))
- return -EINVAL;
+ ret = kstrtoul(page, 0, &wp);
+ if (ret)
+ return ret;
- if ((op != 1) && (op != 0))
+ if ((wp != 1) && (wp != 0))
return -EINVAL;
- core_update_device_list_access(lacl->mapped_lun, (op) ?
- TRANSPORT_LUNFLAGS_READ_ONLY :
- TRANSPORT_LUNFLAGS_READ_WRITE,
- lacl->se_lun_nacl);
+ /* wp=1 means lun_access_ro=true */
+ core_update_device_list_access(lacl->mapped_lun, wp, lacl->se_lun_nacl);
pr_debug("%s_ConfigFS: Changed Initiator ACL: %s"
- " Mapped LUN: %u Write Protect bit to %s\n",
- se_tpg->se_tpg_tfo->get_fabric_name(),
- lacl->initiatorname, lacl->mapped_lun, (op) ? "ON" : "OFF");
+ " Mapped LUN: %llu Write Protect bit to %s\n",
+ se_tpg->se_tpg_tfo->fabric_name,
+ se_nacl->initiatorname, lacl->mapped_lun, (wp) ? "ON" : "OFF");
return count;
}
-TCM_MAPPEDLUN_ATTR(write_protect, S_IRUGO | S_IWUSR);
+CONFIGFS_ATTR(target_fabric_mappedlun_, write_protect);
-CONFIGFS_EATTR_OPS(target_fabric_mappedlun, se_lun_acl, se_lun_group);
+static struct configfs_attribute *target_fabric_mappedlun_attrs[] = {
+ &target_fabric_mappedlun_attr_write_protect,
+ NULL,
+};
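The CONFIGFS_ATTR() macro used above comes from include/linux/configfs.h: it binds the _show/_store pair to a struct configfs_attribute named <prefix>attr_<name>, which is the symbol the attribute array references. Modulo whitespace it reads:

	#define CONFIGFS_ATTR(_pfx, _name)			\
	static struct configfs_attribute _pfx##attr_##_name = {	\
		.ca_name	= __stringify(_name),		\
		.ca_mode	= S_IRUGO | S_IWUSR,		\
		.ca_owner	= THIS_MODULE,			\
		.show		= _pfx##_name##_show,		\
		.store		= _pfx##_name##_store,		\
	}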
static void target_fabric_mappedlun_release(struct config_item *item)
{
@@ -223,15 +219,8 @@ static void target_fabric_mappedlun_release(struct config_item *item)
core_dev_free_initiator_node_lun_acl(se_tpg, lacl);
}
-static struct configfs_attribute *target_fabric_mappedlun_attrs[] = {
- &target_fabric_mappedlun_write_protect.attr,
- NULL,
-};
-
static struct configfs_item_operations target_fabric_mappedlun_item_ops = {
.release = target_fabric_mappedlun_release,
- .show_attribute = target_fabric_mappedlun_attr_show,
- .store_attribute = target_fabric_mappedlun_attr_store,
.allow_link = target_fabric_mappedlun_link,
.drop_link = target_fabric_mappedlun_unlink,
};
@@ -267,49 +256,12 @@ TF_CIT_SETUP(tpg_mappedlun_stat, NULL, &target_fabric_mappedlun_stat_group_ops,
/* End of tfc_tpg_mappedlun_port_cit */
-/* Start of tfc_tpg_nacl_attrib_cit */
-
-CONFIGFS_EATTR_OPS(target_fabric_nacl_attrib, se_node_acl, acl_attrib_group);
-
-static struct configfs_item_operations target_fabric_nacl_attrib_item_ops = {
- .show_attribute = target_fabric_nacl_attrib_attr_show,
- .store_attribute = target_fabric_nacl_attrib_attr_store,
-};
-
-TF_CIT_SETUP(tpg_nacl_attrib, &target_fabric_nacl_attrib_item_ops, NULL, NULL);
-
-/* End of tfc_tpg_nacl_attrib_cit */
-
-/* Start of tfc_tpg_nacl_auth_cit */
-
-CONFIGFS_EATTR_OPS(target_fabric_nacl_auth, se_node_acl, acl_auth_group);
-
-static struct configfs_item_operations target_fabric_nacl_auth_item_ops = {
- .show_attribute = target_fabric_nacl_auth_attr_show,
- .store_attribute = target_fabric_nacl_auth_attr_store,
-};
-
-TF_CIT_SETUP(tpg_nacl_auth, &target_fabric_nacl_auth_item_ops, NULL, NULL);
-
-/* End of tfc_tpg_nacl_auth_cit */
-
-/* Start of tfc_tpg_nacl_param_cit */
-
-CONFIGFS_EATTR_OPS(target_fabric_nacl_param, se_node_acl, acl_param_group);
-
-static struct configfs_item_operations target_fabric_nacl_param_item_ops = {
- .show_attribute = target_fabric_nacl_param_attr_show,
- .store_attribute = target_fabric_nacl_param_attr_store,
-};
-
-TF_CIT_SETUP(tpg_nacl_param, &target_fabric_nacl_param_item_ops, NULL, NULL);
-
-/* End of tfc_tpg_nacl_param_cit */
+TF_CIT_SETUP_DRV(tpg_nacl_attrib, NULL, NULL);
+TF_CIT_SETUP_DRV(tpg_nacl_auth, NULL, NULL);
+TF_CIT_SETUP_DRV(tpg_nacl_param, NULL, NULL);
/* Start of tfc_tpg_nacl_base_cit */
-CONFIGFS_EATTR_OPS(target_fabric_nacl_base, se_node_acl, acl_group);
-
static struct config_group *target_fabric_make_mappedlun(
struct config_group *group,
const char *name)
@@ -318,19 +270,11 @@ static struct config_group *target_fabric_make_mappedlun(
struct se_node_acl, acl_group);
struct se_portal_group *se_tpg = se_nacl->se_tpg;
struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
- struct se_lun_acl *lacl;
- struct config_item *acl_ci;
- struct config_group *lacl_cg = NULL, *ml_stat_grp = NULL;
+ struct se_lun_acl *lacl = NULL;
char *buf;
- unsigned long mapped_lun;
+ unsigned long long mapped_lun;
int ret = 0;
- acl_ci = &group->cg_item;
- if (!acl_ci) {
- pr_err("Unable to locatel acl_ci\n");
- return NULL;
- }
-
buf = kzalloc(strlen(name) + 1, GFP_KERNEL);
if (!buf) {
pr_err("Unable to allocate memory for name buf\n");
@@ -350,18 +294,9 @@ static struct config_group *target_fabric_make_mappedlun(
* Determine the Mapped LUN value. This is what the SCSI Initiator
* Port will actually see.
*/
- if (strict_strtoul(buf + 4, 0, &mapped_lun) || mapped_lun > UINT_MAX) {
- ret = -EINVAL;
- goto out;
- }
- if (mapped_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
- pr_err("Mapped LUN: %lu exceeds TRANSPORT_MAX_LUNS_PER_TPG"
- "-1: %u for Target Portal Group: %u\n", mapped_lun,
- TRANSPORT_MAX_LUNS_PER_TPG-1,
- se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
- ret = -EINVAL;
+ ret = kstrtoull(buf + 4, 0, &mapped_lun);
+ if (ret)
goto out;
- }
lacl = core_dev_init_initiator_node_lun_acl(se_tpg, se_nacl,
mapped_lun, &ret);
@@ -370,37 +305,20 @@ static struct config_group *target_fabric_make_mappedlun(
goto out;
}
- lacl_cg = &lacl->se_lun_group;
- lacl_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
- GFP_KERNEL);
- if (!lacl_cg->default_groups) {
- pr_err("Unable to allocate lacl_cg->default_groups\n");
- ret = -ENOMEM;
- goto out;
- }
-
config_group_init_type_name(&lacl->se_lun_group, name,
- &TF_CIT_TMPL(tf)->tfc_tpg_mappedlun_cit);
+ &tf->tf_tpg_mappedlun_cit);
+
config_group_init_type_name(&lacl->ml_stat_grps.stat_group,
- "statistics", &TF_CIT_TMPL(tf)->tfc_tpg_mappedlun_stat_cit);
- lacl_cg->default_groups[0] = &lacl->ml_stat_grps.stat_group;
- lacl_cg->default_groups[1] = NULL;
-
- ml_stat_grp = &lacl->ml_stat_grps.stat_group;
- ml_stat_grp->default_groups = kmalloc(sizeof(struct config_group *) * 3,
- GFP_KERNEL);
- if (!ml_stat_grp->default_groups) {
- pr_err("Unable to allocate ml_stat_grp->default_groups\n");
- ret = -ENOMEM;
- goto out;
- }
+ "statistics", &tf->tf_tpg_mappedlun_stat_cit);
+ configfs_add_default_group(&lacl->ml_stat_grps.stat_group,
+ &lacl->se_lun_group);
+
target_stat_setup_mappedlun_default_groups(lacl);
kfree(buf);
return &lacl->se_lun_group;
out:
- if (lacl_cg)
- kfree(lacl_cg->default_groups);
+ kfree(lacl);
kfree(buf);
return ERR_PTR(ret);
}
@@ -411,25 +329,9 @@ static void target_fabric_drop_mappedlun(
{
struct se_lun_acl *lacl = container_of(to_config_group(item),
struct se_lun_acl, se_lun_group);
- struct config_item *df_item;
- struct config_group *lacl_cg = NULL, *ml_stat_grp = NULL;
- int i;
-
- ml_stat_grp = &lacl->ml_stat_grps.stat_group;
- for (i = 0; ml_stat_grp->default_groups[i]; i++) {
- df_item = &ml_stat_grp->default_groups[i]->cg_item;
- ml_stat_grp->default_groups[i] = NULL;
- config_item_put(df_item);
- }
- kfree(ml_stat_grp->default_groups);
- lacl_cg = &lacl->se_lun_group;
- for (i = 0; lacl_cg->default_groups[i]; i++) {
- df_item = &lacl_cg->default_groups[i]->cg_item;
- lacl_cg->default_groups[i] = NULL;
- config_item_put(df_item);
- }
- kfree(lacl_cg->default_groups);
+ configfs_remove_default_groups(&lacl->ml_stat_grps.stat_group);
+ configfs_remove_default_groups(&lacl->se_lun_group);
config_item_put(item);
}
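This is the shape repeated throughout the patch: each default group is registered against its parent when the item is made, and the whole set is torn down with one call when it is dropped, replacing the old hand-managed default_groups[] arrays and their kmalloc()/kfree() bookkeeping. Schematically (child_grp/parent_grp are placeholders):

	/* make time: register each child group against its parent */
	config_group_init_type_name(&child_grp, "statistics", child_cit);
	configfs_add_default_group(&child_grp, &parent_grp);

	/* drop time: one call releases every registered child */
	configfs_remove_default_groups(&parent_grp);
	config_item_put(item);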
@@ -438,16 +340,13 @@ static void target_fabric_nacl_base_release(struct config_item *item)
{
struct se_node_acl *se_nacl = container_of(to_config_group(item),
struct se_node_acl, acl_group);
- struct se_portal_group *se_tpg = se_nacl->se_tpg;
- struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
- tf->tf_ops.fabric_drop_nodeacl(se_nacl);
+ configfs_remove_default_groups(&se_nacl->acl_fabric_stat_group);
+ core_tpg_del_initiator_node_acl(se_nacl);
}
static struct configfs_item_operations target_fabric_nacl_base_item_ops = {
.release = target_fabric_nacl_base_release,
- .show_attribute = target_fabric_nacl_base_attr_show,
- .store_attribute = target_fabric_nacl_base_attr_store,
};
static struct configfs_group_operations target_fabric_nacl_base_group_ops = {
@@ -455,8 +354,8 @@ static struct configfs_group_operations target_fabric_nacl_base_group_ops = {
.drop_item = target_fabric_drop_mappedlun,
};
-TF_CIT_SETUP(tpg_nacl_base, &target_fabric_nacl_base_item_ops,
- &target_fabric_nacl_base_group_ops, NULL);
+TF_CIT_SETUP_DRV(tpg_nacl_base, &target_fabric_nacl_base_item_ops,
+ &target_fabric_nacl_base_group_ops);
/* End of tfc_tpg_nacl_base_cit */
@@ -479,36 +378,42 @@ static struct config_group *target_fabric_make_nodeacl(
struct se_portal_group, tpg_acl_group);
struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
struct se_node_acl *se_nacl;
- struct config_group *nacl_cg;
- if (!tf->tf_ops.fabric_make_nodeacl) {
- pr_err("tf->tf_ops.fabric_make_nodeacl is NULL\n");
- return ERR_PTR(-ENOSYS);
- }
-
- se_nacl = tf->tf_ops.fabric_make_nodeacl(se_tpg, group, name);
+ se_nacl = core_tpg_add_initiator_node_acl(se_tpg, name);
if (IS_ERR(se_nacl))
return ERR_CAST(se_nacl);
- nacl_cg = &se_nacl->acl_group;
- nacl_cg->default_groups = se_nacl->acl_default_groups;
- nacl_cg->default_groups[0] = &se_nacl->acl_attrib_group;
- nacl_cg->default_groups[1] = &se_nacl->acl_auth_group;
- nacl_cg->default_groups[2] = &se_nacl->acl_param_group;
- nacl_cg->default_groups[3] = &se_nacl->acl_fabric_stat_group;
- nacl_cg->default_groups[4] = NULL;
-
config_group_init_type_name(&se_nacl->acl_group, name,
- &TF_CIT_TMPL(tf)->tfc_tpg_nacl_base_cit);
+ &tf->tf_tpg_nacl_base_cit);
+
config_group_init_type_name(&se_nacl->acl_attrib_group, "attrib",
- &TF_CIT_TMPL(tf)->tfc_tpg_nacl_attrib_cit);
+ &tf->tf_tpg_nacl_attrib_cit);
+ configfs_add_default_group(&se_nacl->acl_attrib_group,
+ &se_nacl->acl_group);
+
config_group_init_type_name(&se_nacl->acl_auth_group, "auth",
- &TF_CIT_TMPL(tf)->tfc_tpg_nacl_auth_cit);
+ &tf->tf_tpg_nacl_auth_cit);
+ configfs_add_default_group(&se_nacl->acl_auth_group,
+ &se_nacl->acl_group);
+
config_group_init_type_name(&se_nacl->acl_param_group, "param",
- &TF_CIT_TMPL(tf)->tfc_tpg_nacl_param_cit);
+ &tf->tf_tpg_nacl_param_cit);
+ configfs_add_default_group(&se_nacl->acl_param_group,
+ &se_nacl->acl_group);
+
config_group_init_type_name(&se_nacl->acl_fabric_stat_group,
- "fabric_statistics",
- &TF_CIT_TMPL(tf)->tfc_tpg_nacl_stat_cit);
+ "fabric_statistics", &tf->tf_tpg_nacl_stat_cit);
+ configfs_add_default_group(&se_nacl->acl_fabric_stat_group,
+ &se_nacl->acl_group);
+
+ if (tf->tf_ops->fabric_init_nodeacl) {
+ int ret = tf->tf_ops->fabric_init_nodeacl(se_nacl, name);
+ if (ret) {
+ configfs_remove_default_groups(&se_nacl->acl_fabric_stat_group);
+ core_tpg_del_initiator_node_acl(se_nacl);
+ return ERR_PTR(ret);
+ }
+ }
return &se_nacl->acl_group;
}
@@ -519,16 +424,9 @@ static void target_fabric_drop_nodeacl(
{
struct se_node_acl *se_nacl = container_of(to_config_group(item),
struct se_node_acl, acl_group);
- struct config_item *df_item;
- struct config_group *nacl_cg;
- int i;
-
- nacl_cg = &se_nacl->acl_group;
- for (i = 0; nacl_cg->default_groups[i]; i++) {
- df_item = &nacl_cg->default_groups[i]->cg_item;
- nacl_cg->default_groups[i] = NULL;
- config_item_put(df_item);
- }
+
+ configfs_remove_default_groups(&se_nacl->acl_group);
+
/*
* struct se_node_acl free is done in target_fabric_nacl_base_release()
*/
@@ -546,8 +444,6 @@ TF_CIT_SETUP(tpg_nacl, NULL, &target_fabric_nacl_group_ops, NULL);
/* Start of tfc_tpg_np_base_cit */
-CONFIGFS_EATTR_OPS(target_fabric_np_base, se_tpg_np, tpg_np_group);
-
static void target_fabric_np_base_release(struct config_item *item)
{
struct se_tpg_np *se_tpg_np = container_of(to_config_group(item),
@@ -555,16 +451,14 @@ static void target_fabric_np_base_release(struct config_item *item)
struct se_portal_group *se_tpg = se_tpg_np->tpg_np_parent;
struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
- tf->tf_ops.fabric_drop_np(se_tpg_np);
+ tf->tf_ops->fabric_drop_np(se_tpg_np);
}
static struct configfs_item_operations target_fabric_np_base_item_ops = {
.release = target_fabric_np_base_release,
- .show_attribute = target_fabric_np_base_attr_show,
- .store_attribute = target_fabric_np_base_attr_store,
};
-TF_CIT_SETUP(tpg_np_base, &target_fabric_np_base_item_ops, NULL, NULL);
+TF_CIT_SETUP_DRV(tpg_np_base, &target_fabric_np_base_item_ops, NULL);
/* End of tfc_tpg_np_base_cit */
@@ -579,18 +473,18 @@ static struct config_group *target_fabric_make_np(
struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
struct se_tpg_np *se_tpg_np;
- if (!tf->tf_ops.fabric_make_np) {
+ if (!tf->tf_ops->fabric_make_np) {
 		pr_err("tf->tf_ops->fabric_make_np is NULL\n");
return ERR_PTR(-ENOSYS);
}
- se_tpg_np = tf->tf_ops.fabric_make_np(se_tpg, group, name);
+ se_tpg_np = tf->tf_ops->fabric_make_np(se_tpg, group, name);
if (!se_tpg_np || IS_ERR(se_tpg_np))
return ERR_PTR(-EINVAL);
se_tpg_np->tpg_np_parent = se_tpg;
config_group_init_type_name(&se_tpg_np->tpg_np_group, name,
- &TF_CIT_TMPL(tf)->tfc_tpg_np_base_cit);
+ &tf->tf_tpg_np_base_cit);
return &se_tpg_np->tpg_np_group;
}
@@ -616,132 +510,113 @@ TF_CIT_SETUP(tpg_np, NULL, &target_fabric_np_group_ops, NULL);
/* Start of tfc_tpg_port_cit */
-CONFIGFS_EATTR_STRUCT(target_fabric_port, se_lun);
-#define TCM_PORT_ATTR(_name, _mode) \
-static struct target_fabric_port_attribute target_fabric_port_##_name = \
- __CONFIGFS_EATTR(_name, _mode, \
- target_fabric_port_show_attr_##_name, \
- target_fabric_port_store_attr_##_name);
-
-#define TCM_PORT_ATTOR_RO(_name) \
- __CONFIGFS_EATTR_RO(_name, \
- target_fabric_port_show_attr_##_name);
+static struct se_lun *item_to_lun(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct se_lun,
+ lun_group);
+}
-/*
- * alua_tg_pt_gp
- */
-static ssize_t target_fabric_port_show_attr_alua_tg_pt_gp(
- struct se_lun *lun,
- char *page)
+static ssize_t target_fabric_port_alua_tg_pt_gp_show(struct config_item *item,
+ char *page)
{
- if (!lun || !lun->lun_sep)
+ struct se_lun *lun = item_to_lun(item);
+
+ if (!lun->lun_se_dev)
return -ENODEV;
- return core_alua_show_tg_pt_gp_info(lun->lun_sep, page);
+ return core_alua_show_tg_pt_gp_info(lun, page);
}
-static ssize_t target_fabric_port_store_attr_alua_tg_pt_gp(
- struct se_lun *lun,
- const char *page,
- size_t count)
+static ssize_t target_fabric_port_alua_tg_pt_gp_store(struct config_item *item,
+ const char *page, size_t count)
{
- if (!lun || !lun->lun_sep)
+ struct se_lun *lun = item_to_lun(item);
+
+ if (!lun->lun_se_dev)
return -ENODEV;
- return core_alua_store_tg_pt_gp_info(lun->lun_sep, page, count);
+ return core_alua_store_tg_pt_gp_info(lun, page, count);
}
-TCM_PORT_ATTR(alua_tg_pt_gp, S_IRUGO | S_IWUSR);
-
-/*
- * alua_tg_pt_offline
- */
-static ssize_t target_fabric_port_show_attr_alua_tg_pt_offline(
- struct se_lun *lun,
- char *page)
+static ssize_t target_fabric_port_alua_tg_pt_offline_show(
+ struct config_item *item, char *page)
{
- if (!lun || !lun->lun_sep)
+ struct se_lun *lun = item_to_lun(item);
+
+ if (!lun->lun_se_dev)
return -ENODEV;
return core_alua_show_offline_bit(lun, page);
}
-static ssize_t target_fabric_port_store_attr_alua_tg_pt_offline(
- struct se_lun *lun,
- const char *page,
- size_t count)
+static ssize_t target_fabric_port_alua_tg_pt_offline_store(
+ struct config_item *item, const char *page, size_t count)
{
- if (!lun || !lun->lun_sep)
+ struct se_lun *lun = item_to_lun(item);
+
+ if (!lun->lun_se_dev)
return -ENODEV;
return core_alua_store_offline_bit(lun, page, count);
}
-TCM_PORT_ATTR(alua_tg_pt_offline, S_IRUGO | S_IWUSR);
-
-/*
- * alua_tg_pt_status
- */
-static ssize_t target_fabric_port_show_attr_alua_tg_pt_status(
- struct se_lun *lun,
- char *page)
+static ssize_t target_fabric_port_alua_tg_pt_status_show(
+ struct config_item *item, char *page)
{
- if (!lun || !lun->lun_sep)
+ struct se_lun *lun = item_to_lun(item);
+
+ if (!lun->lun_se_dev)
return -ENODEV;
return core_alua_show_secondary_status(lun, page);
}
-static ssize_t target_fabric_port_store_attr_alua_tg_pt_status(
- struct se_lun *lun,
- const char *page,
- size_t count)
+static ssize_t target_fabric_port_alua_tg_pt_status_store(
+ struct config_item *item, const char *page, size_t count)
{
- if (!lun || !lun->lun_sep)
+ struct se_lun *lun = item_to_lun(item);
+
+ if (!lun->lun_se_dev)
return -ENODEV;
return core_alua_store_secondary_status(lun, page, count);
}
-TCM_PORT_ATTR(alua_tg_pt_status, S_IRUGO | S_IWUSR);
-
-/*
- * alua_tg_pt_write_md
- */
-static ssize_t target_fabric_port_show_attr_alua_tg_pt_write_md(
- struct se_lun *lun,
- char *page)
+static ssize_t target_fabric_port_alua_tg_pt_write_md_show(
+ struct config_item *item, char *page)
{
- if (!lun || !lun->lun_sep)
+ struct se_lun *lun = item_to_lun(item);
+
+ if (!lun->lun_se_dev)
return -ENODEV;
return core_alua_show_secondary_write_metadata(lun, page);
}
-static ssize_t target_fabric_port_store_attr_alua_tg_pt_write_md(
- struct se_lun *lun,
- const char *page,
- size_t count)
+static ssize_t target_fabric_port_alua_tg_pt_write_md_store(
+ struct config_item *item, const char *page, size_t count)
{
- if (!lun || !lun->lun_sep)
+ struct se_lun *lun = item_to_lun(item);
+
+ if (!lun->lun_se_dev)
return -ENODEV;
return core_alua_store_secondary_write_metadata(lun, page, count);
}
-TCM_PORT_ATTR(alua_tg_pt_write_md, S_IRUGO | S_IWUSR);
-
+CONFIGFS_ATTR(target_fabric_port_, alua_tg_pt_gp);
+CONFIGFS_ATTR(target_fabric_port_, alua_tg_pt_offline);
+CONFIGFS_ATTR(target_fabric_port_, alua_tg_pt_status);
+CONFIGFS_ATTR(target_fabric_port_, alua_tg_pt_write_md);
static struct configfs_attribute *target_fabric_port_attrs[] = {
- &target_fabric_port_alua_tg_pt_gp.attr,
- &target_fabric_port_alua_tg_pt_offline.attr,
- &target_fabric_port_alua_tg_pt_status.attr,
- &target_fabric_port_alua_tg_pt_write_md.attr,
+ &target_fabric_port_attr_alua_tg_pt_gp,
+ &target_fabric_port_attr_alua_tg_pt_offline,
+ &target_fabric_port_attr_alua_tg_pt_status,
+ &target_fabric_port_attr_alua_tg_pt_write_md,
NULL,
};
-CONFIGFS_EATTR_OPS(target_fabric_port, se_lun, lun_group);
-
static int target_fabric_port_link(
struct config_item *lun_ci,
struct config_item *se_dev_ci)
@@ -749,20 +624,19 @@ static int target_fabric_port_link(
struct config_item *tpg_ci;
struct se_lun *lun = container_of(to_config_group(lun_ci),
struct se_lun, lun_group);
- struct se_lun *lun_p;
struct se_portal_group *se_tpg;
- struct se_device *dev =
- container_of(to_config_group(se_dev_ci), struct se_device, dev_group);
+ struct se_device *dev;
struct target_fabric_configfs *tf;
int ret;
- if (dev->dev_link_magic != SE_DEV_LINK_MAGIC) {
- pr_err("Bad dev->dev_link_magic, not a valid se_dev_ci pointer:"
- " %p to struct se_device: %p\n", se_dev_ci, dev);
+ if (!se_dev_ci->ci_type ||
+ se_dev_ci->ci_type->ct_item_ops != &target_core_dev_item_ops) {
+ pr_err("Bad se_dev_ci, not a valid se_dev_ci pointer: %p\n", se_dev_ci);
return -EFAULT;
}
+ dev = container_of(to_config_group(se_dev_ci), struct se_device, dev_group);
- if (!(dev->dev_flags & DF_CONFIGURED)) {
+ if (!target_dev_configured(dev)) {
pr_err("se_device not configured yet, cannot port link\n");
return -ENODEV;
}
@@ -777,20 +651,19 @@ static int target_fabric_port_link(
return -EEXIST;
}
- lun_p = core_dev_add_lun(se_tpg, dev, lun->unpacked_lun);
- if (IS_ERR(lun_p)) {
- pr_err("core_dev_add_lun() failed\n");
- ret = PTR_ERR(lun_p);
+ ret = core_dev_add_lun(se_tpg, dev, lun);
+ if (ret) {
+ pr_err("core_dev_add_lun() failed: %d\n", ret);
goto out;
}
- if (tf->tf_ops.fabric_post_link) {
+ if (tf->tf_ops->fabric_post_link) {
/*
* Call the optional fabric_post_link() to allow a
 		 * fabric module to set up any additional state once
 		 * core_dev_add_lun() has been called.
*/
- tf->tf_ops.fabric_post_link(se_tpg, lun);
+ tf->tf_ops->fabric_post_link(se_tpg, lun);
}
return 0;
@@ -798,31 +671,37 @@ out:
return ret;
}
-static int target_fabric_port_unlink(
+static void target_fabric_port_unlink(
struct config_item *lun_ci,
struct config_item *se_dev_ci)
{
struct se_lun *lun = container_of(to_config_group(lun_ci),
struct se_lun, lun_group);
- struct se_portal_group *se_tpg = lun->lun_sep->sep_tpg;
+ struct se_portal_group *se_tpg = lun->lun_tpg;
struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
- if (tf->tf_ops.fabric_pre_unlink) {
+ if (tf->tf_ops->fabric_pre_unlink) {
/*
* Call the optional fabric_pre_unlink() to allow a
 		 * fabric module to release any additional state before
* core_dev_del_lun() is called.
*/
- tf->tf_ops.fabric_pre_unlink(se_tpg, lun);
+ tf->tf_ops->fabric_pre_unlink(se_tpg, lun);
}
- core_dev_del_lun(se_tpg, lun->unpacked_lun);
- return 0;
+ core_dev_del_lun(se_tpg, lun);
+}
+
+static void target_fabric_port_release(struct config_item *item)
+{
+ struct se_lun *lun = container_of(to_config_group(item),
+ struct se_lun, lun_group);
+
+ call_rcu(&lun->rcu_head, target_tpg_free_lun);
}
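Freeing the se_lun is deferred through call_rcu() because lookups such as target_nacl_find_deve() run under rcu_read_lock() and may still be dereferencing the LUN when the last configfs reference drops; the callback only fires after every such reader has finished. target_tpg_free_lun() itself lives in target_core_tpg.c; its shape is the standard container_of() pattern, roughly (sketch, not the verbatim body):

	static void target_tpg_free_lun(struct rcu_head *head)
	{
		struct se_lun *lun = container_of(head, struct se_lun, rcu_head);

		kfree(lun);
	}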
static struct configfs_item_operations target_fabric_port_item_ops = {
- .show_attribute = target_fabric_port_attr_show,
- .store_attribute = target_fabric_port_attr_store,
+ .release = target_fabric_port_release,
.allow_link = target_fabric_port_link,
.drop_link = target_fabric_port_unlink,
};
@@ -866,8 +745,7 @@ static struct config_group *target_fabric_make_lun(
struct se_portal_group *se_tpg = container_of(group,
struct se_portal_group, tpg_lun_group);
struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
- struct config_group *lun_cg = NULL, *port_stat_grp = NULL;
- unsigned long unpacked_lun;
+ unsigned long long unpacked_lun;
int errno;
if (strstr(name, "lun_") != name) {
@@ -875,43 +753,25 @@ static struct config_group *target_fabric_make_lun(
" \"lun_$LUN_NUMBER\"\n");
return ERR_PTR(-EINVAL);
}
- if (strict_strtoul(name + 4, 0, &unpacked_lun) || unpacked_lun > UINT_MAX)
- return ERR_PTR(-EINVAL);
-
- lun = core_get_lun_from_tpg(se_tpg, unpacked_lun);
- if (!lun)
- return ERR_PTR(-EINVAL);
+ errno = kstrtoull(name + 4, 0, &unpacked_lun);
+ if (errno)
+ return ERR_PTR(errno);
- lun_cg = &lun->lun_group;
- lun_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
- GFP_KERNEL);
- if (!lun_cg->default_groups) {
- pr_err("Unable to allocate lun_cg->default_groups\n");
- return ERR_PTR(-ENOMEM);
- }
+ lun = core_tpg_alloc_lun(se_tpg, unpacked_lun);
+ if (IS_ERR(lun))
+ return ERR_CAST(lun);
config_group_init_type_name(&lun->lun_group, name,
- &TF_CIT_TMPL(tf)->tfc_tpg_port_cit);
+ &tf->tf_tpg_port_cit);
+
config_group_init_type_name(&lun->port_stat_grps.stat_group,
- "statistics", &TF_CIT_TMPL(tf)->tfc_tpg_port_stat_cit);
- lun_cg->default_groups[0] = &lun->port_stat_grps.stat_group;
- lun_cg->default_groups[1] = NULL;
-
- port_stat_grp = &lun->port_stat_grps.stat_group;
- port_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3,
- GFP_KERNEL);
- if (!port_stat_grp->default_groups) {
- pr_err("Unable to allocate port_stat_grp->default_groups\n");
- errno = -ENOMEM;
- goto out;
- }
+ "statistics", &tf->tf_tpg_port_stat_cit);
+ configfs_add_default_group(&lun->port_stat_grps.stat_group,
+ &lun->lun_group);
+
target_stat_setup_port_default_groups(lun);
return &lun->lun_group;
-out:
- if (lun_cg)
- kfree(lun_cg->default_groups);
- return ERR_PTR(errno);
}
static void target_fabric_drop_lun(
@@ -920,25 +780,9 @@ static void target_fabric_drop_lun(
{
struct se_lun *lun = container_of(to_config_group(item),
struct se_lun, lun_group);
- struct config_item *df_item;
- struct config_group *lun_cg, *port_stat_grp;
- int i;
-
- port_stat_grp = &lun->port_stat_grps.stat_group;
- for (i = 0; port_stat_grp->default_groups[i]; i++) {
- df_item = &port_stat_grp->default_groups[i]->cg_item;
- port_stat_grp->default_groups[i] = NULL;
- config_item_put(df_item);
- }
- kfree(port_stat_grp->default_groups);
- lun_cg = &lun->lun_group;
- for (i = 0; lun_cg->default_groups[i]; i++) {
- df_item = &lun_cg->default_groups[i]->cg_item;
- lun_cg->default_groups[i] = NULL;
- config_item_put(df_item);
- }
- kfree(lun_cg->default_groups);
+ configfs_remove_default_groups(&lun->port_stat_grps.stat_group);
+ configfs_remove_default_groups(&lun->lun_group);
config_item_put(item);
}
@@ -952,69 +796,129 @@ TF_CIT_SETUP(tpg_lun, NULL, &target_fabric_lun_group_ops, NULL);
/* End of tfc_tpg_lun_cit */
-/* Start of tfc_tpg_attrib_cit */
+TF_CIT_SETUP_DRV(tpg_attrib, NULL, NULL);
+TF_CIT_SETUP_DRV(tpg_auth, NULL, NULL);
+TF_CIT_SETUP_DRV(tpg_param, NULL, NULL);
-CONFIGFS_EATTR_OPS(target_fabric_tpg_attrib, se_portal_group, tpg_attrib_group);
+/* Start of tfc_tpg_base_cit */
-static struct configfs_item_operations target_fabric_tpg_attrib_item_ops = {
- .show_attribute = target_fabric_tpg_attrib_attr_show,
- .store_attribute = target_fabric_tpg_attrib_attr_store,
-};
+static void target_fabric_tpg_release(struct config_item *item)
+{
+ struct se_portal_group *se_tpg = container_of(to_config_group(item),
+ struct se_portal_group, tpg_group);
+ struct se_wwn *wwn = se_tpg->se_tpg_wwn;
+ struct target_fabric_configfs *tf = wwn->wwn_tf;
-TF_CIT_SETUP(tpg_attrib, &target_fabric_tpg_attrib_item_ops, NULL, NULL);
+ tf->tf_ops->fabric_drop_tpg(se_tpg);
+}
-/* End of tfc_tpg_attrib_cit */
+static struct configfs_item_operations target_fabric_tpg_base_item_ops = {
+ .release = target_fabric_tpg_release,
+};
-/* Start of tfc_tpg_auth_cit */
+static ssize_t target_fabric_tpg_base_enable_show(struct config_item *item,
+ char *page)
+{
+ return sysfs_emit(page, "%d\n", to_tpg(item)->enabled);
+}
-CONFIGFS_EATTR_OPS(target_fabric_tpg_auth, se_portal_group, tpg_auth_group);
+static ssize_t target_fabric_tpg_base_enable_store(struct config_item *item,
+ const char *page,
+ size_t count)
+{
+ struct se_portal_group *se_tpg = to_tpg(item);
+ int ret;
+ bool op;
-static struct configfs_item_operations target_fabric_tpg_auth_item_ops = {
- .show_attribute = target_fabric_tpg_auth_attr_show,
- .store_attribute = target_fabric_tpg_auth_attr_store,
-};
+ ret = kstrtobool(page, &op);
+ if (ret)
+ return ret;
-TF_CIT_SETUP(tpg_auth, &target_fabric_tpg_auth_item_ops, NULL, NULL);
+ if (se_tpg->enabled == op)
+ return count;
+ if (op)
+ ret = target_tpg_enable(se_tpg);
+ else
+ ret = target_tpg_disable(se_tpg);
+ if (ret)
+ return ret;
+ return count;
+}
+static ssize_t target_fabric_tpg_base_rtpi_show(struct config_item *item, char *page)
+{
+ struct se_portal_group *se_tpg = to_tpg(item);
-/* End of tfc_tpg_attrib_cit */
+ return sysfs_emit(page, "%#x\n", se_tpg->tpg_rtpi);
+}
-/* Start of tfc_tpg_param_cit */
+static ssize_t target_fabric_tpg_base_rtpi_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_portal_group *se_tpg = to_tpg(item);
+ u16 val;
+ int ret;
-CONFIGFS_EATTR_OPS(target_fabric_tpg_param, se_portal_group, tpg_param_group);
+ ret = kstrtou16(page, 0, &val);
+ if (ret < 0)
+ return ret;
+ if (val == 0)
+ return -EINVAL;
-static struct configfs_item_operations target_fabric_tpg_param_item_ops = {
- .show_attribute = target_fabric_tpg_param_attr_show,
- .store_attribute = target_fabric_tpg_param_attr_store,
-};
+ if (se_tpg->enabled) {
+		pr_info("%s_TPG[%hu] - Cannot change RTPI on an enabled TPG\n",
+ se_tpg->se_tpg_tfo->fabric_name,
+ se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
+ return -EINVAL;
+ }
-TF_CIT_SETUP(tpg_param, &target_fabric_tpg_param_item_ops, NULL, NULL);
+ se_tpg->tpg_rtpi = val;
+ se_tpg->rtpi_manual = true;
-/* End of tfc_tpg_param_cit */
+ return count;
+}
-/* Start of tfc_tpg_base_cit */
-/*
- * For use with TF_TPG_ATTR() and TF_TPG_ATTR_RO()
- */
-CONFIGFS_EATTR_OPS(target_fabric_tpg, se_portal_group, tpg_group);
+CONFIGFS_ATTR(target_fabric_tpg_base_, enable);
+CONFIGFS_ATTR(target_fabric_tpg_base_, rtpi);
-static void target_fabric_tpg_release(struct config_item *item)
+static int
+target_fabric_setup_tpg_base_cit(struct target_fabric_configfs *tf)
{
- struct se_portal_group *se_tpg = container_of(to_config_group(item),
- struct se_portal_group, tpg_group);
- struct se_wwn *wwn = se_tpg->se_tpg_wwn;
- struct target_fabric_configfs *tf = wwn->wwn_tf;
+ struct config_item_type *cit = &tf->tf_tpg_base_cit;
+ struct configfs_attribute **attrs = NULL;
+ size_t nr_attrs = 0;
+ int i = 0;
- tf->tf_ops.fabric_drop_tpg(se_tpg);
-}
+ if (tf->tf_ops->tfc_tpg_base_attrs)
+ while (tf->tf_ops->tfc_tpg_base_attrs[nr_attrs] != NULL)
+ nr_attrs++;
-static struct configfs_item_operations target_fabric_tpg_base_item_ops = {
- .release = target_fabric_tpg_release,
- .show_attribute = target_fabric_tpg_attr_show,
- .store_attribute = target_fabric_tpg_attr_store,
-};
+ if (tf->tf_ops->fabric_enable_tpg)
+ nr_attrs++;
+
+ /* + 1 for target_fabric_tpg_base_attr_rtpi */
+ nr_attrs++;
+
+ /* + 1 for final NULL in the array */
+ attrs = kcalloc(nr_attrs + 1, sizeof(*attrs), GFP_KERNEL);
+ if (!attrs)
+ return -ENOMEM;
-TF_CIT_SETUP(tpg_base, &target_fabric_tpg_base_item_ops, NULL, NULL);
+ if (tf->tf_ops->tfc_tpg_base_attrs)
+ for (; tf->tf_ops->tfc_tpg_base_attrs[i] != NULL; i++)
+ attrs[i] = tf->tf_ops->tfc_tpg_base_attrs[i];
+ if (tf->tf_ops->fabric_enable_tpg)
+ attrs[i++] = &target_fabric_tpg_base_attr_enable;
+
+ attrs[i++] = &target_fabric_tpg_base_attr_rtpi;
+
+ cit->ct_item_ops = &target_fabric_tpg_base_item_ops;
+ cit->ct_attrs = attrs;
+ cit->ct_owner = tf->tf_ops->module;
+ pr_debug("Setup generic tpg_base\n");
+
+ return 0;
+}
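Worked count, to make the sizing concrete: a fabric supplying three tfc_tpg_base_attrs entries and implementing fabric_enable_tpg gets nr_attrs = 3 + 1 (enable) + 1 (rtpi) = 5, so the kcalloc() above reserves six pointer slots, the last staying NULL as the terminator configfs expects.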
/* End of tfc_tpg_base_cit */
/* Start of tfc_tpg_cit */
@@ -1027,40 +931,47 @@ static struct config_group *target_fabric_make_tpg(
struct target_fabric_configfs *tf = wwn->wwn_tf;
struct se_portal_group *se_tpg;
- if (!tf->tf_ops.fabric_make_tpg) {
- pr_err("tf->tf_ops.fabric_make_tpg is NULL\n");
+ if (!tf->tf_ops->fabric_make_tpg) {
+ pr_err("tf->tf_ops->fabric_make_tpg is NULL\n");
return ERR_PTR(-ENOSYS);
}
- se_tpg = tf->tf_ops.fabric_make_tpg(wwn, group, name);
+ se_tpg = tf->tf_ops->fabric_make_tpg(wwn, name);
if (!se_tpg || IS_ERR(se_tpg))
return ERR_PTR(-EINVAL);
- /*
- * Setup default groups from pre-allocated se_tpg->tpg_default_groups
- */
- se_tpg->tpg_group.default_groups = se_tpg->tpg_default_groups;
- se_tpg->tpg_group.default_groups[0] = &se_tpg->tpg_lun_group;
- se_tpg->tpg_group.default_groups[1] = &se_tpg->tpg_np_group;
- se_tpg->tpg_group.default_groups[2] = &se_tpg->tpg_acl_group;
- se_tpg->tpg_group.default_groups[3] = &se_tpg->tpg_attrib_group;
- se_tpg->tpg_group.default_groups[4] = &se_tpg->tpg_auth_group;
- se_tpg->tpg_group.default_groups[5] = &se_tpg->tpg_param_group;
- se_tpg->tpg_group.default_groups[6] = NULL;
config_group_init_type_name(&se_tpg->tpg_group, name,
- &TF_CIT_TMPL(tf)->tfc_tpg_base_cit);
+ &tf->tf_tpg_base_cit);
+
config_group_init_type_name(&se_tpg->tpg_lun_group, "lun",
- &TF_CIT_TMPL(tf)->tfc_tpg_lun_cit);
+ &tf->tf_tpg_lun_cit);
+ configfs_add_default_group(&se_tpg->tpg_lun_group,
+ &se_tpg->tpg_group);
+
config_group_init_type_name(&se_tpg->tpg_np_group, "np",
- &TF_CIT_TMPL(tf)->tfc_tpg_np_cit);
+ &tf->tf_tpg_np_cit);
+ configfs_add_default_group(&se_tpg->tpg_np_group,
+ &se_tpg->tpg_group);
+
config_group_init_type_name(&se_tpg->tpg_acl_group, "acls",
- &TF_CIT_TMPL(tf)->tfc_tpg_nacl_cit);
+ &tf->tf_tpg_nacl_cit);
+ configfs_add_default_group(&se_tpg->tpg_acl_group,
+ &se_tpg->tpg_group);
+
config_group_init_type_name(&se_tpg->tpg_attrib_group, "attrib",
- &TF_CIT_TMPL(tf)->tfc_tpg_attrib_cit);
+ &tf->tf_tpg_attrib_cit);
+ configfs_add_default_group(&se_tpg->tpg_attrib_group,
+ &se_tpg->tpg_group);
+
config_group_init_type_name(&se_tpg->tpg_auth_group, "auth",
- &TF_CIT_TMPL(tf)->tfc_tpg_auth_cit);
+ &tf->tf_tpg_auth_cit);
+ configfs_add_default_group(&se_tpg->tpg_auth_group,
+ &se_tpg->tpg_group);
+
config_group_init_type_name(&se_tpg->tpg_param_group, "param",
- &TF_CIT_TMPL(tf)->tfc_tpg_param_cit);
+ &tf->tf_tpg_param_cit);
+ configfs_add_default_group(&se_tpg->tpg_param_group,
+ &se_tpg->tpg_group);
return &se_tpg->tpg_group;
}
@@ -1071,19 +982,8 @@ static void target_fabric_drop_tpg(
{
struct se_portal_group *se_tpg = container_of(to_config_group(item),
struct se_portal_group, tpg_group);
- struct config_group *tpg_cg = &se_tpg->tpg_group;
- struct config_item *df_item;
- int i;
- /*
- * Release default groups, but do not release tpg_cg->default_groups
- * memory as it is statically allocated at se_tpg->tpg_default_groups.
- */
- for (i = 0; tpg_cg->default_groups[i]; i++) {
- df_item = &tpg_cg->default_groups[i]->cg_item;
- tpg_cg->default_groups[i] = NULL;
- config_item_put(df_item);
- }
+ configfs_remove_default_groups(&se_tpg->tpg_group);
config_item_put(item);
}
@@ -1093,7 +993,9 @@ static void target_fabric_release_wwn(struct config_item *item)
struct se_wwn, wwn_group);
struct target_fabric_configfs *tf = wwn->wwn_tf;
- tf->tf_ops.fabric_drop_wwn(wwn);
+ configfs_remove_default_groups(&wwn->fabric_stat_group);
+ configfs_remove_default_groups(&wwn->param_group);
+ tf->tf_ops->fabric_drop_wwn(wwn);
}
static struct configfs_item_operations target_fabric_tpg_item_ops = {
@@ -1119,6 +1021,81 @@ TF_CIT_SETUP(wwn_fabric_stats, NULL, NULL, NULL);
/* End of tfc_wwn_fabric_stats_cit */
+static ssize_t
+target_fabric_wwn_cmd_completion_affinity_show(struct config_item *item,
+ char *page)
+{
+ struct se_wwn *wwn = container_of(to_config_group(item), struct se_wwn,
+ param_group);
+ return sprintf(page, "%d\n",
+ wwn->cmd_compl_affinity == WORK_CPU_UNBOUND ?
+ SE_COMPL_AFFINITY_CURR_CPU : wwn->cmd_compl_affinity);
+}
+
+static ssize_t
+target_fabric_wwn_cmd_completion_affinity_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_wwn *wwn = container_of(to_config_group(item), struct se_wwn,
+ param_group);
+ int compl_val;
+
+ if (kstrtoint(page, 0, &compl_val))
+ return -EINVAL;
+
+ switch (compl_val) {
+ case SE_COMPL_AFFINITY_CPUID:
+ wwn->cmd_compl_affinity = compl_val;
+ break;
+ case SE_COMPL_AFFINITY_CURR_CPU:
+ wwn->cmd_compl_affinity = WORK_CPU_UNBOUND;
+ break;
+ default:
+ if (compl_val < 0 || compl_val >= nr_cpu_ids ||
+ !cpu_online(compl_val)) {
+ pr_err("Command completion value must be between %d and %d or an online CPU.\n",
+ SE_COMPL_AFFINITY_CPUID,
+ SE_COMPL_AFFINITY_CURR_CPU);
+ return -EINVAL;
+ }
+ wwn->cmd_compl_affinity = compl_val;
+ }
+
+ return count;
+}
+CONFIGFS_ATTR(target_fabric_wwn_, cmd_completion_affinity);
+
+static ssize_t
+target_fabric_wwn_default_submit_type_show(struct config_item *item,
+ char *page)
+{
+ struct se_wwn *wwn = container_of(to_config_group(item), struct se_wwn,
+ param_group);
+ return sysfs_emit(page, "%u\n",
+ wwn->wwn_tf->tf_ops->default_submit_type);
+}
+CONFIGFS_ATTR_RO(target_fabric_wwn_, default_submit_type);
+
+static ssize_t
+target_fabric_wwn_direct_submit_supported_show(struct config_item *item,
+ char *page)
+{
+ struct se_wwn *wwn = container_of(to_config_group(item), struct se_wwn,
+ param_group);
+ return sysfs_emit(page, "%u\n",
+ wwn->wwn_tf->tf_ops->direct_submit_supp);
+}
+CONFIGFS_ATTR_RO(target_fabric_wwn_, direct_submit_supported);
+
+static struct configfs_attribute *target_fabric_wwn_param_attrs[] = {
+ &target_fabric_wwn_attr_cmd_completion_affinity,
+ &target_fabric_wwn_attr_default_submit_type,
+ &target_fabric_wwn_attr_direct_submit_supported,
+ NULL,
+};
+
+TF_CIT_SETUP(wwn_param, NULL, NULL, target_fabric_wwn_param_attrs);
+
/* Start of tfc_wwn_cit */
static struct config_group *target_fabric_make_wwn(
@@ -1129,28 +1106,30 @@ static struct config_group *target_fabric_make_wwn(
struct target_fabric_configfs, tf_group);
struct se_wwn *wwn;
- if (!tf->tf_ops.fabric_make_wwn) {
+ if (!tf->tf_ops->fabric_make_wwn) {
 		pr_err("tf->tf_ops->fabric_make_wwn is NULL\n");
return ERR_PTR(-ENOSYS);
}
- wwn = tf->tf_ops.fabric_make_wwn(tf, group, name);
+ wwn = tf->tf_ops->fabric_make_wwn(tf, group, name);
if (!wwn || IS_ERR(wwn))
return ERR_PTR(-EINVAL);
+ wwn->cmd_compl_affinity = SE_COMPL_AFFINITY_CPUID;
wwn->wwn_tf = tf;
- /*
- * Setup default groups from pre-allocated wwn->wwn_default_groups
- */
- wwn->wwn_group.default_groups = wwn->wwn_default_groups;
- wwn->wwn_group.default_groups[0] = &wwn->fabric_stat_group;
- wwn->wwn_group.default_groups[1] = NULL;
- config_group_init_type_name(&wwn->wwn_group, name,
- &TF_CIT_TMPL(tf)->tfc_tpg_cit);
+ config_group_init_type_name(&wwn->wwn_group, name, &tf->tf_tpg_cit);
+
config_group_init_type_name(&wwn->fabric_stat_group, "fabric_statistics",
- &TF_CIT_TMPL(tf)->tfc_wwn_fabric_stats_cit);
+ &tf->tf_wwn_fabric_stats_cit);
+ configfs_add_default_group(&wwn->fabric_stat_group, &wwn->wwn_group);
+ config_group_init_type_name(&wwn->param_group, "param",
+ &tf->tf_wwn_param_cit);
+ configfs_add_default_group(&wwn->param_group, &wwn->wwn_group);
+
+ if (tf->tf_ops->add_wwn_groups)
+ tf->tf_ops->add_wwn_groups(wwn);
return &wwn->wwn_group;
}
@@ -1160,16 +1139,8 @@ static void target_fabric_drop_wwn(
{
struct se_wwn *wwn = container_of(to_config_group(item),
struct se_wwn, wwn_group);
- struct config_item *df_item;
- struct config_group *cg = &wwn->wwn_group;
- int i;
-
- for (i = 0; cg->default_groups[i]; i++) {
- df_item = &cg->default_groups[i]->cg_item;
- cg->default_groups[i] = NULL;
- config_item_put(df_item);
- }
+ configfs_remove_default_groups(&wwn->wwn_group);
config_item_put(item);
}
@@ -1177,41 +1148,24 @@ static struct configfs_group_operations target_fabric_wwn_group_ops = {
.make_group = target_fabric_make_wwn,
.drop_item = target_fabric_drop_wwn,
};
-/*
- * For use with TF_WWN_ATTR() and TF_WWN_ATTR_RO()
- */
-CONFIGFS_EATTR_OPS(target_fabric_wwn, target_fabric_configfs, tf_group);
-
-static struct configfs_item_operations target_fabric_wwn_item_ops = {
- .show_attribute = target_fabric_wwn_attr_show,
- .store_attribute = target_fabric_wwn_attr_store,
-};
-
-TF_CIT_SETUP(wwn, &target_fabric_wwn_item_ops, &target_fabric_wwn_group_ops, NULL);
-
-/* End of tfc_wwn_cit */
-
-/* Start of tfc_discovery_cit */
-
-CONFIGFS_EATTR_OPS(target_fabric_discovery, target_fabric_configfs,
- tf_disc_group);
-static struct configfs_item_operations target_fabric_discovery_item_ops = {
- .show_attribute = target_fabric_discovery_attr_show,
- .store_attribute = target_fabric_discovery_attr_store,
-};
-
-TF_CIT_SETUP(discovery, &target_fabric_discovery_item_ops, NULL, NULL);
-
-/* End of tfc_discovery_cit */
+TF_CIT_SETUP_DRV(wwn, NULL, &target_fabric_wwn_group_ops);
+TF_CIT_SETUP_DRV(discovery, NULL, NULL);
int target_fabric_setup_cits(struct target_fabric_configfs *tf)
{
+ int ret;
+
target_fabric_setup_discovery_cit(tf);
target_fabric_setup_wwn_cit(tf);
target_fabric_setup_wwn_fabric_stats_cit(tf);
+ target_fabric_setup_wwn_param_cit(tf);
target_fabric_setup_tpg_cit(tf);
- target_fabric_setup_tpg_base_cit(tf);
+
+ ret = target_fabric_setup_tpg_base_cit(tf);
+ if (ret)
+ return ret;
+
target_fabric_setup_tpg_port_cit(tf);
target_fabric_setup_tpg_port_stat_cit(tf);
target_fabric_setup_tpg_lun_cit(tf);
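Because target_fabric_setup_tpg_base_cit() now allocates, target_fabric_setup_cits() can fail with -ENOMEM, and whatever registers the fabric template must propagate that instead of treating CIT setup as infallible. Sketch of the caller side (cleanup shown is illustrative):

	ret = target_fabric_setup_cits(tf);
	if (ret) {
		kfree(tf);
		return ret;
	}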
diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c
index 687b0b0a4aa6..ec7bc6e30228 100644
--- a/drivers/target/target_core_fabric_lib.c
+++ b/drivers/target/target_core_fabric_lib.c
@@ -1,157 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* Filename: target_core_fabric_lib.c
*
* This file contains generic high level protocol identifier and PR
* handlers for TCM fabric modules
*
- * (c) Copyright 2010-2012 RisingTide Systems LLC.
+ * (c) Copyright 2010-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@linux-iscsi.org>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
******************************************************************************/
+/*
+ * See SPC4, section 7.5 "Protocol specific parameters" for details
+ * on the formats implemented in this file.
+ */
+
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/spinlock.h>
#include <linux/export.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
+#include <linux/unaligned.h>
+
+#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
-#include <target/target_core_configfs.h>
#include "target_core_internal.h"
#include "target_core_pr.h"
-/*
- * Handlers for Serial Attached SCSI (SAS)
- */
-u8 sas_get_fabric_proto_ident(struct se_portal_group *se_tpg)
-{
- /*
- * Return a SAS Serial SCSI Protocol identifier for loopback operations
- * This is defined in section 7.5.1 Table 362 in spc4r17
- */
- return 0x6;
-}
-EXPORT_SYMBOL(sas_get_fabric_proto_ident);
-u32 sas_get_pr_transport_id(
- struct se_portal_group *se_tpg,
- struct se_node_acl *se_nacl,
- struct t10_pr_registration *pr_reg,
+static int sas_get_pr_transport_id(
+ struct se_node_acl *nacl,
int *format_code,
unsigned char *buf)
{
- unsigned char *ptr;
int ret;
- /*
- * Set PROTOCOL IDENTIFIER to 6h for SAS
- */
- buf[0] = 0x06;
- /*
- * From spc4r17, 7.5.4.7 TransportID for initiator ports using SCSI
- * over SAS Serial SCSI Protocol
- */
- ptr = &se_nacl->initiatorname[4]; /* Skip over 'naa. prefix */
-
- ret = hex2bin(&buf[4], ptr, 8);
- if (ret < 0)
- pr_debug("sas transport_id: invalid hex string\n");
-
- /*
- * The SAS Transport ID is a hardcoded 24-byte length
- */
- return 24;
-}
-EXPORT_SYMBOL(sas_get_pr_transport_id);
-
-u32 sas_get_pr_transport_id_len(
- struct se_portal_group *se_tpg,
- struct se_node_acl *se_nacl,
- struct t10_pr_registration *pr_reg,
- int *format_code)
-{
- *format_code = 0;
- /*
- * From spc4r17, 7.5.4.7 TransportID for initiator ports using SCSI
- * over SAS Serial SCSI Protocol
- *
- * The SAS Transport ID is a hardcoded 24-byte length
- */
- return 24;
-}
-EXPORT_SYMBOL(sas_get_pr_transport_id_len);
-
-/*
- * Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above
- * Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations.
- */
-char *sas_parse_pr_out_transport_id(
- struct se_portal_group *se_tpg,
- const char *buf,
- u32 *out_tid_len,
- char **port_nexus_ptr)
-{
- /*
- * Assume the FORMAT CODE 00b from spc4r17, 7.5.4.7 TransportID
- * for initiator ports using SCSI over SAS Serial SCSI Protocol
- *
- * The TransportID for a SAS Initiator Port is of fixed size of
- * 24 bytes, and SAS does not contain a I_T nexus identifier,
- * so we return the **port_nexus_ptr set to NULL.
- */
- *port_nexus_ptr = NULL;
- *out_tid_len = 24;
-
- return (char *)&buf[4];
-}
-EXPORT_SYMBOL(sas_parse_pr_out_transport_id);
-
-/*
- * Handlers for Fibre Channel Protocol (FCP)
- */
-u8 fc_get_fabric_proto_ident(struct se_portal_group *se_tpg)
-{
- return 0x0; /* 0 = fcp-2 per SPC4 section 7.5.1 */
-}
-EXPORT_SYMBOL(fc_get_fabric_proto_ident);
+	/* Skip over the 'naa.' prefix */
+ ret = hex2bin(&buf[4], &nacl->initiatorname[4], 8);
+ if (ret) {
+ pr_debug("%s: invalid hex string\n", __func__);
+ return ret;
+ }
-u32 fc_get_pr_transport_id_len(
- struct se_portal_group *se_tpg,
- struct se_node_acl *se_nacl,
- struct t10_pr_registration *pr_reg,
- int *format_code)
-{
- *format_code = 0;
- /*
- * The FC Transport ID is a hardcoded 24-byte length
- */
return 24;
}
-EXPORT_SYMBOL(fc_get_pr_transport_id_len);
-u32 fc_get_pr_transport_id(
- struct se_portal_group *se_tpg,
+static int fc_get_pr_transport_id(
struct se_node_acl *se_nacl,
- struct t10_pr_registration *pr_reg,
int *format_code,
unsigned char *buf)
{
@@ -160,24 +59,20 @@ u32 fc_get_pr_transport_id(
u32 off = 8;
/*
- * PROTOCOL IDENTIFIER is 0h for FCP-2
- *
- * From spc4r17, 7.5.4.2 TransportID for initiator ports using
- * SCSI over Fibre Channel
- *
* We convert the ASCII formatted N Port name into a binary
* encoded TransportID.
*/
ptr = &se_nacl->initiatorname[0];
-
- for (i = 0; i < 24; ) {
+ for (i = 0; i < 23; ) {
if (!strncmp(&ptr[i], ":", 1)) {
i++;
continue;
}
ret = hex2bin(&buf[off++], &ptr[i], 1);
- if (ret < 0)
- pr_debug("fc transport_id: invalid hex string\n");
+ if (ret < 0) {
+ pr_debug("%s: invalid hex string\n", __func__);
+ return ret;
+ }
i += 2;
}
/*
@@ -185,79 +80,79 @@ u32 fc_get_pr_transport_id(
*/
return 24;
}
-EXPORT_SYMBOL(fc_get_pr_transport_id);
-char *fc_parse_pr_out_transport_id(
- struct se_portal_group *se_tpg,
- const char *buf,
- u32 *out_tid_len,
- char **port_nexus_ptr)
+static int sbp_get_pr_transport_id(
+ struct se_node_acl *nacl,
+ int *format_code,
+ unsigned char *buf)
{
- /*
- * The TransportID for a FC N Port is of fixed size of
- * 24 bytes, and FC does not contain a I_T nexus identifier,
- * so we return the **port_nexus_ptr set to NULL.
- */
- *port_nexus_ptr = NULL;
- *out_tid_len = 24;
+ int ret;
- return (char *)&buf[8];
-}
-EXPORT_SYMBOL(fc_parse_pr_out_transport_id);
+ ret = hex2bin(&buf[8], nacl->initiatorname, 8);
+ if (ret) {
+ pr_debug("%s: invalid hex string\n", __func__);
+ return ret;
+ }
-/*
- * Handlers for Internet Small Computer Systems Interface (iSCSI)
- */
+ return 24;
+}
-u8 iscsi_get_fabric_proto_ident(struct se_portal_group *se_tpg)
+static int srp_get_pr_transport_id(
+ struct se_node_acl *nacl,
+ int *format_code,
+ unsigned char *buf)
{
- /*
- * This value is defined for "Internet SCSI (iSCSI)"
- * in spc4r17 section 7.5.1 Table 362
- */
- return 0x5;
+ const char *p;
+ unsigned len, count, leading_zero_bytes;
+ int rc;
+
+ p = nacl->initiatorname;
+ if (strncasecmp(p, "0x", 2) == 0)
+ p += 2;
+ len = strlen(p);
+ if (len % 2)
+ return -EINVAL;
+
+ count = min(len / 2, 16U);
+ leading_zero_bytes = 16 - count;
+ memset(buf + 8, 0, leading_zero_bytes);
+ rc = hex2bin(buf + 8 + leading_zero_bytes, p, count);
+ if (rc < 0) {
+ pr_debug("hex2bin failed for %s: %d\n", p, rc);
+ return rc;
+ }
+
+ return 24;
}
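A worked example of the zero-fill: for initiatorname "0xdead", p advances past "0x", len = 4, so count = 2 and leading_zero_bytes = 14; bytes 8-21 of the TransportID are zeroed and hex2bin() writes 0xde and 0xad into bytes 22 and 23, right-justifying the identifier in the 16-byte field.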
-EXPORT_SYMBOL(iscsi_get_fabric_proto_ident);
-u32 iscsi_get_pr_transport_id(
- struct se_portal_group *se_tpg,
+static int iscsi_get_pr_transport_id(
struct se_node_acl *se_nacl,
struct t10_pr_registration *pr_reg,
int *format_code,
unsigned char *buf)
{
u32 off = 4, padding = 0;
+ int isid_len;
u16 len = 0;
spin_lock_irq(&se_nacl->nacl_sess_lock);
/*
- * Set PROTOCOL IDENTIFIER to 5h for iSCSI
- */
- buf[0] = 0x05;
- /*
- * From spc4r17 Section 7.5.4.6: TransportID for initiator
- * ports using SCSI over iSCSI.
+ * Only null terminate the last field.
*
- * The null-terminated, null-padded (see 4.4.2) ISCSI NAME field
- * shall contain the iSCSI name of an iSCSI initiator node (see
- * RFC 3720). The first ISCSI NAME field byte containing an ASCII
- * null character terminates the ISCSI NAME field without regard for
- * the specified length of the iSCSI TransportID or the contents of
- * the ADDITIONAL LENGTH field.
- */
- len = sprintf(&buf[off], "%s", se_nacl->initiatorname);
- /*
- * Add Extra byte for NULL terminator
- */
- len++;
- /*
- * If there is ISID present with the registration and *format code == 1
- * 1, use iSCSI Initiator port TransportID format.
+ * From spc4r37 section 7.6.4.6: TransportID for initiator ports using
+ * SCSI over iSCSI.
+ *
+ * Table 507 TPID=0 Initiator device TransportID
*
- * Otherwise use iSCSI Initiator device TransportID format that
- * does not contain the ASCII encoded iSCSI Initiator iSID value
- * provied by the iSCSi Initiator during the iSCSI login process.
+ * The null-terminated, null-padded (see 4.3.2) ISCSI NAME field shall
+ * contain the iSCSI name of an iSCSI initiator node (see RFC 7143).
+ * The first ISCSI NAME field byte containing an ASCII null character
+ * terminates the ISCSI NAME field without regard for the specified
+ * length of the iSCSI TransportID or the contents of the ADDITIONAL
+ * LENGTH field.
*/
+ len = sprintf(&buf[off], "%s", se_nacl->initiatorname);
+ off += len;
if ((*format_code == 1) && (pr_reg->isid_present_at_reg)) {
/*
* Set FORMAT CODE 01b for iSCSI Initiator port TransportID
@@ -265,8 +160,12 @@ u32 iscsi_get_pr_transport_id(
*/
buf[0] |= 0x40;
/*
- * From spc4r17 Section 7.5.4.6: TransportID for initiator
- * ports using SCSI over iSCSI. Table 390
+ * From spc4r37 Section 7.6.4.6
+ *
+ * Table 508 TPID=1 Initiator port TransportID.
+ *
+ * The ISCSI NAME field shall not be null-terminated
+ * (see 4.3.2) and shall not be padded.
*
* The SEPARATOR field shall contain the five ASCII
* characters ",i,0x".
@@ -276,23 +175,24 @@ u32 iscsi_get_pr_transport_id(
* (see RFC 3720) in the form of ASCII characters that are the
* hexadecimal digits converted from the binary iSCSI initiator
* session identifier value. The first ISCSI INITIATOR SESSION
- * ID field byte containing an ASCII null character
+ * ID field byte containing an ASCII null character terminates
+ * the ISCSI INITIATOR SESSION ID field without regard for the
+ * specified length of the iSCSI TransportID or the contents
+ * of the ADDITIONAL LENGTH field.
*/
- buf[off+len] = 0x2c; off++; /* ASCII Character: "," */
- buf[off+len] = 0x69; off++; /* ASCII Character: "i" */
- buf[off+len] = 0x2c; off++; /* ASCII Character: "," */
- buf[off+len] = 0x30; off++; /* ASCII Character: "0" */
- buf[off+len] = 0x78; off++; /* ASCII Character: "x" */
+ buf[off++] = 0x2c; /* ASCII Character: "," */
+ buf[off++] = 0x69; /* ASCII Character: "i" */
+ buf[off++] = 0x2c; /* ASCII Character: "," */
+ buf[off++] = 0x30; /* ASCII Character: "0" */
+ buf[off++] = 0x78; /* ASCII Character: "x" */
len += 5;
- buf[off+len] = pr_reg->pr_reg_isid[0]; off++;
- buf[off+len] = pr_reg->pr_reg_isid[1]; off++;
- buf[off+len] = pr_reg->pr_reg_isid[2]; off++;
- buf[off+len] = pr_reg->pr_reg_isid[3]; off++;
- buf[off+len] = pr_reg->pr_reg_isid[4]; off++;
- buf[off+len] = pr_reg->pr_reg_isid[5]; off++;
- buf[off+len] = '\0'; off++;
- len += 7;
+
+ isid_len = sprintf(buf + off, "%s", pr_reg->pr_reg_isid);
+ off += isid_len;
+ len += isid_len;
}
+ buf[off] = '\0';
+ len += 1;
spin_unlock_irq(&se_nacl->nacl_sess_lock);
/*
* The ADDITIONAL LENGTH field specifies the number of bytes that follow
@@ -303,8 +203,7 @@ u32 iscsi_get_pr_transport_id(
if (padding != 0)
len += padding;
- buf[2] = ((len >> 8) & 0xff);
- buf[3] = (len & 0xff);
+ put_unaligned_be16(len, &buf[2]);
/*
* Increment value for total payload + header length for
* full status descriptor
@@ -313,10 +212,8 @@ u32 iscsi_get_pr_transport_id(
return len;
}
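The padding exists because SPC-4 requires the iSCSI TransportID, header included, to be a multiple of four bytes long: a 17-character initiator name plus its null terminator gives len = 18, padded to 20 so that 4 header bytes plus 20 payload bytes total 24. The padded length is then stored big-endian in the ADDITIONAL LENGTH field via put_unaligned_be16().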
-EXPORT_SYMBOL(iscsi_get_pr_transport_id);
-u32 iscsi_get_pr_transport_id_len(
- struct se_portal_group *se_tpg,
+static int iscsi_get_pr_transport_id_len(
struct se_node_acl *se_nacl,
struct t10_pr_registration *pr_reg,
int *format_code)
@@ -338,7 +235,7 @@ u32 iscsi_get_pr_transport_id_len(
*/
if (pr_reg->isid_present_at_reg) {
len += 5; /* For ",i,0x" ASCII separator */
- len += 7; /* For iSCSI Initiator Session ID + Null terminator */
+ len += strlen(pr_reg->pr_reg_isid);
*format_code = 1;
} else
*format_code = 0;
@@ -359,18 +256,45 @@ u32 iscsi_get_pr_transport_id_len(
return len;
}
-EXPORT_SYMBOL(iscsi_get_pr_transport_id_len);
-char *iscsi_parse_pr_out_transport_id(
+static void sas_parse_pr_out_transport_id(char *buf, char *i_str)
+{
+ char hex[17] = {};
+
+ bin2hex(hex, buf + 4, 8);
+ snprintf(i_str, TRANSPORT_IQN_LEN, "naa.%s", hex);
+}
+
+static void srp_parse_pr_out_transport_id(char *buf, char *i_str)
+{
+ char hex[33] = {};
+
+ bin2hex(hex, buf + 8, 16);
+ snprintf(i_str, TRANSPORT_IQN_LEN, "0x%s", hex);
+}
+
+static void fcp_parse_pr_out_transport_id(char *buf, char *i_str)
+{
+ snprintf(i_str, TRANSPORT_IQN_LEN, "%8phC", buf + 8);
+}
+
+static void sbp_parse_pr_out_transport_id(char *buf, char *i_str)
+{
+ char hex[17] = {};
+
+ bin2hex(hex, buf + 8, 8);
+ snprintf(i_str, TRANSPORT_IQN_LEN, "%s", hex);
+}
+
+static bool iscsi_parse_pr_out_transport_id(
struct se_portal_group *se_tpg,
- const char *buf,
+ char *buf,
u32 *out_tid_len,
- char **port_nexus_ptr)
+ char **port_nexus_ptr,
+ char *i_str)
{
char *p;
- u32 tid_len, padding;
int i;
- u16 add_len;
u8 format_code = (buf[0] & 0xc0);
/*
* Check for FORMAT CODE 00b or 01b from spc4r17, section 7.5.4.6:
@@ -388,31 +312,19 @@ char *iscsi_parse_pr_out_transport_id(
if ((format_code != 0x00) && (format_code != 0x40)) {
pr_err("Illegal format code: 0x%02x for iSCSI"
" Initiator Transport ID\n", format_code);
- return NULL;
+ return false;
}
/*
* If the caller wants the TransportID Length, we set that value for the
 * entire iSCSI Transport ID now.
*/
- if (out_tid_len != NULL) {
- add_len = ((buf[2] >> 8) & 0xff);
- add_len |= (buf[3] & 0xff);
-
- tid_len = strlen(&buf[4]);
- tid_len += 4; /* Add four bytes for iSCSI Transport ID header */
- tid_len += 1; /* Add one byte for NULL terminator */
- padding = ((-tid_len) & 3);
- if (padding != 0)
- tid_len += padding;
-
- if ((add_len + 4) != tid_len) {
- pr_debug("LIO-Target Extracted add_len: %hu "
- "does not match calculated tid_len: %u,"
- " using tid_len instead\n", add_len+4, tid_len);
- *out_tid_len = tid_len;
- } else
- *out_tid_len = (add_len + 4);
+ if (out_tid_len) {
+ /* The shift works thanks to integer promotion rules */
+ *out_tid_len = get_unaligned_be16(&buf[2]);
+ /* Add four bytes for iSCSI Transport ID header */
+ *out_tid_len += 4;
}
+
/*
* Check for ',i,0x' separator between iSCSI Name and iSCSI Initiator
* Session ID as defined in Table 390 - iSCSI initiator port TransportID
@@ -424,7 +336,7 @@ char *iscsi_parse_pr_out_transport_id(
pr_err("Unable to locate \",i,0x\" separator"
" for Initiator port identifier: %s\n",
&buf[4]);
- return NULL;
+ return false;
}
*p = '\0'; /* Terminate iSCSI Name */
p += 5; /* Skip over ",i,0x" separator */
@@ -437,6 +349,16 @@ char *iscsi_parse_pr_out_transport_id(
* iscsi_target.c:lio_sess_get_initiator_sid()
*/
for (i = 0; i < 12; i++) {
+ /*
+ * The first ISCSI INITIATOR SESSION ID field byte
+ * containing an ASCII null character terminates the
+ * ISCSI INITIATOR SESSION ID field without regard for
+ * the specified length of the iSCSI TransportID or the
+ * contents of the ADDITIONAL LENGTH field.
+ */
+ if (*p == '\0')
+ break;
+
if (isdigit(*p)) {
p++;
continue;
@@ -444,8 +366,87 @@ char *iscsi_parse_pr_out_transport_id(
*p = tolower(*p);
p++;
}
+ } else
+ *port_nexus_ptr = NULL;
+
+ strscpy(i_str, &buf[4], TRANSPORT_IQN_LEN);
+ return true;
+}
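
For reference, a minimal user-space sketch of the TPID=1 encoding handled above; the iqn and isid values are hypothetical and buffer sizing is left to the caller:

	#include <stddef.h>
	#include <stdio.h>
	#include <string.h>

	/* Sketch: build a FORMAT CODE 01b iSCSI TransportID (SPC-4 Table 508). */
	static int build_iscsi_tpid1(unsigned char *buf, size_t bufsz,
				     const char *iqn, const char *isid_hex)
	{
		size_t off = 4, len;

		memset(buf, 0, bufsz);
		buf[0] = 0x05 | 0x40;	/* PROTOCOL IDENTIFIER: iSCSI, FORMAT CODE: 01b */
		off += snprintf((char *)&buf[off], bufsz - off, "%s,i,0x%s",
				iqn, isid_hex);
		off += 1;		/* trailing ASCII null, as appended above */
		len = off - 4;		/* ADDITIONAL LENGTH counts bytes after byte 3 */
		len += (-len) & 3;	/* pad to a four-byte multiple */
		buf[2] = (len >> 8) & 0xff;	/* big-endian, per put_unaligned_be16() above */
		buf[3] = len & 0xff;
		return 4 + len;		/* total TransportID length */
	}
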
+
+int target_get_pr_transport_id_len(struct se_node_acl *nacl,
+ struct t10_pr_registration *pr_reg, int *format_code)
+{
+ switch (nacl->se_tpg->proto_id) {
+ case SCSI_PROTOCOL_FCP:
+ case SCSI_PROTOCOL_SBP:
+ case SCSI_PROTOCOL_SRP:
+ case SCSI_PROTOCOL_SAS:
+ break;
+ case SCSI_PROTOCOL_ISCSI:
+ return iscsi_get_pr_transport_id_len(nacl, pr_reg, format_code);
+ default:
+ pr_err("Unknown proto_id: 0x%02x\n", nacl->se_tpg->proto_id);
+ return -EINVAL;
+ }
+
+ /*
+ * Most transports use a fixed length 24 byte identifier.
+ */
+ *format_code = 0;
+ return 24;
+}
+
+int target_get_pr_transport_id(struct se_node_acl *nacl,
+ struct t10_pr_registration *pr_reg, int *format_code,
+ unsigned char *buf)
+{
+ switch (nacl->se_tpg->proto_id) {
+ case SCSI_PROTOCOL_SAS:
+ return sas_get_pr_transport_id(nacl, format_code, buf);
+ case SCSI_PROTOCOL_SBP:
+ return sbp_get_pr_transport_id(nacl, format_code, buf);
+ case SCSI_PROTOCOL_SRP:
+ return srp_get_pr_transport_id(nacl, format_code, buf);
+ case SCSI_PROTOCOL_FCP:
+ return fc_get_pr_transport_id(nacl, format_code, buf);
+ case SCSI_PROTOCOL_ISCSI:
+ return iscsi_get_pr_transport_id(nacl, pr_reg, format_code,
+ buf);
+ default:
+ pr_err("Unknown proto_id: 0x%02x\n", nacl->se_tpg->proto_id);
+ return -EINVAL;
}
+}
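
Callers are expected to size the descriptor before encoding it, so the two functions pair up; a hedged sketch of that pattern (the allocation and error handling here are illustrative, not taken from the patch):

	int format_code, len;
	unsigned char *buf;

	len = target_get_pr_transport_id_len(nacl, pr_reg, &format_code);
	if (len < 0)
		return len;
	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	len = target_get_pr_transport_id(nacl, pr_reg, &format_code, buf);
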
- return (char *)&buf[4];
+bool target_parse_pr_out_transport_id(struct se_portal_group *tpg,
+ char *buf, u32 *out_tid_len, char **port_nexus_ptr, char *i_str)
+{
+ switch (tpg->proto_id) {
+ case SCSI_PROTOCOL_SAS:
+ /*
+ * Assume the FORMAT CODE 00b from spc4r17, 7.5.4.7 TransportID
+ * for initiator ports using SCSI over SAS Serial SCSI Protocol.
+ */
+ sas_parse_pr_out_transport_id(buf, i_str);
+ break;
+ case SCSI_PROTOCOL_SRP:
+ srp_parse_pr_out_transport_id(buf, i_str);
+ break;
+ case SCSI_PROTOCOL_FCP:
+ fcp_parse_pr_out_transport_id(buf, i_str);
+ break;
+ case SCSI_PROTOCOL_SBP:
+ sbp_parse_pr_out_transport_id(buf, i_str);
+ break;
+ case SCSI_PROTOCOL_ISCSI:
+ return iscsi_parse_pr_out_transport_id(tpg, buf, out_tid_len,
+ port_nexus_ptr, i_str);
+ default:
+ pr_err("Unknown proto_id: 0x%02x\n", tpg->proto_id);
+ return false;
+ }
+
+ *port_nexus_ptr = NULL;
+ *out_tid_len = 24;
+ return true;
}
-EXPORT_SYMBOL(iscsi_parse_pr_out_transport_id);
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index b11890d85120..b2610073e8cc 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -1,26 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* Filename: target_core_file.c
*
* This file contains the Storage Engine <-> FILEIO transport specific functions
*
- * (c) Copyright 2005-2012 RisingTide Systems LLC.
+ * (c) Copyright 2005-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
******************************************************************************/
#include <linux/string.h>
@@ -30,10 +17,12 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/module.h>
+#include <linux/vmalloc.h>
#include <linux/falloc.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_host.h>
-#include <asm/unaligned.h>
+#include <linux/uio.h>
+#include <linux/scatterlist.h>
+#include <scsi/scsi_proto.h>
+#include <linux/unaligned.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
@@ -45,10 +34,6 @@ static inline struct fd_dev *FD_DEV(struct se_device *dev)
return container_of(dev, struct fd_dev, dev);
}
-/* fd_attach_hba(): (Part of se_subsystem_api_t template)
- *
- *
- */
static int fd_attach_hba(struct se_hba *hba, u32 host_id)
{
struct fd_host *fd_host;
@@ -65,10 +50,9 @@ static int fd_attach_hba(struct se_hba *hba, u32 host_id)
pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
" Target Core Stack %s\n", hba->hba_id, FD_VERSION,
- TARGET_CORE_MOD_VERSION);
- pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
- " MaxSectors: %u\n",
- hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS);
+ TARGET_CORE_VERSION);
+ pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic\n",
+ hba->hba_id, fd_host->fd_host_id);
return 0;
}
@@ -102,6 +86,24 @@ static struct se_device *fd_alloc_device(struct se_hba *hba, const char *name)
return &fd_dev->dev;
}
+static bool fd_configure_unmap(struct se_device *dev)
+{
+ struct file *file = FD_DEV(dev)->fd_file;
+ struct inode *inode = file->f_mapping->host;
+
+ if (S_ISBLK(inode->i_mode))
+ return target_configure_unmap_from_bdev(&dev->dev_attrib,
+ I_BDEV(inode));
+
+ /* Limit UNMAP emulation to 8k Number of LBAs (NoLB) */
+ dev->dev_attrib.max_unmap_lba_count = 0x2000;
+ /* Currently hardcoded to 1 in Linux/SCSI code. */
+ dev->dev_attrib.max_unmap_block_desc_count = 1;
+ dev->dev_attrib.unmap_granularity = 1;
+ dev->dev_attrib.unmap_granularity_alignment = 0;
+ return true;
+}
+
static int fd_configure_device(struct se_device *dev)
{
struct fd_dev *fd_dev = FD_DEV(dev);
@@ -150,10 +152,10 @@ static int fd_configure_device(struct se_device *dev)
*/
inode = file->f_mapping->host;
if (S_ISBLK(inode->i_mode)) {
- struct request_queue *q = bdev_get_queue(inode->i_bdev);
+ struct block_device *bdev = I_BDEV(inode);
unsigned long long dev_size;
- fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
+ fd_dev->fd_block_size = bdev_logical_block_size(bdev);
/*
* Determine the number of bytes from i_size_read() minus
* one (1) logical sector from underlying struct block_device
@@ -166,31 +168,12 @@ static int fd_configure_device(struct se_device *dev)
dev_size, div_u64(dev_size, fd_dev->fd_block_size),
fd_dev->fd_block_size);
/*
- * Check if the underlying struct block_device request_queue supports
- * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
- * in ATA and we need to set TPE=1
- */
- if (blk_queue_discard(q)) {
- dev->dev_attrib.max_unmap_lba_count =
- q->limits.max_discard_sectors;
- /*
- * Currently hardcoded to 1 in Linux/SCSI code..
- */
- dev->dev_attrib.max_unmap_block_desc_count = 1;
- dev->dev_attrib.unmap_granularity =
- q->limits.discard_granularity >> 9;
- dev->dev_attrib.unmap_granularity_alignment =
- q->limits.discard_alignment;
- pr_debug("IFILE: BLOCK Discard support available,"
- " disabled by default\n");
- }
- /*
* Enable write same emulation for IBLOCK and use 0xFFFF as
* the smaller WRITE_SAME(10) only has a two-byte block count.
*/
dev->dev_attrib.max_write_same_len = 0xFFFF;
- if (blk_queue_nonrot(q))
+ if (bdev_nonrot(bdev))
dev->dev_attrib.is_nonrot = 1;
} else {
if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
@@ -201,16 +184,6 @@ static int fd_configure_device(struct se_device *dev)
}
fd_dev->fd_block_size = FD_BLOCKSIZE;
- /*
- * Limit UNMAP emulation to 8k Number of LBAs (NoLB)
- */
- dev->dev_attrib.max_unmap_lba_count = 0x2000;
- /*
- * Currently hardcoded to 1 in Linux/SCSI code..
- */
- dev->dev_attrib.max_unmap_block_desc_count = 1;
- dev->dev_attrib.unmap_granularity = 1;
- dev->dev_attrib.unmap_granularity_alignment = 0;
/*
* Limit WRITE_SAME w/ UNMAP=0 emulation to 8k Number of LBAs (NoLB)
@@ -220,7 +193,7 @@ static int fd_configure_device(struct se_device *dev)
}
dev->dev_attrib.hw_block_size = fd_dev->fd_block_size;
- dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS;
+ dev->dev_attrib.hw_max_sectors = FD_MAX_BYTES / fd_dev->fd_block_size;
dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
@@ -245,60 +218,130 @@ fail:
return ret;
}
+static void fd_dev_call_rcu(struct rcu_head *p)
+{
+ struct se_device *dev = container_of(p, struct se_device, rcu_head);
+ struct fd_dev *fd_dev = FD_DEV(dev);
+
+ kfree(fd_dev);
+}
+
static void fd_free_device(struct se_device *dev)
{
+ call_rcu(&dev->rcu_head, fd_dev_call_rcu);
+}
+
+static void fd_destroy_device(struct se_device *dev)
+{
struct fd_dev *fd_dev = FD_DEV(dev);
if (fd_dev->fd_file) {
filp_close(fd_dev->fd_file, NULL);
fd_dev->fd_file = NULL;
}
+}
- kfree(fd_dev);
+struct target_core_file_cmd {
+ unsigned long len;
+ struct se_cmd *cmd;
+ struct kiocb iocb;
+ struct bio_vec bvecs[];
+};
+
+static void cmd_rw_aio_complete(struct kiocb *iocb, long ret)
+{
+ struct target_core_file_cmd *cmd;
+
+ cmd = container_of(iocb, struct target_core_file_cmd, iocb);
+
+ if (ret != cmd->len)
+ target_complete_cmd(cmd->cmd, SAM_STAT_CHECK_CONDITION);
+ else
+ target_complete_cmd(cmd->cmd, SAM_STAT_GOOD);
+
+ kfree(cmd);
}
-static int fd_do_rw(struct se_cmd *cmd, struct scatterlist *sgl,
- u32 sgl_nents, int is_write)
+static sense_reason_t
+fd_execute_rw_aio(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
+ enum dma_data_direction data_direction)
{
- struct se_device *se_dev = cmd->se_dev;
- struct fd_dev *dev = FD_DEV(se_dev);
- struct file *fd = dev->fd_file;
+ int is_write = !(data_direction == DMA_FROM_DEVICE);
+ struct se_device *dev = cmd->se_dev;
+ struct fd_dev *fd_dev = FD_DEV(dev);
+ struct file *file = fd_dev->fd_file;
+ struct target_core_file_cmd *aio_cmd;
+ struct iov_iter iter;
struct scatterlist *sg;
- struct iovec *iov;
- mm_segment_t old_fs;
- loff_t pos = (cmd->t_task_lba * se_dev->dev_attrib.block_size);
+ ssize_t len = 0;
int ret = 0, i;
- iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL);
- if (!iov) {
- pr_err("Unable to allocate fd_do_readv iov[]\n");
- return -ENOMEM;
- }
+ aio_cmd = kmalloc(struct_size(aio_cmd, bvecs, sgl_nents), GFP_KERNEL);
+ if (!aio_cmd)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
for_each_sg(sgl, sg, sgl_nents, i) {
- iov[i].iov_len = sg->length;
- iov[i].iov_base = kmap(sg_page(sg)) + sg->offset;
+ bvec_set_page(&aio_cmd->bvecs[i], sg_page(sg), sg->length,
+ sg->offset);
+ len += sg->length;
}
- old_fs = get_fs();
- set_fs(get_ds());
+ iov_iter_bvec(&iter, is_write, aio_cmd->bvecs, sgl_nents, len);
+
+ aio_cmd->cmd = cmd;
+ aio_cmd->len = len;
+ aio_cmd->iocb.ki_pos = cmd->t_task_lba * dev->dev_attrib.block_size;
+ aio_cmd->iocb.ki_filp = file;
+ aio_cmd->iocb.ki_complete = cmd_rw_aio_complete;
+ aio_cmd->iocb.ki_flags = IOCB_DIRECT;
+
+ if (is_write && (cmd->se_cmd_flags & SCF_FUA))
+ aio_cmd->iocb.ki_flags |= IOCB_DSYNC;
if (is_write)
- ret = vfs_writev(fd, &iov[0], sgl_nents, &pos);
+ ret = file->f_op->write_iter(&aio_cmd->iocb, &iter);
else
- ret = vfs_readv(fd, &iov[0], sgl_nents, &pos);
+ ret = file->f_op->read_iter(&aio_cmd->iocb, &iter);
+
+ if (ret != -EIOCBQUEUED)
+ cmd_rw_aio_complete(&aio_cmd->iocb, ret);
+
+ return 0;
+}
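
The submission path above leans on the standard kiocb contract; a condensed sketch of the rule it assumes:

	/*
	 * ret == -EIOCBQUEUED: the I/O was queued and ->ki_complete()
	 * will run later, exactly once, from the completion path.
	 * Any other ret (byte count or error): the call finished
	 * synchronously and the submitter invokes completion itself.
	 */
	if (ret != -EIOCBQUEUED)
		iocb->ki_complete(iocb, ret);
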
- set_fs(old_fs);
+static int fd_do_rw(struct se_cmd *cmd, struct file *fd,
+ u32 block_size, struct scatterlist *sgl,
+ u32 sgl_nents, u32 data_length, int is_write)
+{
+ struct scatterlist *sg;
+ struct iov_iter iter;
+ struct bio_vec *bvec;
+ ssize_t len = 0;
+ loff_t pos = (cmd->t_task_lba * block_size);
+ int ret = 0, i;
+
+ bvec = kcalloc(sgl_nents, sizeof(struct bio_vec), GFP_KERNEL);
+ if (!bvec) {
+		pr_err("Unable to allocate fd_do_rw bio_vec[]\n");
+ return -ENOMEM;
+ }
- for_each_sg(sgl, sg, sgl_nents, i)
- kunmap(sg_page(sg));
+ for_each_sg(sgl, sg, sgl_nents, i) {
+ bvec_set_page(&bvec[i], sg_page(sg), sg->length, sg->offset);
+ len += sg->length;
+ }
- kfree(iov);
+ iov_iter_bvec(&iter, is_write, bvec, sgl_nents, len);
+ if (is_write)
+ ret = vfs_iter_write(fd, &iter, &pos, 0);
+ else
+ ret = vfs_iter_read(fd, &iter, &pos, 0);
if (is_write) {
- if (ret < 0 || ret != cmd->data_length) {
+ if (ret < 0 || ret != data_length) {
pr_err("%s() write returned %d\n", __func__, ret);
- return (ret < 0 ? ret : -EINVAL);
+ if (ret >= 0)
+ ret = -EINVAL;
}
} else {
/*
@@ -307,21 +350,33 @@ static int fd_do_rw(struct se_cmd *cmd, struct scatterlist *sgl,
* block_device.
*/
if (S_ISBLK(file_inode(fd)->i_mode)) {
- if (ret < 0 || ret != cmd->data_length) {
+ if (ret < 0 || ret != data_length) {
pr_err("%s() returned %d, expecting %u for "
"S_ISBLK\n", __func__, ret,
- cmd->data_length);
- return (ret < 0 ? ret : -EINVAL);
+ data_length);
+ if (ret >= 0)
+ ret = -EINVAL;
}
} else {
if (ret < 0) {
pr_err("%s() returned %d for non S_ISBLK\n",
__func__, ret);
- return ret;
+ } else if (ret != data_length) {
+ /*
+ * Short read case:
+				 * Probably someone truncated the file under us.
+				 * We must explicitly zero sg-pages to prevent
+				 * exposing uninitialized pages to userspace.
+ */
+ if (ret < data_length)
+ ret += iov_iter_zero(data_length - ret, &iter);
+ else
+ ret = -EINVAL;
}
}
}
- return 1;
+ kfree(bvec);
+ return ret;
}
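
The short-read zero-fill works because vfs_iter_read() advances the iterator by however many bytes it returned, so iov_iter_zero() begins exactly where the read stopped; a condensed sketch of that invariant, using the names above:

	ret = vfs_iter_read(fd, &iter, &pos, 0);	/* iter advances by ret */
	if (ret >= 0 && ret < data_length)
		ret += iov_iter_zero(data_length - ret, &iter);	/* pad the tail */
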
static sense_reason_t
@@ -349,7 +404,7 @@ fd_execute_sync_cache(struct se_cmd *cmd)
} else {
start = cmd->t_task_lba * dev->dev_attrib.block_size;
if (cmd->data_length)
- end = start + cmd->data_length;
+ end = start + cmd->data_length - 1;
else
end = LLONG_MAX;
}
@@ -369,127 +424,137 @@ fd_execute_sync_cache(struct se_cmd *cmd)
return 0;
}
-static unsigned char *
-fd_setup_write_same_buf(struct se_cmd *cmd, struct scatterlist *sg,
- unsigned int len)
-{
- struct se_device *se_dev = cmd->se_dev;
- unsigned int block_size = se_dev->dev_attrib.block_size;
- unsigned int i = 0, end;
- unsigned char *buf, *p, *kmap_buf;
-
- buf = kzalloc(min_t(unsigned int, len, PAGE_SIZE), GFP_KERNEL);
- if (!buf) {
- pr_err("Unable to allocate fd_execute_write_same buf\n");
- return NULL;
- }
-
- kmap_buf = kmap(sg_page(sg)) + sg->offset;
- if (!kmap_buf) {
- pr_err("kmap() failed in fd_setup_write_same\n");
- kfree(buf);
- return NULL;
- }
- /*
- * Fill local *buf to contain multiple WRITE_SAME blocks up to
- * min(len, PAGE_SIZE)
- */
- p = buf;
- end = min_t(unsigned int, len, PAGE_SIZE);
-
- while (i < end) {
- memcpy(p, kmap_buf, block_size);
-
- i += block_size;
- p += block_size;
- }
- kunmap(sg_page(sg));
-
- return buf;
-}
-
static sense_reason_t
fd_execute_write_same(struct se_cmd *cmd)
{
struct se_device *se_dev = cmd->se_dev;
struct fd_dev *fd_dev = FD_DEV(se_dev);
- struct file *f = fd_dev->fd_file;
- struct scatterlist *sg;
- struct iovec *iov;
- mm_segment_t old_fs;
- sector_t nolb = sbc_get_write_same_sectors(cmd);
loff_t pos = cmd->t_task_lba * se_dev->dev_attrib.block_size;
- unsigned int len, len_tmp, iov_num;
- int i, rc;
- unsigned char *buf;
-
- if (!nolb) {
- target_complete_cmd(cmd, SAM_STAT_GOOD);
- return 0;
+ sector_t nolb = sbc_get_write_same_sectors(cmd);
+ struct iov_iter iter;
+ struct bio_vec *bvec;
+ unsigned int len = 0, i;
+ ssize_t ret;
+
+ if (cmd->prot_op) {
+ pr_err("WRITE_SAME: Protection information with FILEIO"
+ " backends not supported\n");
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
- sg = &cmd->t_data_sg[0];
+
+ if (!cmd->t_data_nents)
+ return TCM_INVALID_CDB_FIELD;
if (cmd->t_data_nents > 1 ||
- sg->length != cmd->se_dev->dev_attrib.block_size) {
+ cmd->t_data_sg[0].length != cmd->se_dev->dev_attrib.block_size) {
pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
- " block_size: %u\n", cmd->t_data_nents, sg->length,
+ " block_size: %u\n",
+ cmd->t_data_nents,
+ cmd->t_data_sg[0].length,
cmd->se_dev->dev_attrib.block_size);
return TCM_INVALID_CDB_FIELD;
}
- len = len_tmp = nolb * se_dev->dev_attrib.block_size;
- iov_num = DIV_ROUND_UP(len, PAGE_SIZE);
-
- buf = fd_setup_write_same_buf(cmd, sg, len);
- if (!buf)
+ bvec = kcalloc(nolb, sizeof(struct bio_vec), GFP_KERNEL);
+ if (!bvec)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- iov = vzalloc(sizeof(struct iovec) * iov_num);
- if (!iov) {
- pr_err("Unable to allocate fd_execute_write_same iovecs\n");
- kfree(buf);
+ for (i = 0; i < nolb; i++) {
+ bvec_set_page(&bvec[i], sg_page(&cmd->t_data_sg[0]),
+ cmd->t_data_sg[0].length,
+ cmd->t_data_sg[0].offset);
+ len += se_dev->dev_attrib.block_size;
+ }
+
+ iov_iter_bvec(&iter, ITER_SOURCE, bvec, nolb, len);
+ ret = vfs_iter_write(fd_dev->fd_file, &iter, &pos, 0);
+
+ kfree(bvec);
+ if (ret < 0 || ret != len) {
+ pr_err("vfs_iter_write() returned %zd for write same\n", ret);
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
- /*
- * Map the single fabric received scatterlist block now populated
- * in *buf into each iovec for I/O submission.
- */
- for (i = 0; i < iov_num; i++) {
- iov[i].iov_base = buf;
- iov[i].iov_len = min_t(unsigned int, len_tmp, PAGE_SIZE);
- len_tmp -= iov[i].iov_len;
+
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
+ return 0;
+}
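
The WRITE SAME fan-out works by pointing every bio_vec at the same one-block payload page, so a single vectored write repeats the block nolb times; a condensed sketch (page and block_size stand in for the values above):

	for (i = 0; i < nolb; i++)
		bvec_set_page(&bvec[i], page, block_size, 0);
	iov_iter_bvec(&iter, ITER_SOURCE, bvec, nolb, nolb * block_size);
	ret = vfs_iter_write(file, &iter, &pos, 0);
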
+
+static int
+fd_do_prot_fill(struct se_device *se_dev, sector_t lba, sector_t nolb,
+ void *buf, size_t bufsize)
+{
+ struct fd_dev *fd_dev = FD_DEV(se_dev);
+ struct file *prot_fd = fd_dev->fd_prot_file;
+ sector_t prot_length, prot;
+ loff_t pos = lba * se_dev->prot_length;
+
+ if (!prot_fd) {
+ pr_err("Unable to locate fd_dev->fd_prot_file\n");
+ return -ENODEV;
}
- old_fs = get_fs();
- set_fs(get_ds());
- rc = vfs_writev(f, &iov[0], iov_num, &pos);
- set_fs(old_fs);
+ prot_length = nolb * se_dev->prot_length;
- vfree(iov);
- kfree(buf);
+ memset(buf, 0xff, bufsize);
+ for (prot = 0; prot < prot_length;) {
+ sector_t len = min_t(sector_t, bufsize, prot_length - prot);
+ ssize_t ret = kernel_write(prot_fd, buf, len, &pos);
- if (rc < 0 || rc != len) {
- pr_err("vfs_writev() returned %d for write same\n", rc);
- return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ if (ret != len) {
+ pr_err("vfs_write to prot file failed: %zd\n", ret);
+ return ret < 0 ? ret : -ENODEV;
+ }
+ prot += ret;
}
- target_complete_cmd(cmd, SAM_STAT_GOOD);
return 0;
}
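
The protection file is a flat array of DIF tuples, so file offsets scale linearly with the LBA; a worked example assuming the common eight-byte tuple (se_dev->prot_length == 8):

	/* PI for LBA 1000 starts at byte 1000 * 8 = 8000 in *.protection; */
	/* unmapping 16 LBAs rewrites 16 * 8 = 128 bytes with 0xff tuples. */
	loff_t pos = (loff_t)lba * 8;
	size_t bytes = (size_t)nolb * 8;
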
+static int
+fd_do_prot_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
+{
+ void *buf;
+ int rc;
+
+ buf = (void *)__get_free_page(GFP_KERNEL);
+ if (!buf) {
+ pr_err("Unable to allocate FILEIO prot buf\n");
+ return -ENOMEM;
+ }
+
+ rc = fd_do_prot_fill(cmd->se_dev, lba, nolb, buf, PAGE_SIZE);
+
+ free_page((unsigned long)buf);
+
+ return rc;
+}
+
static sense_reason_t
-fd_do_unmap(struct se_cmd *cmd, void *priv, sector_t lba, sector_t nolb)
+fd_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
{
- struct file *file = priv;
+ struct file *file = FD_DEV(cmd->se_dev)->fd_file;
struct inode *inode = file->f_mapping->host;
int ret;
+	if (!nolb)
+		return 0;
+
+ if (cmd->se_dev->dev_attrib.pi_prot_type) {
+ ret = fd_do_prot_unmap(cmd, lba, nolb);
+ if (ret)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+
if (S_ISBLK(inode->i_mode)) {
		/* The backend is a block device, use discard */
- struct block_device *bdev = inode->i_bdev;
+ struct block_device *bdev = I_BDEV(inode);
+ struct se_device *dev = cmd->se_dev;
- ret = blkdev_issue_discard(bdev, lba,
- nolb, GFP_KERNEL, 0);
+ ret = blkdev_issue_discard(bdev,
+ target_to_linux_sector(dev, lba),
+ target_to_linux_sector(dev, nolb),
+ GFP_KERNEL);
if (ret < 0) {
pr_warn("FILEIO: blkdev_issue_discard() failed: %d\n",
ret);
@@ -516,86 +581,122 @@ fd_do_unmap(struct se_cmd *cmd, void *priv, sector_t lba, sector_t nolb)
}
static sense_reason_t
-fd_execute_write_same_unmap(struct se_cmd *cmd)
+fd_execute_rw_buffered(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
+ enum dma_data_direction data_direction)
{
- struct se_device *se_dev = cmd->se_dev;
- struct fd_dev *fd_dev = FD_DEV(se_dev);
- struct file *file = fd_dev->fd_file;
- sector_t lba = cmd->t_task_lba;
- sector_t nolb = sbc_get_write_same_sectors(cmd);
- int ret;
-
- if (!nolb) {
- target_complete_cmd(cmd, SAM_STAT_GOOD);
- return 0;
- }
-
- ret = fd_do_unmap(cmd, file, lba, nolb);
- if (ret)
- return ret;
-
- target_complete_cmd(cmd, GOOD);
- return 0;
-}
-
-static sense_reason_t
-fd_execute_unmap(struct se_cmd *cmd)
-{
- struct file *file = FD_DEV(cmd->se_dev)->fd_file;
-
- return sbc_execute_unmap(cmd, fd_do_unmap, file);
-}
-
-static sense_reason_t
-fd_execute_rw(struct se_cmd *cmd)
-{
- struct scatterlist *sgl = cmd->t_data_sg;
- u32 sgl_nents = cmd->t_data_nents;
- enum dma_data_direction data_direction = cmd->data_direction;
struct se_device *dev = cmd->se_dev;
+ struct fd_dev *fd_dev = FD_DEV(dev);
+ struct file *file = fd_dev->fd_file;
+ struct file *pfile = fd_dev->fd_prot_file;
+ sense_reason_t rc;
int ret = 0;
-
/*
* Call vectorized fileio functions to map struct scatterlist
* physical memory addresses to struct iovec virtual memory.
*/
if (data_direction == DMA_FROM_DEVICE) {
- ret = fd_do_rw(cmd, sgl, sgl_nents, 0);
+ if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
+ ret = fd_do_rw(cmd, pfile, dev->prot_length,
+ cmd->t_prot_sg, cmd->t_prot_nents,
+ cmd->prot_length, 0);
+ if (ret < 0)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+
+ ret = fd_do_rw(cmd, file, dev->dev_attrib.block_size,
+ sgl, sgl_nents, cmd->data_length, 0);
+
+ if (ret > 0 && cmd->prot_type && dev->dev_attrib.pi_prot_type &&
+ dev->dev_attrib.pi_prot_verify) {
+ u32 sectors = cmd->data_length >>
+ ilog2(dev->dev_attrib.block_size);
+
+ rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors,
+ 0, cmd->t_prot_sg, 0);
+ if (rc)
+ return rc;
+ }
} else {
- ret = fd_do_rw(cmd, sgl, sgl_nents, 1);
+ if (cmd->prot_type && dev->dev_attrib.pi_prot_type &&
+ dev->dev_attrib.pi_prot_verify) {
+ u32 sectors = cmd->data_length >>
+ ilog2(dev->dev_attrib.block_size);
+
+ rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors,
+ 0, cmd->t_prot_sg, 0);
+ if (rc)
+ return rc;
+ }
+
+ ret = fd_do_rw(cmd, file, dev->dev_attrib.block_size,
+ sgl, sgl_nents, cmd->data_length, 1);
/*
- * Perform implict vfs_fsync_range() for fd_do_writev() ops
+ * Perform implicit vfs_fsync_range() for fd_do_writev() ops
* for SCSI WRITEs with Forced Unit Access (FUA) set.
* Allow this to happen independent of WCE=0 setting.
*/
- if (ret > 0 &&
- dev->dev_attrib.emulate_fua_write > 0 &&
- (cmd->se_cmd_flags & SCF_FUA)) {
- struct fd_dev *fd_dev = FD_DEV(dev);
+ if (ret > 0 && (cmd->se_cmd_flags & SCF_FUA)) {
loff_t start = cmd->t_task_lba *
dev->dev_attrib.block_size;
- loff_t end = start + cmd->data_length;
+ loff_t end;
+
+ if (cmd->data_length)
+ end = start + cmd->data_length - 1;
+ else
+ end = LLONG_MAX;
vfs_fsync_range(fd_dev->fd_file, start, end, 1);
}
+
+ if (ret > 0 && cmd->prot_type && dev->dev_attrib.pi_prot_type) {
+ ret = fd_do_rw(cmd, pfile, dev->prot_length,
+ cmd->t_prot_sg, cmd->t_prot_nents,
+ cmd->prot_length, 1);
+ if (ret < 0)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
}
if (ret < 0)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- if (ret)
- target_complete_cmd(cmd, SAM_STAT_GOOD);
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
return 0;
}
+static sense_reason_t
+fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
+ enum dma_data_direction data_direction)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct fd_dev *fd_dev = FD_DEV(dev);
+
+ /*
+ * We are currently limited by the number of iovecs (2048) per
+ * single vfs_[writev,readv] call.
+ */
+ if (cmd->data_length > FD_MAX_BYTES) {
+		pr_err("FILEIO: Not able to process I/O of %u bytes due to "
+ "FD_MAX_BYTES: %u iovec count limitation\n",
+ cmd->data_length, FD_MAX_BYTES);
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+
+ if (fd_dev->fbd_flags & FDBD_HAS_ASYNC_IO)
+ return fd_execute_rw_aio(cmd, sgl, sgl_nents, data_direction);
+ return fd_execute_rw_buffered(cmd, sgl, sgl_nents, data_direction);
+}
+
enum {
- Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io, Opt_err
+ Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io,
+ Opt_fd_async_io, Opt_err
};
static match_table_t tokens = {
{Opt_fd_dev_name, "fd_dev_name=%s"},
{Opt_fd_dev_size, "fd_dev_size=%s"},
{Opt_fd_buffered_io, "fd_buffered_io=%d"},
+ {Opt_fd_async_io, "fd_async_io=%d"},
{Opt_err, NULL}
};
@@ -635,10 +736,10 @@ static ssize_t fd_set_configfs_dev_params(struct se_device *dev,
ret = -ENOMEM;
break;
}
- ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size);
+ ret = kstrtoull(arg_p, 0, &fd_dev->fd_dev_size);
kfree(arg_p);
if (ret < 0) {
- pr_err("strict_strtoull() failed for"
+ pr_err("kstrtoull() failed for"
" fd_dev_size=\n");
goto out;
}
@@ -647,7 +748,9 @@ static ssize_t fd_set_configfs_dev_params(struct se_device *dev,
fd_dev->fbd_flags |= FBDF_HAS_SIZE;
break;
case Opt_fd_buffered_io:
- match_int(args, &arg);
+ ret = match_int(args, &arg);
+ if (ret)
+ goto out;
if (arg != 1) {
pr_err("bogus fd_buffered_io=%d value\n", arg);
ret = -EINVAL;
@@ -659,6 +762,21 @@ static ssize_t fd_set_configfs_dev_params(struct se_device *dev,
fd_dev->fbd_flags |= FDBD_HAS_BUFFERED_IO_WCE;
break;
+ case Opt_fd_async_io:
+ ret = match_int(args, &arg);
+ if (ret)
+ goto out;
+ if (arg != 1) {
+ pr_err("bogus fd_async_io=%d value\n", arg);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ pr_debug("FILEIO: Using async I/O"
+ " operations for struct fd_dev\n");
+
+ fd_dev->fbd_flags |= FDBD_HAS_ASYNC_IO;
+ break;
default:
break;
}
@@ -675,10 +793,11 @@ static ssize_t fd_show_configfs_dev_params(struct se_device *dev, char *b)
ssize_t bl = 0;
bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
- bl += sprintf(b + bl, " File: %s Size: %llu Mode: %s\n",
+ bl += sprintf(b + bl, " File: %s Size: %llu Mode: %s Async: %d\n",
fd_dev->fd_dev_name, fd_dev->fd_dev_size,
(fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) ?
- "Buffered-WCE" : "O_DSYNC");
+ "Buffered-WCE" : "O_DSYNC",
+ !!(fd_dev->fbd_flags & FDBD_HAS_ASYNC_IO));
return bl;
}
@@ -702,46 +821,125 @@ static sector_t fd_get_blocks(struct se_device *dev)
dev->dev_attrib.block_size);
}
-static struct sbc_ops fd_sbc_ops = {
+static int fd_init_prot(struct se_device *dev)
+{
+ struct fd_dev *fd_dev = FD_DEV(dev);
+ struct file *prot_file, *file = fd_dev->fd_file;
+ struct inode *inode;
+ int ret, flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
+ char buf[FD_MAX_DEV_PROT_NAME];
+
+ if (!file) {
+ pr_err("Unable to locate fd_dev->fd_file\n");
+ return -ENODEV;
+ }
+
+ inode = file->f_mapping->host;
+ if (S_ISBLK(inode->i_mode)) {
+ pr_err("FILEIO Protection emulation only supported on"
+ " !S_ISBLK\n");
+ return -ENOSYS;
+ }
+
+ if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE)
+ flags &= ~O_DSYNC;
+
+ snprintf(buf, FD_MAX_DEV_PROT_NAME, "%s.protection",
+ fd_dev->fd_dev_name);
+
+ prot_file = filp_open(buf, flags, 0600);
+ if (IS_ERR(prot_file)) {
+ pr_err("filp_open(%s) failed\n", buf);
+ ret = PTR_ERR(prot_file);
+ return ret;
+ }
+ fd_dev->fd_prot_file = prot_file;
+
+ return 0;
+}
+
+static int fd_format_prot(struct se_device *dev)
+{
+ unsigned char *buf;
+ int unit_size = FDBD_FORMAT_UNIT_SIZE * dev->dev_attrib.block_size;
+ int ret;
+
+ if (!dev->dev_attrib.pi_prot_type) {
+ pr_err("Unable to format_prot while pi_prot_type == 0\n");
+ return -ENODEV;
+ }
+
+ buf = vzalloc(unit_size);
+ if (!buf) {
+ pr_err("Unable to allocate FILEIO prot buf\n");
+ return -ENOMEM;
+ }
+
+ pr_debug("Using FILEIO prot_length: %llu\n",
+ (unsigned long long)(dev->transport->get_blocks(dev) + 1) *
+ dev->prot_length);
+
+ ret = fd_do_prot_fill(dev, 0, dev->transport->get_blocks(dev) + 1,
+ buf, unit_size);
+ vfree(buf);
+ return ret;
+}
+
+static void fd_free_prot(struct se_device *dev)
+{
+ struct fd_dev *fd_dev = FD_DEV(dev);
+
+ if (!fd_dev->fd_prot_file)
+ return;
+
+ filp_close(fd_dev->fd_prot_file, NULL);
+ fd_dev->fd_prot_file = NULL;
+}
+
+static struct exec_cmd_ops fd_exec_cmd_ops = {
.execute_rw = fd_execute_rw,
.execute_sync_cache = fd_execute_sync_cache,
.execute_write_same = fd_execute_write_same,
- .execute_write_same_unmap = fd_execute_write_same_unmap,
.execute_unmap = fd_execute_unmap,
};
static sense_reason_t
fd_parse_cdb(struct se_cmd *cmd)
{
- return sbc_parse_cdb(cmd, &fd_sbc_ops);
+ return sbc_parse_cdb(cmd, &fd_exec_cmd_ops);
}
-static struct se_subsystem_api fileio_template = {
+static const struct target_backend_ops fileio_ops = {
.name = "fileio",
.inquiry_prod = "FILEIO",
.inquiry_rev = FD_VERSION,
.owner = THIS_MODULE,
- .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV,
.attach_hba = fd_attach_hba,
.detach_hba = fd_detach_hba,
.alloc_device = fd_alloc_device,
.configure_device = fd_configure_device,
+ .destroy_device = fd_destroy_device,
.free_device = fd_free_device,
+ .configure_unmap = fd_configure_unmap,
.parse_cdb = fd_parse_cdb,
.set_configfs_dev_params = fd_set_configfs_dev_params,
.show_configfs_dev_params = fd_show_configfs_dev_params,
.get_device_type = sbc_get_device_type,
.get_blocks = fd_get_blocks,
+ .init_prot = fd_init_prot,
+ .format_prot = fd_format_prot,
+ .free_prot = fd_free_prot,
+ .tb_dev_attrib_attrs = sbc_attrib_attrs,
};
static int __init fileio_module_init(void)
{
- return transport_subsystem_register(&fileio_template);
+ return transport_backend_register(&fileio_ops);
}
static void __exit fileio_module_exit(void)
{
- transport_subsystem_release(&fileio_template);
+ target_backend_unregister(&fileio_ops);
}
MODULE_DESCRIPTION("TCM FILEIO subsystem plugin");
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
index 37ffc5bd2399..929b1ecd544e 100644
--- a/drivers/target/target_core_file.h
+++ b/drivers/target/target_core_file.h
@@ -1,13 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef TARGET_CORE_FILE_H
#define TARGET_CORE_FILE_H
+#include <target/target_core_base.h>
+
#define FD_VERSION "4.0"
#define FD_MAX_DEV_NAME 256
+#define FD_MAX_DEV_PROT_NAME	(FD_MAX_DEV_NAME + 16)
#define FD_DEVICE_QUEUE_DEPTH 32
#define FD_MAX_DEVICE_QUEUE_DEPTH 128
#define FD_BLOCKSIZE 512
-#define FD_MAX_SECTORS 2048
+/*
+ * Limited by the number of iovecs (2048) per vfs_[writev,readv] call
+ */
+#define FD_MAX_BYTES 8388608
#define RRF_EMULATE_CDB 0x01
#define RRF_GOT_LBA 0x02
@@ -15,6 +22,8 @@
#define FBDF_HAS_PATH 0x01
#define FBDF_HAS_SIZE 0x02
#define FDBD_HAS_BUFFERED_IO_WCE 0x04
+#define FDBD_HAS_ASYNC_IO 0x08
+#define FDBD_FORMAT_UNIT_SIZE 2048
struct fd_dev {
struct se_device dev;
@@ -29,6 +38,7 @@ struct fd_dev {
u32 fd_block_size;
unsigned long long fd_dev_size;
struct file *fd_file;
+ struct file *fd_prot_file;
/* FILEIO HBA device is connected to */
struct fd_host *fd_host;
} ____cacheline_aligned;
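
For reference, FD_MAX_BYTES is the iovec ceiling multiplied by the page size: 2048 iovecs * 4096-byte pages = 8388608 bytes (8 MiB). A sketch of the derivation, assuming 4 KiB pages (FD_IOV_MAX is a hypothetical name; the header hardcodes the product):

	#define FD_IOV_MAX	2048			/* vfs_[writev,readv] iovec limit */
	#define FD_MAX_BYTES	(FD_IOV_MAX * 4096)	/* = 8388608 */
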
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c
index d2616cd48f1e..d508b343ba7b 100644
--- a/drivers/target/target_core_hba.c
+++ b/drivers/target/target_core_hba.c
@@ -1,26 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* Filename: target_core_hba.c
*
* This file contains the TCM HBA Transport related functions.
*
- * (c) Copyright 2003-2012 RisingTide Systems LLC.
+ * (c) Copyright 2003-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
******************************************************************************/
#include <linux/net.h>
@@ -39,63 +26,83 @@
#include "target_core_internal.h"
-static LIST_HEAD(subsystem_list);
-static DEFINE_MUTEX(subsystem_mutex);
+static LIST_HEAD(backend_list);
+static DEFINE_MUTEX(backend_mutex);
static u32 hba_id_counter;
static DEFINE_SPINLOCK(hba_lock);
static LIST_HEAD(hba_list);
-int transport_subsystem_register(struct se_subsystem_api *sub_api)
-{
- struct se_subsystem_api *s;
-
- INIT_LIST_HEAD(&sub_api->sub_api_list);
- mutex_lock(&subsystem_mutex);
- list_for_each_entry(s, &subsystem_list, sub_api_list) {
- if (!strcmp(s->name, sub_api->name)) {
- pr_err("%p is already registered with"
- " duplicate name %s, unable to process"
- " request\n", s, s->name);
- mutex_unlock(&subsystem_mutex);
+int transport_backend_register(const struct target_backend_ops *ops)
+{
+ struct target_backend *tb, *old;
+
+ tb = kzalloc(sizeof(*tb), GFP_KERNEL);
+ if (!tb)
+ return -ENOMEM;
+ tb->ops = ops;
+
+ mutex_lock(&backend_mutex);
+ list_for_each_entry(old, &backend_list, list) {
+ if (!strcmp(old->ops->name, ops->name)) {
+ pr_err("backend %s already registered.\n", ops->name);
+ mutex_unlock(&backend_mutex);
+ kfree(tb);
return -EEXIST;
}
}
- list_add_tail(&sub_api->sub_api_list, &subsystem_list);
- mutex_unlock(&subsystem_mutex);
+ target_setup_backend_cits(tb);
+ list_add_tail(&tb->list, &backend_list);
+ mutex_unlock(&backend_mutex);
- pr_debug("TCM: Registered subsystem plugin: %s struct module:"
- " %p\n", sub_api->name, sub_api->owner);
+ pr_debug("TCM: Registered subsystem plugin: %s struct module: %p\n",
+ ops->name, ops->owner);
return 0;
}
-EXPORT_SYMBOL(transport_subsystem_register);
+EXPORT_SYMBOL(transport_backend_register);
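
A backend module registers its ops table once at module init and unregisters on exit; a minimal hedged sketch (the "demo" backend and its ops are illustrative, mirroring the fileio registration earlier in this patch):

	static const struct target_backend_ops demo_ops = {
		.name	= "demo",
		.owner	= THIS_MODULE,
		/* .attach_hba, .alloc_device, .configure_device, ... */
	};

	static int __init demo_module_init(void)
	{
		return transport_backend_register(&demo_ops);
	}

	static void __exit demo_module_exit(void)
	{
		target_backend_unregister(&demo_ops);
	}

	module_init(demo_module_init);
	module_exit(demo_module_exit);
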
-void transport_subsystem_release(struct se_subsystem_api *sub_api)
+void target_backend_unregister(const struct target_backend_ops *ops)
{
- mutex_lock(&subsystem_mutex);
- list_del(&sub_api->sub_api_list);
- mutex_unlock(&subsystem_mutex);
+ struct target_backend *tb;
+
+ mutex_lock(&backend_mutex);
+ list_for_each_entry(tb, &backend_list, list) {
+ if (tb->ops == ops) {
+ list_del(&tb->list);
+ mutex_unlock(&backend_mutex);
+ /*
+ * Wait for any outstanding backend driver ->rcu_head
+ * callbacks to complete post TBO->free_device() ->
+ * call_rcu(), before allowing backend driver module
+ * unload of target_backend_ops->owner to proceed.
+ */
+ rcu_barrier();
+ kfree(tb);
+ return;
+ }
+ }
+ mutex_unlock(&backend_mutex);
}
-EXPORT_SYMBOL(transport_subsystem_release);
+EXPORT_SYMBOL(target_backend_unregister);
-static struct se_subsystem_api *core_get_backend(const char *sub_name)
+static struct target_backend *core_get_backend(const char *name)
{
- struct se_subsystem_api *s;
+ struct target_backend *tb;
- mutex_lock(&subsystem_mutex);
- list_for_each_entry(s, &subsystem_list, sub_api_list) {
- if (!strcmp(s->name, sub_name))
+ mutex_lock(&backend_mutex);
+ list_for_each_entry(tb, &backend_list, list) {
+ if (!strcmp(tb->ops->name, name))
goto found;
}
- mutex_unlock(&subsystem_mutex);
+ mutex_unlock(&backend_mutex);
return NULL;
found:
- if (s->owner && !try_module_get(s->owner))
- s = NULL;
- mutex_unlock(&subsystem_mutex);
- return s;
+ if (tb->ops->owner && !try_module_get(tb->ops->owner))
+ tb = NULL;
+ mutex_unlock(&backend_mutex);
+ return tb;
}
struct se_hba *
@@ -116,13 +123,13 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
hba->hba_index = scsi_get_new_index(SCSI_INST_INDEX);
hba->hba_flags |= hba_flags;
- hba->transport = core_get_backend(plugin_name);
- if (!hba->transport) {
+ hba->backend = core_get_backend(plugin_name);
+ if (!hba->backend) {
ret = -EINVAL;
goto out_free_hba;
}
- ret = hba->transport->attach_hba(hba, plugin_dep_id);
+ ret = hba->backend->ops->attach_hba(hba, plugin_dep_id);
if (ret < 0)
goto out_module_put;
@@ -137,9 +144,8 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
return hba;
out_module_put:
- if (hba->transport->owner)
- module_put(hba->transport->owner);
- hba->transport = NULL;
+ module_put(hba->backend->ops->owner);
+ hba->backend = NULL;
out_free_hba:
kfree(hba);
return ERR_PTR(ret);
@@ -150,7 +156,7 @@ core_delete_hba(struct se_hba *hba)
{
WARN_ON(hba->dev_count);
- hba->transport->detach_hba(hba);
+ hba->backend->ops->detach_hba(hba);
spin_lock(&hba_lock);
list_del(&hba->hba_node);
@@ -159,10 +165,14 @@ core_delete_hba(struct se_hba *hba)
pr_debug("CORE_HBA[%d] - Detached HBA from Generic Target"
" Core\n", hba->hba_id);
- if (hba->transport->owner)
- module_put(hba->transport->owner);
+ module_put(hba->backend->ops->owner);
- hba->transport = NULL;
+ hba->backend = NULL;
kfree(hba);
return 0;
}
+
+bool target_sense_desc_format(struct se_device *dev)
+{
+ return (dev) ? dev->transport->get_blocks(dev) > U32_MAX : false;
+}
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index aa1620abec6d..8ec7b534ad76 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -1,27 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* Filename: target_core_iblock.c
*
* This file contains the Storage Engine <-> Linux BlockIO transport
* specific functions.
*
- * (c) Copyright 2003-2012 RisingTide Systems LLC.
+ * (c) Copyright 2003-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
******************************************************************************/
#include <linux/string.h>
@@ -29,20 +16,23 @@
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
+#include <linux/blk-integrity.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bio.h>
-#include <linux/genhd.h>
#include <linux/file.h>
#include <linux/module.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_host.h>
-#include <asm/unaligned.h>
+#include <linux/scatterlist.h>
+#include <linux/pr.h>
+#include <scsi/scsi_proto.h>
+#include <scsi/scsi_common.h>
+#include <linux/unaligned.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include "target_core_iblock.h"
+#include "target_core_pr.h"
#define IBLOCK_MAX_BIO_PER_TASK 32 /* max # of bios to submit at a time */
#define IBLOCK_BIO_POOL_SIZE 128
@@ -53,17 +43,11 @@ static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
}
-static struct se_subsystem_api iblock_template;
-
-/* iblock_attach_hba(): (Part of se_subsystem_api_t template)
- *
- *
- */
static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
{
pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
" Generic Target Core Stack %s\n", hba->hba_id,
- IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);
+ IBLOCK_VERSION, TARGET_CORE_VERSION);
return 0;
}
@@ -80,109 +64,183 @@ static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *nam
pr_err("Unable to allocate struct iblock_dev\n");
return NULL;
}
+ ib_dev->ibd_exclusive = true;
+
+ ib_dev->ibd_plug = kcalloc(nr_cpu_ids, sizeof(*ib_dev->ibd_plug),
+ GFP_KERNEL);
+ if (!ib_dev->ibd_plug)
+ goto free_dev;
pr_debug( "IBLOCK: Allocated ib_dev for %s\n", name);
return &ib_dev->dev;
+
+free_dev:
+ kfree(ib_dev);
+ return NULL;
+}
+
+static bool iblock_configure_unmap(struct se_device *dev)
+{
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+
+ return target_configure_unmap_from_bdev(&dev->dev_attrib,
+ ib_dev->ibd_bd);
}
static int iblock_configure_device(struct se_device *dev)
{
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
struct request_queue *q;
- struct block_device *bd = NULL;
- fmode_t mode;
- int ret = -ENOMEM;
+ struct file *bdev_file;
+ struct block_device *bd;
+ struct blk_integrity *bi;
+ blk_mode_t mode = BLK_OPEN_READ;
+ void *holder = ib_dev;
+ unsigned int max_write_zeroes_sectors;
+ int ret;
if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
pr_err("Missing udev_path= parameters for IBLOCK\n");
return -EINVAL;
}
- ib_dev->ibd_bio_set = bioset_create(IBLOCK_BIO_POOL_SIZE, 0);
- if (!ib_dev->ibd_bio_set) {
+ ret = bioset_init(&ib_dev->ibd_bio_set, IBLOCK_BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
+ if (ret) {
pr_err("IBLOCK: Unable to create bioset\n");
goto out;
}
- pr_debug( "IBLOCK: Claiming struct block_device: %s\n",
- ib_dev->ibd_udev_path);
+ pr_debug("IBLOCK: Claiming struct block_device: %s: %d\n",
+ ib_dev->ibd_udev_path, ib_dev->ibd_exclusive);
- mode = FMODE_READ|FMODE_EXCL;
if (!ib_dev->ibd_readonly)
- mode |= FMODE_WRITE;
+ mode |= BLK_OPEN_WRITE;
+ else
+ dev->dev_flags |= DF_READ_ONLY;
- bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
- if (IS_ERR(bd)) {
- ret = PTR_ERR(bd);
+ if (!ib_dev->ibd_exclusive)
+ holder = NULL;
+
+ bdev_file = bdev_file_open_by_path(ib_dev->ibd_udev_path, mode, holder,
+ NULL);
+ if (IS_ERR(bdev_file)) {
+ ret = PTR_ERR(bdev_file);
goto out_free_bioset;
}
- ib_dev->ibd_bd = bd;
+ ib_dev->ibd_bdev_file = bdev_file;
+ ib_dev->ibd_bd = bd = file_bdev(bdev_file);
q = bdev_get_queue(bd);
dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
- dev->dev_attrib.hw_max_sectors = UINT_MAX;
+ dev->dev_attrib.hw_max_sectors = mult_frac(queue_max_hw_sectors(q),
+ SECTOR_SIZE,
+ dev->dev_attrib.hw_block_size);
dev->dev_attrib.hw_queue_depth = q->nr_requests;
/*
- * Check if the underlying struct block_device request_queue supports
- * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
- * in ATA and we need to set TPE=1
- */
- if (blk_queue_discard(q)) {
- dev->dev_attrib.max_unmap_lba_count =
- q->limits.max_discard_sectors;
-
- /*
- * Currently hardcoded to 1 in Linux/SCSI code..
- */
- dev->dev_attrib.max_unmap_block_desc_count = 1;
- dev->dev_attrib.unmap_granularity =
- q->limits.discard_granularity >> 9;
- dev->dev_attrib.unmap_granularity_alignment =
- q->limits.discard_alignment;
-
- pr_debug("IBLOCK: BLOCK Discard support available,"
- " disabled by default\n");
- }
- /*
* Enable write same emulation for IBLOCK and use 0xFFFF as
* the smaller WRITE_SAME(10) only has a two-byte block count.
*/
- dev->dev_attrib.max_write_same_len = 0xFFFF;
+ max_write_zeroes_sectors = bdev_write_zeroes_sectors(bd);
+ if (max_write_zeroes_sectors)
+ dev->dev_attrib.max_write_same_len = max_write_zeroes_sectors;
+ else
+ dev->dev_attrib.max_write_same_len = 0xFFFF;
- if (blk_queue_nonrot(q))
+ if (bdev_nonrot(bd))
dev->dev_attrib.is_nonrot = 1;
+ target_configure_write_atomic_from_bdev(&dev->dev_attrib, bd);
+
+ bi = bdev_get_integrity(bd);
+ if (!bi)
+ return 0;
+
+ switch (bi->csum_type) {
+ case BLK_INTEGRITY_CSUM_IP:
+ pr_err("IBLOCK export of blk_integrity: %s not supported\n",
+ blk_integrity_profile_name(bi));
+ ret = -ENOSYS;
+ goto out_blkdev_put;
+ case BLK_INTEGRITY_CSUM_CRC:
+ if (bi->flags & BLK_INTEGRITY_REF_TAG)
+ dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT;
+ else
+ dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT;
+ break;
+ default:
+ break;
+ }
+
+ dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type;
return 0;
+out_blkdev_put:
+ fput(ib_dev->ibd_bdev_file);
out_free_bioset:
- bioset_free(ib_dev->ibd_bio_set);
- ib_dev->ibd_bio_set = NULL;
+ bioset_exit(&ib_dev->ibd_bio_set);
out:
return ret;
}
-static void iblock_free_device(struct se_device *dev)
+static void iblock_dev_call_rcu(struct rcu_head *p)
{
+ struct se_device *dev = container_of(p, struct se_device, rcu_head);
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
- if (ib_dev->ibd_bd != NULL)
- blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
- if (ib_dev->ibd_bio_set != NULL)
- bioset_free(ib_dev->ibd_bio_set);
+ kfree(ib_dev->ibd_plug);
kfree(ib_dev);
}
-static unsigned long long iblock_emulate_read_cap_with_block_size(
- struct se_device *dev,
- struct block_device *bd,
- struct request_queue *q)
+static void iblock_free_device(struct se_device *dev)
+{
+ call_rcu(&dev->rcu_head, iblock_dev_call_rcu);
+}
+
+static void iblock_destroy_device(struct se_device *dev)
+{
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+
+ if (ib_dev->ibd_bdev_file)
+ fput(ib_dev->ibd_bdev_file);
+ bioset_exit(&ib_dev->ibd_bio_set);
+}
+
+static struct se_dev_plug *iblock_plug_device(struct se_device *se_dev)
+{
+ struct iblock_dev *ib_dev = IBLOCK_DEV(se_dev);
+ struct iblock_dev_plug *ib_dev_plug;
+
+ /*
+ * Each se_device has a per cpu work this can be run from. We
+ * shouldn't have multiple threads on the same cpu calling this
+ * at the same time.
+ */
+ ib_dev_plug = &ib_dev->ibd_plug[raw_smp_processor_id()];
+ if (test_and_set_bit(IBD_PLUGF_PLUGGED, &ib_dev_plug->flags))
+ return NULL;
+
+ blk_start_plug(&ib_dev_plug->blk_plug);
+ return &ib_dev_plug->se_plug;
+}
+
+static void iblock_unplug_device(struct se_dev_plug *se_plug)
+{
+ struct iblock_dev_plug *ib_dev_plug = container_of(se_plug,
+ struct iblock_dev_plug, se_plug);
+
+ blk_finish_plug(&ib_dev_plug->blk_plug);
+ clear_bit(IBD_PLUGF_PLUGGED, &ib_dev_plug->flags);
+}
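
The plug is taken at most once per CPU; a NULL return tells the caller that an outer batch on this CPU already holds it and must not be unplugged here. A hedged sketch of the pairing (the call sites are assumed, not shown in this hunk):

	struct se_dev_plug *se_plug;

	se_plug = iblock_plug_device(se_dev);
	/* ... submit one or more bios for this batch ... */
	if (se_plug)
		iblock_unplug_device(se_plug);
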
+
+static sector_t iblock_get_blocks(struct se_device *dev)
{
- unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
- bdev_logical_block_size(bd)) - 1);
- u32 block_size = bdev_logical_block_size(bd);
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+ u32 block_size = bdev_logical_block_size(ib_dev->ibd_bd);
+ unsigned long long blocks_long =
+ div_u64(bdev_nr_bytes(ib_dev->ibd_bd), block_size) - 1;
if (block_size == dev->dev_attrib.block_size)
return blocks_long;
@@ -198,6 +256,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
break;
case 512:
blocks_long <<= 3;
+ break;
default:
break;
}
@@ -254,15 +313,17 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
return blocks_long;
}
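
The tail of the switch rescales the highest-LBA value when the exported block size differs from the device's logical block size; a worked example with illustrative numbers:

	unsigned long long blocks_long = (1ULL << 30) / 4096 - 1; /* 1 GiB, 4 KiB blocks: 262143 */
	blocks_long <<= 3;	/* exported at 512 bytes: 2097144 */
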
-static void iblock_complete_cmd(struct se_cmd *cmd)
+static void iblock_complete_cmd(struct se_cmd *cmd, blk_status_t blk_status)
{
struct iblock_req *ibr = cmd->priv;
u8 status;
- if (!atomic_dec_and_test(&ibr->pending))
+ if (!refcount_dec_and_test(&ibr->pending))
return;
- if (atomic_read(&ibr->ib_bio_err_cnt))
+ if (blk_status == BLK_STS_RESV_CONFLICT)
+ status = SAM_STAT_RESERVATION_CONFLICT;
+ else if (atomic_read(&ibr->ib_bio_err_cnt))
status = SAM_STAT_CHECK_CONDITION;
else
status = SAM_STAT_GOOD;
@@ -271,34 +332,28 @@ static void iblock_complete_cmd(struct se_cmd *cmd)
kfree(ibr);
}
-static void iblock_bio_done(struct bio *bio, int err)
+static void iblock_bio_done(struct bio *bio)
{
struct se_cmd *cmd = bio->bi_private;
struct iblock_req *ibr = cmd->priv;
+ blk_status_t blk_status = bio->bi_status;
- /*
- * Set -EIO if !BIO_UPTODATE and the passed is still err=0
- */
- if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
- err = -EIO;
-
- if (err != 0) {
- pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
- " err: %d\n", bio, err);
+ if (bio->bi_status) {
+ pr_err("bio error: %p, err: %d\n", bio, bio->bi_status);
/*
* Bump the ib_bio_err_cnt and release bio.
*/
atomic_inc(&ibr->ib_bio_err_cnt);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
}
bio_put(bio);
- iblock_complete_cmd(cmd);
+ iblock_complete_cmd(cmd, blk_status);
}
-static struct bio *
-iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
+static struct bio *iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num,
+ blk_opf_t opf)
{
struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
struct bio *bio;
@@ -307,43 +362,43 @@ iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
* Only allocate as many vector entries as the bio code allows us to,
* we'll loop later on until we have handled the whole request.
*/
- if (sg_num > BIO_MAX_PAGES)
- sg_num = BIO_MAX_PAGES;
-
- bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
+ bio = bio_alloc_bioset(ib_dev->ibd_bd, bio_max_segs(sg_num), opf,
+ GFP_NOIO, &ib_dev->ibd_bio_set);
if (!bio) {
pr_err("Unable to allocate memory for bio\n");
return NULL;
}
- bio->bi_bdev = ib_dev->ibd_bd;
bio->bi_private = cmd;
bio->bi_end_io = &iblock_bio_done;
- bio->bi_sector = lba;
+ bio->bi_iter.bi_sector = lba;
return bio;
}
-static void iblock_submit_bios(struct bio_list *list, int rw)
+static void iblock_submit_bios(struct bio_list *list)
{
struct blk_plug plug;
struct bio *bio;
-
+ /*
+ * The block layer handles nested plugs, so just plug/unplug to handle
+ * fabric drivers that didn't support batching and multi bio cmds.
+ */
blk_start_plug(&plug);
while ((bio = bio_list_pop(list)))
- submit_bio(rw, bio);
+ submit_bio(bio);
blk_finish_plug(&plug);
}
-static void iblock_end_io_flush(struct bio *bio, int err)
+static void iblock_end_io_flush(struct bio *bio)
{
struct se_cmd *cmd = bio->bi_private;
- if (err)
- pr_err("IBLOCK: cache flush failed: %d\n", err);
+ if (bio->bi_status)
+ pr_err("IBLOCK: cache flush failed: %d\n", bio->bi_status);
if (cmd) {
- if (err)
+ if (bio->bi_status)
target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
else
target_complete_cmd(cmd, SAM_STAT_GOOD);
@@ -370,23 +425,26 @@ iblock_execute_sync_cache(struct se_cmd *cmd)
if (immed)
target_complete_cmd(cmd, SAM_STAT_GOOD);
- bio = bio_alloc(GFP_KERNEL, 0);
+ bio = bio_alloc(ib_dev->ibd_bd, 0, REQ_OP_WRITE | REQ_PREFLUSH,
+ GFP_KERNEL);
bio->bi_end_io = iblock_end_io_flush;
- bio->bi_bdev = ib_dev->ibd_bd;
if (!immed)
bio->bi_private = cmd;
- submit_bio(WRITE_FLUSH, bio);
+ submit_bio(bio);
return 0;
}
static sense_reason_t
-iblock_do_unmap(struct se_cmd *cmd, void *priv,
- sector_t lba, sector_t nolb)
+iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
{
- struct block_device *bdev = priv;
+ struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
+ struct se_device *dev = cmd->se_dev;
int ret;
- ret = blkdev_issue_discard(bdev, lba, nolb, GFP_KERNEL, 0);
+ ret = blkdev_issue_discard(bdev,
+ target_to_linux_sector(dev, lba),
+ target_to_linux_sector(dev, nolb),
+ GFP_KERNEL);
if (ret < 0) {
pr_err("blkdev_issue_discard() failed: %d\n", ret);
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -396,38 +454,59 @@ iblock_do_unmap(struct se_cmd *cmd, void *priv,
}
static sense_reason_t
-iblock_execute_unmap(struct se_cmd *cmd)
+iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd)
{
- struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
+ struct se_device *dev = cmd->se_dev;
+ struct scatterlist *sg = &cmd->t_data_sg[0];
+ unsigned char *buf, *not_zero;
+ int ret;
- return sbc_execute_unmap(cmd, iblock_do_unmap, bdev);
-}
+ buf = kmap(sg_page(sg)) + sg->offset;
+ if (!buf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ /*
+ * Fall back to block_execute_write_same() slow-path if
+ * incoming WRITE_SAME payload does not contain zeros.
+ */
+ not_zero = memchr_inv(buf, 0x00, cmd->data_length);
+ kunmap(sg_page(sg));
-static sense_reason_t
-iblock_execute_write_same_unmap(struct se_cmd *cmd)
-{
- struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
- sector_t lba = cmd->t_task_lba;
- sector_t nolb = sbc_get_write_same_sectors(cmd);
- int ret;
+ if (not_zero)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- ret = iblock_do_unmap(cmd, bdev, lba, nolb);
+ ret = blkdev_issue_zeroout(bdev,
+ target_to_linux_sector(dev, cmd->t_task_lba),
+ target_to_linux_sector(dev,
+ sbc_get_write_same_sectors(cmd)),
+ GFP_KERNEL, BLKDEV_ZERO_NOUNMAP);
if (ret)
- return ret;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- target_complete_cmd(cmd, GOOD);
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
return 0;
}
static sense_reason_t
iblock_execute_write_same(struct se_cmd *cmd)
{
+ struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
struct iblock_req *ibr;
struct scatterlist *sg;
struct bio *bio;
struct bio_list list;
- sector_t block_lba = cmd->t_task_lba;
- sector_t sectors = sbc_get_write_same_sectors(cmd);
+ struct se_device *dev = cmd->se_dev;
+ sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
+ sector_t sectors = target_to_linux_sector(dev,
+ sbc_get_write_same_sectors(cmd));
+
+ if (cmd->prot_op) {
+ pr_err("WRITE_SAME: Protection information with IBLOCK"
+ " backends not supported\n");
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+
+ if (!cmd->t_data_nents)
+ return TCM_INVALID_CDB_FIELD;
sg = &cmd->t_data_sg[0];
@@ -439,38 +518,43 @@ iblock_execute_write_same(struct se_cmd *cmd)
return TCM_INVALID_CDB_FIELD;
}
+ if (bdev_write_zeroes_sectors(bdev)) {
+ if (!iblock_execute_zero_out(bdev, cmd))
+ return 0;
+ }
+
ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
if (!ibr)
goto fail;
cmd->priv = ibr;
- bio = iblock_get_bio(cmd, block_lba, 1);
+ bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE);
if (!bio)
goto fail_free_ibr;
bio_list_init(&list);
bio_list_add(&list, bio);
- atomic_set(&ibr->pending, 1);
+ refcount_set(&ibr->pending, 1);
while (sectors) {
while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
!= sg->length) {
- bio = iblock_get_bio(cmd, block_lba, 1);
+ bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE);
if (!bio)
goto fail_put_bios;
- atomic_inc(&ibr->pending);
+ refcount_inc(&ibr->pending);
bio_list_add(&list, bio);
}
/* Always in 512 byte units for Linux/Block */
- block_lba += sg->length >> IBLOCK_LBA_SHIFT;
- sectors -= 1;
+ block_lba += sg->length >> SECTOR_SHIFT;
+ sectors -= sg->length >> SECTOR_SHIFT;
}
- iblock_submit_bios(&list, WRITE);
+ iblock_submit_bios(&list);
return 0;
fail_put_bios:
@@ -483,13 +567,14 @@ fail:
}
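/*
 * Editor's note: the ->pending refcount used above implements a fan-out
 * completion pattern: it starts at 1 for the submitter plus 1 per extra
 * bio, and the command completes only when the last reference drops. A
 * generic sketch of the put side (names here are illustrative, not taken
 * from this file):
 */
static void sketch_iblock_req_put(struct iblock_req *ibr, struct se_cmd *cmd)
{
	/* Only the final put - submitter or last bio end_io - completes. */
	if (refcount_dec_and_test(&ibr->pending))
		target_complete_cmd(cmd, SAM_STAT_GOOD);
}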
enum {
- Opt_udev_path, Opt_readonly, Opt_force, Opt_err
+ Opt_udev_path, Opt_readonly, Opt_force, Opt_exclusive, Opt_err,
};
static match_table_t tokens = {
{Opt_udev_path, "udev_path=%s"},
{Opt_readonly, "readonly=%d"},
{Opt_force, "force=%d"},
+ {Opt_exclusive, "exclusive=%d"},
{Opt_err, NULL}
};
@@ -499,7 +584,7 @@ static ssize_t iblock_set_configfs_dev_params(struct se_device *dev,
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
char *orig, *ptr, *arg_p, *opts;
substring_t args[MAX_OPT_ARGS];
- int ret = 0, token;
+ int ret = 0, token, tmp_exclusive;
unsigned long tmp_readonly;
opts = kstrdup(page, GFP_KERNEL);
@@ -536,16 +621,32 @@ static ssize_t iblock_set_configfs_dev_params(struct se_device *dev,
ret = -ENOMEM;
break;
}
- ret = strict_strtoul(arg_p, 0, &tmp_readonly);
+ ret = kstrtoul(arg_p, 0, &tmp_readonly);
kfree(arg_p);
if (ret < 0) {
- pr_err("strict_strtoul() failed for"
+ pr_err("kstrtoul() failed for"
" readonly=\n");
goto out;
}
ib_dev->ibd_readonly = tmp_readonly;
pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly);
break;
+ case Opt_exclusive:
+ arg_p = match_strdup(&args[0]);
+ if (!arg_p) {
+ ret = -ENOMEM;
+ break;
+ }
+ ret = kstrtoint(arg_p, 0, &tmp_exclusive);
+ kfree(arg_p);
+ if (ret < 0) {
+ pr_err("kstrtoul() failed for exclusive=\n");
+ goto out;
+ }
+ ib_dev->ibd_exclusive = tmp_exclusive;
+ pr_debug("IBLOCK: exclusive: %d\n",
+ ib_dev->ibd_exclusive);
+ break;
case Opt_force:
break;
default:
@@ -562,23 +663,21 @@ static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
{
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
struct block_device *bd = ib_dev->ibd_bd;
- char buf[BDEVNAME_SIZE];
ssize_t bl = 0;
if (bd)
- bl += sprintf(b + bl, "iBlock device: %s",
- bdevname(bd, buf));
+ bl += sprintf(b + bl, "iBlock device: %pg", bd);
if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)
bl += sprintf(b + bl, " UDEV PATH: %s",
ib_dev->ibd_udev_path);
bl += sprintf(b + bl, " readonly: %d\n", ib_dev->ibd_readonly);
+ bl += sprintf(b + bl, " exclusive: %d\n", ib_dev->ibd_exclusive);
bl += sprintf(b + bl, " ");
if (bd) {
bl += sprintf(b + bl, "Major: %d Minor: %d %s\n",
- MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
- "" : (bd->bd_holder == ib_dev) ?
- "CLAIMED: IBLOCK" : "CLAIMED: OS");
+ MAJOR(bd->bd_dev), MINOR(bd->bd_dev),
+ "CLAIMED: IBLOCK");
} else {
bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
}
@@ -586,60 +685,102 @@ static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
return bl;
}
+static int
+iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio,
+ struct sg_mapping_iter *miter)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct blk_integrity *bi;
+ struct bio_integrity_payload *bip;
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+ int rc;
+ size_t resid, len;
+
+ bi = bdev_get_integrity(ib_dev->ibd_bd);
+ if (!bi) {
+ pr_err("Unable to locate bio_integrity\n");
+ return -ENODEV;
+ }
+
+ bip = bio_integrity_alloc(bio, GFP_NOIO, bio_max_segs(cmd->t_prot_nents));
+ if (IS_ERR(bip)) {
+ pr_err("Unable to allocate bio_integrity_payload\n");
+ return PTR_ERR(bip);
+ }
+
+ /* virtual start sector must be in integrity interval units */
+ bip_set_seed(bip, bio->bi_iter.bi_sector >>
+ (bi->interval_exp - SECTOR_SHIFT));
+
+ pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
+ (unsigned long long)bip->bip_iter.bi_sector);
+
+ resid = bio_integrity_bytes(bi, bio_sectors(bio));
+ while (resid > 0 && sg_miter_next(miter)) {
+
+ len = min_t(size_t, miter->length, resid);
+ rc = bio_integrity_add_page(bio, miter->page, len,
+ offset_in_page(miter->addr));
+ if (rc != len) {
+ pr_err("bio_integrity_add_page() failed; %d\n", rc);
+ sg_miter_stop(miter);
+ return -ENOMEM;
+ }
+
+ pr_debug("Added bio integrity page: %p length: %zu offset: %lu\n",
+ miter->page, len, offset_in_page(miter->addr));
+
+ resid -= len;
+ if (len < miter->length)
+ miter->consumed -= miter->length - len;
+ }
+ sg_miter_stop(miter);
+
+ return 0;
+}
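/*
 * Editor's note: the bip_set_seed() conversion above expects the seed in
 * integrity-interval units rather than 512-byte sectors. Worked example,
 * assuming a 4096-byte protection interval (bi->interval_exp == 12):
 *
 *	bi_sector = 80 (512-byte units)
 *	seed      = 80 >> (12 - 9) = 10 (4096-byte intervals)
 */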
+
static sense_reason_t
-iblock_execute_rw(struct se_cmd *cmd)
+iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
+ enum dma_data_direction data_direction)
{
- struct scatterlist *sgl = cmd->t_data_sg;
- u32 sgl_nents = cmd->t_data_nents;
- enum dma_data_direction data_direction = cmd->data_direction;
struct se_device *dev = cmd->se_dev;
+ sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
struct iblock_req *ibr;
struct bio *bio;
struct bio_list list;
struct scatterlist *sg;
u32 sg_num = sgl_nents;
- sector_t block_lba;
+ blk_opf_t opf;
unsigned bio_cnt;
- int rw = 0;
- int i;
+ int i, rc;
+ struct sg_mapping_iter prot_miter;
+ unsigned int miter_dir;
if (data_direction == DMA_TO_DEVICE) {
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
- struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd);
+
/*
- * Force writethrough using WRITE_FUA if a volatile write cache
+ * Set bits to indicate WRITE_ODIRECT so we are not throttled
+ * by WBT.
+ */
+ opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
+ /*
+ * Force writethrough using REQ_FUA if a volatile write cache
* is not enabled, or if initiator set the Force Unit Access bit.
*/
- if (q->flush_flags & REQ_FUA) {
+ miter_dir = SG_MITER_TO_SG;
+ if (bdev_fua(ib_dev->ibd_bd)) {
if (cmd->se_cmd_flags & SCF_FUA)
- rw = WRITE_FUA;
- else if (!(q->flush_flags & REQ_FLUSH))
- rw = WRITE_FUA;
- else
- rw = WRITE;
- } else {
- rw = WRITE;
+ opf |= REQ_FUA;
+ else if (!bdev_write_cache(ib_dev->ibd_bd))
+ opf |= REQ_FUA;
}
- } else {
- rw = READ;
- }
- /*
- * Convert the blocksize advertised to the initiator to the 512 byte
- * units unconditionally used by the Linux block layer.
- */
- if (dev->dev_attrib.block_size == 4096)
- block_lba = (cmd->t_task_lba << 3);
- else if (dev->dev_attrib.block_size == 2048)
- block_lba = (cmd->t_task_lba << 2);
- else if (dev->dev_attrib.block_size == 1024)
- block_lba = (cmd->t_task_lba << 1);
- else if (dev->dev_attrib.block_size == 512)
- block_lba = cmd->t_task_lba;
- else {
- pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
- " %u\n", dev->dev_attrib.block_size);
- return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ if (cmd->se_cmd_flags & SCF_ATOMIC)
+ opf |= REQ_ATOMIC;
+ } else {
+ opf = REQ_OP_READ;
+ miter_dir = SG_MITER_FROM_SG;
}
ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
@@ -648,21 +789,25 @@ iblock_execute_rw(struct se_cmd *cmd)
cmd->priv = ibr;
if (!sgl_nents) {
- atomic_set(&ibr->pending, 1);
- iblock_complete_cmd(cmd);
+ refcount_set(&ibr->pending, 1);
+ iblock_complete_cmd(cmd, BLK_STS_OK);
return 0;
}
- bio = iblock_get_bio(cmd, block_lba, sgl_nents);
+ bio = iblock_get_bio(cmd, block_lba, sgl_nents, opf);
if (!bio)
goto fail_free_ibr;
bio_list_init(&list);
bio_list_add(&list, bio);
- atomic_set(&ibr->pending, 2);
+ refcount_set(&ibr->pending, 2);
bio_cnt = 1;
+ if (cmd->prot_type && dev->dev_attrib.pi_prot_type)
+ sg_miter_start(&prot_miter, cmd->t_prot_sg, cmd->t_prot_nents,
+ miter_dir);
+
for_each_sg(sgl, sg, sgl_nents, i) {
/*
* XXX: if the length the device accepts is shorter than the
@@ -671,27 +816,39 @@ iblock_execute_rw(struct se_cmd *cmd)
*/
while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
!= sg->length) {
+ if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
+ rc = iblock_alloc_bip(cmd, bio, &prot_miter);
+ if (rc)
+ goto fail_put_bios;
+ }
+
if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
- iblock_submit_bios(&list, rw);
+ iblock_submit_bios(&list);
bio_cnt = 0;
}
- bio = iblock_get_bio(cmd, block_lba, sg_num);
+ bio = iblock_get_bio(cmd, block_lba, sg_num, opf);
if (!bio)
goto fail_put_bios;
- atomic_inc(&ibr->pending);
+ refcount_inc(&ibr->pending);
bio_list_add(&list, bio);
bio_cnt++;
}
/* Always in 512 byte units for Linux/Block */
- block_lba += sg->length >> IBLOCK_LBA_SHIFT;
+ block_lba += sg->length >> SECTOR_SHIFT;
sg_num--;
}
- iblock_submit_bios(&list, rw);
- iblock_complete_cmd(cmd);
+ if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
+ rc = iblock_alloc_bip(cmd, bio, &prot_miter);
+ if (rc)
+ goto fail_put_bios;
+ }
+
+ iblock_submit_bios(&list);
+ iblock_complete_cmd(cmd, BLK_STS_OK);
return 0;
fail_put_bios:
@@ -703,65 +860,354 @@ fail:
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
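/*
 * Editor's note: the blk_opf_t assembled above combines one REQ_OP_* value
 * with modifier flags. A minimal sketch of the write-path FUA decision
 * under the same rules, assuming the bdev_fua()/bdev_write_cache() helpers
 * from linux/blkdev.h:
 */
static blk_opf_t sketch_iblock_write_opf(struct se_cmd *cmd,
					 struct block_device *bdev)
{
	/* REQ_SYNC | REQ_IDLE marks O_DIRECT-style writes for WBT. */
	blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;

	/*
	 * FUA when the device supports it and either the initiator set the
	 * FUA bit or there is no volatile write cache to rely on.
	 */
	if (bdev_fua(bdev) &&
	    ((cmd->se_cmd_flags & SCF_FUA) || !bdev_write_cache(bdev)))
		opf |= REQ_FUA;
	return opf;
}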
-static sector_t iblock_get_blocks(struct se_device *dev)
+static sense_reason_t iblock_execute_pr_out(struct se_cmd *cmd, u8 sa, u64 key,
+ u64 sa_key, u8 type, bool aptpl)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+ struct block_device *bdev = ib_dev->ibd_bd;
+ const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
+ int ret;
+
+ if (!ops) {
+ pr_err("Block device does not support pr_ops but iblock device has been configured for PR passthrough.\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ switch (sa) {
+ case PRO_REGISTER:
+ case PRO_REGISTER_AND_IGNORE_EXISTING_KEY:
+ if (!ops->pr_register) {
+ pr_err("block device does not support pr_register.\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ /* The block layer pr ops always enables aptpl */
+ if (!aptpl)
+ pr_info("APTPL not set by initiator, but will be used.\n");
+
+ ret = ops->pr_register(bdev, key, sa_key,
+ sa == PRO_REGISTER ? 0 : PR_FL_IGNORE_KEY);
+ break;
+ case PRO_RESERVE:
+ if (!ops->pr_reserve) {
+ pr_err("block_device does not support pr_reserve.\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ ret = ops->pr_reserve(bdev, key, scsi_pr_type_to_block(type), 0);
+ break;
+ case PRO_CLEAR:
+ if (!ops->pr_clear) {
+ pr_err("block_device does not support pr_clear.\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ ret = ops->pr_clear(bdev, key);
+ break;
+ case PRO_PREEMPT:
+ case PRO_PREEMPT_AND_ABORT:
+ if (!ops->pr_preempt) {
+ pr_err("block_device does not support pr_preempt.\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ ret = ops->pr_preempt(bdev, key, sa_key,
+ scsi_pr_type_to_block(type),
+ sa == PRO_PREEMPT_AND_ABORT);
+ break;
+ case PRO_RELEASE:
+ if (!ops->pr_release) {
+ pr_err("block_device does not support pr_release.\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ ret = ops->pr_release(bdev, key, scsi_pr_type_to_block(type));
+ break;
+ default:
+ pr_err("Unknown PERSISTENT_RESERVE_OUT SA: 0x%02x\n", sa);
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ if (!ret)
+ return TCM_NO_SENSE;
+ else if (ret == PR_STS_RESERVATION_CONFLICT)
+ return TCM_RESERVATION_CONFLICT;
+ else
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+}
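/*
 * Editor's note: scsi_pr_type_to_block() translates the SPC-4 PERSISTENT
 * RESERVE OUT type codes into enum pr_type from linux/pr.h. A hedged
 * sketch of the expected mapping (the exact upstream helper may differ):
 */
static enum pr_type sketch_scsi_pr_type_to_block(u8 type)
{
	switch (type) {
	case 0x1: return PR_WRITE_EXCLUSIVE;
	case 0x3: return PR_EXCLUSIVE_ACCESS;
	case 0x5: return PR_WRITE_EXCLUSIVE_REG_ONLY;
	case 0x6: return PR_EXCLUSIVE_ACCESS_REG_ONLY;
	case 0x7: return PR_WRITE_EXCLUSIVE_ALL_REGS;
	case 0x8: return PR_EXCLUSIVE_ACCESS_ALL_REGS;
	default:  return 0;	/* unknown/unsupported type */
	}
}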
+
+static void iblock_pr_report_caps(unsigned char *param_data)
+{
+ u16 len = 8;
+
+ put_unaligned_be16(len, &param_data[0]);
+ /*
+ * When using the pr_ops passthrough method we only support exporting
+ * the device through one target port because from the backend module
+ * level we can't see the target port config. As a result we only
+ * support registration directly from the I_T nexus the cmd is sent
+ * through and do not set ATP_C here.
+ *
+ * The block layer pr_ops do not support passing in initiators so
+ * we don't set SIP_C here.
+ */
+ /* PTPL_C: Persistence across Target Power Loss bit */
+ param_data[2] |= 0x01;
+ /*
+ * We are filling in the PERSISTENT RESERVATION TYPE MASK below, so
+ * set the TMV: Task Mask Valid bit.
+ */
+ param_data[3] |= 0x80;
+ /*
+ * Change ALLOW COMMANDs to 0x20 or 0x40 later from Table 166
+ */
+ param_data[3] |= 0x10; /* ALLOW COMMANDs field 001b */
+ /*
+ * PTPL_A: Persistence across Target Power Loss Active bit. The block
+ * layer pr ops always enables this so report it active.
+ */
+ param_data[3] |= 0x01;
+ /*
+ * Setup the PERSISTENT RESERVATION TYPE MASK from Table 212 spc4r37.
+ */
+ param_data[4] |= 0x80; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */
+ param_data[4] |= 0x40; /* PR_TYPE_EXCLUSIVE_ACCESS_REGONLY */
+ param_data[4] |= 0x20; /* PR_TYPE_WRITE_EXCLUSIVE_REGONLY */
+ param_data[4] |= 0x08; /* PR_TYPE_EXCLUSIVE_ACCESS */
+ param_data[4] |= 0x02; /* PR_TYPE_WRITE_EXCLUSIVE */
+ param_data[5] |= 0x01; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */
+}
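/*
 * Editor's note: with every bit above applied, the REPORT CAPABILITIES
 * parameter data works out to:
 *
 *	param_data[0..1] = 0x00 0x08  (LENGTH = 8)
 *	param_data[2]    = 0x01       (PTPL_C)
 *	param_data[3]    = 0x91       (TMV | ALLOW COMMANDS 001b | PTPL_A)
 *	param_data[4]    = 0xea       (PERSISTENT RESERVATION TYPE MASK)
 *	param_data[5]    = 0x01       (type mask, continued)
 */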
+
+static sense_reason_t iblock_pr_read_keys(struct se_cmd *cmd,
+ unsigned char *param_data)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+ struct block_device *bdev = ib_dev->ibd_bd;
+ const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
+ int i, len, paths, data_offset;
+ struct pr_keys *keys;
+ sense_reason_t ret;
+
+ if (!ops) {
+ pr_err("Block device does not support pr_ops but iblock device has been configured for PR passthrough.\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ if (!ops->pr_read_keys) {
+ pr_err("Block device does not support read_keys.\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ /*
+ * We don't know what's under us, but dm-multipath will register every
+ * path with the same key, so start off with enough space for 16 paths,
+ * which is not a lot of memory and should normally be enough.
+ */
+ paths = 16;
+retry:
+ len = 8 * paths;
+ keys = kzalloc(sizeof(*keys) + len, GFP_KERNEL);
+ if (!keys)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ keys->num_keys = paths;
+ if (!ops->pr_read_keys(bdev, keys)) {
+ if (keys->num_keys > paths) {
+ kfree(keys);
+ paths *= 2;
+ goto retry;
+ }
+ } else {
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ goto free_keys;
+ }
+
+ ret = TCM_NO_SENSE;
+
+ put_unaligned_be32(keys->generation, &param_data[0]);
+ if (!keys->num_keys) {
+ put_unaligned_be32(0, &param_data[4]);
+ goto free_keys;
+ }
+
+ put_unaligned_be32(8 * keys->num_keys, &param_data[4]);
+
+ data_offset = 8;
+ for (i = 0; i < keys->num_keys; i++) {
+ if (data_offset + 8 > cmd->data_length)
+ break;
+
+ put_unaligned_be64(keys->keys[i], &param_data[data_offset]);
+ data_offset += 8;
+ }
+
+free_keys:
+ kfree(keys);
+ return ret;
+}
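/*
 * Editor's note: the read-keys path above sizes its buffer optimistically
 * and doubles it when the device reports more registrations than fit. A
 * self-contained sketch of that grow-and-retry shape (illustrative only):
 */
static struct pr_keys *sketch_read_all_keys(struct block_device *bdev,
					    const struct pr_ops *ops)
{
	int paths = 16;	/* dm-multipath registers one key per path */
	struct pr_keys *keys;

	for (;;) {
		keys = kzalloc(struct_size(keys, keys, paths), GFP_KERNEL);
		if (!keys)
			return NULL;
		keys->num_keys = paths;
		if (ops->pr_read_keys(bdev, keys)) {
			kfree(keys);	/* hard error from the driver */
			return NULL;
		}
		if (keys->num_keys <= paths)
			return keys;	/* every registration fit */
		kfree(keys);		/* buffer was short: double and retry */
		paths *= 2;
	}
}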
+
+static sense_reason_t iblock_pr_read_reservation(struct se_cmd *cmd,
+ unsigned char *param_data)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+ struct block_device *bdev = ib_dev->ibd_bd;
+ const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
+ struct pr_held_reservation rsv = { };
+
+ if (!ops) {
+ pr_err("Block device does not support pr_ops but iblock device has been configured for PR passthrough.\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ if (!ops->pr_read_reservation) {
+ pr_err("Block device does not support read_keys.\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ if (ops->pr_read_reservation(bdev, &rsv))
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ put_unaligned_be32(rsv.generation, &param_data[0]);
+ if (!block_pr_type_to_scsi(rsv.type)) {
+ put_unaligned_be32(0, &param_data[4]);
+ return TCM_NO_SENSE;
+ }
+
+ put_unaligned_be32(16, &param_data[4]);
+
+ if (cmd->data_length < 16)
+ return TCM_NO_SENSE;
+ put_unaligned_be64(rsv.key, &param_data[8]);
+
+ if (cmd->data_length < 22)
+ return TCM_NO_SENSE;
+ param_data[21] = block_pr_type_to_scsi(rsv.type);
+
+ return TCM_NO_SENSE;
+}
+
+static sense_reason_t iblock_execute_pr_in(struct se_cmd *cmd, u8 sa,
+ unsigned char *param_data)
+{
+ sense_reason_t ret = TCM_NO_SENSE;
+
+ switch (sa) {
+ case PRI_REPORT_CAPABILITIES:
+ iblock_pr_report_caps(param_data);
+ break;
+ case PRI_READ_KEYS:
+ ret = iblock_pr_read_keys(cmd, param_data);
+ break;
+ case PRI_READ_RESERVATION:
+ ret = iblock_pr_read_reservation(cmd, param_data);
+ break;
+ default:
+ pr_err("Unknown PERSISTENT_RESERVE_IN SA: 0x%02x\n", sa);
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ return ret;
+}
+
+static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev)
+{
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+ struct block_device *bd = ib_dev->ibd_bd;
+ int ret;
+
+ ret = bdev_alignment_offset(bd);
+ if (ret == -1)
+ return 0;
+
+ /* convert offset-bytes to offset-lbas */
+ return ret / bdev_logical_block_size(bd);
+}
+
+static unsigned int iblock_get_lbppbe(struct se_device *dev)
+{
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+ struct block_device *bd = ib_dev->ibd_bd;
+ unsigned int logs_per_phys =
+ bdev_physical_block_size(bd) / bdev_logical_block_size(bd);
+
+ return ilog2(logs_per_phys);
+}
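/*
 * Editor's note: LBPPBE is the LOGICAL BLOCKS PER PHYSICAL BLOCK EXPONENT
 * reported via READ CAPACITY(16). Worked example for a 512e disk
 * (512-byte logical, 4096-byte physical blocks):
 *
 *	logs_per_phys = 4096 / 512 = 8
 *	lbppbe        = ilog2(8)   = 3
 *
 * so initiators should align writes to 2^3 = 8 logical blocks.
 */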
+
+static unsigned int iblock_get_io_min(struct se_device *dev)
+{
+ struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+ struct block_device *bd = ib_dev->ibd_bd;
+
+ return bdev_io_min(bd);
+}
+
+static unsigned int iblock_get_io_opt(struct se_device *dev)
{
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
struct block_device *bd = ib_dev->ibd_bd;
- struct request_queue *q = bdev_get_queue(bd);
- return iblock_emulate_read_cap_with_block_size(dev, bd, q);
+ return bdev_io_opt(bd);
}
-static struct sbc_ops iblock_sbc_ops = {
+static struct exec_cmd_ops iblock_exec_cmd_ops = {
.execute_rw = iblock_execute_rw,
.execute_sync_cache = iblock_execute_sync_cache,
.execute_write_same = iblock_execute_write_same,
- .execute_write_same_unmap = iblock_execute_write_same_unmap,
.execute_unmap = iblock_execute_unmap,
+ .execute_pr_out = iblock_execute_pr_out,
+ .execute_pr_in = iblock_execute_pr_in,
};
static sense_reason_t
iblock_parse_cdb(struct se_cmd *cmd)
{
- return sbc_parse_cdb(cmd, &iblock_sbc_ops);
+ return sbc_parse_cdb(cmd, &iblock_exec_cmd_ops);
}
-bool iblock_get_write_cache(struct se_device *dev)
+static bool iblock_get_write_cache(struct se_device *dev)
{
- struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
- struct block_device *bd = ib_dev->ibd_bd;
- struct request_queue *q = bdev_get_queue(bd);
-
- return q->flush_flags & REQ_FLUSH;
+ return bdev_write_cache(IBLOCK_DEV(dev)->ibd_bd);
}
-static struct se_subsystem_api iblock_template = {
+static const struct target_backend_ops iblock_ops = {
.name = "iblock",
.inquiry_prod = "IBLOCK",
+ .transport_flags_changeable = TRANSPORT_FLAG_PASSTHROUGH_PGR,
.inquiry_rev = IBLOCK_VERSION,
.owner = THIS_MODULE,
- .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV,
.attach_hba = iblock_attach_hba,
.detach_hba = iblock_detach_hba,
.alloc_device = iblock_alloc_device,
.configure_device = iblock_configure_device,
+ .destroy_device = iblock_destroy_device,
.free_device = iblock_free_device,
+ .configure_unmap = iblock_configure_unmap,
+ .plug_device = iblock_plug_device,
+ .unplug_device = iblock_unplug_device,
.parse_cdb = iblock_parse_cdb,
.set_configfs_dev_params = iblock_set_configfs_dev_params,
.show_configfs_dev_params = iblock_show_configfs_dev_params,
.get_device_type = sbc_get_device_type,
.get_blocks = iblock_get_blocks,
+ .get_alignment_offset_lbas = iblock_get_alignment_offset_lbas,
+ .get_lbppbe = iblock_get_lbppbe,
+ .get_io_min = iblock_get_io_min,
+ .get_io_opt = iblock_get_io_opt,
.get_write_cache = iblock_get_write_cache,
+ .tb_dev_attrib_attrs = sbc_attrib_attrs,
};
static int __init iblock_module_init(void)
{
- return transport_subsystem_register(&iblock_template);
+ return transport_backend_register(&iblock_ops);
}
static void __exit iblock_module_exit(void)
{
- transport_subsystem_release(&iblock_template);
+ target_backend_unregister(&iblock_ops);
}
MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h
index 01c2afd81500..e2f28a69a11c 100644
--- a/drivers/target/target_core_iblock.h
+++ b/drivers/target/target_core_iblock.h
@@ -1,25 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef TARGET_CORE_IBLOCK_H
#define TARGET_CORE_IBLOCK_H
+#include <linux/atomic.h>
+#include <linux/refcount.h>
+#include <linux/blkdev.h>
+#include <target/target_core_base.h>
+
#define IBLOCK_VERSION "4.0"
#define IBLOCK_MAX_CDBS 16
-#define IBLOCK_LBA_SHIFT 9
struct iblock_req {
- atomic_t pending;
+ refcount_t pending;
atomic_t ib_bio_err_cnt;
} ____cacheline_aligned;
#define IBDF_HAS_UDEV_PATH 0x01
+#define IBD_PLUGF_PLUGGED 0x01
+
+struct iblock_dev_plug {
+ struct se_dev_plug se_plug;
+ struct blk_plug blk_plug;
+ unsigned long flags;
+};
+
struct iblock_dev {
struct se_device dev;
unsigned char ibd_udev_path[SE_UDEV_PATH_LEN];
u32 ibd_flags;
- struct bio_set *ibd_bio_set;
+ struct bio_set ibd_bio_set;
struct block_device *ibd_bd;
+ struct file *ibd_bdev_file;
bool ibd_readonly;
+ bool ibd_exclusive;
+ struct iblock_dev_plug *ibd_plug;
} ____cacheline_aligned;
#endif /* TARGET_CORE_IBLOCK_H */
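/*
 * Editor's note: iblock_dev_plug pairs the generic se_dev_plug with a block
 * layer blk_plug so bios submitted from one context can be batched. A
 * minimal sketch of the intended plug/unplug shape, assuming
 * blk_start_plug()/blk_finish_plug() from linux/blkdev.h:
 */
static void sketch_iblock_plug_submit(struct iblock_dev_plug *ib_plug,
				      struct bio *bio)
{
	/* First bio in this batch: open the plug. */
	if (!test_and_set_bit(IBD_PLUGF_PLUGGED, &ib_plug->flags))
		blk_start_plug(&ib_plug->blk_plug);
	submit_bio(bio);
}

static void sketch_iblock_unplug(struct iblock_dev_plug *ib_plug)
{
	/* Flush the batch once the submitter is done queueing. */
	if (test_and_clear_bit(IBD_PLUGF_PLUGGED, &ib_plug->flags))
		blk_finish_plug(&ib_plug->blk_plug);
}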
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 18d49df4d0ac..763e6d26e187 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -1,55 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef TARGET_CORE_INTERNAL_H
#define TARGET_CORE_INTERNAL_H
+#include <linux/configfs.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <target/target_core_base.h>
+
+#define TARGET_CORE_NAME_MAX_LEN 64
+#define TARGET_FABRIC_NAME_SIZE 32
+
+struct target_backend {
+ struct list_head list;
+
+ const struct target_backend_ops *ops;
+
+ struct config_item_type tb_dev_cit;
+ struct config_item_type tb_dev_attrib_cit;
+ struct config_item_type tb_dev_action_cit;
+ struct config_item_type tb_dev_pr_cit;
+ struct config_item_type tb_dev_wwn_cit;
+ struct config_item_type tb_dev_alua_tg_pt_gps_cit;
+ struct config_item_type tb_dev_stat_cit;
+};
+
+struct target_fabric_configfs {
+ atomic_t tf_access_cnt;
+ struct list_head tf_list;
+ struct config_group tf_group;
+ struct config_group tf_disc_group;
+ const struct target_core_fabric_ops *tf_ops;
+
+ struct config_item_type tf_discovery_cit;
+ struct config_item_type tf_wwn_cit;
+ struct config_item_type tf_wwn_fabric_stats_cit;
+ struct config_item_type tf_wwn_param_cit;
+ struct config_item_type tf_tpg_cit;
+ struct config_item_type tf_tpg_base_cit;
+ struct config_item_type tf_tpg_lun_cit;
+ struct config_item_type tf_tpg_port_cit;
+ struct config_item_type tf_tpg_port_stat_cit;
+ struct config_item_type tf_tpg_np_cit;
+ struct config_item_type tf_tpg_np_base_cit;
+ struct config_item_type tf_tpg_attrib_cit;
+ struct config_item_type tf_tpg_auth_cit;
+ struct config_item_type tf_tpg_param_cit;
+ struct config_item_type tf_tpg_nacl_cit;
+ struct config_item_type tf_tpg_nacl_base_cit;
+ struct config_item_type tf_tpg_nacl_attrib_cit;
+ struct config_item_type tf_tpg_nacl_auth_cit;
+ struct config_item_type tf_tpg_nacl_param_cit;
+ struct config_item_type tf_tpg_nacl_stat_cit;
+ struct config_item_type tf_tpg_mappedlun_cit;
+ struct config_item_type tf_tpg_mappedlun_stat_cit;
+};
+
/* target_core_alua.c */
extern struct t10_alua_lu_gp *default_lu_gp;
/* target_core_device.c */
struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16);
-int core_free_device_list_for_node(struct se_node_acl *,
+void target_pr_kref_release(struct kref *);
+void core_free_device_list_for_node(struct se_node_acl *,
struct se_portal_group *);
-void core_update_device_list_access(u32, u32, struct se_node_acl *);
+void core_update_device_list_access(u64, bool, struct se_node_acl *);
+struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *, u64);
int core_enable_device_list_for_node(struct se_lun *, struct se_lun_acl *,
- u32, u32, struct se_node_acl *, struct se_portal_group *);
-int core_disable_device_list_for_node(struct se_lun *, struct se_lun_acl *,
- u32, u32, struct se_node_acl *, struct se_portal_group *);
+ u64, bool, struct se_node_acl *, struct se_portal_group *);
+void core_disable_device_list_for_node(struct se_lun *, struct se_dev_entry *,
+ struct se_node_acl *, struct se_portal_group *);
void core_clear_lun_from_tpg(struct se_lun *, struct se_portal_group *);
-int core_dev_export(struct se_device *, struct se_portal_group *,
- struct se_lun *);
-void core_dev_unexport(struct se_device *, struct se_portal_group *,
- struct se_lun *);
-int se_dev_set_task_timeout(struct se_device *, u32);
-int se_dev_set_max_unmap_lba_count(struct se_device *, u32);
-int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32);
-int se_dev_set_unmap_granularity(struct se_device *, u32);
-int se_dev_set_unmap_granularity_alignment(struct se_device *, u32);
-int se_dev_set_max_write_same_len(struct se_device *, u32);
-int se_dev_set_emulate_model_alias(struct se_device *, int);
-int se_dev_set_emulate_dpo(struct se_device *, int);
-int se_dev_set_emulate_fua_write(struct se_device *, int);
-int se_dev_set_emulate_fua_read(struct se_device *, int);
-int se_dev_set_emulate_write_cache(struct se_device *, int);
-int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *, int);
-int se_dev_set_emulate_tas(struct se_device *, int);
-int se_dev_set_emulate_tpu(struct se_device *, int);
-int se_dev_set_emulate_tpws(struct se_device *, int);
-int se_dev_set_enforce_pr_isids(struct se_device *, int);
-int se_dev_set_is_nonrot(struct se_device *, int);
-int se_dev_set_emulate_rest_reord(struct se_device *dev, int);
-int se_dev_set_queue_depth(struct se_device *, u32);
-int se_dev_set_max_sectors(struct se_device *, u32);
-int se_dev_set_fabric_max_sectors(struct se_device *, u32);
-int se_dev_set_optimal_sectors(struct se_device *, u32);
-int se_dev_set_block_size(struct se_device *, u32);
-struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_device *, u32);
-int core_dev_del_lun(struct se_portal_group *, u32);
-struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32);
+int core_dev_add_lun(struct se_portal_group *, struct se_device *,
+ struct se_lun *lun);
+void core_dev_del_lun(struct se_portal_group *, struct se_lun *);
struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *,
- struct se_node_acl *, u32, int *);
+ struct se_node_acl *, u64, int *);
int core_dev_add_initiator_node_lun_acl(struct se_portal_group *,
- struct se_lun_acl *, u32, u32);
-int core_dev_del_initiator_node_lun_acl(struct se_portal_group *,
- struct se_lun *, struct se_lun_acl *);
+ struct se_lun_acl *, struct se_lun *lun, bool);
+int core_dev_del_initiator_node_lun_acl(struct se_lun *,
+ struct se_lun_acl *);
void core_dev_free_initiator_node_lun_acl(struct se_portal_group *,
struct se_lun_acl *lacl);
int core_dev_setup_virtual_lun0(void);
@@ -57,6 +86,25 @@ void core_dev_release_virtual_lun0(void);
struct se_device *target_alloc_device(struct se_hba *hba, const char *name);
int target_configure_device(struct se_device *dev);
void target_free_device(struct se_device *);
+int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
+ void *data);
+void target_dev_ua_allocate(struct se_device *dev, u8 asc, u8 ascq);
+
+/* target_core_configfs.c */
+extern struct configfs_item_operations target_core_dev_item_ops;
+void target_setup_backend_cits(struct target_backend *);
+
+/* target_core_fabric_configfs.c */
+int target_fabric_setup_cits(struct target_fabric_configfs *);
+
+/* target_core_fabric_lib.c */
+int target_get_pr_transport_id_len(struct se_node_acl *nacl,
+ struct t10_pr_registration *pr_reg, int *format_code);
+int target_get_pr_transport_id(struct se_node_acl *nacl,
+ struct t10_pr_registration *pr_reg, int *format_code,
+ unsigned char *buf);
+bool target_parse_pr_out_transport_id(struct se_portal_group *tpg,
+ char *buf, u32 *out_tid_len, char **port_nexus_ptr, char *i_str);
/* target_core_hba.c */
struct se_hba *core_alloc_hba(const char *, u32, u32);
@@ -73,24 +121,25 @@ extern struct se_device *g_lun0_dev;
struct se_node_acl *__core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
const char *);
-struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
- unsigned char *);
-void core_tpg_add_node_to_devs(struct se_node_acl *, struct se_portal_group *);
+void core_tpg_add_node_to_devs(struct se_node_acl *, struct se_portal_group *,
+ struct se_lun *);
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *);
-struct se_lun *core_tpg_pre_addlun(struct se_portal_group *, u32);
-int core_tpg_post_addlun(struct se_portal_group *, struct se_lun *,
- u32, void *);
-struct se_lun *core_tpg_pre_dellun(struct se_portal_group *, u32 unpacked_lun);
-int core_tpg_post_dellun(struct se_portal_group *, struct se_lun *);
+struct se_lun *core_tpg_alloc_lun(struct se_portal_group *, u64);
+void target_tpg_free_lun(struct rcu_head *head);
+int core_tpg_add_lun(struct se_portal_group *, struct se_lun *,
+ bool, struct se_device *);
+void core_tpg_remove_lun(struct se_portal_group *, struct se_lun *);
+struct se_node_acl *core_tpg_add_initiator_node_acl(struct se_portal_group *tpg,
+ const char *initiatorname);
+void core_tpg_del_initiator_node_acl(struct se_node_acl *acl);
+int target_tpg_enable(struct se_portal_group *se_tpg);
+int target_tpg_disable(struct se_portal_group *se_tpg);
/* target_core_transport.c */
-extern struct kmem_cache *se_tmr_req_cache;
-
int init_se_kmem_caches(void);
void release_se_kmem_caches(void);
u32 scsi_get_new_index(scsi_index_t);
void transport_subsystem_check_init(void);
-void transport_cmd_finish_abort(struct se_cmd *, int);
unsigned char *transport_dump_cmd_direction(struct se_cmd *);
void transport_dump_dev_state(struct se_device *, char *, int *);
void transport_dump_dev_info(struct se_device *, struct se_lun *,
@@ -99,15 +148,28 @@ void transport_dump_vpd_proto_id(struct t10_vpd *, unsigned char *, int);
int transport_dump_vpd_assoc(struct t10_vpd *, unsigned char *, int);
int transport_dump_vpd_ident_type(struct t10_vpd *, unsigned char *, int);
int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int);
-bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags);
-int transport_clear_lun_from_sessions(struct se_lun *);
-void transport_send_task_abort(struct se_cmd *);
+void transport_clear_lun_ref(struct se_lun *);
sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size);
void target_qf_do_work(struct work_struct *work);
+void target_do_delayed_work(struct work_struct *work);
+bool target_check_wce(struct se_device *dev);
+bool target_check_fua(struct se_device *dev);
+void __target_execute_cmd(struct se_cmd *, bool);
+void target_queued_submit_work(struct work_struct *work);
/* target_core_stat.c */
void target_stat_setup_dev_default_groups(struct se_device *);
void target_stat_setup_port_default_groups(struct se_lun *);
void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *);
+/* target_core_xcopy.c */
+extern struct se_portal_group xcopy_pt_tpg;
+
+/* target_core_configfs.c */
+#define DB_ROOT_LEN 4096
+#define DB_ROOT_DEFAULT "/var/target"
+#define DB_ROOT_PREFERRED "/etc/target"
+
+extern char db_root[];
+
#endif /* TARGET_CORE_INTERNAL_H */
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index bd78faf67c6b..83e172c92238 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -1,41 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* Filename: target_core_pr.c
*
* This file contains SPC-3 compliant persistent reservations and
* legacy SPC-2 reservations with compatible reservation handling (CRH=1)
*
- * (c) Copyright 2009-2012 RisingTide Systems LLC.
+ * (c) Copyright 2009-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
******************************************************************************/
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>
+#include <linux/vmalloc.h>
#include <linux/file.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
-#include <asm/unaligned.h>
+#include <linux/fcntl.h>
+#include <linux/fs.h>
+#include <scsi/scsi_proto.h>
+#include <linux/unaligned.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
-#include <target/target_core_configfs.h>
#include "target_core_internal.h"
#include "target_core_pr.h"
@@ -45,7 +33,6 @@
* Used for Specify Initiator Ports Capable Bit (SPEC_I_PT)
*/
struct pr_transport_id_holder {
- int dest_local_nexus;
struct t10_pr_registration *dest_pr_reg;
struct se_portal_group *dest_tpg;
struct se_node_acl *dest_node_acl;
@@ -58,8 +45,10 @@ void core_pr_dump_initiator_port(
char *buf,
u32 size)
{
- if (!pr_reg->isid_present_at_reg)
+ if (!pr_reg->isid_present_at_reg) {
buf[0] = '\0';
+ return;
+ }
snprintf(buf, size, ",i,0x%s", pr_reg->pr_reg_isid);
}
@@ -76,7 +65,23 @@ enum preempt_type {
};
static void __core_scsi3_complete_pro_release(struct se_device *, struct se_node_acl *,
- struct t10_pr_registration *, int);
+ struct t10_pr_registration *, int, int);
+
+static int is_reservation_holder(
+ struct t10_pr_registration *pr_res_holder,
+ struct t10_pr_registration *pr_reg)
+{
+ int pr_res_type;
+
+ if (pr_res_holder) {
+ pr_res_type = pr_res_holder->pr_res_type;
+
+ return pr_res_holder == pr_reg ||
+ pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG ||
+ pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG;
+ }
+ return 0;
+}
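/*
 * Editor's note: for the two all-registrants reservation types every
 * registered I_T nexus is a holder, so any non-NULL pr_reg qualifies; for
 * the other types only the registration that took the reservation does:
 *
 *	PR_TYPE_WRITE_EXCLUSIVE         -> holder iff pr_res_holder == pr_reg
 *	PR_TYPE_EXCLUSIVE_ACCESS_ALLREG -> every registrant is a holder
 */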
static sense_reason_t
target_scsi2_reservation_check(struct se_cmd *cmd)
@@ -86,17 +91,17 @@ target_scsi2_reservation_check(struct se_cmd *cmd)
switch (cmd->t_task_cdb[0]) {
case INQUIRY:
- case RELEASE:
+ case RELEASE_6:
case RELEASE_10:
return 0;
default:
break;
}
- if (!dev->dev_reserved_node_acl || !sess)
+ if (!dev->reservation_holder || !sess)
return 0;
- if (dev->dev_reserved_node_acl != sess->se_node_acl)
+ if (dev->reservation_holder->se_node_acl != sess->se_node_acl)
return TCM_RESERVATION_CONFLICT;
if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS_WITH_ISID) {
@@ -182,6 +187,16 @@ static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd)
return 0;
}
+void target_release_reservation(struct se_device *dev)
+{
+ dev->reservation_holder = NULL;
+ dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS;
+ if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS_WITH_ISID) {
+ dev->dev_res_bin_isid = 0;
+ dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS_WITH_ISID;
+ }
+}
+
sense_reason_t
target_scsi2_reservation_release(struct se_cmd *cmd)
{
@@ -199,31 +214,27 @@ target_scsi2_reservation_release(struct se_cmd *cmd)
return TCM_RESERVATION_CONFLICT;
spin_lock(&dev->dev_reservation_lock);
- if (!dev->dev_reserved_node_acl || !sess)
+ if (!dev->reservation_holder || !sess)
goto out_unlock;
- if (dev->dev_reserved_node_acl != sess->se_node_acl)
+ if (dev->reservation_holder->se_node_acl != sess->se_node_acl)
goto out_unlock;
if (dev->dev_res_bin_isid != sess->sess_bin_isid)
goto out_unlock;
- dev->dev_reserved_node_acl = NULL;
- dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS;
- if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS_WITH_ISID) {
- dev->dev_res_bin_isid = 0;
- dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS_WITH_ISID;
- }
+ target_release_reservation(dev);
tpg = sess->se_tpg;
- pr_debug("SCSI-2 Released reservation for %s LUN: %u ->"
- " MAPPED LUN: %u for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
- cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun,
+ pr_debug("SCSI-2 Released reservation for %s LUN: %llu ->"
+ " MAPPED LUN: %llu for %s\n",
+ tpg->se_tpg_tfo->fabric_name,
+ cmd->se_lun->unpacked_lun, cmd->orig_fe_lun,
sess->se_node_acl->initiatorname);
out_unlock:
spin_unlock(&dev->dev_reservation_lock);
out:
- target_complete_cmd(cmd, GOOD);
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
return 0;
}
@@ -238,8 +249,7 @@ target_scsi2_reservation_reserve(struct se_cmd *cmd)
if ((cmd->t_task_cdb[1] & 0x01) &&
(cmd->t_task_cdb[1] & 0x02)) {
- pr_err("LongIO and Obselete Bits set, returning"
- " ILLEGAL_REQUEST\n");
+ pr_err("LongIO and Obsolete Bits set, returning ILLEGAL_REQUEST\n");
return TCM_UNSUPPORTED_SCSI_OPCODE;
}
/*
@@ -257,37 +267,37 @@ target_scsi2_reservation_reserve(struct se_cmd *cmd)
tpg = sess->se_tpg;
spin_lock(&dev->dev_reservation_lock);
- if (dev->dev_reserved_node_acl &&
- (dev->dev_reserved_node_acl != sess->se_node_acl)) {
- pr_err("SCSI-2 RESERVATION CONFLIFT for %s fabric\n",
- tpg->se_tpg_tfo->get_fabric_name());
- pr_err("Original reserver LUN: %u %s\n",
+ if (dev->reservation_holder &&
+ dev->reservation_holder->se_node_acl != sess->se_node_acl) {
+ pr_err("SCSI-2 RESERVATION CONFLICT for %s fabric\n",
+ tpg->se_tpg_tfo->fabric_name);
+ pr_err("Original reserver LUN: %llu %s\n",
cmd->se_lun->unpacked_lun,
- dev->dev_reserved_node_acl->initiatorname);
- pr_err("Current attempt - LUN: %u -> MAPPED LUN: %u"
+ dev->reservation_holder->se_node_acl->initiatorname);
+ pr_err("Current attempt - LUN: %llu -> MAPPED LUN: %llu"
" from %s \n", cmd->se_lun->unpacked_lun,
- cmd->se_deve->mapped_lun,
+ cmd->orig_fe_lun,
sess->se_node_acl->initiatorname);
ret = TCM_RESERVATION_CONFLICT;
goto out_unlock;
}
- dev->dev_reserved_node_acl = sess->se_node_acl;
+ dev->reservation_holder = sess;
dev->dev_reservation_flags |= DRF_SPC2_RESERVATIONS;
if (sess->sess_bin_isid != 0) {
dev->dev_res_bin_isid = sess->sess_bin_isid;
dev->dev_reservation_flags |= DRF_SPC2_RESERVATIONS_WITH_ISID;
}
- pr_debug("SCSI-2 Reserved %s LUN: %u -> MAPPED LUN: %u"
- " for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
- cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun,
+ pr_debug("SCSI-2 Reserved %s LUN: %llu -> MAPPED LUN: %llu"
+ " for %s\n", tpg->se_tpg_tfo->fabric_name,
+ cmd->se_lun->unpacked_lun, cmd->orig_fe_lun,
sess->se_node_acl->initiatorname);
out_unlock:
spin_unlock(&dev->dev_reservation_lock);
out:
if (!ret)
- target_complete_cmd(cmd, GOOD);
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
return ret;
}
@@ -298,59 +308,59 @@ out:
* This function is called by those initiator ports who are *NOT*
* the active PR reservation holder when a reservation is present.
*/
-static int core_scsi3_pr_seq_non_holder(
- struct se_cmd *cmd,
- u32 pr_reg_type)
+static int core_scsi3_pr_seq_non_holder(struct se_cmd *cmd, u32 pr_reg_type,
+ bool isid_mismatch)
{
unsigned char *cdb = cmd->t_task_cdb;
- struct se_dev_entry *se_deve;
struct se_session *se_sess = cmd->se_sess;
- int other_cdb = 0, ignore_reg;
+ struct se_node_acl *nacl = se_sess->se_node_acl;
+ int other_cdb = 0;
int registered_nexus = 0, ret = 1; /* Conflict by default */
int all_reg = 0, reg_only = 0; /* ALL_REG, REG_ONLY */
int we = 0; /* Write Exclusive */
int legacy = 0; /* Act like a legacy device and return
* RESERVATION CONFLICT on some CDBs */
- se_deve = se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
- /*
- * Determine if the registration should be ignored due to
- * non-matching ISIDs in target_scsi3_pr_reservation_check().
- */
- ignore_reg = (pr_reg_type & 0x80000000);
- if (ignore_reg)
- pr_reg_type &= ~0x80000000;
+ if (isid_mismatch) {
+ registered_nexus = 0;
+ } else {
+ struct se_dev_entry *se_deve;
+
+ rcu_read_lock();
+ se_deve = target_nacl_find_deve(nacl, cmd->orig_fe_lun);
+ if (se_deve)
+ registered_nexus = test_bit(DEF_PR_REG_ACTIVE,
+ &se_deve->deve_flags);
+ rcu_read_unlock();
+ }
switch (pr_reg_type) {
case PR_TYPE_WRITE_EXCLUSIVE:
we = 1;
+ fallthrough;
case PR_TYPE_EXCLUSIVE_ACCESS:
/*
* Some commands are only allowed for the persistent reservation
* holder.
*/
- if ((se_deve->def_pr_registered) && !(ignore_reg))
- registered_nexus = 1;
break;
case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
we = 1;
+ fallthrough;
case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
/*
* Some commands are only allowed for registered I_T Nexuses.
*/
reg_only = 1;
- if ((se_deve->def_pr_registered) && !(ignore_reg))
- registered_nexus = 1;
break;
case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
we = 1;
+ fallthrough;
case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
/*
* Each registered I_T Nexus is a reservation holder.
*/
all_reg = 1;
- if ((se_deve->def_pr_registered) && !(ignore_reg))
- registered_nexus = 1;
break;
default:
return -EINVAL;
@@ -408,12 +418,12 @@ static int core_scsi3_pr_seq_non_holder(
return -EINVAL;
}
break;
- case RELEASE:
+ case RELEASE_6:
case RELEASE_10:
/* Handled by CRH=1 in target_scsi2_reservation_release() */
ret = 0;
break;
- case RESERVE:
+ case RESERVE_6:
case RESERVE_10:
/* Handled by CRH=1 in target_scsi2_reservation_reserve() */
ret = 0;
@@ -459,7 +469,8 @@ static int core_scsi3_pr_seq_non_holder(
case ACCESS_CONTROL_OUT:
case INQUIRY:
case LOG_SENSE:
- case READ_MEDIA_SERIAL_NUMBER:
+ case SERVICE_ACTION_IN_12:
+ case READ_CAPACITY:
case REPORT_LUNS:
case REQUEST_SENSE:
case PERSISTENT_RESERVE_IN:
@@ -474,7 +485,7 @@ static int core_scsi3_pr_seq_non_holder(
* statement.
*/
if (!ret && !other_cdb) {
- pr_debug("Allowing explict CDB: 0x%02x for %s"
+ pr_debug("Allowing explicit CDB: 0x%02x for %s"
" reservation holder\n", cdb[0],
core_scsi3_pr_dump_type(pr_reg_type));
@@ -507,7 +518,7 @@ static int core_scsi3_pr_seq_non_holder(
*/
if (!registered_nexus) {
- pr_debug("Allowing implict CDB: 0x%02x"
+ pr_debug("Allowing implicit CDB: 0x%02x"
" for %s reservation on unregistered"
" nexus\n", cdb[0],
core_scsi3_pr_dump_type(pr_reg_type));
@@ -522,12 +533,24 @@ static int core_scsi3_pr_seq_non_holder(
* allow commands from registered nexuses.
*/
- pr_debug("Allowing implict CDB: 0x%02x for %s"
+ pr_debug("Allowing implicit CDB: 0x%02x for %s"
" reservation\n", cdb[0],
core_scsi3_pr_dump_type(pr_reg_type));
return 0;
}
+ } else if (we && registered_nexus) {
+ /*
+ * Reads are allowed for Write Exclusive locks
+ * from all registrants.
+ */
+ if (cmd->data_direction == DMA_FROM_DEVICE) {
+ pr_debug("Allowing READ CDB: 0x%02x for %s"
+ " reservation\n", cdb[0],
+ core_scsi3_pr_dump_type(pr_reg_type));
+
+ return 0;
+ }
}
pr_debug("%s Conflict for %sregistered nexus %s CDB: 0x%2x"
" for %s reservation\n", transport_dump_cmd_direction(cmd),
@@ -544,6 +567,7 @@ target_scsi3_pr_reservation_check(struct se_cmd *cmd)
struct se_device *dev = cmd->se_dev;
struct se_session *sess = cmd->se_sess;
u32 pr_reg_type;
+ bool isid_mismatch = false;
if (!dev->dev_pr_res_holder)
return 0;
@@ -556,7 +580,7 @@ target_scsi3_pr_reservation_check(struct se_cmd *cmd)
if (dev->dev_pr_res_holder->isid_present_at_reg) {
if (dev->dev_pr_res_holder->pr_reg_bin_isid !=
sess->sess_bin_isid) {
- pr_reg_type |= 0x80000000;
+ isid_mismatch = true;
goto check_nonholder;
}
}
@@ -564,7 +588,7 @@ target_scsi3_pr_reservation_check(struct se_cmd *cmd)
return 0;
check_nonholder:
- if (core_scsi3_pr_seq_non_holder(cmd, pr_reg_type))
+ if (core_scsi3_pr_seq_non_holder(cmd, pr_reg_type, isid_mismatch))
return TCM_RESERVATION_CONFLICT;
return 0;
}
@@ -592,7 +616,9 @@ static u32 core_scsi3_pr_generation(struct se_device *dev)
static struct t10_pr_registration *__core_scsi3_do_alloc_registration(
struct se_device *dev,
struct se_node_acl *nacl,
- struct se_dev_entry *deve,
+ struct se_lun *lun,
+ struct se_dev_entry *dest_deve,
+ u64 mapped_lun,
unsigned char *isid,
u64 sa_res_key,
int all_tg_pt,
@@ -613,13 +639,35 @@ static struct t10_pr_registration *__core_scsi3_do_alloc_registration(
INIT_LIST_HEAD(&pr_reg->pr_reg_atp_mem_list);
atomic_set(&pr_reg->pr_res_holders, 0);
pr_reg->pr_reg_nacl = nacl;
- pr_reg->pr_reg_deve = deve;
- pr_reg->pr_res_mapped_lun = deve->mapped_lun;
- pr_reg->pr_aptpl_target_lun = deve->se_lun->unpacked_lun;
+ /*
+ * For destination registrations for ALL_TG_PT=1 and SPEC_I_PT=1,
+ * the se_dev_entry->pr_ref will have been already obtained by
+ * core_get_se_deve_from_rtpi() or __core_scsi3_alloc_registration().
+ *
+ * Otherwise, locate se_dev_entry now and obtain a reference until
+ * registration completes in __core_scsi3_add_registration().
+ */
+ if (dest_deve) {
+ pr_reg->pr_reg_deve = dest_deve;
+ } else {
+ rcu_read_lock();
+ pr_reg->pr_reg_deve = target_nacl_find_deve(nacl, mapped_lun);
+ if (!pr_reg->pr_reg_deve) {
+ rcu_read_unlock();
+ pr_err("Unable to locate PR deve %s mapped_lun: %llu\n",
+ nacl->initiatorname, mapped_lun);
+ kmem_cache_free(t10_pr_reg_cache, pr_reg);
+ return NULL;
+ }
+ kref_get(&pr_reg->pr_reg_deve->pr_kref);
+ rcu_read_unlock();
+ }
+ pr_reg->pr_res_mapped_lun = mapped_lun;
+ pr_reg->pr_aptpl_target_lun = lun->unpacked_lun;
+ pr_reg->tg_pt_sep_rtpi = lun->lun_tpg->tpg_rtpi;
pr_reg->pr_res_key = sa_res_key;
pr_reg->pr_reg_all_tg_pt = all_tg_pt;
pr_reg->pr_reg_aptpl = aptpl;
- pr_reg->pr_reg_tg_pt_lun = deve->se_lun;
/*
* If an ISID value for this SCSI Initiator Port exists,
* save it to the registration now.
@@ -643,7 +691,9 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *);
static struct t10_pr_registration *__core_scsi3_alloc_registration(
struct se_device *dev,
struct se_node_acl *nacl,
+ struct se_lun *lun,
struct se_dev_entry *deve,
+ u64 mapped_lun,
unsigned char *isid,
u64 sa_res_key,
int all_tg_pt,
@@ -651,16 +701,18 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
{
struct se_dev_entry *deve_tmp;
struct se_node_acl *nacl_tmp;
- struct se_port *port, *port_tmp;
- struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
+ struct se_lun_acl *lacl_tmp;
+ struct se_lun *lun_tmp, *next, *dest_lun;
+ const struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
struct t10_pr_registration *pr_reg, *pr_reg_atp, *pr_reg_tmp, *pr_reg_tmp_safe;
int ret;
/*
* Create a registration for the I_T Nexus upon which the
* PROUT REGISTER was received.
*/
- pr_reg = __core_scsi3_do_alloc_registration(dev, nacl, deve, isid,
- sa_res_key, all_tg_pt, aptpl);
+ pr_reg = __core_scsi3_do_alloc_registration(dev, nacl, lun, deve, mapped_lun,
+ isid, sa_res_key, all_tg_pt,
+ aptpl);
if (!pr_reg)
return NULL;
/*
@@ -673,23 +725,23 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
* for ALL_TG_PT=1
*/
spin_lock(&dev->se_port_lock);
- list_for_each_entry_safe(port, port_tmp, &dev->dev_sep_list, sep_list) {
- atomic_inc(&port->sep_tg_pt_ref_cnt);
- smp_mb__after_atomic_inc();
+ list_for_each_entry_safe(lun_tmp, next, &dev->dev_sep_list, lun_dev_link) {
+ if (!percpu_ref_tryget_live(&lun_tmp->lun_ref))
+ continue;
spin_unlock(&dev->se_port_lock);
- spin_lock_bh(&port->sep_alua_lock);
- list_for_each_entry(deve_tmp, &port->sep_alua_list,
- alua_port_list) {
+ spin_lock(&lun_tmp->lun_deve_lock);
+ list_for_each_entry(deve_tmp, &lun_tmp->lun_deve_list, lun_link) {
/*
* This pointer will be NULL for demo mode MappedLUNs
- * that have not been make explict via a ConfigFS
+ * that have not been made explicit via a ConfigFS
* MappedLUN group for the SCSI Initiator Node ACL.
*/
if (!deve_tmp->se_lun_acl)
continue;
- nacl_tmp = deve_tmp->se_lun_acl->se_lun_nacl;
+ lacl_tmp = deve_tmp->se_lun_acl;
+ nacl_tmp = lacl_tmp->se_lun_nacl;
/*
* Skip the matching struct se_node_acl that is allocated
* above..
@@ -709,9 +761,8 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
if (strcmp(nacl->initiatorname, nacl_tmp->initiatorname))
continue;
- atomic_inc(&deve_tmp->pr_ref_count);
- smp_mb__after_atomic_inc();
- spin_unlock_bh(&port->sep_alua_lock);
+ kref_get(&deve_tmp->pr_kref);
+ spin_unlock(&lun_tmp->lun_deve_lock);
/*
* Grab a configfs group dependency that is released
* for the exception path at label out: below, or upon
@@ -722,10 +773,8 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
if (ret < 0) {
pr_err("core_scsi3_lunacl_depend"
"_item() failed\n");
- atomic_dec(&port->sep_tg_pt_ref_cnt);
- smp_mb__after_atomic_dec();
- atomic_dec(&deve_tmp->pr_ref_count);
- smp_mb__after_atomic_dec();
+ percpu_ref_put(&lun_tmp->lun_ref);
+ kref_put(&deve_tmp->pr_kref, target_pr_kref_release);
goto out;
}
/*
@@ -735,27 +784,26 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
* the original *pr_reg is processed in
* __core_scsi3_add_registration()
*/
+ dest_lun = deve_tmp->se_lun;
+
pr_reg_atp = __core_scsi3_do_alloc_registration(dev,
- nacl_tmp, deve_tmp, NULL,
+ nacl_tmp, dest_lun, deve_tmp,
+ deve_tmp->mapped_lun, NULL,
sa_res_key, all_tg_pt, aptpl);
if (!pr_reg_atp) {
- atomic_dec(&port->sep_tg_pt_ref_cnt);
- smp_mb__after_atomic_dec();
- atomic_dec(&deve_tmp->pr_ref_count);
- smp_mb__after_atomic_dec();
+ percpu_ref_put(&lun_tmp->lun_ref);
core_scsi3_lunacl_undepend_item(deve_tmp);
goto out;
}
list_add_tail(&pr_reg_atp->pr_reg_atp_mem_list,
&pr_reg->pr_reg_atp_list);
- spin_lock_bh(&port->sep_alua_lock);
+ spin_lock(&lun_tmp->lun_deve_lock);
}
- spin_unlock_bh(&port->sep_alua_lock);
+ spin_unlock(&lun_tmp->lun_deve_lock);
spin_lock(&dev->se_port_lock);
- atomic_dec(&port->sep_tg_pt_ref_cnt);
- smp_mb__after_atomic_dec();
+ percpu_ref_put(&lun_tmp->lun_ref);
}
spin_unlock(&dev->se_port_lock);
@@ -776,10 +824,10 @@ int core_scsi3_alloc_aptpl_registration(
u64 sa_res_key,
unsigned char *i_port,
unsigned char *isid,
- u32 mapped_lun,
+ u64 mapped_lun,
unsigned char *t_port,
u16 tpgt,
- u32 target_lun,
+ u64 target_lun,
int res_holder,
int all_tg_pt,
u8 type)
@@ -810,7 +858,6 @@ int core_scsi3_alloc_aptpl_registration(
pr_reg->pr_res_key = sa_res_key;
pr_reg->pr_reg_all_tg_pt = all_tg_pt;
pr_reg->pr_reg_aptpl = 1;
- pr_reg->pr_reg_tg_pt_lun = NULL;
pr_reg->pr_res_scope = 0; /* Always LUN_SCOPE */
pr_reg->pr_res_type = type;
/*
@@ -848,9 +895,8 @@ static void core_scsi3_aptpl_reserve(
struct se_node_acl *node_acl,
struct t10_pr_registration *pr_reg)
{
- char i_buf[PR_REG_ISID_ID_LEN];
+ char i_buf[PR_REG_ISID_ID_LEN] = { };
- memset(i_buf, 0, PR_REG_ISID_ID_LEN);
core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
spin_lock(&dev->dev_reservation_lock);
@@ -859,11 +905,11 @@ static void core_scsi3_aptpl_reserve(
pr_debug("SPC-3 PR [%s] Service Action: APTPL RESERVE created"
" new reservation holder TYPE: %s ALL_TG_PT: %d\n",
- tpg->se_tpg_tfo->get_fabric_name(),
+ tpg->se_tpg_tfo->fabric_name,
core_scsi3_pr_dump_type(pr_reg->pr_res_type),
(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
pr_debug("SPC-3 PR [%s] RESERVE Node: %s%s\n",
- tpg->se_tpg_tfo->get_fabric_name(), node_acl->initiatorname,
+ tpg->se_tpg_tfo->fabric_name, node_acl->initiatorname,
i_buf);
}
@@ -874,18 +920,16 @@ static int __core_scsi3_check_aptpl_registration(
struct se_device *dev,
struct se_portal_group *tpg,
struct se_lun *lun,
- u32 target_lun,
+ u64 target_lun,
struct se_node_acl *nacl,
- struct se_dev_entry *deve)
+ u64 mapped_lun)
{
struct t10_pr_registration *pr_reg, *pr_reg_tmp;
struct t10_reservation *pr_tmpl = &dev->t10_pr;
- unsigned char i_port[PR_APTPL_MAX_IPORT_LEN];
- unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
+ unsigned char i_port[PR_APTPL_MAX_IPORT_LEN] = { };
+ unsigned char t_port[PR_APTPL_MAX_TPORT_LEN] = { };
u16 tpgt;
- memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN);
- memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN);
/*
* Copy Initiator Port information from struct se_node_acl
*/
@@ -902,23 +946,35 @@ static int __core_scsi3_check_aptpl_registration(
spin_lock(&pr_tmpl->aptpl_reg_lock);
list_for_each_entry_safe(pr_reg, pr_reg_tmp, &pr_tmpl->aptpl_reg_list,
pr_reg_aptpl_list) {
+
if (!strcmp(pr_reg->pr_iport, i_port) &&
- (pr_reg->pr_res_mapped_lun == deve->mapped_lun) &&
+ (pr_reg->pr_res_mapped_lun == mapped_lun) &&
!(strcmp(pr_reg->pr_tport, t_port)) &&
(pr_reg->pr_reg_tpgt == tpgt) &&
(pr_reg->pr_aptpl_target_lun == target_lun)) {
+ /*
+ * Obtain the ->pr_reg_deve pointer + reference, that
+ * is released by __core_scsi3_add_registration() below.
+ */
+ rcu_read_lock();
+ pr_reg->pr_reg_deve = target_nacl_find_deve(nacl, mapped_lun);
+ if (!pr_reg->pr_reg_deve) {
+ pr_err("Unable to locate PR APTPL %s mapped_lun:"
+ " %llu\n", nacl->initiatorname, mapped_lun);
+ rcu_read_unlock();
+ continue;
+ }
+ kref_get(&pr_reg->pr_reg_deve->pr_kref);
+ rcu_read_unlock();
pr_reg->pr_reg_nacl = nacl;
- pr_reg->pr_reg_deve = deve;
- pr_reg->pr_reg_tg_pt_lun = lun;
-
+ pr_reg->tg_pt_sep_rtpi = lun->lun_tpg->tpg_rtpi;
list_del(&pr_reg->pr_reg_aptpl_list);
spin_unlock(&pr_tmpl->aptpl_reg_lock);
/*
* At this point all of the pointers in *pr_reg will
* be setup, so go ahead and add the registration.
*/
-
__core_scsi3_add_registration(dev, nacl, pr_reg, 0, 0);
/*
* If this registration is the reservation holder,
@@ -944,53 +1000,47 @@ int core_scsi3_check_aptpl_registration(
struct se_device *dev,
struct se_portal_group *tpg,
struct se_lun *lun,
- struct se_lun_acl *lun_acl)
+ struct se_node_acl *nacl,
+ u64 mapped_lun)
{
- struct se_node_acl *nacl = lun_acl->se_lun_nacl;
- struct se_dev_entry *deve = nacl->device_list[lun_acl->mapped_lun];
-
if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
return 0;
return __core_scsi3_check_aptpl_registration(dev, tpg, lun,
- lun->unpacked_lun, nacl, deve);
+ lun->unpacked_lun, nacl,
+ mapped_lun);
}
static void __core_scsi3_dump_registration(
- struct target_core_fabric_ops *tfo,
+ const struct target_core_fabric_ops *tfo,
struct se_device *dev,
struct se_node_acl *nacl,
struct t10_pr_registration *pr_reg,
enum register_type register_type)
{
struct se_portal_group *se_tpg = nacl->se_tpg;
- char i_buf[PR_REG_ISID_ID_LEN];
+ char i_buf[PR_REG_ISID_ID_LEN] = { };
- memset(&i_buf[0], 0, PR_REG_ISID_ID_LEN);
core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
pr_debug("SPC-3 PR [%s] Service Action: REGISTER%s Initiator"
- " Node: %s%s\n", tfo->get_fabric_name(), (register_type == REGISTER_AND_MOVE) ?
+ " Node: %s%s\n", tfo->fabric_name, (register_type == REGISTER_AND_MOVE) ?
"_AND_MOVE" : (register_type == REGISTER_AND_IGNORE_EXISTING_KEY) ?
"_AND_IGNORE_EXISTING_KEY" : "", nacl->initiatorname,
i_buf);
pr_debug("SPC-3 PR [%s] registration on Target Port: %s,0x%04x\n",
- tfo->get_fabric_name(), tfo->tpg_get_wwn(se_tpg),
+ tfo->fabric_name, tfo->tpg_get_wwn(se_tpg),
tfo->tpg_get_tag(se_tpg));
pr_debug("SPC-3 PR [%s] for %s TCM Subsystem %s Object Target"
- " Port(s)\n", tfo->get_fabric_name(),
+ " Port(s)\n", tfo->fabric_name,
(pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE",
dev->transport->name);
pr_debug("SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:"
- " 0x%08x APTPL: %d\n", tfo->get_fabric_name(),
+ " 0x%08x APTPL: %d\n", tfo->fabric_name,
pr_reg->pr_res_key, pr_reg->pr_res_generation,
pr_reg->pr_reg_aptpl);
}
-/*
- * this function can be called with struct se_device->dev_reservation_lock
- * when register_move = 1
- */
static void __core_scsi3_add_registration(
struct se_device *dev,
struct se_node_acl *nacl,
@@ -998,9 +1048,10 @@ static void __core_scsi3_add_registration(
enum register_type register_type,
int register_move)
{
- struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
+ const struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe;
struct t10_reservation *pr_tmpl = &dev->t10_pr;
+ struct se_dev_entry *deve;
/*
* Increment PRgeneration counter for struct se_device upon a successful
@@ -1017,7 +1068,6 @@ static void __core_scsi3_add_registration(
spin_lock(&pr_tmpl->registration_lock);
list_add_tail(&pr_reg->pr_reg_list, &pr_tmpl->registration_list);
- pr_reg->pr_reg_deve->def_pr_registered = 1;
__core_scsi3_dump_registration(tfo, dev, nacl, pr_reg, register_type);
spin_unlock(&pr_tmpl->registration_lock);
@@ -1025,13 +1075,15 @@ static void __core_scsi3_add_registration(
* Skip extra processing for ALL_TG_PT=0 or REGISTER_AND_MOVE.
*/
if (!pr_reg->pr_reg_all_tg_pt || register_move)
- return;
+ goto out;
/*
* Walk pr_reg->pr_reg_atp_list and add registrations for ALL_TG_PT=1
* allocated in __core_scsi3_alloc_registration()
*/
list_for_each_entry_safe(pr_reg_tmp, pr_reg_tmp_safe,
&pr_reg->pr_reg_atp_list, pr_reg_atp_mem_list) {
+ struct se_node_acl *nacl_tmp = pr_reg_tmp->pr_reg_nacl;
+
list_del(&pr_reg_tmp->pr_reg_atp_mem_list);
pr_reg_tmp->pr_res_generation = core_scsi3_pr_generation(dev);
@@ -1039,24 +1091,43 @@ static void __core_scsi3_add_registration(
spin_lock(&pr_tmpl->registration_lock);
list_add_tail(&pr_reg_tmp->pr_reg_list,
&pr_tmpl->registration_list);
- pr_reg_tmp->pr_reg_deve->def_pr_registered = 1;
- __core_scsi3_dump_registration(tfo, dev,
- pr_reg_tmp->pr_reg_nacl, pr_reg_tmp,
- register_type);
+ __core_scsi3_dump_registration(tfo, dev, nacl_tmp, pr_reg_tmp,
+ register_type);
spin_unlock(&pr_tmpl->registration_lock);
/*
- * Drop configfs group dependency reference from
- * __core_scsi3_alloc_registration()
+ * Drop configfs group dependency reference and deve->pr_kref
+ * obtained from __core_scsi3_alloc_registration() code.
*/
- core_scsi3_lunacl_undepend_item(pr_reg_tmp->pr_reg_deve);
+ rcu_read_lock();
+ deve = pr_reg_tmp->pr_reg_deve;
+ if (deve) {
+ set_bit(DEF_PR_REG_ACTIVE, &deve->deve_flags);
+ core_scsi3_lunacl_undepend_item(deve);
+ pr_reg_tmp->pr_reg_deve = NULL;
+ }
+ rcu_read_unlock();
+ }
+out:
+ /*
+ * Drop deve->pr_kref obtained in __core_scsi3_do_alloc_registration()
+ */
+ rcu_read_lock();
+ deve = pr_reg->pr_reg_deve;
+ if (deve) {
+ set_bit(DEF_PR_REG_ACTIVE, &deve->deve_flags);
+ kref_put(&deve->pr_kref, target_pr_kref_release);
+ pr_reg->pr_reg_deve = NULL;
}
+ rcu_read_unlock();
}
static int core_scsi3_alloc_registration(
struct se_device *dev,
struct se_node_acl *nacl,
+ struct se_lun *lun,
struct se_dev_entry *deve,
+ u64 mapped_lun,
unsigned char *isid,
u64 sa_res_key,
int all_tg_pt,
@@ -1066,8 +1137,9 @@ static int core_scsi3_alloc_registration(
{
struct t10_pr_registration *pr_reg;
- pr_reg = __core_scsi3_alloc_registration(dev, nacl, deve, isid,
- sa_res_key, all_tg_pt, aptpl);
+ pr_reg = __core_scsi3_alloc_registration(dev, nacl, lun, deve, mapped_lun,
+ isid, sa_res_key, all_tg_pt,
+ aptpl);
if (!pr_reg)
return -EPERM;
@@ -1083,7 +1155,6 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
{
struct t10_reservation *pr_tmpl = &dev->t10_pr;
struct t10_pr_registration *pr_reg, *pr_reg_tmp;
- struct se_portal_group *tpg;
spin_lock(&pr_tmpl->registration_lock);
list_for_each_entry_safe(pr_reg, pr_reg_tmp,
@@ -1094,23 +1165,12 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
if (pr_reg->pr_reg_nacl != nacl)
continue;
- tpg = pr_reg->pr_reg_nacl->se_tpg;
/*
* If this registration does NOT contain a fabric provided
* ISID, then we have found a match.
*/
if (!pr_reg->isid_present_at_reg) {
- /*
- * Determine if this SCSI device server requires that
- * SCSI Intiatior TransportID w/ ISIDs is enforced
- * for fabric modules (iSCSI) requiring them.
- */
- if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
- if (dev->dev_attrib.enforce_pr_isids)
- continue;
- }
- atomic_inc(&pr_reg->pr_res_holders);
- smp_mb__after_atomic_inc();
+ atomic_inc_mb(&pr_reg->pr_res_holders);
spin_unlock(&pr_tmpl->registration_lock);
return pr_reg;
}
@@ -1124,8 +1184,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
if (strcmp(isid, pr_reg->pr_reg_isid))
continue;
- atomic_inc(&pr_reg->pr_res_holders);
- smp_mb__after_atomic_inc();
+ atomic_inc_mb(&pr_reg->pr_res_holders);
spin_unlock(&pr_tmpl->registration_lock);
return pr_reg;
}
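/*
 * A hedged sketch of the atomic_inc_mb()/atomic_dec_mb() helpers the
 * hunks above switch to; they bundle the atomic op with the barrier
 * pairs the old code spelled out by hand. The sketch_ names are
 * hypothetical and the exact in-kernel definitions may differ:
 */
#define sketch_atomic_inc_mb(v)			\
	do {					\
		smp_mb__before_atomic();	\
		atomic_inc(v);			\
		smp_mb__after_atomic();		\
	} while (0)

#define sketch_atomic_dec_mb(v)			\
	do {					\
		smp_mb__before_atomic();	\
		atomic_dec(v);			\
		smp_mb__after_atomic();		\
	} while (0)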
@@ -1140,10 +1199,10 @@ static struct t10_pr_registration *core_scsi3_locate_pr_reg(
struct se_session *sess)
{
struct se_portal_group *tpg = nacl->se_tpg;
- unsigned char buf[PR_REG_ISID_LEN], *isid_ptr = NULL;
+ unsigned char buf[PR_REG_ISID_LEN] = { };
+ unsigned char *isid_ptr = NULL;
if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
- memset(&buf[0], 0, PR_REG_ISID_LEN);
tpg->se_tpg_tfo->sess_get_initiator_sid(sess, &buf[0],
PR_REG_ISID_LEN);
isid_ptr = &buf[0];
@@ -1154,11 +1213,10 @@ static struct t10_pr_registration *core_scsi3_locate_pr_reg(
static void core_scsi3_put_pr_reg(struct t10_pr_registration *pr_reg)
{
- atomic_dec(&pr_reg->pr_res_holders);
- smp_mb__after_atomic_dec();
+ atomic_dec_mb(&pr_reg->pr_res_holders);
}
-static int core_scsi3_check_implict_release(
+static int core_scsi3_check_implicit_release(
struct se_device *dev,
struct t10_pr_registration *pr_reg)
{
@@ -1174,7 +1232,7 @@ static int core_scsi3_check_implict_release(
}
if (pr_res_holder == pr_reg) {
/*
- * Perform an implict RELEASE if the registration that
+ * Perform an implicit RELEASE if the registration that
* is being released is holding the reservation.
*
* From spc4r17, section 5.7.11.1:
@@ -1186,13 +1244,13 @@ static int core_scsi3_check_implict_release(
* service action with the SERVICE ACTION RESERVATION KEY
* field set to zero (see 5.7.11.3).
*/
- __core_scsi3_complete_pro_release(dev, nacl, pr_reg, 0);
+ __core_scsi3_complete_pro_release(dev, nacl, pr_reg, 0, 1);
ret = 1;
/*
* For 'All Registrants' reservation types, all existing
* registrations are still processed as reservation holders
* in core_scsi3_pr_seq_non_holder() after the initial
- * reservation holder is implictly released here.
+ * reservation holder is implicitly released here.
*/
} else if (pr_reg->pr_reg_all_tg_pt &&
(!strcmp(pr_res_holder->pr_reg_nacl->initiatorname,
@@ -1209,32 +1267,35 @@ static int core_scsi3_check_implict_release(
return ret;
}
-/*
- * Called with struct t10_reservation->registration_lock held.
- */
static void __core_scsi3_free_registration(
struct se_device *dev,
struct t10_pr_registration *pr_reg,
struct list_head *preempt_and_abort_list,
int dec_holders)
+ __releases(&pr_tmpl->registration_lock)
+ __acquires(&pr_tmpl->registration_lock)
{
- struct target_core_fabric_ops *tfo =
+ const struct target_core_fabric_ops *tfo =
pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo;
struct t10_reservation *pr_tmpl = &dev->t10_pr;
- char i_buf[PR_REG_ISID_ID_LEN];
+ struct se_node_acl *nacl = pr_reg->pr_reg_nacl;
+ struct se_dev_entry *deve;
+ char i_buf[PR_REG_ISID_ID_LEN] = { };
+
+ lockdep_assert_held(&pr_tmpl->registration_lock);
- memset(i_buf, 0, PR_REG_ISID_ID_LEN);
core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
- pr_reg->pr_reg_deve->def_pr_registered = 0;
- pr_reg->pr_reg_deve->pr_res_key = 0;
- list_del(&pr_reg->pr_reg_list);
+ if (!list_empty(&pr_reg->pr_reg_list))
+ list_del(&pr_reg->pr_reg_list);
/*
* Caller accessing *pr_reg using core_scsi3_locate_pr_reg(),
* so call core_scsi3_put_pr_reg() to decrement our reference.
*/
if (dec_holders)
core_scsi3_put_pr_reg(pr_reg);
+
+ spin_unlock(&pr_tmpl->registration_lock);
/*
* Wait until all reference from any other I_T nexuses for this
* *pr_reg have been released. Because list_del() is called above,
@@ -1242,23 +1303,28 @@ static void __core_scsi3_free_registration(
* count back to zero, and we release *pr_reg.
*/
while (atomic_read(&pr_reg->pr_res_holders) != 0) {
- spin_unlock(&pr_tmpl->registration_lock);
pr_debug("SPC-3 PR [%s] waiting for pr_res_holders\n",
- tfo->get_fabric_name());
+ tfo->fabric_name);
cpu_relax();
- spin_lock(&pr_tmpl->registration_lock);
}
+ rcu_read_lock();
+ deve = target_nacl_find_deve(nacl, pr_reg->pr_res_mapped_lun);
+ if (deve)
+ clear_bit(DEF_PR_REG_ACTIVE, &deve->deve_flags);
+ rcu_read_unlock();
+
+ spin_lock(&pr_tmpl->registration_lock);
pr_debug("SPC-3 PR [%s] Service Action: UNREGISTER Initiator"
- " Node: %s%s\n", tfo->get_fabric_name(),
+ " Node: %s%s\n", tfo->fabric_name,
pr_reg->pr_reg_nacl->initiatorname,
i_buf);
pr_debug("SPC-3 PR [%s] for %s TCM Subsystem %s Object Target"
- " Port(s)\n", tfo->get_fabric_name(),
+ " Port(s)\n", tfo->fabric_name,
(pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE",
dev->transport->name);
pr_debug("SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:"
- " 0x%08x\n", tfo->get_fabric_name(), pr_reg->pr_res_key,
+ " 0x%08x\n", tfo->fabric_name, pr_reg->pr_res_key,
pr_reg->pr_res_generation);
if (!preempt_and_abort_list) {
@@ -1280,6 +1346,7 @@ void core_scsi3_free_pr_reg_from_nacl(
{
struct t10_reservation *pr_tmpl = &dev->t10_pr;
struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder;
+ bool free_reg = false;
/*
* If the passed se_node_acl matches the reservation holder,
* release the reservation.
@@ -1287,13 +1354,18 @@ void core_scsi3_free_pr_reg_from_nacl(
spin_lock(&dev->dev_reservation_lock);
pr_res_holder = dev->dev_pr_res_holder;
if ((pr_res_holder != NULL) &&
- (pr_res_holder->pr_reg_nacl == nacl))
- __core_scsi3_complete_pro_release(dev, nacl, pr_res_holder, 0);
+ (pr_res_holder->pr_reg_nacl == nacl)) {
+ __core_scsi3_complete_pro_release(dev, nacl, pr_res_holder, 0, 1);
+ free_reg = true;
+ }
spin_unlock(&dev->dev_reservation_lock);
/*
* Release any registration associated with the struct se_node_acl.
*/
spin_lock(&pr_tmpl->registration_lock);
+ if (pr_res_holder && free_reg)
+ __core_scsi3_free_registration(dev, pr_res_holder, NULL, 0);
+
list_for_each_entry_safe(pr_reg, pr_reg_tmp,
&pr_tmpl->registration_list, pr_reg_list) {
@@ -1316,7 +1388,7 @@ void core_scsi3_free_all_registrations(
if (pr_res_holder != NULL) {
struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
__core_scsi3_complete_pro_release(dev, pr_res_nacl,
- pr_res_holder, 0);
+ pr_res_holder, 0, 0);
}
spin_unlock(&dev->dev_reservation_lock);
@@ -1339,86 +1411,52 @@ void core_scsi3_free_all_registrations(
static int core_scsi3_tpg_depend_item(struct se_portal_group *tpg)
{
- return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys,
- &tpg->tpg_group.cg_item);
+ return target_depend_item(&tpg->tpg_group.cg_item);
}
static void core_scsi3_tpg_undepend_item(struct se_portal_group *tpg)
{
- configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys,
- &tpg->tpg_group.cg_item);
-
- atomic_dec(&tpg->tpg_pr_ref_count);
- smp_mb__after_atomic_dec();
+ target_undepend_item(&tpg->tpg_group.cg_item);
+ atomic_dec_mb(&tpg->tpg_pr_ref_count);
}
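/*
 * target_depend_item()/target_undepend_item() used above wrap
 * configfs_depend_item()/configfs_undepend_item() so callers no longer
 * have to thread a tf_subsys pointer through. A minimal usage sketch
 * (the error label is hypothetical); the dependency pins the configfs
 * directory so a concurrent rmdir from userspace cannot free the
 * object while PR code still holds pointers into it:
 */
	if (target_depend_item(&tpg->tpg_group.cg_item))
		goto out_undepend_fail;		/* hypothetical label */
	/* ... tpg is safe to use while the item is pinned ... */
	target_undepend_item(&tpg->tpg_group.cg_item);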
static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl)
{
- struct se_portal_group *tpg = nacl->se_tpg;
-
if (nacl->dynamic_node_acl)
return 0;
-
- return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys,
- &nacl->acl_group.cg_item);
+ return target_depend_item(&nacl->acl_group.cg_item);
}
static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl)
{
- struct se_portal_group *tpg = nacl->se_tpg;
-
- if (nacl->dynamic_node_acl) {
- atomic_dec(&nacl->acl_pr_ref_count);
- smp_mb__after_atomic_dec();
- return;
- }
-
- configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys,
- &nacl->acl_group.cg_item);
-
- atomic_dec(&nacl->acl_pr_ref_count);
- smp_mb__after_atomic_dec();
+ if (!nacl->dynamic_node_acl)
+ target_undepend_item(&nacl->acl_group.cg_item);
+ atomic_dec_mb(&nacl->acl_pr_ref_count);
}
static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve)
{
- struct se_lun_acl *lun_acl = se_deve->se_lun_acl;
- struct se_node_acl *nacl;
- struct se_portal_group *tpg;
/*
* For nacl->dynamic_node_acl=1
*/
- if (!lun_acl)
+ if (!se_deve->se_lun_acl)
return 0;
- nacl = lun_acl->se_lun_nacl;
- tpg = nacl->se_tpg;
-
- return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys,
- &lun_acl->se_lun_group.cg_item);
+ return target_depend_item(&se_deve->se_lun_acl->se_lun_group.cg_item);
}
static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
{
- struct se_lun_acl *lun_acl = se_deve->se_lun_acl;
- struct se_node_acl *nacl;
- struct se_portal_group *tpg;
/*
* For nacl->dynamic_node_acl=1
*/
- if (!lun_acl) {
- atomic_dec(&se_deve->pr_ref_count);
- smp_mb__after_atomic_dec();
+ if (!se_deve->se_lun_acl) {
+ kref_put(&se_deve->pr_kref, target_pr_kref_release);
return;
}
- nacl = lun_acl->se_lun_nacl;
- tpg = nacl->se_tpg;
-
- configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys,
- &lun_acl->se_lun_group.cg_item);
- atomic_dec(&se_deve->pr_ref_count);
- smp_mb__after_atomic_dec();
+ target_undepend_item(&se_deve->se_lun_acl->se_lun_group.cg_item);
+ kref_put(&se_deve->pr_kref, target_pr_kref_release);
}
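/*
 * kref_put(&se_deve->pr_kref, target_pr_kref_release) above drops the
 * per-deve reference taken when the registration was built. A hedged
 * sketch of the plausible shape of the release callback, assuming
 * struct se_dev_entry carries a completion that teardown waits on:
 */
static void sketch_pr_kref_release(struct kref *kref)
{
	struct se_dev_entry *deve = container_of(kref, struct se_dev_entry,
						 pr_kref);
	/* let the waiter in deve teardown proceed */
	complete(&deve->pr_comp);
}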
static sense_reason_t
@@ -1431,57 +1469,54 @@ core_scsi3_decode_spec_i_port(
int aptpl)
{
struct se_device *dev = cmd->se_dev;
- struct se_port *tmp_port;
struct se_portal_group *dest_tpg = NULL, *tmp_tpg;
struct se_session *se_sess = cmd->se_sess;
struct se_node_acl *dest_node_acl = NULL;
- struct se_dev_entry *dest_se_deve = NULL, *local_se_deve;
+ struct se_dev_entry *dest_se_deve = NULL;
struct t10_pr_registration *dest_pr_reg, *local_pr_reg, *pr_reg_e;
struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe;
LIST_HEAD(tid_dest_list);
struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp;
- struct target_core_fabric_ops *tmp_tf_ops;
- unsigned char *buf;
- unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident;
- char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
+ unsigned char *buf, *ptr, proto_ident;
+ unsigned char i_str[TRANSPORT_IQN_LEN];
+ char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN];
sense_reason_t ret;
u32 tpdl, tid_len = 0;
- int dest_local_nexus;
u32 dest_rtpi = 0;
+ bool tid_found;
- memset(dest_iport, 0, 64);
-
- local_se_deve = se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
/*
* Allocate a struct pr_transport_id_holder and setup the
- * local_node_acl and local_se_deve pointers and add to
- * struct list_head tid_dest_list for add registration
- * processing in the loop of tid_dest_list below.
+ * local_node_acl pointer and add to struct list_head tid_dest_list
+ * for add registration processing in the loop of tid_dest_list below.
*/
tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL);
if (!tidh_new) {
pr_err("Unable to allocate tidh_new\n");
- return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
}
INIT_LIST_HEAD(&tidh_new->dest_list);
tidh_new->dest_tpg = tpg;
tidh_new->dest_node_acl = se_sess->se_node_acl;
- tidh_new->dest_se_deve = local_se_deve;
local_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev,
- se_sess->se_node_acl, local_se_deve, l_isid,
+ se_sess->se_node_acl, cmd->se_lun,
+ NULL, cmd->orig_fe_lun, l_isid,
sa_res_key, all_tg_pt, aptpl);
if (!local_pr_reg) {
kfree(tidh_new);
- return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
+ }
+
+ if (core_scsi3_lunacl_depend_item(local_pr_reg->pr_reg_deve)) {
+ kfree(tidh_new);
+ kref_put(&local_pr_reg->pr_reg_deve->pr_kref,
+ target_pr_kref_release);
+ kmem_cache_free(t10_pr_reg_cache, local_pr_reg);
+ return TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
}
+
tidh_new->dest_pr_reg = local_pr_reg;
- /*
- * The local I_T nexus does not hold any configfs dependances,
- * so we set tid_h->dest_local_nexus=1 to prevent the
- * configfs_undepend_item() calls in the tid_dest_list loops below.
- */
- tidh_new->dest_local_nexus = 1;
list_add_tail(&tidh_new->dest_list, &tid_dest_list);
if (cmd->data_length < 28) {
@@ -1493,7 +1528,7 @@ core_scsi3_decode_spec_i_port(
buf = transport_kmap_data_sg(cmd);
if (!buf) {
- ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
goto out;
}
@@ -1502,10 +1537,7 @@ core_scsi3_decode_spec_i_port(
* first extract TransportID Parameter Data Length, and make sure
* the value matches up to the SCSI expected data transfer length.
*/
- tpdl = (buf[24] & 0xff) << 24;
- tpdl |= (buf[25] & 0xff) << 16;
- tpdl |= (buf[26] & 0xff) << 8;
- tpdl |= buf[27] & 0xff;
+ tpdl = get_unaligned_be32(&buf[24]);
if ((tpdl + 28) != cmd->data_length) {
pr_err("SPC-3 PR: Illegal tpdl: %u + 28 byte header"
@@ -1522,44 +1554,50 @@ core_scsi3_decode_spec_i_port(
ptr = &buf[28];
while (tpdl > 0) {
+ struct se_lun *dest_lun, *tmp_lun;
+
proto_ident = (ptr[0] & 0x0f);
dest_tpg = NULL;
spin_lock(&dev->se_port_lock);
- list_for_each_entry(tmp_port, &dev->dev_sep_list, sep_list) {
- tmp_tpg = tmp_port->sep_tpg;
- if (!tmp_tpg)
- continue;
- tmp_tf_ops = tmp_tpg->se_tpg_tfo;
- if (!tmp_tf_ops)
- continue;
- if (!tmp_tf_ops->get_fabric_proto_ident ||
- !tmp_tf_ops->tpg_parse_pr_out_transport_id)
- continue;
+ list_for_each_entry(tmp_lun, &dev->dev_sep_list, lun_dev_link) {
+ tmp_tpg = tmp_lun->lun_tpg;
+
/*
* Look for the matching proto_ident provided by
* the received TransportID
*/
- tmp_proto_ident = tmp_tf_ops->get_fabric_proto_ident(tmp_tpg);
- if (tmp_proto_ident != proto_ident)
+ if (tmp_tpg->proto_id != proto_ident)
continue;
- dest_rtpi = tmp_port->sep_rtpi;
+ dest_rtpi = tmp_lun->lun_tpg->tpg_rtpi;
- i_str = tmp_tf_ops->tpg_parse_pr_out_transport_id(
- tmp_tpg, (const char *)ptr, &tid_len,
- &iport_ptr);
- if (!i_str)
+ iport_ptr = NULL;
+ tid_found = target_parse_pr_out_transport_id(tmp_tpg,
+ ptr, &tid_len, &iport_ptr, i_str);
+ if (!tid_found)
continue;
+ /*
+ * Determine if this SCSI device server requires that
+ * SCSI Initiator TransportID w/ ISIDs is enforced
+ * for fabric modules (iSCSI) requiring them.
+ */
+ if (tpg->se_tpg_tfo->sess_get_initiator_sid &&
+ dev->dev_attrib.enforce_pr_isids &&
+ !iport_ptr) {
+ pr_warn("SPC-PR: enforce_pr_isids is set but a isid has not been sent in the SPEC_I_PT data for %s.",
+ i_str);
+ ret = TCM_INVALID_PARAMETER_LIST;
+ spin_unlock(&dev->se_port_lock);
+ goto out_unmap;
+ }
- atomic_inc(&tmp_tpg->tpg_pr_ref_count);
- smp_mb__after_atomic_inc();
+ atomic_inc_mb(&tmp_tpg->tpg_pr_ref_count);
spin_unlock(&dev->se_port_lock);
if (core_scsi3_tpg_depend_item(tmp_tpg)) {
pr_err(" core_scsi3_tpg_depend_item()"
" for tmp_tpg\n");
- atomic_dec(&tmp_tpg->tpg_pr_ref_count);
- smp_mb__after_atomic_dec();
+ atomic_dec_mb(&tmp_tpg->tpg_pr_ref_count);
ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
goto out_unmap;
}
@@ -1568,14 +1606,12 @@ core_scsi3_decode_spec_i_port(
* from the decoded fabric module specific TransportID
* at *i_str.
*/
- spin_lock_irq(&tmp_tpg->acl_node_lock);
+ mutex_lock(&tmp_tpg->acl_node_mutex);
dest_node_acl = __core_tpg_get_initiator_node_acl(
tmp_tpg, i_str);
- if (dest_node_acl) {
- atomic_inc(&dest_node_acl->acl_pr_ref_count);
- smp_mb__after_atomic_inc();
- }
- spin_unlock_irq(&tmp_tpg->acl_node_lock);
+ if (dest_node_acl)
+ atomic_inc_mb(&dest_node_acl->acl_pr_ref_count);
+ mutex_unlock(&tmp_tpg->acl_node_mutex);
if (!dest_node_acl) {
core_scsi3_tpg_undepend_item(tmp_tpg);
@@ -1586,17 +1622,15 @@ core_scsi3_decode_spec_i_port(
if (core_scsi3_nodeacl_depend_item(dest_node_acl)) {
pr_err("configfs_depend_item() failed"
" for dest_node_acl->acl_group\n");
- atomic_dec(&dest_node_acl->acl_pr_ref_count);
- smp_mb__after_atomic_dec();
+ atomic_dec_mb(&dest_node_acl->acl_pr_ref_count);
core_scsi3_tpg_undepend_item(tmp_tpg);
ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
goto out_unmap;
}
dest_tpg = tmp_tpg;
- pr_debug("SPC-3 PR SPEC_I_PT: Located %s Node:"
- " %s Port RTPI: %hu\n",
- dest_tpg->se_tpg_tfo->get_fabric_name(),
+ pr_debug("SPC-3 PR SPEC_I_PT: Located %s Node: %s Port RTPI: %u\n",
+ dest_tpg->se_tpg_tfo->fabric_name,
dest_node_acl->initiatorname, dest_rtpi);
spin_lock(&dev->se_port_lock);
@@ -1613,7 +1647,7 @@ core_scsi3_decode_spec_i_port(
pr_debug("SPC-3 PR SPEC_I_PT: Got %s data_length: %u tpdl: %u"
" tid_len: %d for %s + %s\n",
- dest_tpg->se_tpg_tfo->get_fabric_name(), cmd->data_length,
+ dest_tpg->se_tpg_tfo->fabric_name, cmd->data_length,
tpdl, tid_len, i_str, iport_ptr);
if (tid_len > tpdl) {
@@ -1632,9 +1666,8 @@ core_scsi3_decode_spec_i_port(
dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl,
dest_rtpi);
if (!dest_se_deve) {
- pr_err("Unable to locate %s dest_se_deve"
- " from destination RTPI: %hu\n",
- dest_tpg->se_tpg_tfo->get_fabric_name(),
+ pr_err("Unable to locate %s dest_se_deve from destination RTPI: %u\n",
+ dest_tpg->se_tpg_tfo->fabric_name,
dest_rtpi);
core_scsi3_nodeacl_undepend_item(dest_node_acl);
@@ -1646,8 +1679,7 @@ core_scsi3_decode_spec_i_port(
if (core_scsi3_lunacl_depend_item(dest_se_deve)) {
pr_err("core_scsi3_lunacl_depend_item()"
" failed\n");
- atomic_dec(&dest_se_deve->pr_ref_count);
- smp_mb__after_atomic_dec();
+ kref_put(&dest_se_deve->pr_kref, target_pr_kref_release);
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -1655,8 +1687,8 @@ core_scsi3_decode_spec_i_port(
}
pr_debug("SPC-3 PR SPEC_I_PT: Located %s Node: %s"
- " dest_se_deve mapped_lun: %u\n",
- dest_tpg->se_tpg_tfo->get_fabric_name(),
+ " dest_se_deve mapped_lun: %llu\n",
+ dest_tpg->se_tpg_tfo->fabric_name,
dest_node_acl->initiatorname, dest_se_deve->mapped_lun);
/*
@@ -1711,15 +1743,18 @@ core_scsi3_decode_spec_i_port(
* and then call __core_scsi3_add_registration() in the
* 2nd loop which will never fail.
*/
+ dest_lun = dest_se_deve->se_lun;
+
dest_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev,
- dest_node_acl, dest_se_deve, iport_ptr,
- sa_res_key, all_tg_pt, aptpl);
+ dest_node_acl, dest_lun, dest_se_deve,
+ dest_se_deve->mapped_lun, iport_ptr,
+ sa_res_key, all_tg_pt, aptpl);
if (!dest_pr_reg) {
core_scsi3_lunacl_undepend_item(dest_se_deve);
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
kfree(tidh_new);
- ret = TCM_INVALID_PARAMETER_LIST;
+ ret = TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
goto out_unmap;
}
tidh_new->dest_pr_reg = dest_pr_reg;
@@ -1751,7 +1786,6 @@ core_scsi3_decode_spec_i_port(
dest_node_acl = tidh->dest_node_acl;
dest_se_deve = tidh->dest_se_deve;
dest_pr_reg = tidh->dest_pr_reg;
- dest_local_nexus = tidh->dest_local_nexus;
list_del(&tidh->dest_list);
kfree(tidh);
@@ -1764,13 +1798,13 @@ core_scsi3_decode_spec_i_port(
pr_debug("SPC-3 PR [%s] SPEC_I_PT: Successfully"
" registered Transport ID for Node: %s%s Mapped LUN:"
- " %u\n", dest_tpg->se_tpg_tfo->get_fabric_name(),
- dest_node_acl->initiatorname, i_buf, dest_se_deve->mapped_lun);
+ " %llu\n", dest_tpg->se_tpg_tfo->fabric_name,
+ dest_node_acl->initiatorname, i_buf, (dest_se_deve) ?
+ dest_se_deve->mapped_lun : 0);
- if (dest_local_nexus)
+ if (dest_pr_reg == local_pr_reg)
continue;
- core_scsi3_lunacl_undepend_item(dest_se_deve);
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
}
@@ -1784,11 +1818,15 @@ out:
* including *dest_pr_reg and the configfs dependencies.
*/
list_for_each_entry_safe(tidh, tidh_tmp, &tid_dest_list, dest_list) {
+ bool is_local = false;
+
dest_tpg = tidh->dest_tpg;
dest_node_acl = tidh->dest_node_acl;
dest_se_deve = tidh->dest_se_deve;
dest_pr_reg = tidh->dest_pr_reg;
- dest_local_nexus = tidh->dest_local_nexus;
+
+ if (dest_pr_reg == local_pr_reg)
+ is_local = true;
list_del(&tidh->dest_list);
kfree(tidh);
@@ -1806,10 +1844,12 @@ out:
kmem_cache_free(t10_pr_reg_cache, dest_pr_reg);
- if (dest_local_nexus)
+ if (dest_se_deve)
+ core_scsi3_lunacl_undepend_item(dest_se_deve);
+
+ if (is_local)
continue;
- core_scsi3_lunacl_undepend_item(dest_se_deve);
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
}
@@ -1821,7 +1861,6 @@ static int core_scsi3_update_aptpl_buf(
unsigned char *buf,
u32 pr_aptpl_buf_len)
{
- struct se_lun *lun;
struct se_portal_group *tpg;
struct t10_pr_registration *pr_reg;
unsigned char tmp[512], isid_buf[32];
@@ -1840,7 +1879,6 @@ static int core_scsi3_update_aptpl_buf(
tmp[0] = '\0';
isid_buf[0] = '\0';
tpg = pr_reg->pr_reg_nacl->se_tpg;
- lun = pr_reg->pr_reg_tg_pt_lun;
/*
* Write out any ISID value to APTPL metadata that was included
* in the original registration.
@@ -1859,8 +1897,8 @@ static int core_scsi3_update_aptpl_buf(
"sa_res_key=%llu\n"
"res_holder=1\nres_type=%02x\n"
"res_scope=%02x\nres_all_tg_pt=%d\n"
- "mapped_lun=%u\n", reg_count,
- tpg->se_tpg_tfo->get_fabric_name(),
+ "mapped_lun=%llu\n", reg_count,
+ tpg->se_tpg_tfo->fabric_name,
pr_reg->pr_reg_nacl->initiatorname, isid_buf,
pr_reg->pr_res_key, pr_reg->pr_res_type,
pr_reg->pr_res_scope, pr_reg->pr_reg_all_tg_pt,
@@ -1869,16 +1907,16 @@ static int core_scsi3_update_aptpl_buf(
snprintf(tmp, 512, "PR_REG_START: %d\n"
"initiator_fabric=%s\ninitiator_node=%s\n%s"
"sa_res_key=%llu\nres_holder=0\n"
- "res_all_tg_pt=%d\nmapped_lun=%u\n",
- reg_count, tpg->se_tpg_tfo->get_fabric_name(),
+ "res_all_tg_pt=%d\nmapped_lun=%llu\n",
+ reg_count, tpg->se_tpg_tfo->fabric_name,
pr_reg->pr_reg_nacl->initiatorname, isid_buf,
pr_reg->pr_res_key, pr_reg->pr_reg_all_tg_pt,
pr_reg->pr_res_mapped_lun);
}
if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
- pr_err("Unable to update renaming"
- " APTPL metadata\n");
+ pr_err("Unable to update renaming APTPL metadata,"
+ " reallocating larger buffer\n");
ret = -EMSGSIZE;
goto out;
}
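/*
 * For orientation, a single non-holder registration serialized by the
 * snprintf() formats above comes out as a key=value block roughly like
 * the following (all values illustrative only):
 *
 *   PR_REG_START: 0
 *   initiator_fabric=iSCSI
 *   initiator_node=iqn.1994-05.com.example:client
 *   initiator_sid=0x00023d000001
 *   sa_res_key=12345
 *   res_holder=0
 *   res_all_tg_pt=0
 *   mapped_lun=0
 *   target_fabric=iSCSI
 *   target_node=iqn.2003-01.org.example:target
 *   tpgt=1
 *   port_rtpi=1
 *   target_lun=0
 *   PR_REG_END: 0
 */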
@@ -1888,15 +1926,16 @@ static int core_scsi3_update_aptpl_buf(
* Include information about the associated SCSI target port.
*/
snprintf(tmp, 512, "target_fabric=%s\ntarget_node=%s\n"
- "tpgt=%hu\nport_rtpi=%hu\ntarget_lun=%u\nPR_REG_END:"
- " %d\n", tpg->se_tpg_tfo->get_fabric_name(),
+ "tpgt=%hu\nport_rtpi=%hu\ntarget_lun=%llu\nPR_REG_END:"
+ " %d\n", tpg->se_tpg_tfo->fabric_name,
tpg->se_tpg_tfo->tpg_get_wwn(tpg),
tpg->se_tpg_tfo->tpg_get_tag(tpg),
- lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count);
+ pr_reg->tg_pt_sep_rtpi, pr_reg->pr_aptpl_target_lun,
+ reg_count);
if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
- pr_err("Unable to update renaming"
- " APTPL metadata\n");
+ pr_err("Unable to update renaming APTPL metadata,"
+ " reallocating larger buffer\n");
ret = -EMSGSIZE;
goto out;
}
@@ -1921,35 +1960,34 @@ static int __core_scsi3_write_aptpl_to_file(
struct t10_wwn *wwn = &dev->t10_wwn;
struct file *file;
int flags = O_RDWR | O_CREAT | O_TRUNC;
- char path[512];
+ char *path;
u32 pr_aptpl_buf_len;
int ret;
+ loff_t pos = 0;
- memset(path, 0, 512);
-
- if (strlen(&wwn->unit_serial[0]) >= 512) {
- pr_err("WWN value for struct se_device does not fit"
- " into path buffer\n");
- return -EMSGSIZE;
- }
+ path = kasprintf(GFP_KERNEL, "%s/pr/aptpl_%s", db_root,
+ &wwn->unit_serial[0]);
+ if (!path)
+ return -ENOMEM;
- snprintf(path, 512, "/var/target/pr/aptpl_%s", &wwn->unit_serial[0]);
file = filp_open(path, flags, 0600);
if (IS_ERR(file)) {
pr_err("filp_open(%s) for APTPL metadata"
" failed\n", path);
+ kfree(path);
return PTR_ERR(file);
}
pr_aptpl_buf_len = (strlen(buf) + 1); /* Add extra for NULL */
- ret = kernel_write(file, buf, pr_aptpl_buf_len, 0);
+ ret = kernel_write(file, buf, pr_aptpl_buf_len, &pos);
if (ret < 0)
pr_debug("Error writing APTPL metadata file: %s\n", path);
fput(file);
+ kfree(path);
- return ret ? -EIO : 0;
+ return (ret < 0) ? -EIO : 0;
}
/*
@@ -1959,7 +1997,7 @@ static int __core_scsi3_write_aptpl_to_file(
static sense_reason_t core_scsi3_update_and_write_aptpl(struct se_device *dev, bool aptpl)
{
unsigned char *buf;
- int rc;
+ int rc, len = PR_APTPL_BUF_LEN;
if (!aptpl) {
char *null_buf = "No Registrations or Reservations\n";
@@ -1973,25 +2011,26 @@ static sense_reason_t core_scsi3_update_and_write_aptpl(struct se_device *dev, b
return 0;
}
-
- buf = kzalloc(PR_APTPL_BUF_LEN, GFP_KERNEL);
+retry:
+ buf = vzalloc(len);
if (!buf)
return TCM_OUT_OF_RESOURCES;
- rc = core_scsi3_update_aptpl_buf(dev, buf, PR_APTPL_BUF_LEN);
+ rc = core_scsi3_update_aptpl_buf(dev, buf, len);
if (rc < 0) {
- kfree(buf);
- return TCM_OUT_OF_RESOURCES;
+ vfree(buf);
+ len *= 2;
+ goto retry;
}
rc = __core_scsi3_write_aptpl_to_file(dev, buf);
if (rc != 0) {
pr_err("SPC-3 PR: Could not update APTPL\n");
- kfree(buf);
+ vfree(buf);
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
dev->t10_pr.pr_aptpl_active = 1;
- kfree(buf);
+ vfree(buf);
pr_debug("SPC-3 PR: Set APTPL Bit Activated\n");
return 0;
}
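/*
 * The retry loop above is a generic grow-and-retry serialization
 * pattern: fill a buffer, and on overflow double the allocation and
 * start over. A condensed sketch of the same idiom, assuming a
 * hypothetical serialize() that returns a negative value on overflow:
 */
	int len = PR_APTPL_BUF_LEN;
	unsigned char *buf;

	for (;;) {
		buf = vzalloc(len);
		if (!buf)
			return -ENOMEM;
		if (serialize(dev, buf, len) >= 0)
			break;			/* output fit */
		vfree(buf);
		len *= 2;			/* too small: grow and retry */
	}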
@@ -2002,24 +2041,22 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
{
struct se_session *se_sess = cmd->se_sess;
struct se_device *dev = cmd->se_dev;
- struct se_dev_entry *se_deve;
struct se_lun *se_lun = cmd->se_lun;
struct se_portal_group *se_tpg;
struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_reg_tmp;
struct t10_reservation *pr_tmpl = &dev->t10_pr;
- unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL;
+ unsigned char isid_buf[PR_REG_ISID_LEN] = { };
+ unsigned char *isid_ptr = NULL;
sense_reason_t ret = TCM_NO_SENSE;
- int pr_holder = 0;
+ int pr_holder = 0, type;
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
se_tpg = se_sess->se_tpg;
- se_deve = se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
if (se_tpg->se_tpg_tfo->sess_get_initiator_sid) {
- memset(&isid_buf[0], 0, PR_REG_ISID_LEN);
se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, &isid_buf[0],
PR_REG_ISID_LEN);
isid_ptr = &isid_buf[0];
@@ -2047,12 +2084,13 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
* Logical Unit of the SCSI device server.
*/
if (core_scsi3_alloc_registration(cmd->se_dev,
- se_sess->se_node_acl, se_deve, isid_ptr,
+ se_sess->se_node_acl, cmd->se_lun,
+ NULL, cmd->orig_fe_lun, isid_ptr,
sa_res_key, all_tg_pt, aptpl,
register_type, 0)) {
pr_err("Unable to allocate"
" struct t10_pr_registration\n");
- return TCM_INVALID_PARAMETER_LIST;
+ return TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
}
} else {
/*
@@ -2068,7 +2106,6 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
if (ret != 0)
return ret;
}
-
return core_scsi3_update_and_write_aptpl(dev, aptpl);
}
@@ -2116,7 +2153,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
pr_reg->pr_res_key = sa_res_key;
pr_debug("SPC-3 PR [%s] REGISTER%s: Changed Reservation"
" Key for %s to: 0x%016Lx PRgeneration:"
- " 0x%08x\n", cmd->se_tfo->get_fabric_name(),
+ " 0x%08x\n", cmd->se_tfo->fabric_name,
(register_type == REGISTER_AND_IGNORE_EXISTING_KEY) ? "_AND_IGNORE_EXISTING_KEY" : "",
pr_reg->pr_reg_nacl->initiatorname,
pr_reg->pr_res_key, pr_reg->pr_res_generation);
@@ -2125,8 +2162,9 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
/*
* sa_res_key=0 Unregister Reservation Key for registered I_T Nexus.
*/
- pr_holder = core_scsi3_check_implict_release(
- cmd->se_dev, pr_reg);
+ type = pr_reg->pr_res_type;
+ pr_holder = core_scsi3_check_implicit_release(cmd->se_dev,
+ pr_reg);
if (pr_holder < 0) {
ret = TCM_RESERVATION_CONFLICT;
goto out;
@@ -2161,6 +2199,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
* Release the calling I_T Nexus registration now..
*/
__core_scsi3_free_registration(cmd->se_dev, pr_reg, NULL, 1);
+ pr_reg = NULL;
/*
* From spc4r17, section 5.7.11.3 Unregistering
@@ -2174,13 +2213,13 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
* RESERVATIONS RELEASED.
*/
if (pr_holder &&
- (pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY ||
- pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY)) {
+ (type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY ||
+ type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY)) {
list_for_each_entry(pr_reg_p,
&pr_tmpl->registration_list,
pr_reg_list) {
- core_scsi3_ua_allocate(
+ target_ua_allocate_lun(
pr_reg_p->pr_reg_nacl,
pr_reg_p->pr_res_mapped_lun,
0x2A,
@@ -2194,7 +2233,8 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
ret = core_scsi3_update_and_write_aptpl(dev, aptpl);
out:
- core_scsi3_put_pr_reg(pr_reg);
+ if (pr_reg)
+ core_scsi3_put_pr_reg(pr_reg);
return ret;
}
@@ -2228,11 +2268,9 @@ core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key)
struct se_lun *se_lun = cmd->se_lun;
struct t10_pr_registration *pr_reg, *pr_res_holder;
struct t10_reservation *pr_tmpl = &dev->t10_pr;
- char i_buf[PR_REG_ISID_ID_LEN];
+ char i_buf[PR_REG_ISID_ID_LEN] = { };
sense_reason_t ret;
- memset(i_buf, 0, PR_REG_ISID_ID_LEN);
-
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -2296,14 +2334,14 @@ core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key)
* the logical unit, then the command shall be completed with
* RESERVATION CONFLICT status.
*/
- if (pr_res_holder != pr_reg) {
+ if (!is_reservation_holder(pr_res_holder, pr_reg)) {
struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
pr_err("SPC-3 PR: Attempted RESERVE from"
" [%s]: %s while reservation already held by"
" [%s]: %s, returning RESERVATION_CONFLICT\n",
- cmd->se_tfo->get_fabric_name(),
+ cmd->se_tfo->fabric_name,
se_sess->se_node_acl->initiatorname,
- pr_res_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
+ pr_res_nacl->se_tpg->se_tpg_tfo->fabric_name,
pr_res_holder->pr_reg_nacl->initiatorname);
spin_unlock(&dev->dev_reservation_lock);
@@ -2324,9 +2362,9 @@ core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key)
" [%s]: %s trying to change TYPE and/or SCOPE,"
" while reservation already held by [%s]: %s,"
" returning RESERVATION_CONFLICT\n",
- cmd->se_tfo->get_fabric_name(),
+ cmd->se_tfo->fabric_name,
se_sess->se_node_acl->initiatorname,
- pr_res_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
+ pr_res_nacl->se_tpg->se_tpg_tfo->fabric_name,
pr_res_holder->pr_reg_nacl->initiatorname);
spin_unlock(&dev->dev_reservation_lock);
@@ -2359,10 +2397,10 @@ core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key)
pr_debug("SPC-3 PR [%s] Service Action: RESERVE created new"
" reservation holder TYPE: %s ALL_TG_PT: %d\n",
- cmd->se_tfo->get_fabric_name(), core_scsi3_pr_dump_type(type),
+ cmd->se_tfo->fabric_name, core_scsi3_pr_dump_type(type),
(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
pr_debug("SPC-3 PR [%s] RESERVE Node: %s%s\n",
- cmd->se_tfo->get_fabric_name(),
+ cmd->se_tfo->fabric_name,
se_sess->se_node_acl->initiatorname,
i_buf);
spin_unlock(&dev->dev_reservation_lock);
@@ -2395,32 +2433,66 @@ core_scsi3_emulate_pro_reserve(struct se_cmd *cmd, int type, int scope,
}
}
-/*
- * Called with struct se_device->dev_reservation_lock held.
- */
static void __core_scsi3_complete_pro_release(
struct se_device *dev,
struct se_node_acl *se_nacl,
struct t10_pr_registration *pr_reg,
- int explict)
+ int explicit,
+ int unreg)
{
- struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo;
- char i_buf[PR_REG_ISID_ID_LEN];
+ const struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo;
+ char i_buf[PR_REG_ISID_ID_LEN] = { };
+ int pr_res_type = 0, pr_res_scope = 0;
+
+ lockdep_assert_held(&dev->dev_reservation_lock);
- memset(i_buf, 0, PR_REG_ISID_ID_LEN);
core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
/*
* Go ahead and release the current PR reservation holder.
+ * If an All Registrants reservation is currently active and
+ * an unregister operation is requested, replace the current
+ * dev_pr_res_holder with another active registration.
*/
- dev->dev_pr_res_holder = NULL;
+ if (dev->dev_pr_res_holder) {
+ pr_res_type = dev->dev_pr_res_holder->pr_res_type;
+ pr_res_scope = dev->dev_pr_res_holder->pr_res_scope;
+ dev->dev_pr_res_holder->pr_res_type = 0;
+ dev->dev_pr_res_holder->pr_res_scope = 0;
+ dev->dev_pr_res_holder->pr_res_holder = 0;
+ dev->dev_pr_res_holder = NULL;
+ }
+ if (!unreg)
+ goto out;
- pr_debug("SPC-3 PR [%s] Service Action: %s RELEASE cleared"
- " reservation holder TYPE: %s ALL_TG_PT: %d\n",
- tfo->get_fabric_name(), (explict) ? "explict" : "implict",
- core_scsi3_pr_dump_type(pr_reg->pr_res_type),
- (pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
+ spin_lock(&dev->t10_pr.registration_lock);
+ list_del_init(&pr_reg->pr_reg_list);
+ /*
+ * If the I_T nexus is a reservation holder, the persistent reservation
+ * is of an all registrants type, and the I_T nexus is the last remaining
+ * registered I_T nexus, then the device server shall also release the
+ * persistent reservation.
+ */
+ if (!list_empty(&dev->t10_pr.registration_list) &&
+ ((pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
+ (pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))) {
+ dev->dev_pr_res_holder =
+ list_entry(dev->t10_pr.registration_list.next,
+ struct t10_pr_registration, pr_reg_list);
+ dev->dev_pr_res_holder->pr_res_type = pr_res_type;
+ dev->dev_pr_res_holder->pr_res_scope = pr_res_scope;
+ dev->dev_pr_res_holder->pr_res_holder = 1;
+ }
+ spin_unlock(&dev->t10_pr.registration_lock);
+out:
+ if (!dev->dev_pr_res_holder) {
+ pr_debug("SPC-3 PR [%s] Service Action: %s RELEASE cleared"
+ " reservation holder TYPE: %s ALL_TG_PT: %d\n",
+ tfo->fabric_name, (explicit) ? "explicit" :
+ "implicit", core_scsi3_pr_dump_type(pr_res_type),
+ (pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
+ }
pr_debug("SPC-3 PR [%s] RELEASE Node: %s%s\n",
- tfo->get_fabric_name(), se_nacl->initiatorname,
+ tfo->fabric_name, se_nacl->initiatorname,
i_buf);
/*
* Clear TYPE and SCOPE for the next PROUT Service Action: RESERVE
@@ -2437,7 +2509,6 @@ core_scsi3_emulate_pro_release(struct se_cmd *cmd, int type, int scope,
struct se_lun *se_lun = cmd->se_lun;
struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_res_holder;
struct t10_reservation *pr_tmpl = &dev->t10_pr;
- int all_reg = 0;
sense_reason_t ret = 0;
if (!se_sess || !se_lun) {
@@ -2474,13 +2545,9 @@ core_scsi3_emulate_pro_release(struct se_cmd *cmd, int type, int scope,
spin_unlock(&dev->dev_reservation_lock);
goto out_put_pr_reg;
}
- if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
- (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))
- all_reg = 1;
- if ((all_reg == 0) && (pr_res_holder != pr_reg)) {
+ if (!is_reservation_holder(pr_res_holder, pr_reg)) {
/*
- * Non 'All Registrants' PR Type cases..
* Release request from a registered I_T nexus that is not a
* persistent reservation holder. return GOOD status.
*/
@@ -2523,9 +2590,9 @@ core_scsi3_emulate_pro_release(struct se_cmd *cmd, int type, int scope,
" reservation from [%s]: %s with different TYPE "
"and/or SCOPE while reservation already held by"
" [%s]: %s, returning RESERVATION_CONFLICT\n",
- cmd->se_tfo->get_fabric_name(),
+ cmd->se_tfo->fabric_name,
se_sess->se_node_acl->initiatorname,
- pr_res_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
+ pr_res_nacl->se_tpg->se_tpg_tfo->fabric_name,
pr_res_holder->pr_reg_nacl->initiatorname);
spin_unlock(&dev->dev_reservation_lock);
@@ -2549,7 +2616,7 @@ core_scsi3_emulate_pro_release(struct se_cmd *cmd, int type, int scope,
* server shall not establish a unit attention condition.
*/
__core_scsi3_complete_pro_release(dev, se_sess->se_node_acl,
- pr_reg, 1);
+ pr_reg, 1, 0);
spin_unlock(&dev->dev_reservation_lock);
@@ -2575,7 +2642,7 @@ core_scsi3_emulate_pro_release(struct se_cmd *cmd, int type, int scope,
if (pr_reg_p == pr_reg)
continue;
- core_scsi3_ua_allocate(pr_reg_p->pr_reg_nacl,
+ target_ua_allocate_lun(pr_reg_p->pr_reg_nacl,
pr_reg_p->pr_res_mapped_lun,
0x2A, ASCQ_2AH_RESERVATIONS_RELEASED);
}
@@ -2598,7 +2665,7 @@ core_scsi3_emulate_pro_clear(struct se_cmd *cmd, u64 res_key)
struct se_session *se_sess = cmd->se_sess;
struct t10_reservation *pr_tmpl = &dev->t10_pr;
struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder;
- u32 pr_res_mapped_lun = 0;
+ u64 pr_res_mapped_lun = 0;
int calling_it_nexus = 0;
/*
* Locate the existing *pr_reg via struct se_node_acl pointers
@@ -2637,7 +2704,7 @@ core_scsi3_emulate_pro_clear(struct se_cmd *cmd, u64 res_key)
if (pr_res_holder) {
struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
__core_scsi3_complete_pro_release(dev, pr_res_nacl,
- pr_res_holder, 0);
+ pr_res_holder, 0, 0);
}
spin_unlock(&dev->dev_reservation_lock);
/*
@@ -2660,13 +2727,13 @@ core_scsi3_emulate_pro_clear(struct se_cmd *cmd, u64 res_key)
* additional sense code set to RESERVATIONS PREEMPTED.
*/
if (!calling_it_nexus)
- core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun,
+ target_ua_allocate_lun(pr_reg_nacl, pr_res_mapped_lun,
0x2A, ASCQ_2AH_RESERVATIONS_PREEMPTED);
}
spin_unlock(&pr_tmpl->registration_lock);
pr_debug("SPC-3 PR [%s] Service Action: CLEAR complete\n",
- cmd->se_tfo->get_fabric_name());
+ cmd->se_tfo->fabric_name);
core_scsi3_update_and_write_aptpl(cmd->se_dev, false);
@@ -2674,9 +2741,6 @@ core_scsi3_emulate_pro_clear(struct se_cmd *cmd, u64 res_key)
return 0;
}
-/*
- * Called with struct se_device->dev_reservation_lock held.
- */
static void __core_scsi3_complete_pro_preempt(
struct se_device *dev,
struct t10_pr_registration *pr_reg,
@@ -2686,17 +2750,18 @@ static void __core_scsi3_complete_pro_preempt(
enum preempt_type preempt_type)
{
struct se_node_acl *nacl = pr_reg->pr_reg_nacl;
- struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
- char i_buf[PR_REG_ISID_ID_LEN];
+ const struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
+ char i_buf[PR_REG_ISID_ID_LEN] = { };
+
+ lockdep_assert_held(&dev->dev_reservation_lock);
- memset(i_buf, 0, PR_REG_ISID_ID_LEN);
core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
/*
- * Do an implict RELEASE of the existing reservation.
+ * Do an implicit RELEASE of the existing reservation.
*/
if (dev->dev_pr_res_holder)
__core_scsi3_complete_pro_release(dev, nacl,
- dev->dev_pr_res_holder, 0);
+ dev->dev_pr_res_holder, 0, 0);
dev->dev_pr_res_holder = pr_reg;
pr_reg->pr_res_holder = 1;
@@ -2705,11 +2770,11 @@ static void __core_scsi3_complete_pro_preempt(
pr_debug("SPC-3 PR [%s] Service Action: PREEMPT%s created new"
" reservation holder TYPE: %s ALL_TG_PT: %d\n",
- tfo->get_fabric_name(), (preempt_type == PREEMPT_AND_ABORT) ? "_AND_ABORT" : "",
+ tfo->fabric_name, (preempt_type == PREEMPT_AND_ABORT) ? "_AND_ABORT" : "",
core_scsi3_pr_dump_type(type),
(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
pr_debug("SPC-3 PR [%s] PREEMPT%s from Node: %s%s\n",
- tfo->get_fabric_name(), (preempt_type == PREEMPT_AND_ABORT) ? "_AND_ABORT" : "",
+ tfo->fabric_name, (preempt_type == PREEMPT_AND_ABORT) ? "_AND_ABORT" : "",
nacl->initiatorname, i_buf);
/*
* For PREEMPT_AND_ABORT, add the preempting reservation's
@@ -2754,8 +2819,9 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
LIST_HEAD(preempt_and_abort_list);
struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder;
struct t10_reservation *pr_tmpl = &dev->t10_pr;
- u32 pr_res_mapped_lun = 0;
- int all_reg = 0, calling_it_nexus = 0, released_regs = 0;
+ u64 pr_res_mapped_lun = 0;
+ int all_reg = 0, calling_it_nexus = 0;
+ bool sa_res_key_unmatched = sa_res_key != 0;
int prh_type = 0, prh_scope = 0;
if (!se_sess)
@@ -2830,6 +2896,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
if (!all_reg) {
if (pr_reg->pr_res_key != sa_res_key)
continue;
+ sa_res_key_unmatched = false;
calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
pr_reg_nacl = pr_reg->pr_reg_nacl;
@@ -2837,7 +2904,6 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
__core_scsi3_free_registration(dev, pr_reg,
(preempt_type == PREEMPT_AND_ABORT) ? &preempt_and_abort_list :
NULL, calling_it_nexus);
- released_regs++;
} else {
/*
* Case for any existing all registrants type
@@ -2845,7 +2911,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
* 5.7.11.4 Preempting, Table 52 and Figure 7.
*
* For a ZERO SA Reservation key, release
- * all other registrations and do an implict
+ * all other registrations and do an implicit
* release of active persistent reservation.
*
* For a non-ZERO SA Reservation key, only
@@ -2855,6 +2921,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
if ((sa_res_key) &&
(pr_reg->pr_res_key != sa_res_key))
continue;
+ sa_res_key_unmatched = false;
calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
if (calling_it_nexus)
@@ -2865,10 +2932,9 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
__core_scsi3_free_registration(dev, pr_reg,
(preempt_type == PREEMPT_AND_ABORT) ? &preempt_and_abort_list :
NULL, 0);
- released_regs++;
}
if (!calling_it_nexus)
- core_scsi3_ua_allocate(pr_reg_nacl,
+ target_ua_allocate_lun(pr_reg_nacl,
pr_res_mapped_lun, 0x2A,
ASCQ_2AH_REGISTRATIONS_PREEMPTED);
}
@@ -2880,7 +2946,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
* registered reservation key, then the device server shall
* complete the command with RESERVATION CONFLICT status.
*/
- if (!released_regs) {
+ if (sa_res_key_unmatched) {
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg_n);
return TCM_RESERVATION_CONFLICT;
@@ -2894,13 +2960,28 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
__core_scsi3_complete_pro_preempt(dev, pr_reg_n,
(preempt_type == PREEMPT_AND_ABORT) ? &preempt_and_abort_list : NULL,
type, scope, preempt_type);
-
- if (preempt_type == PREEMPT_AND_ABORT)
- core_scsi3_release_preempt_and_abort(
- &preempt_and_abort_list, pr_reg_n);
}
+
spin_unlock(&dev->dev_reservation_lock);
+ /*
+ * SPC-4 5.12.11.2.6 Preempting and aborting
+ * The actions described in this subclause shall be performed
+ * for all I_T nexuses that are registered with the non-zero
+ * SERVICE ACTION RESERVATION KEY value, without regard for
+ * whether the preempted I_T nexuses hold the persistent
+ * reservation. If the SERVICE ACTION RESERVATION KEY field is
+ * set to zero and an all registrants persistent reservation is
+ * present, the device server shall abort all commands for all
+ * registered I_T nexuses.
+ */
+ if (preempt_type == PREEMPT_AND_ABORT) {
+ core_tmr_lun_reset(dev, NULL, &preempt_and_abort_list,
+ cmd);
+ core_scsi3_release_preempt_and_abort(
+ &preempt_and_abort_list, pr_reg_n);
+ }
+
if (pr_tmpl->pr_aptpl_active)
core_scsi3_update_and_write_aptpl(cmd->se_dev, true);
@@ -2940,8 +3021,8 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
*/
if (pr_reg_n != pr_res_holder)
__core_scsi3_complete_pro_release(dev,
- pr_res_holder->pr_reg_nacl,
- dev->dev_pr_res_holder, 0);
+ pr_res_holder->pr_reg_nacl,
+ dev->dev_pr_res_holder, 0, 0);
/*
* b) Remove the registrations for all I_T nexuses identified
* by the SERVICE ACTION RESERVATION KEY field, except the
@@ -2960,7 +3041,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
if (calling_it_nexus)
continue;
- if (pr_reg->pr_res_key != sa_res_key)
+ if (sa_res_key && pr_reg->pr_res_key != sa_res_key)
continue;
pr_reg_nacl = pr_reg->pr_reg_nacl;
@@ -2974,7 +3055,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
* persistent reservation and/or registration, with the
* additional sense code set to REGISTRATIONS PREEMPTED;
*/
- core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun, 0x2A,
+ target_ua_allocate_lun(pr_reg_nacl, pr_res_mapped_lun, 0x2A,
ASCQ_2AH_REGISTRATIONS_PREEMPTED);
}
spin_unlock(&pr_tmpl->registration_lock);
@@ -3007,7 +3088,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
if (calling_it_nexus)
continue;
- core_scsi3_ua_allocate(pr_reg->pr_reg_nacl,
+ target_ua_allocate_lun(pr_reg->pr_reg_nacl,
pr_reg->pr_res_mapped_lun, 0x2A,
ASCQ_2AH_RESERVATIONS_RELEASED);
}
@@ -3066,29 +3147,27 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
struct se_session *se_sess = cmd->se_sess;
struct se_device *dev = cmd->se_dev;
struct se_dev_entry *dest_se_deve = NULL;
- struct se_lun *se_lun = cmd->se_lun;
+ struct se_lun *se_lun = cmd->se_lun, *tmp_lun;
struct se_node_acl *pr_res_nacl, *pr_reg_nacl, *dest_node_acl = NULL;
- struct se_port *se_port;
struct se_portal_group *se_tpg, *dest_se_tpg = NULL;
- struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops;
+ const struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops;
struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg;
struct t10_reservation *pr_tmpl = &dev->t10_pr;
unsigned char *buf;
- unsigned char *initiator_str;
- char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
+ unsigned char initiator_str[TRANSPORT_IQN_LEN];
+ char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN] = { };
u32 tid_len, tmp_tid_len;
int new_reg = 0, type, scope, matching_iname;
sense_reason_t ret;
unsigned short rtpi;
unsigned char proto_ident;
+ bool tid_found;
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
- memset(dest_iport, 0, 64);
- memset(i_buf, 0, PR_REG_ISID_ID_LEN);
se_tpg = se_sess->se_tpg;
tf_ops = se_tpg->se_tpg_tfo;
/*
@@ -3132,16 +3211,12 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
*/
buf = transport_kmap_data_sg(cmd);
if (!buf) {
- ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
goto out_put_pr_reg;
}
- rtpi = (buf[18] & 0xff) << 8;
- rtpi |= buf[19] & 0xff;
- tid_len = (buf[20] & 0xff) << 24;
- tid_len |= (buf[21] & 0xff) << 16;
- tid_len |= (buf[22] & 0xff) << 8;
- tid_len |= buf[23] & 0xff;
+ rtpi = get_unaligned_be16(&buf[18]);
+ tid_len = get_unaligned_be32(&buf[20]);
transport_kunmap_data_sg(cmd);
buf = NULL;
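/*
 * get_unaligned_be16()/get_unaligned_be32() above (and
 * get_unaligned_be64() later in this file) replace the open-coded
 * shift-and-OR byte assembly. The decode performed here is equivalent
 * to:
 */
	rtpi	= get_unaligned_be16(&buf[18]);	/* (buf[18] << 8) | buf[19] */
	tid_len	= get_unaligned_be32(&buf[20]);	/* buf[20..23], MSB first */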
@@ -3154,25 +3229,21 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
}
spin_lock(&dev->se_port_lock);
- list_for_each_entry(se_port, &dev->dev_sep_list, sep_list) {
- if (se_port->sep_rtpi != rtpi)
- continue;
- dest_se_tpg = se_port->sep_tpg;
- if (!dest_se_tpg)
+ list_for_each_entry(tmp_lun, &dev->dev_sep_list, lun_dev_link) {
+ if (tmp_lun->lun_tpg->tpg_rtpi != rtpi)
continue;
+ dest_se_tpg = tmp_lun->lun_tpg;
dest_tf_ops = dest_se_tpg->se_tpg_tfo;
if (!dest_tf_ops)
continue;
- atomic_inc(&dest_se_tpg->tpg_pr_ref_count);
- smp_mb__after_atomic_inc();
+ atomic_inc_mb(&dest_se_tpg->tpg_pr_ref_count);
spin_unlock(&dev->se_port_lock);
if (core_scsi3_tpg_depend_item(dest_se_tpg)) {
pr_err("core_scsi3_tpg_depend_item() failed"
" for dest_se_tpg\n");
- atomic_dec(&dest_se_tpg->tpg_pr_ref_count);
- smp_mb__after_atomic_dec();
+ atomic_dec_mb(&dest_se_tpg->tpg_pr_ref_count);
ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
goto out_put_pr_reg;
}
@@ -3192,7 +3263,7 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
buf = transport_kmap_data_sg(cmd);
if (!buf) {
- ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
goto out_put_pr_reg;
}
proto_ident = (buf[24] & 0x0f);
@@ -3200,25 +3271,18 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
pr_debug("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:"
" 0x%02x\n", proto_ident);
- if (proto_ident != dest_tf_ops->get_fabric_proto_ident(dest_se_tpg)) {
+ if (proto_ident != dest_se_tpg->proto_id) {
pr_err("SPC-3 PR REGISTER_AND_MOVE: Received"
" proto_ident: 0x%02x does not match ident: 0x%02x"
" from fabric: %s\n", proto_ident,
- dest_tf_ops->get_fabric_proto_ident(dest_se_tpg),
- dest_tf_ops->get_fabric_name());
+ dest_se_tpg->proto_id,
+ dest_tf_ops->fabric_name);
ret = TCM_INVALID_PARAMETER_LIST;
goto out;
}
- if (dest_tf_ops->tpg_parse_pr_out_transport_id == NULL) {
- pr_err("SPC-3 PR REGISTER_AND_MOVE: Fabric does not"
- " containg a valid tpg_parse_pr_out_transport_id"
- " function pointer\n");
- ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- goto out;
- }
- initiator_str = dest_tf_ops->tpg_parse_pr_out_transport_id(dest_se_tpg,
- (const char *)&buf[24], &tmp_tid_len, &iport_ptr);
- if (!initiator_str) {
+ tid_found = target_parse_pr_out_transport_id(dest_se_tpg,
+ &buf[24], &tmp_tid_len, &iport_ptr, initiator_str);
+ if (!tid_found) {
pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
" initiator_str from Transport ID\n");
ret = TCM_INVALID_PARAMETER_LIST;
@@ -3229,7 +3293,7 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
buf = NULL;
pr_debug("SPC-3 PR [%s] Extracted initiator %s identifier: %s"
- " %s\n", dest_tf_ops->get_fabric_name(), (iport_ptr != NULL) ?
+ " %s\n", dest_tf_ops->fabric_name, (iport_ptr != NULL) ?
"port" : "device", initiator_str, (iport_ptr != NULL) ?
iport_ptr : "");
/*
@@ -3265,18 +3329,16 @@ after_iport_check:
/*
* Locate the destination struct se_node_acl from the received Transport ID
*/
- spin_lock_irq(&dest_se_tpg->acl_node_lock);
+ mutex_lock(&dest_se_tpg->acl_node_mutex);
dest_node_acl = __core_tpg_get_initiator_node_acl(dest_se_tpg,
initiator_str);
- if (dest_node_acl) {
- atomic_inc(&dest_node_acl->acl_pr_ref_count);
- smp_mb__after_atomic_inc();
- }
- spin_unlock_irq(&dest_se_tpg->acl_node_lock);
+ if (dest_node_acl)
+ atomic_inc_mb(&dest_node_acl->acl_pr_ref_count);
+ mutex_unlock(&dest_se_tpg->acl_node_mutex);
if (!dest_node_acl) {
pr_err("Unable to locate %s dest_node_acl for"
- " TransportID%s\n", dest_tf_ops->get_fabric_name(),
+ " TransportID%s\n", dest_tf_ops->fabric_name,
initiator_str);
ret = TCM_INVALID_PARAMETER_LIST;
goto out;
@@ -3285,15 +3347,14 @@ after_iport_check:
if (core_scsi3_nodeacl_depend_item(dest_node_acl)) {
pr_err("core_scsi3_nodeacl_depend_item() for"
" dest_node_acl\n");
- atomic_dec(&dest_node_acl->acl_pr_ref_count);
- smp_mb__after_atomic_dec();
+ atomic_dec_mb(&dest_node_acl->acl_pr_ref_count);
dest_node_acl = NULL;
ret = TCM_INVALID_PARAMETER_LIST;
goto out;
}
pr_debug("SPC-3 PR REGISTER_AND_MOVE: Found %s dest_node_acl:"
- " %s from TransportID\n", dest_tf_ops->get_fabric_name(),
+ " %s from TransportID\n", dest_tf_ops->fabric_name,
dest_node_acl->initiatorname);
/*
@@ -3303,23 +3364,22 @@ after_iport_check:
dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl, rtpi);
if (!dest_se_deve) {
pr_err("Unable to locate %s dest_se_deve from RTPI:"
- " %hu\n", dest_tf_ops->get_fabric_name(), rtpi);
+ " %hu\n", dest_tf_ops->fabric_name, rtpi);
ret = TCM_INVALID_PARAMETER_LIST;
goto out;
}
if (core_scsi3_lunacl_depend_item(dest_se_deve)) {
pr_err("core_scsi3_lunacl_depend_item() failed\n");
- atomic_dec(&dest_se_deve->pr_ref_count);
- smp_mb__after_atomic_dec();
+ kref_put(&dest_se_deve->pr_kref, target_pr_kref_release);
dest_se_deve = NULL;
ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
goto out;
}
pr_debug("SPC-3 PR REGISTER_AND_MOVE: Located %s node %s LUN"
- " ACL for dest_se_deve->mapped_lun: %u\n",
- dest_tf_ops->get_fabric_name(), dest_node_acl->initiatorname,
+ " ACL for dest_se_deve->mapped_lun: %llu\n",
+ dest_tf_ops->fabric_name, dest_node_acl->initiatorname,
dest_se_deve->mapped_lun);
/*
@@ -3341,7 +3401,7 @@ after_iport_check:
* From spc4r17 section 5.7.8 Table 50 --
* Register behaviors for a REGISTER AND MOVE service action
*/
- if (pr_res_holder != pr_reg) {
+ if (!is_reservation_holder(pr_res_holder, pr_reg)) {
pr_warn("SPC-3 PR REGISTER_AND_MOVE: Calling I_T"
" Nexus is not reservation holder\n");
spin_unlock(&dev->dev_reservation_lock);
@@ -3385,8 +3445,6 @@ after_iport_check:
* transport protocols where port names are not required;
* d) Register the reservation key specified in the SERVICE ACTION
* RESERVATION KEY field;
- * e) Retain the reservation key specified in the SERVICE ACTION
- * RESERVATION KEY field and associated information;
*
* Also, It is not an error for a REGISTER AND MOVE service action to
* register an I_T nexus that is already registered with the same
@@ -3395,23 +3453,32 @@ after_iport_check:
dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
iport_ptr);
if (!dest_pr_reg) {
- if (core_scsi3_alloc_registration(cmd->se_dev,
- dest_node_acl, dest_se_deve, iport_ptr,
- sa_res_key, 0, aptpl, 2, 1)) {
- spin_unlock(&dev->dev_reservation_lock);
- ret = TCM_INVALID_PARAMETER_LIST;
+ struct se_lun *dest_lun = dest_se_deve->se_lun;
+
+ spin_unlock(&dev->dev_reservation_lock);
+ if (core_scsi3_alloc_registration(cmd->se_dev, dest_node_acl,
+ dest_lun, dest_se_deve, dest_se_deve->mapped_lun,
+ iport_ptr, sa_res_key, 0, aptpl, 2, 1)) {
+ ret = TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
goto out;
}
+ spin_lock(&dev->dev_reservation_lock);
dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
iport_ptr);
new_reg = 1;
+ } else {
+ /*
+ * e) Retain the reservation key specified in the SERVICE ACTION
+ * RESERVATION KEY field and associated information;
+ */
+ dest_pr_reg->pr_res_key = sa_res_key;
}
/*
* f) Release the persistent reservation for the persistent reservation
* holder (i.e., the I_T nexus on which the REGISTER AND MOVE was received);
*/
__core_scsi3_complete_pro_release(dev, pr_res_nacl,
- dev->dev_pr_res_holder, 0);
+ dev->dev_pr_res_holder, 0, 0);
/*
* g) Move the persistent reservation to the specified I_T nexus using
* the same scope and type as the persistent reservation released in
@@ -3431,13 +3498,13 @@ after_iport_check:
pr_debug("SPC-3 PR [%s] Service Action: REGISTER_AND_MOVE"
" created new reservation holder TYPE: %s on object RTPI:"
- " %hu PRGeneration: 0x%08x\n", dest_tf_ops->get_fabric_name(),
+ " %hu PRGeneration: 0x%08x\n", dest_tf_ops->fabric_name,
core_scsi3_pr_dump_type(type), rtpi,
dest_pr_reg->pr_res_generation);
pr_debug("SPC-3 PR Successfully moved reservation from"
" %s Fabric Node: %s%s -> %s Fabric Node: %s %s\n",
- tf_ops->get_fabric_name(), pr_reg_nacl->initiatorname,
- i_buf, dest_tf_ops->get_fabric_name(),
+ tf_ops->fabric_name, pr_reg_nacl->initiatorname,
+ i_buf, dest_tf_ops->fabric_name,
dest_node_acl->initiatorname, (iport_ptr != NULL) ?
iport_ptr : "");
/*
@@ -3460,8 +3527,6 @@ after_iport_check:
core_scsi3_update_and_write_aptpl(cmd->se_dev, aptpl);
- transport_kunmap_data_sg(cmd);
-
core_scsi3_put_pr_reg(dest_pr_reg);
return 0;
out:
@@ -3478,14 +3543,35 @@ out_put_pr_reg:
return ret;
}
-static unsigned long long core_scsi3_extract_reservation_key(unsigned char *cdb)
+static sense_reason_t
+target_try_pr_out_pt(struct se_cmd *cmd, u8 sa, u64 res_key, u64 sa_res_key,
+ u8 type, bool aptpl, bool all_tg_pt, bool spec_i_pt)
{
- unsigned int __v1, __v2;
+ struct exec_cmd_ops *ops = cmd->protocol_data;
+
+ if (!cmd->se_sess || !cmd->se_lun) {
+ pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+
+ if (!ops->execute_pr_out) {
+ pr_err("SPC-3 PR: Device has been configured for PR passthrough but it's not supported by the backend.\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
- __v1 = (cdb[0] << 24) | (cdb[1] << 16) | (cdb[2] << 8) | cdb[3];
- __v2 = (cdb[4] << 24) | (cdb[5] << 16) | (cdb[6] << 8) | cdb[7];
+ switch (sa) {
+ case PRO_REGISTER_AND_MOVE:
+ case PRO_REPLACE_LOST_RESERVATION:
+ pr_err("SPC-3 PR: PRO_REGISTER_AND_MOVE and PRO_REPLACE_LOST_RESERVATION are not supported by PR passthrough.\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
- return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
+ if (spec_i_pt || all_tg_pt) {
+ pr_err("SPC-3 PR: SPEC_I_PT and ALL_TG_PT are not supported by PR passthrough.\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ return ops->execute_pr_out(cmd, sa, res_key, sa_res_key, type, aptpl);
}
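
The conversions in this hunk and the ones below replace open-coded byte shifting
(the old core_scsi3_extract_reservation_key()) with the kernel's
get_unaligned_be64()/get_unaligned_be24() helpers. A minimal userspace sketch of
the equivalence; the local get_be64() here is a stand-in for the kernel helper,
which is an assumption of this example, not part of the patch:

#include <stdint.h>
#include <stdio.h>

/* stand-in for the kernel's get_unaligned_be64() */
static uint64_t get_be64(const unsigned char *p)
{
	uint64_t v = 0;
	for (int i = 0; i < 8; i++)
		v = (v << 8) | p[i];
	return v;
}

/* the removed core_scsi3_extract_reservation_key() logic */
static uint64_t old_extract(const unsigned char *cdb)
{
	uint32_t v1 = ((uint32_t)cdb[0] << 24) | ((uint32_t)cdb[1] << 16) |
		      ((uint32_t)cdb[2] << 8) | cdb[3];
	uint32_t v2 = ((uint32_t)cdb[4] << 24) | ((uint32_t)cdb[5] << 16) |
		      ((uint32_t)cdb[6] << 8) | cdb[7];
	return (uint64_t)v2 | ((uint64_t)v1 << 32);
}

int main(void)
{
	unsigned char buf[8] = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef };

	/* both yield 0x0123456789abcdef: identical big-endian semantics */
	printf("old=%llx new=%llx\n",
	       (unsigned long long)old_extract(buf),
	       (unsigned long long)get_be64(buf));
	return 0;
}
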
/*
@@ -3494,6 +3580,7 @@ static unsigned long long core_scsi3_extract_reservation_key(unsigned char *cdb)
sense_reason_t
target_scsi3_emulate_pr_out(struct se_cmd *cmd)
{
+ struct se_device *dev = cmd->se_dev;
unsigned char *cdb = &cmd->t_task_cdb[0];
unsigned char *buf;
u64 res_key, sa_res_key;
@@ -3527,7 +3614,7 @@ target_scsi3_emulate_pr_out(struct se_cmd *cmd)
if (cmd->data_length < 24) {
pr_warn("SPC-PR: Received PR OUT parameter list"
" length too small: %u\n", cmd->data_length);
- return TCM_INVALID_PARAMETER_LIST;
+ return TCM_PARAMETER_LIST_LENGTH_ERROR;
}
/*
@@ -3544,8 +3631,8 @@ target_scsi3_emulate_pr_out(struct se_cmd *cmd)
/*
* From PERSISTENT_RESERVE_OUT parameter list (payload)
*/
- res_key = core_scsi3_extract_reservation_key(&buf[0]);
- sa_res_key = core_scsi3_extract_reservation_key(&buf[8]);
+ res_key = get_unaligned_be64(&buf[0]);
+ sa_res_key = get_unaligned_be64(&buf[8]);
/*
* REGISTER_AND_MOVE uses a different SA parameter list containing
* SCSI TransportIDs.
@@ -3558,13 +3645,20 @@ target_scsi3_emulate_pr_out(struct se_cmd *cmd)
aptpl = (buf[17] & 0x01);
unreg = (buf[17] & 0x02);
}
+ /*
+ * If the backend device has been configured to force APTPL metadata
+	 * write-out, go ahead and propagate aptpl=1 down now.
+ */
+ if (dev->dev_attrib.force_pr_aptpl)
+ aptpl = 1;
+
transport_kunmap_data_sg(cmd);
buf = NULL;
/*
* SPEC_I_PT=1 is only valid for Service action: REGISTER
*/
- if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER))
+ if (spec_i_pt && (sa != PRO_REGISTER))
return TCM_INVALID_PARAMETER_LIST;
/*
@@ -3576,11 +3670,17 @@ target_scsi3_emulate_pr_out(struct se_cmd *cmd)
* the sense key set to ILLEGAL REQUEST, and the additional sense
* code set to PARAMETER LIST LENGTH ERROR.
*/
- if (!spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER_AND_MOVE) &&
+ if (!spec_i_pt && (sa != PRO_REGISTER_AND_MOVE) &&
(cmd->data_length != 24)) {
pr_warn("SPC-PR: Received PR OUT illegal parameter"
" list length: %u\n", cmd->data_length);
- return TCM_INVALID_PARAMETER_LIST;
+ return TCM_PARAMETER_LIST_LENGTH_ERROR;
+ }
+
+ if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR) {
+ ret = target_try_pr_out_pt(cmd, sa, res_key, sa_res_key, type,
+ aptpl, all_tg_pt, spec_i_pt);
+ goto done;
}
/*
@@ -3620,12 +3720,13 @@ target_scsi3_emulate_pr_out(struct se_cmd *cmd)
break;
default:
pr_err("Unknown PERSISTENT_RESERVE_OUT service"
- " action: 0x%02x\n", cdb[1] & 0x1f);
+ " action: 0x%02x\n", sa);
return TCM_INVALID_CDB_FIELD;
}
+done:
if (!ret)
- target_complete_cmd(cmd, GOOD);
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
return ret;
}
@@ -3652,10 +3753,7 @@ core_scsi3_pri_read_keys(struct se_cmd *cmd)
if (!buf)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- buf[0] = ((dev->t10_pr.pr_generation >> 24) & 0xff);
- buf[1] = ((dev->t10_pr.pr_generation >> 16) & 0xff);
- buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff);
- buf[3] = (dev->t10_pr.pr_generation & 0xff);
+ put_unaligned_be32(dev->t10_pr.pr_generation, buf);
spin_lock(&dev->t10_pr.registration_lock);
list_for_each_entry(pr_reg, &dev->t10_pr.registration_list,
@@ -3664,26 +3762,22 @@ core_scsi3_pri_read_keys(struct se_cmd *cmd)
* Check for overflow of 8byte PRI READ_KEYS payload and
* next reservation key list descriptor.
*/
- if ((add_len + 8) > (cmd->data_length - 8))
- break;
-
- buf[off++] = ((pr_reg->pr_res_key >> 56) & 0xff);
- buf[off++] = ((pr_reg->pr_res_key >> 48) & 0xff);
- buf[off++] = ((pr_reg->pr_res_key >> 40) & 0xff);
- buf[off++] = ((pr_reg->pr_res_key >> 32) & 0xff);
- buf[off++] = ((pr_reg->pr_res_key >> 24) & 0xff);
- buf[off++] = ((pr_reg->pr_res_key >> 16) & 0xff);
- buf[off++] = ((pr_reg->pr_res_key >> 8) & 0xff);
- buf[off++] = (pr_reg->pr_res_key & 0xff);
-
+ if (off + 8 <= cmd->data_length) {
+ put_unaligned_be64(pr_reg->pr_res_key, &buf[off]);
+ off += 8;
+ }
+ /*
+ * SPC5r17: 6.16.2 READ KEYS service action
+ * The ADDITIONAL LENGTH field indicates the number of bytes in
+ * the Reservation key list. The contents of the ADDITIONAL
+	 * LENGTH field are not altered based on the allocation length.
+ */
add_len += 8;
}
spin_unlock(&dev->t10_pr.registration_lock);
- buf[4] = ((add_len >> 24) & 0xff);
- buf[5] = ((add_len >> 16) & 0xff);
- buf[6] = ((add_len >> 8) & 0xff);
- buf[7] = (add_len & 0xff);
+ put_unaligned_be32(add_len, &buf[4]);
+ target_set_cmd_data_length(cmd, 8 + add_len);
transport_kunmap_data_sg(cmd);
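
The READ KEYS change above decouples the ADDITIONAL LENGTH accounting from the
allocation length: a key is only copied while it fits in the payload, but
add_len still counts every registration, per SPC5r17 6.16.2. A small sketch of
that rule; put_be32()/put_be64() are local stand-ins for the kernel's
put_unaligned_be*() helpers:

#include <stdint.h>
#include <string.h>

static void put_be32(uint32_t v, unsigned char *p)
{
	p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
}

static void put_be64(uint64_t v, unsigned char *p)
{
	put_be32(v >> 32, p);
	put_be32(v, p + 4);
}

/*
 * Build a PRI READ KEYS payload: PRGENERATION at byte 0, ADDITIONAL
 * LENGTH at byte 4, then one 8-byte key per registration.  Keys that
 * do not fit in data_length are dropped, but ADDITIONAL LENGTH still
 * reflects the full key list.  Returns the untruncated payload size.
 */
static uint32_t build_read_keys(unsigned char *buf, uint32_t data_length,
				uint32_t pr_generation,
				const uint64_t *keys, int nr_keys)
{
	uint32_t off = 8, add_len = 0;

	memset(buf, 0, data_length);
	put_be32(pr_generation, buf);
	for (int i = 0; i < nr_keys; i++) {
		if (off + 8 <= data_length) {
			put_be64(keys[i], &buf[off]);
			off += 8;
		}
		add_len += 8;	/* counted even when truncated */
	}
	put_be32(add_len, &buf[4]);
	return 8 + add_len;
}

int main(void)
{
	unsigned char payload[16];	/* room for header + one key only */
	const uint64_t keys[2] = { 0xabcdull, 0x1234ull };

	/* two keys registered, one fits: ADDITIONAL LENGTH still says 16 */
	build_read_keys(payload, sizeof(payload), 7, keys, 2);
	return 0;
}
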
@@ -3702,7 +3796,7 @@ core_scsi3_pri_read_reservation(struct se_cmd *cmd)
struct t10_pr_registration *pr_reg;
unsigned char *buf;
u64 pr_res_key;
- u32 add_len = 16; /* Hardcoded to 16 when a reservation is held. */
+ u32 add_len = 0;
if (cmd->data_length < 8) {
pr_err("PRIN SA READ_RESERVATIONS SCSI Data Length: %u"
@@ -3714,21 +3808,16 @@ core_scsi3_pri_read_reservation(struct se_cmd *cmd)
if (!buf)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- buf[0] = ((dev->t10_pr.pr_generation >> 24) & 0xff);
- buf[1] = ((dev->t10_pr.pr_generation >> 16) & 0xff);
- buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff);
- buf[3] = (dev->t10_pr.pr_generation & 0xff);
+ put_unaligned_be32(dev->t10_pr.pr_generation, &buf[0]);
spin_lock(&dev->dev_reservation_lock);
pr_reg = dev->dev_pr_res_holder;
if (pr_reg) {
/*
- * Set the hardcoded Additional Length
+ * Set the Additional Length to 16 when a reservation is held
*/
- buf[4] = ((add_len >> 24) & 0xff);
- buf[5] = ((add_len >> 16) & 0xff);
- buf[6] = ((add_len >> 8) & 0xff);
- buf[7] = (add_len & 0xff);
+ add_len = 16;
+ put_unaligned_be32(add_len, &buf[4]);
if (cmd->data_length < 22)
goto err;
@@ -3755,14 +3844,7 @@ core_scsi3_pri_read_reservation(struct se_cmd *cmd)
else
pr_res_key = pr_reg->pr_res_key;
- buf[8] = ((pr_res_key >> 56) & 0xff);
- buf[9] = ((pr_res_key >> 48) & 0xff);
- buf[10] = ((pr_res_key >> 40) & 0xff);
- buf[11] = ((pr_res_key >> 32) & 0xff);
- buf[12] = ((pr_res_key >> 24) & 0xff);
- buf[13] = ((pr_res_key >> 16) & 0xff);
- buf[14] = ((pr_res_key >> 8) & 0xff);
- buf[15] = (pr_res_key & 0xff);
+ put_unaligned_be64(pr_res_key, &buf[8]);
/*
* Set the SCOPE and TYPE
*/
@@ -3770,6 +3852,8 @@ core_scsi3_pri_read_reservation(struct se_cmd *cmd)
(pr_reg->pr_res_type & 0x0f);
}
+ target_set_cmd_data_length(cmd, 8 + add_len);
+
err:
spin_unlock(&dev->dev_reservation_lock);
transport_kunmap_data_sg(cmd);
@@ -3788,7 +3872,7 @@ core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
struct se_device *dev = cmd->se_dev;
struct t10_reservation *pr_tmpl = &dev->t10_pr;
unsigned char *buf;
- u16 add_len = 8; /* Hardcoded to 8. */
+ u16 len = 8; /* Hardcoded to 8. */
if (cmd->data_length < 6) {
pr_err("PRIN SA REPORT_CAPABILITIES SCSI Data Length:"
@@ -3800,8 +3884,7 @@ core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
if (!buf)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- buf[0] = ((add_len << 8) & 0xff);
- buf[1] = (add_len & 0xff);
+ put_unaligned_be16(len, &buf[0]);
	buf[2] |= 0x10; /* CRH: Compatible Reservation Handling bit. */
buf[2] |= 0x08; /* SIP_C: Specify Initiator Ports Capable bit */
buf[2] |= 0x04; /* ATP_C: All Target Ports Capable bit */
@@ -3830,6 +3913,8 @@ core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
buf[4] |= 0x02; /* PR_TYPE_WRITE_EXCLUSIVE */
buf[5] |= 0x01; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */
+ target_set_cmd_data_length(cmd, len);
+
transport_kunmap_data_sg(cmd);
return 0;
@@ -3849,9 +3934,11 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
struct t10_pr_registration *pr_reg, *pr_reg_tmp;
struct t10_reservation *pr_tmpl = &dev->t10_pr;
unsigned char *buf;
- u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len;
+ u32 add_desc_len = 0, add_len = 0;
u32 off = 8; /* off into first Full Status descriptor */
- int format_code = 0;
+ int format_code = 0, pr_res_type = 0, pr_res_scope = 0;
+ int exp_desc_len, desc_len;
+ bool all_reg = false;
if (cmd->data_length < 8) {
pr_err("PRIN SA READ_FULL_STATUS SCSI Data Length: %u"
@@ -3863,10 +3950,20 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
if (!buf)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- buf[0] = ((dev->t10_pr.pr_generation >> 24) & 0xff);
- buf[1] = ((dev->t10_pr.pr_generation >> 16) & 0xff);
- buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff);
- buf[3] = (dev->t10_pr.pr_generation & 0xff);
+ put_unaligned_be32(dev->t10_pr.pr_generation, &buf[0]);
+
+ spin_lock(&dev->dev_reservation_lock);
+ if (dev->dev_pr_res_holder) {
+ struct t10_pr_registration *pr_holder = dev->dev_pr_res_holder;
+
+ if (pr_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG ||
+ pr_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG) {
+ all_reg = true;
+ pr_res_type = pr_holder->pr_res_type;
+ pr_res_scope = pr_holder->pr_res_scope;
+ }
+ }
+ spin_unlock(&dev->dev_reservation_lock);
spin_lock(&pr_tmpl->registration_lock);
list_for_each_entry_safe(pr_reg, pr_reg_tmp,
@@ -3876,35 +3973,27 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
se_tpg = pr_reg->pr_reg_nacl->se_tpg;
add_desc_len = 0;
- atomic_inc(&pr_reg->pr_res_holders);
- smp_mb__after_atomic_inc();
+ atomic_inc_mb(&pr_reg->pr_res_holders);
spin_unlock(&pr_tmpl->registration_lock);
/*
* Determine expected length of $FABRIC_MOD specific
		 * TransportID full status descriptor.
*/
- exp_desc_len = se_tpg->se_tpg_tfo->tpg_get_pr_transport_id_len(
- se_tpg, se_nacl, pr_reg, &format_code);
-
- if ((exp_desc_len + add_len) > cmd->data_length) {
+ exp_desc_len = target_get_pr_transport_id_len(se_nacl, pr_reg,
+ &format_code);
+ if (exp_desc_len < 0 ||
+ exp_desc_len + add_len > cmd->data_length) {
pr_warn("SPC-3 PRIN READ_FULL_STATUS ran"
" out of buffer: %d\n", cmd->data_length);
spin_lock(&pr_tmpl->registration_lock);
- atomic_dec(&pr_reg->pr_res_holders);
- smp_mb__after_atomic_dec();
+ atomic_dec_mb(&pr_reg->pr_res_holders);
break;
}
/*
* Set RESERVATION KEY
*/
- buf[off++] = ((pr_reg->pr_res_key >> 56) & 0xff);
- buf[off++] = ((pr_reg->pr_res_key >> 48) & 0xff);
- buf[off++] = ((pr_reg->pr_res_key >> 40) & 0xff);
- buf[off++] = ((pr_reg->pr_res_key >> 32) & 0xff);
- buf[off++] = ((pr_reg->pr_res_key >> 24) & 0xff);
- buf[off++] = ((pr_reg->pr_res_key >> 16) & 0xff);
- buf[off++] = ((pr_reg->pr_res_key >> 8) & 0xff);
- buf[off++] = (pr_reg->pr_res_key & 0xff);
+ put_unaligned_be64(pr_reg->pr_res_key, &buf[off]);
+ off += 8;
		off += 4; /* Skip over reserved area */
/*
@@ -3917,14 +4006,20 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
* reservation holder for PR_HOLDER bit.
*
* Also, if this registration is the reservation
- * holder, fill in SCOPE and TYPE in the next byte.
+ * holder or there is an All Registrants reservation
+ * active, fill in SCOPE and TYPE in the next byte.
*/
if (pr_reg->pr_res_holder) {
buf[off++] |= 0x01;
buf[off++] = (pr_reg->pr_res_scope & 0xf0) |
(pr_reg->pr_res_type & 0x0f);
- } else
+ } else if (all_reg) {
+ buf[off++] |= 0x01;
+ buf[off++] = (pr_res_scope & 0xf0) |
+ (pr_res_type & 0x0f);
+ } else {
off += 2;
+ }
off += 4; /* Skip over reserved area */
/*
@@ -3938,29 +4033,31 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
* IDENTIFIER field are not defined by this standard.
*/
if (!pr_reg->pr_reg_all_tg_pt) {
- struct se_port *port = pr_reg->pr_reg_tg_pt_lun->lun_sep;
+ u16 sep_rtpi = pr_reg->tg_pt_sep_rtpi;
- buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
- buf[off++] = (port->sep_rtpi & 0xff);
+ put_unaligned_be16(sep_rtpi, &buf[off]);
+ off += 2;
} else
off += 2; /* Skip over RELATIVE TARGET PORT IDENTIFIER */
+ buf[off+4] = se_tpg->proto_id;
+
/*
- * Now, have the $FABRIC_MOD fill in the protocol identifier
+ * Now, have the $FABRIC_MOD fill in the transport ID.
*/
- desc_len = se_tpg->se_tpg_tfo->tpg_get_pr_transport_id(se_tpg,
- se_nacl, pr_reg, &format_code, &buf[off+4]);
+ desc_len = target_get_pr_transport_id(se_nacl, pr_reg,
+ &format_code, &buf[off+4]);
spin_lock(&pr_tmpl->registration_lock);
- atomic_dec(&pr_reg->pr_res_holders);
- smp_mb__after_atomic_dec();
+ atomic_dec_mb(&pr_reg->pr_res_holders);
+
+ if (desc_len < 0)
+ break;
/*
* Set the ADDITIONAL DESCRIPTOR LENGTH
*/
- buf[off++] = ((desc_len >> 24) & 0xff);
- buf[off++] = ((desc_len >> 16) & 0xff);
- buf[off++] = ((desc_len >> 8) & 0xff);
- buf[off++] = (desc_len & 0xff);
+ put_unaligned_be32(desc_len, &buf[off]);
+ off += 4;
/*
		 * Size of full descriptor header minus TransportID
		 * containing $FABRIC_MOD specific initiator device/port
@@ -3977,19 +4074,50 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
/*
* Set ADDITIONAL_LENGTH
*/
- buf[4] = ((add_len >> 24) & 0xff);
- buf[5] = ((add_len >> 16) & 0xff);
- buf[6] = ((add_len >> 8) & 0xff);
- buf[7] = (add_len & 0xff);
+ put_unaligned_be32(add_len, &buf[4]);
+ target_set_cmd_data_length(cmd, 8 + add_len);
transport_kunmap_data_sg(cmd);
return 0;
}
+static sense_reason_t target_try_pr_in_pt(struct se_cmd *cmd, u8 sa)
+{
+ struct exec_cmd_ops *ops = cmd->protocol_data;
+ unsigned char *buf;
+ sense_reason_t ret;
+
+ if (cmd->data_length < 8) {
+ pr_err("PRIN SA SCSI Data Length: %u too small\n",
+ cmd->data_length);
+ return TCM_INVALID_CDB_FIELD;
+ }
+
+ if (!ops->execute_pr_in) {
+ pr_err("SPC-3 PR: Device has been configured for PR passthrough but it's not supported by the backend.\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ if (sa == PRI_READ_FULL_STATUS) {
+ pr_err("SPC-3 PR: PRI_READ_FULL_STATUS is not supported by PR passthrough.\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ buf = transport_kmap_data_sg(cmd);
+ if (!buf)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ ret = ops->execute_pr_in(cmd, sa, buf);
+
+ transport_kunmap_data_sg(cmd);
+ return ret;
+}
+
sense_reason_t
target_scsi3_emulate_pr_in(struct se_cmd *cmd)
{
+ u8 sa = cmd->t_task_cdb[1] & 0x1f;
sense_reason_t ret;
/*
@@ -4008,7 +4136,12 @@ target_scsi3_emulate_pr_in(struct se_cmd *cmd)
return TCM_RESERVATION_CONFLICT;
}
- switch (cmd->t_task_cdb[1] & 0x1f) {
+ if (cmd->se_dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR) {
+ ret = target_try_pr_in_pt(cmd, sa);
+ goto done;
+ }
+
+ switch (sa) {
case PRI_READ_KEYS:
ret = core_scsi3_pri_read_keys(cmd);
break;
@@ -4027,8 +4160,9 @@ target_scsi3_emulate_pr_in(struct se_cmd *cmd)
return TCM_INVALID_CDB_FIELD;
}
+done:
if (!ret)
- target_complete_cmd(cmd, GOOD);
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
return ret;
}
@@ -4042,7 +4176,9 @@ target_check_reservation(struct se_cmd *cmd)
return 0;
if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
return 0;
- if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+ if (!dev->dev_attrib.emulate_pr)
+ return 0;
+ if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
return 0;
spin_lock(&dev->dev_reservation_lock);
diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h
index ed75cdd32cb0..b793c99637ab 100644
--- a/drivers/target/target_core_pr.h
+++ b/drivers/target/target_core_pr.h
@@ -1,9 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef TARGET_CORE_PR_H
#define TARGET_CORE_PR_H
+
+#include <linux/types.h>
+#include <target/target_core_base.h>
+
/*
* PERSISTENT_RESERVE_OUT service action codes
*
- * spc4r17 section 6.14.2 Table 171
+ * spc5r04b section 6.15.2 Table 174
*/
#define PRO_REGISTER 0x00
#define PRO_RESERVE 0x01
@@ -13,10 +18,11 @@
#define PRO_PREEMPT_AND_ABORT 0x05
#define PRO_REGISTER_AND_IGNORE_EXISTING_KEY 0x06
#define PRO_REGISTER_AND_MOVE 0x07
+#define PRO_REPLACE_LOST_RESERVATION 0x08
/*
* PERSISTENT_RESERVE_IN service action codes
*
- * spc4r17 section 6.13.1 Table 159
+ * spc5r04b section 6.14.1 Table 162
*/
#define PRI_READ_KEYS 0x00
#define PRI_READ_RESERVATION 0x01
@@ -25,13 +31,13 @@
/*
 * PERSISTENT_RESERVE_* SCOPE field
*
- * spc4r17 section 6.13.3.3 Table 163
+ * spc5r04b section 6.14.3.2 Table 166
*/
#define PR_SCOPE_LU_SCOPE 0x00
/*
* PERSISTENT_RESERVE_* TYPE field
*
- * spc4r17 section 6.13.3.4 Table 164
+ * spc5r04b section 6.14.3.3 Table 167
*/
#define PR_TYPE_WRITE_EXCLUSIVE 0x01
#define PR_TYPE_EXCLUSIVE_ACCESS 0x03
@@ -43,19 +49,25 @@
#define PR_APTPL_MAX_IPORT_LEN 256
#define PR_APTPL_MAX_TPORT_LEN 256
+/*
+ * Function defined in target_core_spc.c
+ */
+void spc_gen_naa_6h_vendor_specific(struct se_device *, unsigned char *);
+
extern struct kmem_cache *t10_pr_reg_cache;
extern void core_pr_dump_initiator_port(struct t10_pr_registration *,
char *, u32);
+extern void target_release_reservation(struct se_device *dev);
extern sense_reason_t target_scsi2_reservation_release(struct se_cmd *);
extern sense_reason_t target_scsi2_reservation_reserve(struct se_cmd *);
extern int core_scsi3_alloc_aptpl_registration(
struct t10_reservation *, u64,
- unsigned char *, unsigned char *, u32,
- unsigned char *, u16, u32, int, int, u8);
+ unsigned char *, unsigned char *, u64,
+ unsigned char *, u16, u64, int, int, u8);
extern int core_scsi3_check_aptpl_registration(struct se_device *,
struct se_portal_group *, struct se_lun *,
- struct se_lun_acl *);
+ struct se_node_acl *, u64);
extern void core_scsi3_free_pr_reg_from_nacl(struct se_device *,
struct se_node_acl *);
extern void core_scsi3_free_all_registrations(struct se_device *);
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index e992b27aa090..db4e09042469 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -1,26 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* Filename: target_core_pscsi.c
*
* This file contains the generic target mode <-> Linux SCSI subsystem plugin.
*
- * (c) Copyright 2003-2012 RisingTide Systems LLC.
+ * (c) Copyright 2003-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
******************************************************************************/
#include <linux/string.h>
@@ -30,15 +17,12 @@
#include <linux/blk_types.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <linux/genhd.h>
#include <linux/cdrom.h>
#include <linux/ratelimit.h>
#include <linux/module.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
-#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
-#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
@@ -46,19 +30,16 @@
#include <target/target_core_backend.h>
#include "target_core_alua.h"
+#include "target_core_internal.h"
#include "target_core_pscsi.h"
-#define ISPRINT(a) ((a >= ' ') && (a <= '~'))
-
static inline struct pscsi_dev_virt *PSCSI_DEV(struct se_device *dev)
{
return container_of(dev, struct pscsi_dev_virt, dev);
}
-static struct se_subsystem_api pscsi_template;
-
static sense_reason_t pscsi_execute_cmd(struct se_cmd *cmd);
-static void pscsi_req_done(struct request *, int);
+static enum rq_end_io_ret pscsi_req_done(struct request *, blk_status_t);
/* pscsi_attach_hba():
*
@@ -81,7 +62,7 @@ static int pscsi_attach_hba(struct se_hba *hba, u32 host_id)
pr_debug("CORE_HBA[%d] - TCM SCSI HBA Driver %s on"
" Generic Target Core Stack %s\n", hba->hba_id,
- PSCSI_VERSION, TARGET_CORE_MOD_VERSION);
+ PSCSI_VERSION, TARGET_CORE_VERSION);
pr_debug("CORE_HBA[%d] - Attached SCSI HBA to Generic\n",
hba->hba_id);
@@ -134,10 +115,10 @@ static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
* pSCSI Host ID and enable for phba mode
*/
sh = scsi_host_lookup(phv->phv_host_id);
- if (IS_ERR(sh)) {
+ if (!sh) {
pr_err("pSCSI: Unable to locate SCSI Host for"
" phv_host_id: %d\n", phv->phv_host_id);
- return PTR_ERR(sh);
+ return -EINVAL;
}
phv->phv_lld_host = sh;
@@ -157,44 +138,44 @@ static void pscsi_tape_read_blocksize(struct se_device *dev,
buf = kzalloc(12, GFP_KERNEL);
if (!buf)
- return;
+ goto out_free;
memset(cdb, 0, MAX_COMMAND_SIZE);
cdb[0] = MODE_SENSE;
cdb[4] = 0x0c; /* 12 bytes */
- ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf, 12, NULL,
- HZ, 1, NULL);
+ ret = scsi_execute_cmd(sdev, cdb, REQ_OP_DRV_IN, buf, 12, HZ, 1, NULL);
if (ret)
goto out_free;
/*
* If MODE_SENSE still returns zero, set the default value to 1024.
*/
- sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]);
+ sdev->sector_size = get_unaligned_be24(&buf[9]);
+out_free:
if (!sdev->sector_size)
sdev->sector_size = 1024;
-out_free:
+
kfree(buf);
}
static void
pscsi_set_inquiry_info(struct scsi_device *sdev, struct t10_wwn *wwn)
{
- unsigned char *buf;
-
if (sdev->inquiry_len < INQUIRY_LEN)
return;
-
- buf = sdev->inquiry;
- if (!buf)
- return;
/*
- * Use sdev->inquiry from drivers/scsi/scsi_scan.c:scsi_alloc_sdev()
+ * Use sdev->inquiry data from drivers/scsi/scsi_scan.c:scsi_add_lun()
*/
- memcpy(&wwn->vendor[0], &buf[8], sizeof(wwn->vendor));
- memcpy(&wwn->model[0], &buf[16], sizeof(wwn->model));
- memcpy(&wwn->revision[0], &buf[32], sizeof(wwn->revision));
+ BUILD_BUG_ON(sizeof(wwn->vendor) != INQUIRY_VENDOR_LEN + 1);
+ snprintf(wwn->vendor, sizeof(wwn->vendor),
+ "%." __stringify(INQUIRY_VENDOR_LEN) "s", sdev->vendor);
+ BUILD_BUG_ON(sizeof(wwn->model) != INQUIRY_MODEL_LEN + 1);
+ snprintf(wwn->model, sizeof(wwn->model),
+ "%." __stringify(INQUIRY_MODEL_LEN) "s", sdev->model);
+ BUILD_BUG_ON(sizeof(wwn->revision) != INQUIRY_REVISION_LEN + 1);
+ snprintf(wwn->revision, sizeof(wwn->revision),
+ "%." __stringify(INQUIRY_REVISION_LEN) "s", sdev->rev);
}
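
The snprintf() calls above build a precision specifier such as "%.8s" at
compile time via __stringify(), so at most INQUIRY_VENDOR_LEN bytes are read
from the space-padded, non-NUL-terminated INQUIRY fields. A userspace sketch of
the idiom; the two-level macro below mirrors the kernel's __stringify() from
include/linux/stringify.h:

#include <stdio.h>

#define __stringify_1(x)	#x
#define __stringify(x)		__stringify_1(x)	/* expand, then quote */

#define INQUIRY_VENDOR_LEN	8

int main(void)
{
	/* INQUIRY vendor field: 8 bytes, space padded, no NUL terminator */
	const char vendor[8] = { 'L', 'I', 'O', '-', 'O', 'R', 'G', ' ' };
	char out[INQUIRY_VENDOR_LEN + 1];

	/* "%." __stringify(INQUIRY_VENDOR_LEN) "s" concatenates to "%.8s" */
	snprintf(out, sizeof(out),
		 "%." __stringify(INQUIRY_VENDOR_LEN) "s", vendor);
	printf("'%s'\n", out);	/* prints 'LIO-ORG ' */
	return 0;
}
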
static int
@@ -211,11 +192,10 @@ pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn)
cdb[0] = INQUIRY;
cdb[1] = 0x01; /* Query VPD */
cdb[2] = 0x80; /* Unit Serial Number */
- cdb[3] = (INQUIRY_VPD_SERIAL_LEN >> 8) & 0xff;
- cdb[4] = (INQUIRY_VPD_SERIAL_LEN & 0xff);
+ put_unaligned_be16(INQUIRY_VPD_SERIAL_LEN, &cdb[3]);
- ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf,
- INQUIRY_VPD_SERIAL_LEN, NULL, HZ, 1, NULL);
+ ret = scsi_execute_cmd(sdev, cdb, REQ_OP_DRV_IN, buf,
+ INQUIRY_VPD_SERIAL_LEN, HZ, 1, NULL);
if (ret)
goto out_free;
@@ -247,16 +227,14 @@ pscsi_get_inquiry_vpd_device_ident(struct scsi_device *sdev,
cdb[0] = INQUIRY;
cdb[1] = 0x01; /* Query VPD */
cdb[2] = 0x83; /* Device Identifier */
- cdb[3] = (INQUIRY_VPD_DEVICE_IDENTIFIER_LEN >> 8) & 0xff;
- cdb[4] = (INQUIRY_VPD_DEVICE_IDENTIFIER_LEN & 0xff);
+ put_unaligned_be16(INQUIRY_VPD_DEVICE_IDENTIFIER_LEN, &cdb[3]);
- ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf,
- INQUIRY_VPD_DEVICE_IDENTIFIER_LEN,
- NULL, HZ, 1, NULL);
+ ret = scsi_execute_cmd(sdev, cdb, REQ_OP_DRV_IN, buf,
+ INQUIRY_VPD_DEVICE_IDENTIFIER_LEN, HZ, 1, NULL);
if (ret)
goto out;
- page_len = (buf[2] << 8) | buf[3];
+ page_len = get_unaligned_be16(&buf[2]);
while (page_len > 0) {
/* Grab a pointer to the Identification descriptor */
page_83 = &buf[off];
@@ -312,14 +290,15 @@ static int pscsi_add_device_to_list(struct se_device *dev,
if (!sd->queue_depth) {
sd->queue_depth = PSCSI_DEFAULT_QUEUEDEPTH;
- pr_err("Set broken SCSI Device %d:%d:%d"
+ pr_err("Set broken SCSI Device %d:%d:%llu"
" queue_depth to %d\n", sd->channel, sd->id,
sd->lun, sd->queue_depth);
}
- dev->dev_attrib.hw_block_size = sd->sector_size;
+ dev->dev_attrib.hw_block_size =
+ min_not_zero((int)sd->sector_size, 512);
dev->dev_attrib.hw_max_sectors =
- min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q));
+ min_not_zero(sd->host->max_sectors, queue_max_hw_sectors(q));
dev->dev_attrib.hw_queue_depth = sd->queue_depth;
/*
@@ -342,8 +321,10 @@ static int pscsi_add_device_to_list(struct se_device *dev,
/*
* For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE.
*/
- if (sd->type == TYPE_TAPE)
+ if (sd->type == TYPE_TAPE) {
pscsi_tape_read_blocksize(dev, sd);
+ dev->dev_attrib.hw_block_size = sd->sector_size;
+ }
return 0;
}
@@ -371,11 +352,11 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd)
struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
struct Scsi_Host *sh = sd->host;
- struct block_device *bd;
+ struct file *bdev_file;
int ret;
if (scsi_device_get(sd)) {
- pr_err("scsi_device_get() failed for %d:%d:%d:%d\n",
+ pr_err("scsi_device_get() failed for %d:%d:%d:%llu\n",
sh->host_no, sd->channel, sd->id, sd->lun);
spin_unlock_irq(sh->host_lock);
return -EIO;
@@ -383,33 +364,34 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd)
spin_unlock_irq(sh->host_lock);
/*
* Claim exclusive struct block_device access to struct scsi_device
- * for TYPE_DISK using supplied udev_path
+ * for TYPE_DISK and TYPE_ZBC using supplied udev_path
*/
- bd = blkdev_get_by_path(dev->udev_path,
- FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv);
- if (IS_ERR(bd)) {
- pr_err("pSCSI: blkdev_get_by_path() failed\n");
+ bdev_file = bdev_file_open_by_path(dev->udev_path,
+ BLK_OPEN_WRITE | BLK_OPEN_READ, pdv, NULL);
+ if (IS_ERR(bdev_file)) {
+ pr_err("pSCSI: bdev_file_open_by_path() failed\n");
scsi_device_put(sd);
- return PTR_ERR(bd);
+ return PTR_ERR(bdev_file);
}
- pdv->pdv_bd = bd;
+ pdv->pdv_bdev_file = bdev_file;
ret = pscsi_add_device_to_list(dev, sd);
if (ret) {
- blkdev_put(pdv->pdv_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
+ fput(bdev_file);
scsi_device_put(sd);
return ret;
}
- pr_debug("CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%d\n",
- phv->phv_host_id, sh->host_no, sd->channel, sd->id, sd->lun);
+ pr_debug("CORE_PSCSI[%d] - Added TYPE_%s for %d:%d:%d:%llu\n",
+ phv->phv_host_id, sd->type == TYPE_DISK ? "DISK" : "ZBC",
+ sh->host_no, sd->channel, sd->id, sd->lun);
return 0;
}
/*
 * Called with struct Scsi_Host->host_lock held.
*/
-static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd)
+static int pscsi_create_type_nondisk(struct se_device *dev, struct scsi_device *sd)
__releases(sh->host_lock)
{
struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
@@ -417,7 +399,7 @@ static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd)
int ret;
if (scsi_device_get(sd)) {
- pr_err("scsi_device_get() failed for %d:%d:%d:%d\n",
+ pr_err("scsi_device_get() failed for %d:%d:%d:%llu\n",
sh->host_no, sd->channel, sd->id, sd->lun);
spin_unlock_irq(sh->host_lock);
return -EIO;
@@ -429,35 +411,13 @@ static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd)
scsi_device_put(sd);
return ret;
}
- pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
+ pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%llu\n",
phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
sd->channel, sd->id, sd->lun);
return 0;
}
-/*
- * Called with struct Scsi_Host->host_lock called.
- */
-static int pscsi_create_type_other(struct se_device *dev,
- struct scsi_device *sd)
- __releases(sh->host_lock)
-{
- struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
- struct Scsi_Host *sh = sd->host;
- int ret;
-
- spin_unlock_irq(sh->host_lock);
- ret = pscsi_add_device_to_list(dev, sd);
- if (ret)
- return ret;
-
- pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
- phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
- sd->channel, sd->id, sd->lun);
- return 0;
-}
-
static int pscsi_configure_device(struct se_device *dev)
{
struct se_hba *hba = dev->se_hba;
@@ -515,11 +475,12 @@ static int pscsi_configure_device(struct se_device *dev)
sh = phv->phv_lld_host;
} else {
sh = scsi_host_lookup(pdv->pdv_host_id);
- if (IS_ERR(sh)) {
+ if (!sh) {
pr_err("pSCSI: Unable to locate"
" pdv_host_id: %d\n", pdv->pdv_host_id);
- return PTR_ERR(sh);
+ return -EINVAL;
}
+ pdv->pdv_lld_host = sh;
}
} else {
if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) {
@@ -537,18 +498,16 @@ static int pscsi_configure_device(struct se_device *dev)
continue;
/*
* Functions will release the held struct scsi_host->host_lock
- * before calling calling pscsi_add_device_to_list() to register
+ * before calling pscsi_add_device_to_list() to register
* struct scsi_device with target_core_mod.
*/
switch (sd->type) {
case TYPE_DISK:
+ case TYPE_ZBC:
ret = pscsi_create_type_disk(dev, sd);
break;
- case TYPE_ROM:
- ret = pscsi_create_type_rom(dev, sd);
- break;
default:
- ret = pscsi_create_type_other(dev, sd);
+ ret = pscsi_create_type_nondisk(dev, sd);
break;
}
@@ -579,8 +538,21 @@ static int pscsi_configure_device(struct se_device *dev)
return -ENODEV;
}
+static void pscsi_dev_call_rcu(struct rcu_head *p)
+{
+ struct se_device *dev = container_of(p, struct se_device, rcu_head);
+ struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
+
+ kfree(pdv);
+}
+
static void pscsi_free_device(struct se_device *dev)
{
+ call_rcu(&dev->rcu_head, pscsi_dev_call_rcu);
+}
+
+static void pscsi_destroy_device(struct se_device *dev)
+{
struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
struct scsi_device *sd = pdv->pdv_sd;
@@ -588,12 +560,13 @@ static void pscsi_free_device(struct se_device *dev)
if (sd) {
/*
* Release exclusive pSCSI internal struct block_device claim for
- * struct scsi_device with TYPE_DISK from pscsi_create_type_disk()
+ * struct scsi_device with TYPE_DISK or TYPE_ZBC
+ * from pscsi_create_type_disk()
*/
- if ((sd->type == TYPE_DISK) && pdv->pdv_bd) {
- blkdev_put(pdv->pdv_bd,
- FMODE_WRITE|FMODE_READ|FMODE_EXCL);
- pdv->pdv_bd = NULL;
+ if ((sd->type == TYPE_DISK || sd->type == TYPE_ZBC) &&
+ pdv->pdv_bdev_file) {
+ fput(pdv->pdv_bdev_file);
+ pdv->pdv_bdev_file = NULL;
}
/*
* For HBA mode PHV_LLD_SCSI_HOST_NO, release the reference
@@ -602,58 +575,56 @@ static void pscsi_free_device(struct se_device *dev)
if ((phv->phv_mode == PHV_LLD_SCSI_HOST_NO) &&
(phv->phv_lld_host != NULL))
scsi_host_put(phv->phv_lld_host);
+ else if (pdv->pdv_lld_host)
+ scsi_host_put(pdv->pdv_lld_host);
- if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM))
- scsi_device_put(sd);
+ scsi_device_put(sd);
pdv->pdv_sd = NULL;
}
-
- kfree(pdv);
}
-static void pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg,
- unsigned char *sense_buffer)
+static void pscsi_complete_cmd(struct se_cmd *cmd, u8 scsi_status,
+ unsigned char *req_sense, int valid_data)
{
struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
struct scsi_device *sd = pdv->pdv_sd;
- int result;
- struct pscsi_plugin_task *pt = cmd->priv;
- unsigned char *cdb;
+ unsigned char *cdb = cmd->priv;
+
/*
- * Special case for REPORT_LUNs handling where pscsi_plugin_task has
- * not been allocated because TCM is handling the emulation directly.
+ * Special case for REPORT_LUNs which is emulated and not passed on.
*/
- if (!pt)
+ if (!cdb)
return;
- cdb = &pt->pscsi_cdb[0];
- result = pt->pscsi_result;
/*
* Hack to make sure that Write-Protect modepage is set if R/O mode is
* forced.
*/
- if (!cmd->se_deve || !cmd->data_length)
+ if (!cmd->data_length)
goto after_mode_sense;
if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) &&
- (status_byte(result) << 1) == SAM_STAT_GOOD) {
- if (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) {
+ scsi_status == SAM_STAT_GOOD) {
+ bool read_only = target_lun_is_rdonly(cmd);
+
+ if (read_only) {
unsigned char *buf;
buf = transport_kmap_data_sg(cmd);
- if (!buf)
+ if (!buf) {
; /* XXX: TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE */
-
- if (cdb[0] == MODE_SENSE_10) {
- if (!(buf[3] & 0x80))
- buf[3] |= 0x80;
} else {
- if (!(buf[2] & 0x80))
- buf[2] |= 0x80;
+ if (cdb[0] == MODE_SENSE_10) {
+ if (!(buf[3] & 0x80))
+ buf[3] |= 0x80;
+ } else {
+ if (!(buf[2] & 0x80))
+ buf[2] |= 0x80;
+ }
+
+ transport_kunmap_data_sg(cmd);
}
-
- transport_kunmap_data_sg(cmd);
}
}
after_mode_sense:
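
The MODE SENSE fix-up above forces the WP (write-protect) bit in the mode
parameter header when the LUN is exported read-only: the DEVICE-SPECIFIC
PARAMETER byte sits at offset 2 in the 6-byte header and offset 3 in the
10-byte header. A minimal sketch of that bit placement, using the standard
MODE SENSE opcodes:

#include <stdint.h>

#define MODE_SENSE	0x1a
#define MODE_SENSE_10	0x5a

/* force the Write-Protect bit in a MODE SENSE response buffer */
static void set_wp_bit(uint8_t opcode, unsigned char *buf)
{
	if (opcode == MODE_SENSE_10)
		buf[3] |= 0x80;	/* 10-byte header: DEVICE-SPECIFIC PARAMETER */
	else
		buf[2] |= 0x80;	/* 6-byte header: DEVICE-SPECIFIC PARAMETER */
}
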
@@ -670,39 +641,56 @@ after_mode_sense:
* storage engine.
*/
if (((cdb[0] == MODE_SELECT) || (cdb[0] == MODE_SELECT_10)) &&
- (status_byte(result) << 1) == SAM_STAT_GOOD) {
+ scsi_status == SAM_STAT_GOOD) {
unsigned char *buf;
u16 bdl;
u32 blocksize;
- buf = sg_virt(&sg[0]);
+ buf = sg_virt(&cmd->t_data_sg[0]);
if (!buf) {
pr_err("Unable to get buf for scatterlist\n");
goto after_mode_select;
}
if (cdb[0] == MODE_SELECT)
- bdl = (buf[3]);
+ bdl = buf[3];
else
- bdl = (buf[6] << 8) | (buf[7]);
+ bdl = get_unaligned_be16(&buf[6]);
if (!bdl)
goto after_mode_select;
if (cdb[0] == MODE_SELECT)
- blocksize = (buf[9] << 16) | (buf[10] << 8) |
- (buf[11]);
+ blocksize = get_unaligned_be24(&buf[9]);
else
- blocksize = (buf[13] << 16) | (buf[14] << 8) |
- (buf[15]);
+ blocksize = get_unaligned_be24(&buf[13]);
sd->sector_size = blocksize;
}
after_mode_select:
- if (sense_buffer && (status_byte(result) & CHECK_CONDITION)) {
- memcpy(sense_buffer, pt->pscsi_sense, TRANSPORT_SENSE_BUFFER);
- cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
+ if (scsi_status == SAM_STAT_CHECK_CONDITION) {
+ transport_copy_sense_to_cmd(cmd, req_sense);
+
+ /*
+ * check for TAPE device reads with
+ * FM/EOM/ILI set, so that we can get data
+ * back despite framework assumption that a
+ * check condition means there is no data
+ */
+ if (sd->type == TYPE_TAPE && valid_data &&
+ cmd->data_direction == DMA_FROM_DEVICE) {
+ /*
+ * is sense data valid, fixed format,
+ * and have FM, EOM, or ILI set?
+ */
+ if (req_sense[0] == 0xf0 && /* valid, fixed format */
+ req_sense[2] & 0xe0 && /* FM, EOM, or ILI */
+ (req_sense[2] & 0xf) == 0) { /* key==NO_SENSE */
+ pr_debug("Tape FM/EOM/ILI status detected. Treat as normal read.\n");
+ cmd->se_cmd_flags |= SCF_TREAT_READ_AS_NORMAL;
+ }
+ }
}
}
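
The tape special case above inspects fixed-format sense data directly: byte 0
equal to 0xf0 means the VALID bit plus response code 0x70 (current, fixed
format); byte 2 carries FILEMARK/EOM/ILI in bits 7..5 and the sense key in the
low nibble. A userspace sketch of the same test:

#include <stdbool.h>

/*
 * Return true when a CHECK CONDITION from a tape read should be
 * treated as a normal (possibly short) read: fixed-format sense,
 * FM/EOM/ILI flagged, sense key NO_SENSE.
 */
static bool tape_read_is_benign(const unsigned char *sense)
{
	return sense[0] == 0xf0 &&	/* valid, current, fixed format */
	       (sense[2] & 0xe0) &&	/* FILEMARK, EOM, or ILI set */
	       (sense[2] & 0x0f) == 0;	/* sense key == NO_SENSE */
}
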
@@ -749,14 +737,18 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_device *dev,
ret = -EINVAL;
goto out;
}
- match_int(args, &arg);
+ ret = match_int(args, &arg);
+ if (ret)
+ goto out;
pdv->pdv_host_id = arg;
pr_debug("PSCSI[%d]: Referencing SCSI Host ID:"
" %d\n", phv->phv_host_id, pdv->pdv_host_id);
pdv->pdv_flags |= PDF_HAS_VIRT_HOST_ID;
break;
case Opt_scsi_channel_id:
- match_int(args, &arg);
+ ret = match_int(args, &arg);
+ if (ret)
+ goto out;
pdv->pdv_channel_id = arg;
pr_debug("PSCSI[%d]: Referencing SCSI Channel"
" ID: %d\n", phv->phv_host_id,
@@ -764,7 +756,9 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_device *dev,
pdv->pdv_flags |= PDF_HAS_CHANNEL_ID;
break;
case Opt_scsi_target_id:
- match_int(args, &arg);
+ ret = match_int(args, &arg);
+ if (ret)
+ goto out;
pdv->pdv_target_id = arg;
pr_debug("PSCSI[%d]: Referencing SCSI Target"
" ID: %d\n", phv->phv_host_id,
@@ -772,7 +766,9 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_device *dev,
pdv->pdv_flags |= PDF_HAS_TARGET_ID;
break;
case Opt_scsi_lun_id:
- match_int(args, &arg);
+ ret = match_int(args, &arg);
+ if (ret)
+ goto out;
pdv->pdv_lun_id = arg;
pr_debug("PSCSI[%d]: Referencing SCSI LUN ID:"
" %d\n", phv->phv_host_id, pdv->pdv_lun_id);
@@ -795,7 +791,6 @@ static ssize_t pscsi_show_configfs_dev_params(struct se_device *dev, char *b)
struct scsi_device *sd = pdv->pdv_sd;
unsigned char host_id[16];
ssize_t bl;
- int i;
if (phv->phv_mode == PHV_VIRTUAL_HOST_ID)
snprintf(host_id, 16, "%d", pdv->pdv_host_id);
@@ -808,70 +803,36 @@ static ssize_t pscsi_show_configfs_dev_params(struct se_device *dev, char *b)
host_id);
if (sd) {
- bl += sprintf(b + bl, " ");
- bl += sprintf(b + bl, "Vendor: ");
- for (i = 0; i < 8; i++) {
- if (ISPRINT(sd->vendor[i])) /* printable character? */
- bl += sprintf(b + bl, "%c", sd->vendor[i]);
- else
- bl += sprintf(b + bl, " ");
- }
- bl += sprintf(b + bl, " Model: ");
- for (i = 0; i < 16; i++) {
- if (ISPRINT(sd->model[i])) /* printable character ? */
- bl += sprintf(b + bl, "%c", sd->model[i]);
- else
- bl += sprintf(b + bl, " ");
- }
- bl += sprintf(b + bl, " Rev: ");
- for (i = 0; i < 4; i++) {
- if (ISPRINT(sd->rev[i])) /* printable character ? */
- bl += sprintf(b + bl, "%c", sd->rev[i]);
- else
- bl += sprintf(b + bl, " ");
- }
- bl += sprintf(b + bl, "\n");
+ bl += sprintf(b + bl, " Vendor: %."
+ __stringify(INQUIRY_VENDOR_LEN) "s", sd->vendor);
+ bl += sprintf(b + bl, " Model: %."
+ __stringify(INQUIRY_MODEL_LEN) "s", sd->model);
+ bl += sprintf(b + bl, " Rev: %."
+ __stringify(INQUIRY_REVISION_LEN) "s\n", sd->rev);
}
return bl;
}
-static void pscsi_bi_endio(struct bio *bio, int error)
+static void pscsi_bi_endio(struct bio *bio)
{
- bio_put(bio);
-}
-
-static inline struct bio *pscsi_get_bio(int nr_vecs)
-{
- struct bio *bio;
- /*
- * Use bio_malloc() following the comment in for bio -> struct request
- * in block/blk-core.c:blk_make_request()
- */
- bio = bio_kmalloc(GFP_KERNEL, nr_vecs);
- if (!bio) {
- pr_err("PSCSI: bio_kmalloc() failed\n");
- return NULL;
- }
- bio->bi_end_io = pscsi_bi_endio;
-
- return bio;
+ bio_uninit(bio);
+ kfree(bio);
}
static sense_reason_t
pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
- enum dma_data_direction data_direction, struct bio **hbio)
+ struct request *req)
{
- struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
- struct bio *bio = NULL, *tbio = NULL;
+ struct bio *bio = NULL;
struct page *page;
struct scatterlist *sg;
u32 data_len = cmd->data_length, i, len, bytes, off;
int nr_pages = (cmd->data_length + sgl[0].offset +
PAGE_SIZE - 1) >> PAGE_SHIFT;
int nr_vecs = 0, rc;
- int rw = (data_direction == DMA_TO_DEVICE);
+ int rw = (cmd->data_direction == DMA_TO_DEVICE);
- *hbio = NULL;
+ BUG_ON(!cmd->data_length);
pr_debug("PSCSI: nr_pages: %d\n", nr_pages);
@@ -895,130 +856,76 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
bytes = min(bytes, data_len);
if (!bio) {
- nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
- nr_pages -= nr_vecs;
- /*
- * Calls bio_kmalloc() and sets bio->bi_end_io()
- */
- bio = pscsi_get_bio(nr_vecs);
+new_bio:
+ nr_vecs = bio_max_segs(nr_pages);
+ bio = bio_kmalloc(nr_vecs, GFP_KERNEL);
if (!bio)
goto fail;
-
- if (rw)
- bio->bi_rw |= REQ_WRITE;
+ bio_init_inline(bio, NULL, nr_vecs,
+ rw ? REQ_OP_WRITE : REQ_OP_READ);
+ bio->bi_end_io = pscsi_bi_endio;
pr_debug("PSCSI: Allocated bio: %p,"
" dir: %s nr_vecs: %d\n", bio,
(rw) ? "rw" : "r", nr_vecs);
- /*
- * Set *hbio pointer to handle the case:
- * nr_pages > BIO_MAX_PAGES, where additional
- * bios need to be added to complete a given
- * command.
- */
- if (!*hbio)
- *hbio = tbio = bio;
- else
- tbio = tbio->bi_next = bio;
}
- pr_debug("PSCSI: Calling bio_add_pc_page() i: %d"
+ pr_debug("PSCSI: Calling bio_add_page() i: %d"
" bio: %p page: %p len: %d off: %d\n", i, bio,
page, len, off);
- rc = bio_add_pc_page(pdv->pdv_sd->request_queue,
- bio, page, bytes, off);
- if (rc != bytes)
- goto fail;
-
+ rc = bio_add_page(bio, page, bytes, off);
pr_debug("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n",
- bio->bi_vcnt, nr_vecs);
-
- if (bio->bi_vcnt > nr_vecs) {
+ bio_segments(bio), nr_vecs);
+ if (rc != bytes) {
pr_debug("PSCSI: Reached bio->bi_vcnt max:"
" %d i: %d bio: %p, allocating another"
" bio\n", bio->bi_vcnt, i, bio);
- /*
- * Clear the pointer so that another bio will
- * be allocated with pscsi_get_bio() above, the
- * current bio has already been set *tbio and
- * bio->bi_next.
- */
- bio = NULL;
+
+ rc = blk_rq_append_bio(req, bio);
+ if (rc) {
+ pr_err("pSCSI: failed to append bio\n");
+ goto fail;
+ }
+
+ goto new_bio;
}
data_len -= bytes;
}
}
+ if (bio) {
+ rc = blk_rq_append_bio(req, bio);
+ if (rc) {
+ pr_err("pSCSI: failed to append bio\n");
+ goto fail;
+ }
+ }
+
return 0;
fail:
- while (*hbio) {
- bio = *hbio;
- *hbio = (*hbio)->bi_next;
- bio_endio(bio, 0); /* XXX: should be error */
+ if (bio) {
+ bio_uninit(bio);
+ kfree(bio);
}
- return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-}
-
-/*
- * Clear a lun set in the cdb if the initiator talking to use spoke
- * and old standards version, as we can't assume the underlying device
- * won't choke up on it.
- */
-static inline void pscsi_clear_cdb_lun(unsigned char *cdb)
-{
- switch (cdb[0]) {
- case READ_10: /* SBC - RDProtect */
- case READ_12: /* SBC - RDProtect */
- case READ_16: /* SBC - RDProtect */
- case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
- case VERIFY: /* SBC - VRProtect */
- case VERIFY_16: /* SBC - VRProtect */
- case WRITE_VERIFY: /* SBC - VRProtect */
- case WRITE_VERIFY_12: /* SBC - VRProtect */
- case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
- break;
- default:
- cdb[1] &= 0x1f; /* clear logical unit number */
- break;
+ while (req->bio) {
+ bio = req->bio;
+ req->bio = bio->bi_next;
+ bio_uninit(bio);
+ kfree(bio);
}
+ req->biotail = NULL;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
static sense_reason_t
pscsi_parse_cdb(struct se_cmd *cmd)
{
- unsigned char *cdb = cmd->t_task_cdb;
-
if (cmd->se_cmd_flags & SCF_BIDI)
return TCM_UNSUPPORTED_SCSI_OPCODE;
- pscsi_clear_cdb_lun(cdb);
-
- /*
- * For REPORT LUNS we always need to emulate the response, for everything
- * else the default for pSCSI is to pass the command to the underlying
- * LLD / physical hardware.
- */
- switch (cdb[0]) {
- case REPORT_LUNS:
- cmd->execute_cmd = spc_emulate_report_luns;
- return 0;
- case READ_6:
- case READ_10:
- case READ_12:
- case READ_16:
- case WRITE_6:
- case WRITE_10:
- case WRITE_12:
- case WRITE_16:
- case WRITE_VERIFY:
- cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
- /* FALLTHROUGH*/
- default:
- cmd->execute_cmd = pscsi_execute_cmd;
- return 0;
- }
+ return passthrough_parse_cdb(cmd, pscsi_execute_cmd);
}
static sense_reason_t
@@ -1026,80 +933,49 @@ pscsi_execute_cmd(struct se_cmd *cmd)
{
struct scatterlist *sgl = cmd->t_data_sg;
u32 sgl_nents = cmd->t_data_nents;
- enum dma_data_direction data_direction = cmd->data_direction;
struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
- struct pscsi_plugin_task *pt;
+ struct scsi_cmnd *scmd;
struct request *req;
- struct bio *hbio;
sense_reason_t ret;
- /*
- * Dynamically alloc cdb space, since it may be larger than
- * TCM_MAX_COMMAND_SIZE
- */
- pt = kzalloc(sizeof(*pt) + scsi_command_size(cmd->t_task_cdb), GFP_KERNEL);
- if (!pt) {
+ req = scsi_alloc_request(pdv->pdv_sd->request_queue,
+ cmd->data_direction == DMA_TO_DEVICE ?
+ REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
+ if (IS_ERR(req))
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- }
- cmd->priv = pt;
-
- memcpy(pt->pscsi_cdb, cmd->t_task_cdb,
- scsi_command_size(cmd->t_task_cdb));
-
- if (!sgl) {
- req = blk_get_request(pdv->pdv_sd->request_queue,
- (data_direction == DMA_TO_DEVICE),
- GFP_KERNEL);
- if (!req || IS_ERR(req)) {
- pr_err("PSCSI: blk_get_request() failed: %ld\n",
- req ? IS_ERR(req) : -ENOMEM);
- ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- goto fail;
- }
- } else {
- BUG_ON(!cmd->data_length);
- ret = pscsi_map_sg(cmd, sgl, sgl_nents, data_direction, &hbio);
+ if (sgl) {
+ ret = pscsi_map_sg(cmd, sgl, sgl_nents, req);
if (ret)
- goto fail;
-
- req = blk_make_request(pdv->pdv_sd->request_queue, hbio,
- GFP_KERNEL);
- if (IS_ERR(req)) {
- pr_err("pSCSI: blk_make_request() failed\n");
- ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- goto fail_free_bio;
- }
+ goto fail_put_request;
}
- req->cmd_type = REQ_TYPE_BLOCK_PC;
req->end_io = pscsi_req_done;
req->end_io_data = cmd;
- req->cmd_len = scsi_command_size(pt->pscsi_cdb);
- req->cmd = &pt->pscsi_cdb[0];
- req->sense = &pt->pscsi_sense[0];
- req->sense_len = 0;
- if (pdv->pdv_sd->type == TYPE_DISK)
+
+ scmd = blk_mq_rq_to_pdu(req);
+ scmd->cmd_len = scsi_command_size(cmd->t_task_cdb);
+ if (scmd->cmd_len > sizeof(scmd->cmnd)) {
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ goto fail_put_request;
+ }
+ memcpy(scmd->cmnd, cmd->t_task_cdb, scmd->cmd_len);
+
+ if (pdv->pdv_sd->type == TYPE_DISK ||
+ pdv->pdv_sd->type == TYPE_ZBC)
req->timeout = PS_TIMEOUT_DISK;
else
req->timeout = PS_TIMEOUT_OTHER;
- req->retries = PS_RETRY;
+ scmd->allowed = PS_RETRY;
+
+ cmd->priv = scmd->cmnd;
- blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, req,
- (cmd->sam_task_attr == MSG_HEAD_TAG),
- pscsi_req_done);
+ blk_execute_rq_nowait(req, cmd->sam_task_attr == TCM_HEAD_TAG);
return 0;
-fail_free_bio:
- while (hbio) {
- struct bio *bio = hbio;
- hbio = hbio->bi_next;
- bio_endio(bio, 0); /* XXX: should be error */
- }
- ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-fail:
- kfree(pt);
+fail_put_request:
+ blk_mq_free_request(req);
return ret;
}
@@ -1112,77 +988,78 @@ static u32 pscsi_get_device_type(struct se_device *dev)
struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
struct scsi_device *sd = pdv->pdv_sd;
- return sd->type;
+ return (sd) ? sd->type : TYPE_NO_LUN;
}
static sector_t pscsi_get_blocks(struct se_device *dev)
{
struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
- if (pdv->pdv_bd && pdv->pdv_bd->bd_part)
- return pdv->pdv_bd->bd_part->nr_sects;
-
- dump_stack();
+ if (pdv->pdv_bdev_file)
+ return bdev_nr_sectors(file_bdev(pdv->pdv_bdev_file));
return 0;
}
-static void pscsi_req_done(struct request *req, int uptodate)
+static enum rq_end_io_ret pscsi_req_done(struct request *req,
+ blk_status_t status)
{
struct se_cmd *cmd = req->end_io_data;
- struct pscsi_plugin_task *pt = cmd->priv;
-
- pt->pscsi_result = req->errors;
- pt->pscsi_resid = req->resid_len;
+ struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
+ enum sam_status scsi_status = scmd->result & 0xff;
+ int valid_data = cmd->data_length - scmd->resid_len;
+ u8 *cdb = cmd->priv;
- cmd->scsi_status = status_byte(pt->pscsi_result) << 1;
- if (cmd->scsi_status) {
+ if (scsi_status != SAM_STAT_GOOD) {
pr_debug("PSCSI Status Byte exception at cmd: %p CDB:"
- " 0x%02x Result: 0x%08x\n", cmd, pt->pscsi_cdb[0],
- pt->pscsi_result);
+ " 0x%02x Result: 0x%08x\n", cmd, cdb[0], scmd->result);
}
- switch (host_byte(pt->pscsi_result)) {
+ pscsi_complete_cmd(cmd, scsi_status, scmd->sense_buffer, valid_data);
+
+ switch (host_byte(scmd->result)) {
case DID_OK:
- target_complete_cmd(cmd, cmd->scsi_status);
+ target_complete_cmd_with_length(cmd, scsi_status, valid_data);
break;
default:
pr_debug("PSCSI Host Byte exception at cmd: %p CDB:"
- " 0x%02x Result: 0x%08x\n", cmd, pt->pscsi_cdb[0],
- pt->pscsi_result);
+ " 0x%02x Result: 0x%08x\n", cmd, cdb[0], scmd->result);
target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
break;
}
- __blk_put_request(req->q, req);
- kfree(pt);
+ blk_mq_free_request(req);
+ return RQ_END_IO_NONE;
}
-static struct se_subsystem_api pscsi_template = {
+static const struct target_backend_ops pscsi_ops = {
.name = "pscsi",
.owner = THIS_MODULE,
- .transport_type = TRANSPORT_PLUGIN_PHBA_PDEV,
+ .transport_flags_default = TRANSPORT_FLAG_PASSTHROUGH |
+ TRANSPORT_FLAG_PASSTHROUGH_ALUA |
+ TRANSPORT_FLAG_PASSTHROUGH_PGR,
.attach_hba = pscsi_attach_hba,
.detach_hba = pscsi_detach_hba,
.pmode_enable_hba = pscsi_pmode_enable_hba,
.alloc_device = pscsi_alloc_device,
.configure_device = pscsi_configure_device,
+ .destroy_device = pscsi_destroy_device,
.free_device = pscsi_free_device,
- .transport_complete = pscsi_transport_complete,
.parse_cdb = pscsi_parse_cdb,
.set_configfs_dev_params = pscsi_set_configfs_dev_params,
.show_configfs_dev_params = pscsi_show_configfs_dev_params,
.get_device_type = pscsi_get_device_type,
.get_blocks = pscsi_get_blocks,
+ .tb_dev_attrib_attrs = passthrough_attrib_attrs,
};
static int __init pscsi_module_init(void)
{
- return transport_subsystem_register(&pscsi_template);
+ return transport_backend_register(&pscsi_ops);
}
static void __exit pscsi_module_exit(void)
{
- transport_subsystem_release(&pscsi_template);
+ target_backend_unregister(&pscsi_ops);
}
MODULE_DESCRIPTION("TCM PSCSI subsystem plugin");
diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h
index 1bd757dff8ee..9acaa21e4c78 100644
--- a/drivers/target/target_core_pscsi.h
+++ b/drivers/target/target_core_pscsi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef TARGET_CORE_PSCSI_H
#define TARGET_CORE_PSCSI_H
@@ -15,19 +16,12 @@
#define PS_TIMEOUT_DISK (15*HZ)
#define PS_TIMEOUT_OTHER (500*HZ)
-#include <linux/device.h>
-#include <scsi/scsi_driver.h>
-#include <scsi/scsi_device.h>
-#include <linux/kref.h>
-#include <linux/kobject.h>
-
-struct pscsi_plugin_task {
- unsigned char pscsi_sense[SCSI_SENSE_BUFFERSIZE];
- int pscsi_direction;
- int pscsi_result;
- u32 pscsi_resid;
- unsigned char pscsi_cdb[0];
-} ____cacheline_aligned;
+#include <linux/cache.h> /* ___cacheline_aligned */
+#include <target/target_core_base.h> /* struct se_device */
+
+struct block_device;
+struct scsi_device;
+struct Scsi_Host;
#define PDF_HAS_CHANNEL_ID 0x01
#define PDF_HAS_TARGET_ID 0x02
@@ -43,8 +37,9 @@ struct pscsi_dev_virt {
int pdv_channel_id;
int pdv_target_id;
int pdv_lun_id;
- struct block_device *pdv_bd;
+ struct file *pdv_bdev_file;
struct scsi_device *pdv_sd;
+ struct Scsi_Host *pdv_lld_host;
} ____cacheline_aligned;
typedef enum phv_modes {
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 51127d15d5c5..6f67cc09c2b5 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -1,37 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* Filename: target_core_rd.c
*
* This file contains the Storage Engine <-> Ramdisk transport
* specific functions.
*
- * (c) Copyright 2003-2012 RisingTide Systems LLC.
+ * (c) Copyright 2003-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
******************************************************************************/
#include <linux/string.h>
#include <linux/parser.h>
+#include <linux/highmem.h>
#include <linux/timer.h>
-#include <linux/blkdev.h>
+#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_host.h>
+#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
@@ -43,19 +30,13 @@ static inline struct rd_dev *RD_DEV(struct se_device *dev)
return container_of(dev, struct rd_dev, dev);
}
-/* rd_attach_hba(): (Part of se_subsystem_api_t template)
- *
- *
- */
static int rd_attach_hba(struct se_hba *hba, u32 host_id)
{
struct rd_host *rd_host;
- rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL);
- if (!rd_host) {
- pr_err("Unable to allocate memory for struct rd_host\n");
+ rd_host = kzalloc(sizeof(*rd_host), GFP_KERNEL);
+ if (!rd_host)
return -ENOMEM;
- }
rd_host->rd_host_id = host_id;
@@ -63,7 +44,7 @@ static int rd_attach_hba(struct se_hba *hba, u32 host_id)
pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
" Generic Target Core Stack %s\n", hba->hba_id,
- RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);
+ RD_HBA_VERSION, TARGET_CORE_VERSION);
return 0;
}
@@ -79,23 +60,14 @@ static void rd_detach_hba(struct se_hba *hba)
hba->hba_ptr = NULL;
}
-/* rd_release_device_space():
- *
- *
- */
-static void rd_release_device_space(struct rd_dev *rd_dev)
+static u32 rd_release_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
+ u32 sg_table_count)
{
- u32 i, j, page_count = 0, sg_per_table;
- struct rd_dev_sg_table *sg_table;
struct page *pg;
struct scatterlist *sg;
+ u32 i, j, page_count = 0, sg_per_table;
- if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
- return;
-
- sg_table = rd_dev->sg_table_array;
-
- for (i = 0; i < rd_dev->sg_table_count; i++) {
+ for (i = 0; i < sg_table_count; i++) {
sg = sg_table[i].sg_table;
sg_per_table = sg_table[i].rd_sg_count;
@@ -106,16 +78,28 @@ static void rd_release_device_space(struct rd_dev *rd_dev)
page_count++;
}
}
-
kfree(sg);
}
+ kfree(sg_table);
+ return page_count;
+}
+
+static void rd_release_device_space(struct rd_dev *rd_dev)
+{
+ u32 page_count;
+
+ if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
+ return;
+
+ page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_table_array,
+ rd_dev->sg_table_count);
+
pr_debug("CORE_RD[%u] - Released device space for Ramdisk"
" Device ID: %u, pages %u in %u tables total bytes %lu\n",
rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);
- kfree(sg_table);
rd_dev->sg_table_array = NULL;
rd_dev->sg_table_count = 0;
}
@@ -125,52 +109,39 @@ static void rd_release_device_space(struct rd_dev *rd_dev)
*
*
*/
-static int rd_build_device_space(struct rd_dev *rd_dev)
+static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
+ u32 total_sg_needed, unsigned char init_payload)
{
- u32 i = 0, j, page_offset = 0, sg_per_table, sg_tables, total_sg_needed;
+ u32 i = 0, j, page_offset = 0, sg_per_table;
u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
sizeof(struct scatterlist));
- struct rd_dev_sg_table *sg_table;
struct page *pg;
struct scatterlist *sg;
-
- if (rd_dev->rd_page_count <= 0) {
- pr_err("Illegal page count: %u for Ramdisk device\n",
- rd_dev->rd_page_count);
- return -EINVAL;
- }
-
- /* Don't need backing pages for NULLIO */
- if (rd_dev->rd_flags & RDF_NULLIO)
- return 0;
-
- total_sg_needed = rd_dev->rd_page_count;
-
- sg_tables = (total_sg_needed / max_sg_per_table) + 1;
-
- sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
- if (!sg_table) {
- pr_err("Unable to allocate memory for Ramdisk"
- " scatterlist tables\n");
- return -ENOMEM;
- }
-
- rd_dev->sg_table_array = sg_table;
- rd_dev->sg_table_count = sg_tables;
+ unsigned char *p;
while (total_sg_needed) {
+ unsigned int chain_entry = 0;
+
sg_per_table = (total_sg_needed > max_sg_per_table) ?
max_sg_per_table : total_sg_needed;
- sg = kzalloc(sg_per_table * sizeof(struct scatterlist),
+ /*
+ * Reserve extra element for chain entry
+ */
+ if (sg_per_table < total_sg_needed)
+ chain_entry = 1;
+
+ sg = kmalloc_array(sg_per_table + chain_entry, sizeof(*sg),
GFP_KERNEL);
- if (!sg) {
- pr_err("Unable to allocate scatterlist array"
- " for struct rd_dev\n");
+ if (!sg)
return -ENOMEM;
- }
- sg_init_table(sg, sg_per_table);
+ sg_init_table(sg, sg_per_table + chain_entry);
+
+ if (i > 0) {
+ sg_chain(sg_table[i - 1].sg_table,
+ max_sg_per_table + 1, sg);
+ }
sg_table[i].sg_table = sg;
sg_table[i].rd_sg_count = sg_per_table;
@@ -187,16 +158,111 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
}
sg_assign_page(&sg[j], pg);
sg[j].length = PAGE_SIZE;
+
+ p = kmap(pg);
+ memset(p, init_payload, PAGE_SIZE);
+ kunmap(pg);
}
page_offset += sg_per_table;
total_sg_needed -= sg_per_table;
}
+ return 0;
+}
+
+static int rd_build_device_space(struct rd_dev *rd_dev)
+{
+ struct rd_dev_sg_table *sg_table;
+ u32 sg_tables, total_sg_needed;
+ u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
+ sizeof(struct scatterlist));
+ int rc;
+
+ if (rd_dev->rd_page_count <= 0) {
+ pr_err("Illegal page count: %u for Ramdisk device\n",
+ rd_dev->rd_page_count);
+ return -EINVAL;
+ }
+
+ /* Don't need backing pages for NULLIO */
+ if (rd_dev->rd_flags & RDF_NULLIO)
+ return 0;
+
+ total_sg_needed = rd_dev->rd_page_count;
+
+ sg_tables = (total_sg_needed / max_sg_per_table) + 1;
+ sg_table = kcalloc(sg_tables, sizeof(*sg_table), GFP_KERNEL);
+ if (!sg_table)
+ return -ENOMEM;
+
+ rd_dev->sg_table_array = sg_table;
+ rd_dev->sg_table_count = sg_tables;
+
+ rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0x00);
+ if (rc)
+ return rc;
+
pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
- " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
- rd_dev->rd_dev_id, rd_dev->rd_page_count,
- rd_dev->sg_table_count);
+ " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
+ rd_dev->rd_dev_id, rd_dev->rd_page_count,
+ rd_dev->sg_table_count);
+
+ return 0;
+}
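
The rd_allocate_sgl_table() refactor above carves total_sg_needed into tables
of at most max_sg_per_table entries, reserving one extra slot per table for a
chain entry whenever another table follows. A small sketch of that sizing loop;
the 32-byte scatterlist entry size is an assumption for illustration only:

#include <stdio.h>

#define RD_MAX_ALLOCATION_SIZE	65536
#define SG_ENTRY_SIZE		32	/* assumed sizeof(struct scatterlist) */

int main(void)
{
	unsigned int max_sg_per_table = RD_MAX_ALLOCATION_SIZE / SG_ENTRY_SIZE;
	unsigned int total_sg_needed = 4096;	/* rd_page_count */
	unsigned int table = 0;

	while (total_sg_needed) {
		unsigned int sg_per_table =
			total_sg_needed > max_sg_per_table ?
			max_sg_per_table : total_sg_needed;
		/* reserve a chain entry when more tables follow */
		unsigned int chain_entry = sg_per_table < total_sg_needed;

		printf("table %u: %u entries (+%u chain)\n",
		       table++, sg_per_table, chain_entry);
		total_sg_needed -= sg_per_table;
	}
	return 0;
}
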
+
+static void rd_release_prot_space(struct rd_dev *rd_dev)
+{
+ u32 page_count;
+
+ if (!rd_dev->sg_prot_array || !rd_dev->sg_prot_count)
+ return;
+
+ page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_prot_array,
+ rd_dev->sg_prot_count);
+
+ pr_debug("CORE_RD[%u] - Released protection space for Ramdisk"
+ " Device ID: %u, pages %u in %u tables total bytes %lu\n",
+ rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
+ rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);
+
+ rd_dev->sg_prot_array = NULL;
+ rd_dev->sg_prot_count = 0;
+}
+
+static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length, int block_size)
+{
+ struct rd_dev_sg_table *sg_table;
+ u32 total_sg_needed, sg_tables;
+ u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
+ sizeof(struct scatterlist));
+ int rc;
+
+ if (rd_dev->rd_flags & RDF_NULLIO)
+ return 0;
+	/*
+	 * Each block carries prot_length bytes (8 for DIF) of protection
+	 * data, so:
+	 *
+	 *   pages needed = rd_page_count * (PAGE_SIZE / block_size) *
+	 *		    (prot_length / PAGE_SIZE) + pad
+	 *
+	 * The PAGE_SIZE factors cancel, leaving the expression below.
+	 */
+ total_sg_needed = (rd_dev->rd_page_count * prot_length / block_size) + 1;
+
+ sg_tables = (total_sg_needed / max_sg_per_table) + 1;
+ sg_table = kcalloc(sg_tables, sizeof(*sg_table), GFP_KERNEL);
+ if (!sg_table)
+ return -ENOMEM;
+
+ rd_dev->sg_prot_array = sg_table;
+ rd_dev->sg_prot_count = sg_tables;
+
+ rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0xff);
+ if (rc)
+ return rc;
+
+ pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u prot space of"
+ " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
+ rd_dev->rd_dev_id, total_sg_needed, rd_dev->sg_prot_count);
return 0;
}
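
Worked numbers for the sizing above, assuming PAGE_SIZE 4096, block_size 512 and prot_length 8 (DIF): rd_page_count = 1024 exposes 1024 * 4096 / 512 = 8192 blocks, which need 8192 * 8 = 64 KiB of protection data, i.e. 16 pages; total_sg_needed = 1024 * 8 / 512 + 1 = 17 with the pad entry.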
@@ -206,11 +272,9 @@ static struct se_device *rd_alloc_device(struct se_hba *hba, const char *name)
struct rd_dev *rd_dev;
struct rd_host *rd_host = hba->hba_ptr;
- rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL);
- if (!rd_dev) {
- pr_err("Unable to allocate memory for struct rd_dev\n");
+ rd_dev = kzalloc(sizeof(*rd_dev), GFP_KERNEL);
+ if (!rd_dev)
return NULL;
- }
rd_dev->rd_host = rd_host;
@@ -235,6 +299,7 @@ static int rd_configure_device(struct se_device *dev)
dev->dev_attrib.hw_block_size = RD_BLOCKSIZE;
dev->dev_attrib.hw_max_sectors = UINT_MAX;
dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
+ dev->dev_attrib.is_nonrot = 1;
rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
@@ -251,12 +316,24 @@ fail:
return ret;
}
+static void rd_dev_call_rcu(struct rcu_head *p)
+{
+ struct se_device *dev = container_of(p, struct se_device, rcu_head);
+ struct rd_dev *rd_dev = RD_DEV(dev);
+
+ kfree(rd_dev);
+}
+
static void rd_free_device(struct se_device *dev)
{
+ call_rcu(&dev->rcu_head, rd_dev_call_rcu);
+}
+
+static void rd_destroy_device(struct se_device *dev)
+{
struct rd_dev *rd_dev = RD_DEV(dev);
rd_release_device_space(rd_dev);
- kfree(rd_dev);
}
static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
@@ -279,12 +356,67 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
return NULL;
}
+static struct rd_dev_sg_table *rd_get_prot_table(struct rd_dev *rd_dev, u32 page)
+{
+ struct rd_dev_sg_table *sg_table;
+ u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
+ sizeof(struct scatterlist));
+
+ i = page / sg_per_table;
+ if (i < rd_dev->sg_prot_count) {
+ sg_table = &rd_dev->sg_prot_array[i];
+ if ((sg_table->page_start_offset <= page) &&
+ (sg_table->page_end_offset >= page))
+ return sg_table;
+ }
+
+	pr_err("Unable to locate prot struct rd_dev_sg_table for page: %u\n",
+ page);
+
+ return NULL;
+}
+
+static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, bool is_read)
+{
+ struct se_device *se_dev = cmd->se_dev;
+ struct rd_dev *dev = RD_DEV(se_dev);
+ struct rd_dev_sg_table *prot_table;
+ struct scatterlist *prot_sg;
+ u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
+ u32 prot_offset, prot_page;
+ u32 prot_npages __maybe_unused;
+ u64 tmp;
+ sense_reason_t rc = 0;
+
+ tmp = cmd->t_task_lba * se_dev->prot_length;
+ prot_offset = do_div(tmp, PAGE_SIZE);
+ prot_page = tmp;
+
+ prot_table = rd_get_prot_table(dev, prot_page);
+ if (!prot_table)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ prot_sg = &prot_table->sg_table[prot_page -
+ prot_table->page_start_offset];
+
+ if (se_dev->dev_attrib.pi_prot_verify) {
+ if (is_read)
+ rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0,
+ prot_sg, prot_offset);
+ else
+ rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0,
+ cmd->t_prot_sg, 0);
+ }
+ if (!rc)
+ sbc_dif_copy_prot(cmd, sectors, is_read, prot_sg, prot_offset);
+
+ return rc;
+}
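
The page/offset split above relies on do_div(), which divides in place: the quotient lands back in its 64-bit argument and the remainder is returned. A self-contained illustration with a hypothetical helper name:

	#include <asm/div64.h>
	#include <linux/mm.h>
	#include <linux/printk.h>

	/* Map an LBA to (prot page, in-page offset) for 8-byte DIF tuples. */
	static void demo_prot_locate(u64 lba)
	{
		u64 tmp = lba * 8;			/* byte offset into PI space */
		u32 off = do_div(tmp, PAGE_SIZE);	/* remainder */
		u32 page = tmp;				/* quotient */

		/* lba 1000 -> byte 8000 -> page 1, offset 3904 on 4K pages */
		pr_info("lba %llu: prot page %u offset %u\n",
			(unsigned long long)lba, page, off);
	}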
+
static sense_reason_t
-rd_execute_rw(struct se_cmd *cmd)
+rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
+ enum dma_data_direction data_direction)
{
- struct scatterlist *sgl = cmd->t_data_sg;
- u32 sgl_nents = cmd->t_data_nents;
- enum dma_data_direction data_direction = cmd->data_direction;
struct se_device *se_dev = cmd->se_dev;
struct rd_dev *dev = RD_DEV(se_dev);
struct rd_dev_sg_table *table;
@@ -295,6 +427,7 @@ rd_execute_rw(struct se_cmd *cmd)
u32 rd_page;
u32 src_len;
u64 tmp;
+ sense_reason_t rc;
if (dev->rd_flags & RDF_NULLIO) {
target_complete_cmd(cmd, SAM_STAT_GOOD);
@@ -317,6 +450,13 @@ rd_execute_rw(struct se_cmd *cmd)
data_direction == DMA_FROM_DEVICE ? "Read" : "Write",
cmd->t_task_lba, rd_size, rd_page, rd_offset);
+ if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type &&
+ data_direction == DMA_TO_DEVICE) {
+ rc = rd_do_prot_rw(cmd, false);
+ if (rc)
+ return rc;
+ }
+
src_len = PAGE_SIZE - rd_offset;
sg_miter_start(&m, sgl, sgl_nents,
data_direction == DMA_FROM_DEVICE ?
@@ -378,17 +518,25 @@ rd_execute_rw(struct se_cmd *cmd)
}
sg_miter_stop(&m);
+ if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type &&
+ data_direction == DMA_FROM_DEVICE) {
+ rc = rd_do_prot_rw(cmd, true);
+ if (rc)
+ return rc;
+ }
+
target_complete_cmd(cmd, SAM_STAT_GOOD);
return 0;
}
enum {
- Opt_rd_pages, Opt_rd_nullio, Opt_err
+ Opt_rd_pages, Opt_rd_nullio, Opt_rd_dummy, Opt_err
};
static match_table_t tokens = {
{Opt_rd_pages, "rd_pages=%d"},
{Opt_rd_nullio, "rd_nullio=%d"},
+ {Opt_rd_dummy, "rd_dummy=%d"},
{Opt_err, NULL}
};
@@ -398,7 +546,7 @@ static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
struct rd_dev *rd_dev = RD_DEV(dev);
char *orig, *ptr, *opts;
substring_t args[MAX_OPT_ARGS];
- int ret = 0, arg, token;
+ int arg, token;
opts = kstrdup(page, GFP_KERNEL);
if (!opts)
@@ -427,13 +575,21 @@ static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
pr_debug("RAMDISK: Setting NULLIO flag: %d\n", arg);
rd_dev->rd_flags |= RDF_NULLIO;
break;
+ case Opt_rd_dummy:
+ match_int(args, &arg);
+ if (arg != 1)
+ break;
+
+ pr_debug("RAMDISK: Setting DUMMY flag: %d\n", arg);
+ rd_dev->rd_flags |= RDF_DUMMY;
+ break;
default:
break;
}
}
kfree(orig);
- return (!ret) ? count : ret;
+ return count;
}
static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
@@ -443,12 +599,22 @@ static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: rd_mcp\n",
rd_dev->rd_dev_id);
bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu"
- " SG_table_count: %u nullio: %d\n", rd_dev->rd_page_count,
+ " SG_table_count: %u nullio: %d dummy: %d\n",
+ rd_dev->rd_page_count,
PAGE_SIZE, rd_dev->sg_table_count,
- !!(rd_dev->rd_flags & RDF_NULLIO));
+ !!(rd_dev->rd_flags & RDF_NULLIO),
+ !!(rd_dev->rd_flags & RDF_DUMMY));
return bl;
}
+static u32 rd_get_device_type(struct se_device *dev)
+{
+ if (RD_DEV(dev)->rd_flags & RDF_DUMMY)
+ return 0x3f; /* Unknown device type, not connected */
+ else
+ return sbc_get_device_type(dev);
+}
+
static sector_t rd_get_blocks(struct se_device *dev)
{
struct rd_dev *rd_dev = RD_DEV(dev);
@@ -459,46 +625,60 @@ static sector_t rd_get_blocks(struct se_device *dev)
return blocks_long;
}
-static struct sbc_ops rd_sbc_ops = {
+static int rd_init_prot(struct se_device *dev)
+{
+ struct rd_dev *rd_dev = RD_DEV(dev);
+
+ if (!dev->dev_attrib.pi_prot_type)
+ return 0;
+
+ return rd_build_prot_space(rd_dev, dev->prot_length,
+ dev->dev_attrib.block_size);
+}
+
+static void rd_free_prot(struct se_device *dev)
+{
+ struct rd_dev *rd_dev = RD_DEV(dev);
+
+ rd_release_prot_space(rd_dev);
+}
+
+static struct exec_cmd_ops rd_exec_cmd_ops = {
.execute_rw = rd_execute_rw,
};
static sense_reason_t
rd_parse_cdb(struct se_cmd *cmd)
{
- return sbc_parse_cdb(cmd, &rd_sbc_ops);
+ return sbc_parse_cdb(cmd, &rd_exec_cmd_ops);
}
-static struct se_subsystem_api rd_mcp_template = {
+static const struct target_backend_ops rd_mcp_ops = {
.name = "rd_mcp",
.inquiry_prod = "RAMDISK-MCP",
.inquiry_rev = RD_MCP_VERSION,
- .transport_type = TRANSPORT_PLUGIN_VHBA_VDEV,
.attach_hba = rd_attach_hba,
.detach_hba = rd_detach_hba,
.alloc_device = rd_alloc_device,
.configure_device = rd_configure_device,
+ .destroy_device = rd_destroy_device,
.free_device = rd_free_device,
.parse_cdb = rd_parse_cdb,
.set_configfs_dev_params = rd_set_configfs_dev_params,
.show_configfs_dev_params = rd_show_configfs_dev_params,
- .get_device_type = sbc_get_device_type,
+ .get_device_type = rd_get_device_type,
.get_blocks = rd_get_blocks,
+ .init_prot = rd_init_prot,
+ .free_prot = rd_free_prot,
+ .tb_dev_attrib_attrs = sbc_attrib_attrs,
};
int __init rd_module_init(void)
{
- int ret;
-
- ret = transport_subsystem_register(&rd_mcp_template);
- if (ret < 0) {
- return ret;
- }
-
- return 0;
+ return transport_backend_register(&rd_mcp_ops);
}
void rd_module_exit(void)
{
- transport_subsystem_release(&rd_mcp_template);
+ target_backend_unregister(&rd_mcp_ops);
}
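
With the token wired up, the flag would be set through the device's configfs control attribute in the usual way, e.g. (path is an assumption, depending on the local configfs mount and device name): echo "rd_pages=65536,rd_nullio=1,rd_dummy=1" > /sys/kernel/config/target/core/rd_mcp_0/ramdisk0/control.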
diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h
index 1789d1e14395..9ffda5c4b584 100644
--- a/drivers/target/target_core_rd.h
+++ b/drivers/target/target_core_rd.h
@@ -1,6 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef TARGET_CORE_RD_H
#define TARGET_CORE_RD_H
+#include <linux/module.h>
+#include <linux/types.h>
+#include <target/target_core_base.h>
+
#define RD_HBA_VERSION "v4.0"
#define RD_MCP_VERSION "4.0"
@@ -23,6 +28,7 @@ struct rd_dev_sg_table {
#define RDF_HAS_PAGE_COUNT 0x01
#define RDF_NULLIO 0x02
+#define RDF_DUMMY 0x04
struct rd_dev {
struct se_device dev;
@@ -33,8 +39,12 @@ struct rd_dev {
u32 rd_page_count;
/* Number of SG tables in sg_table_array */
u32 sg_table_count;
+ /* Number of SG tables in sg_prot_array */
+ u32 sg_prot_count;
/* Array of rd_dev_sg_table_t containing scatterlists */
struct rd_dev_sg_table *sg_table_array;
+ /* Array of rd_dev_sg_table containing protection scatterlists */
+ struct rd_dev_sg_table *sg_prot_array;
/* Ramdisk HBA device is connected to */
struct rd_host *rd_host;
} ____cacheline_aligned;
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 8a462773d0c8..abe91dc8722e 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -1,30 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* SCSI Block Commands (SBC) parsing and emulation.
*
- * (c) Copyright 2002-2012 RisingTide Systems LLC.
+ * (c) Copyright 2002-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@kernel.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
-#include <asm/unaligned.h>
-#include <scsi/scsi.h>
+#include <linux/crc-t10dif.h>
+#include <linux/t10-pi.h>
+#include <linux/unaligned.h>
+#include <scsi/scsi_proto.h>
+#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
@@ -32,7 +22,11 @@
#include "target_core_internal.h"
#include "target_core_ua.h"
+#include "target_core_alua.h"
+static sense_reason_t
+sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char, u32, bool);
+static sense_reason_t sbc_execute_unmap(struct se_cmd *cmd);
static sense_reason_t
sbc_emulate_readcapacity(struct se_cmd *cmd)
@@ -64,14 +58,8 @@ sbc_emulate_readcapacity(struct se_cmd *cmd)
else
blocks = (u32)blocks_long;
- buf[0] = (blocks >> 24) & 0xff;
- buf[1] = (blocks >> 16) & 0xff;
- buf[2] = (blocks >> 8) & 0xff;
- buf[3] = blocks & 0xff;
- buf[4] = (dev->dev_attrib.block_size >> 24) & 0xff;
- buf[5] = (dev->dev_attrib.block_size >> 16) & 0xff;
- buf[6] = (dev->dev_attrib.block_size >> 8) & 0xff;
- buf[7] = dev->dev_attrib.block_size & 0xff;
+ put_unaligned_be32(blocks, &buf[0]);
+ put_unaligned_be32(dev->dev_attrib.block_size, &buf[4]);
rbuf = transport_kmap_data_sg(cmd);
if (rbuf) {
@@ -79,7 +67,7 @@ sbc_emulate_readcapacity(struct se_cmd *cmd)
transport_kunmap_data_sg(cmd);
}
- target_complete_cmd(cmd, GOOD);
+ target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, 8);
return 0;
}
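
put_unaligned_be32() is a drop-in for the removed shift-and-mask stores; the two forms below write identical bytes (sketch, not from the patch):

	#include <linux/unaligned.h>

	static void demo_be32(unsigned char *buf, u32 blocks)
	{
		/* open-coded big-endian store ... */
		buf[0] = (blocks >> 24) & 0xff;
		buf[1] = (blocks >> 16) & 0xff;
		buf[2] = (blocks >> 8) & 0xff;
		buf[3] = blocks & 0xff;

		/* ... and the helper that replaces it */
		put_unaligned_be32(blocks, &buf[4]);
	}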
@@ -87,29 +75,54 @@ static sense_reason_t
sbc_emulate_readcapacity_16(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
+ struct se_session *sess = cmd->se_sess;
+ int pi_prot_type = dev->dev_attrib.pi_prot_type;
+
unsigned char *rbuf;
unsigned char buf[32];
unsigned long long blocks = dev->transport->get_blocks(dev);
memset(buf, 0, sizeof(buf));
- buf[0] = (blocks >> 56) & 0xff;
- buf[1] = (blocks >> 48) & 0xff;
- buf[2] = (blocks >> 40) & 0xff;
- buf[3] = (blocks >> 32) & 0xff;
- buf[4] = (blocks >> 24) & 0xff;
- buf[5] = (blocks >> 16) & 0xff;
- buf[6] = (blocks >> 8) & 0xff;
- buf[7] = blocks & 0xff;
- buf[8] = (dev->dev_attrib.block_size >> 24) & 0xff;
- buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff;
- buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff;
- buf[11] = dev->dev_attrib.block_size & 0xff;
+ put_unaligned_be64(blocks, &buf[0]);
+ put_unaligned_be32(dev->dev_attrib.block_size, &buf[8]);
+ /*
+ * Set P_TYPE and PROT_EN bits for DIF support
+ */
+ if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
+ /*
+ * Only override a device's pi_prot_type if no T10-PI is
+ * available, and sess_prot_type has been explicitly enabled.
+ */
+ if (!pi_prot_type)
+ pi_prot_type = sess->sess_prot_type;
+
+ if (pi_prot_type)
+ buf[12] = (pi_prot_type - 1) << 1 | 0x1;
+ }
+
+ if (dev->transport->get_lbppbe)
+ buf[13] = dev->transport->get_lbppbe(dev) & 0x0f;
+
+ if (dev->transport->get_alignment_offset_lbas) {
+ u16 lalba = dev->transport->get_alignment_offset_lbas(dev);
+
+ put_unaligned_be16(lalba, &buf[14]);
+ }
+
/*
* Set Thin Provisioning Enable bit following sbc3r22 in section
* READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
*/
- if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
- buf[14] = 0x80;
+ if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws) {
+ buf[14] |= 0x80;
+
+ /*
+ * LBPRZ signifies that zeroes will be read back from an LBA after
+ * an UNMAP or WRITE SAME w/ unmap bit (sbc3r36 5.16.2)
+ */
+ if (dev->dev_attrib.unmap_zeroes_data)
+ buf[14] |= 0x40;
+ }
rbuf = transport_kmap_data_sg(cmd);
if (rbuf) {
@@ -117,7 +130,39 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)
transport_kunmap_data_sg(cmd);
}
- target_complete_cmd(cmd, GOOD);
+ target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, 32);
+ return 0;
+}
+
+static sense_reason_t
+sbc_emulate_startstop(struct se_cmd *cmd)
+{
+ unsigned char *cdb = cmd->t_task_cdb;
+
+ /*
+ * See sbc3r36 section 5.25
+ * Immediate bit should be set since there is nothing to complete
+ * POWER CONDITION MODIFIER 0h
+ */
+ if (!(cdb[1] & 1) || cdb[2] || cdb[3])
+ return TCM_INVALID_CDB_FIELD;
+
+ /*
+ * See sbc3r36 section 5.25
+ * POWER CONDITION 0h START_VALID - process START and LOEJ
+ */
+ if (cdb[4] >> 4 & 0xf)
+ return TCM_INVALID_CDB_FIELD;
+
+ /*
+ * See sbc3r36 section 5.25
+ * LOEJ 0h - nothing to load or unload
+ * START 1h - we are ready
+ */
+ if (!(cdb[4] & 1) || (cdb[4] & 2) || (cdb[4] & 4))
+ return TCM_INVALID_CDB_FIELD;
+
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
return 0;
}
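
In CDB terms the three checks above accept only IMMED=1 (byte 1 bit 0), POWER CONDITION and POWER CONDITION MODIFIER zero, LOEJ=0 and START=1. A worked CDB that passes:

	1b 01 00 00 01 00	/* START STOP UNIT, IMMED=1, START=1 */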
@@ -145,33 +190,32 @@ sector_t sbc_get_write_same_sectors(struct se_cmd *cmd)
EXPORT_SYMBOL(sbc_get_write_same_sectors);
static sense_reason_t
-sbc_emulate_noop(struct se_cmd *cmd)
+sbc_execute_write_same_unmap(struct se_cmd *cmd)
{
- target_complete_cmd(cmd, GOOD);
+ struct exec_cmd_ops *ops = cmd->protocol_data;
+ sector_t nolb = sbc_get_write_same_sectors(cmd);
+ sense_reason_t ret;
+
+ if (nolb) {
+ ret = ops->execute_unmap(cmd, cmd->t_task_lba, nolb);
+ if (ret)
+ return ret;
+ }
+
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
return 0;
}
-static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
+static sense_reason_t
+sbc_emulate_noop(struct se_cmd *cmd)
{
- return cmd->se_dev->dev_attrib.block_size * sectors;
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
+ return 0;
}
-static int sbc_check_valid_sectors(struct se_cmd *cmd)
+static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
{
- struct se_device *dev = cmd->se_dev;
- unsigned long long end_lba;
- u32 sectors;
-
- sectors = cmd->data_length / dev->dev_attrib.block_size;
- end_lba = dev->transport->get_blocks(dev) + 1;
-
- if (cmd->t_task_lba + sectors > end_lba) {
- pr_err("target: lba %llu, sectors %u exceeds end lba %llu\n",
- cmd->t_task_lba, sectors, end_lba);
- return -EINVAL;
- }
-
- return 0;
+ return cmd->se_dev->dev_attrib.block_size * sectors;
}
static inline u32 transport_get_sectors_6(unsigned char *cdb)
@@ -189,18 +233,17 @@ static inline u32 transport_get_sectors_6(unsigned char *cdb)
static inline u32 transport_get_sectors_10(unsigned char *cdb)
{
- return (u32)(cdb[7] << 8) + cdb[8];
+ return get_unaligned_be16(&cdb[7]);
}
static inline u32 transport_get_sectors_12(unsigned char *cdb)
{
- return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
+ return get_unaligned_be32(&cdb[6]);
}
static inline u32 transport_get_sectors_16(unsigned char *cdb)
{
- return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
- (cdb[12] << 8) + cdb[13];
+ return get_unaligned_be32(&cdb[10]);
}
/*
@@ -208,50 +251,35 @@ static inline u32 transport_get_sectors_16(unsigned char *cdb)
*/
static inline u32 transport_get_sectors_32(unsigned char *cdb)
{
- return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
- (cdb[30] << 8) + cdb[31];
+ return get_unaligned_be32(&cdb[28]);
}
static inline u32 transport_lba_21(unsigned char *cdb)
{
- return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
+ return get_unaligned_be24(&cdb[1]) & 0x1fffff;
}
static inline u32 transport_lba_32(unsigned char *cdb)
{
- return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
+ return get_unaligned_be32(&cdb[2]);
}
static inline unsigned long long transport_lba_64(unsigned char *cdb)
{
- unsigned int __v1, __v2;
-
- __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
- __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
-
- return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
-}
-
-/*
- * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
- */
-static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
-{
- unsigned int __v1, __v2;
-
- __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
- __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];
-
- return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
+ return get_unaligned_be64(&cdb[2]);
}
static sense_reason_t
-sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
+sbc_setup_write_same(struct se_cmd *cmd, unsigned char flags,
+ struct exec_cmd_ops *ops)
{
+ struct se_device *dev = cmd->se_dev;
+ sector_t end_lba = dev->transport->get_blocks(dev) + 1;
unsigned int sectors = sbc_get_write_same_sectors(cmd);
+ sense_reason_t ret;
- if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
+ if ((flags & 0x04) || (flags & 0x02)) {
pr_err("WRITE_SAME PBDATA and LBDATA"
" bits not supported for Block Discard"
" Emulation\n");
@@ -263,79 +291,524 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
return TCM_INVALID_CDB_FIELD;
}
/*
+ * Sanity check for LBA wrap and request past end of device.
+ */
+ if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
+ ((cmd->t_task_lba + sectors) > end_lba)) {
+ pr_err("WRITE_SAME exceeds last lba %llu (lba %llu, sectors %u)\n",
+ (unsigned long long)end_lba, cmd->t_task_lba, sectors);
+ return TCM_ADDRESS_OUT_OF_RANGE;
+ }
+
+ /* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
+ if (flags & 0x10) {
+ pr_warn("WRITE SAME with ANCHOR not supported\n");
+ return TCM_INVALID_CDB_FIELD;
+ }
+
+ if (flags & 0x01) {
+ pr_warn("WRITE SAME with NDOB not supported\n");
+ return TCM_INVALID_CDB_FIELD;
+ }
+
+ /*
* Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
* translated into block discard requests within backend code.
*/
- if (flags[0] & 0x08) {
- if (!ops->execute_write_same_unmap)
+ if (flags & 0x08) {
+ if (!ops->execute_unmap)
return TCM_UNSUPPORTED_SCSI_OPCODE;
- cmd->execute_cmd = ops->execute_write_same_unmap;
+ if (!dev->dev_attrib.emulate_tpws) {
+ pr_err("Got WRITE_SAME w/ UNMAP=1, but backend device"
+ " has emulate_tpws disabled\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+ cmd->execute_cmd = sbc_execute_write_same_unmap;
return 0;
}
if (!ops->execute_write_same)
return TCM_UNSUPPORTED_SCSI_OPCODE;
+ ret = sbc_check_prot(dev, cmd, flags >> 5, sectors, true);
+ if (ret)
+ return ret;
+
cmd->execute_cmd = ops->execute_write_same;
return 0;
}
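
For reference, the flags byte handed to sbc_setup_write_same() (cdb[1] for WRITE SAME 10/16, cdb[10] for WRITE SAME 32) decodes as:

	bit 0		NDOB		rejected
	bit 1		LBDATA		rejected
	bit 2		PBDATA		rejected
	bit 3		UNMAP		routed to sbc_execute_write_same_unmap()
	bit 4		ANCHOR		rejected (ANC_SUP = 0)
	bits 5-7	WRPROTECT	passed to sbc_check_prot()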
-static void xdreadwrite_callback(struct se_cmd *cmd)
+static sense_reason_t
+sbc_execute_rw(struct se_cmd *cmd)
{
- unsigned char *buf, *addr;
+ struct exec_cmd_ops *ops = cmd->protocol_data;
+
+ return ops->execute_rw(cmd, cmd->t_data_sg, cmd->t_data_nents,
+ cmd->data_direction);
+}
+
+static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
+ int *post_ret)
+{
+ struct se_device *dev = cmd->se_dev;
+ sense_reason_t ret = TCM_NO_SENSE;
+
+ spin_lock_irq(&cmd->t_state_lock);
+ if (success) {
+ *post_ret = 1;
+
+ if (cmd->scsi_status == SAM_STAT_CHECK_CONDITION)
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+ spin_unlock_irq(&cmd->t_state_lock);
+
+ /*
+ * Unlock ->caw_sem originally obtained during sbc_compare_and_write()
+ * before the original READ I/O submission.
+ */
+ up(&dev->caw_sem);
+
+ return ret;
+}
+
+/*
+ * compare @cmp_len bytes of @read_sgl with @cmp_sgl. On miscompare, fill
+ * @miscmp_off and return TCM_MISCOMPARE_VERIFY.
+ */
+static sense_reason_t
+compare_and_write_do_cmp(struct scatterlist *read_sgl, unsigned int read_nents,
+ struct scatterlist *cmp_sgl, unsigned int cmp_nents,
+ unsigned int cmp_len, unsigned int *miscmp_off)
+{
+ unsigned char *buf = NULL;
struct scatterlist *sg;
+ sense_reason_t ret;
unsigned int offset;
+ size_t rc;
+ int sg_cnt;
+
+ buf = kzalloc(cmp_len, GFP_KERNEL);
+ if (!buf) {
+ ret = TCM_OUT_OF_RESOURCES;
+ goto out;
+ }
+
+ rc = sg_copy_to_buffer(cmp_sgl, cmp_nents, buf, cmp_len);
+ if (!rc) {
+ pr_err("sg_copy_to_buffer() failed for compare_and_write\n");
+ ret = TCM_OUT_OF_RESOURCES;
+ goto out;
+ }
+ /*
+ * Compare SCSI READ payload against verify payload
+ */
+ offset = 0;
+ ret = TCM_NO_SENSE;
+ for_each_sg(read_sgl, sg, read_nents, sg_cnt) {
+ unsigned int len = min(sg->length, cmp_len);
+ unsigned char *addr = kmap_atomic(sg_page(sg));
+
+ if (memcmp(addr, buf + offset, len)) {
+ unsigned int i;
+
+ for (i = 0; i < len && addr[i] == buf[offset + i]; i++)
+ ;
+ *miscmp_off = offset + i;
+ pr_warn("Detected MISCOMPARE at offset %u\n",
+ *miscmp_off);
+ ret = TCM_MISCOMPARE_VERIFY;
+ }
+ kunmap_atomic(addr);
+ if (ret != TCM_NO_SENSE)
+ goto out;
+
+ offset += len;
+ cmp_len -= len;
+ if (!cmp_len)
+ break;
+ }
+ pr_debug("COMPARE AND WRITE read data matches compare data\n");
+out:
+ kfree(buf);
+ return ret;
+}
+
+static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success,
+ int *post_ret)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct sg_table write_tbl = { };
+ struct scatterlist *write_sg;
+ struct sg_mapping_iter m;
+ unsigned int len;
+ unsigned int block_size = dev->dev_attrib.block_size;
+ unsigned int compare_len = (cmd->t_task_nolb * block_size);
+ unsigned int miscmp_off = 0;
+ sense_reason_t ret = TCM_NO_SENSE;
int i;
- int count;
+
+ if (!success) {
+ /*
+ * Handle early failure in transport_generic_request_failure(),
+ * which will not have taken ->caw_sem yet..
+ */
+ if (!cmd->t_data_sg || !cmd->t_bidi_data_sg)
+ return TCM_NO_SENSE;
+
+ /*
+ * The command has been stopped or aborted so
+ * we don't have to perform the write operation.
+ */
+ WARN_ON(!(cmd->transport_state &
+ (CMD_T_ABORTED | CMD_T_STOP)));
+ goto out;
+ }
/*
- * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
- *
- * 1) read the specified logical block(s);
- * 2) transfer logical blocks from the data-out buffer;
- * 3) XOR the logical blocks transferred from the data-out buffer with
- * the logical blocks read, storing the resulting XOR data in a buffer;
- * 4) if the DISABLE WRITE bit is set to zero, then write the logical
- * blocks transferred from the data-out buffer; and
- * 5) transfer the resulting XOR data to the data-in buffer.
+ * Handle special case for zero-length COMPARE_AND_WRITE
*/
- buf = kmalloc(cmd->data_length, GFP_KERNEL);
- if (!buf) {
- pr_err("Unable to allocate xor_callback buf\n");
- return;
+ if (!cmd->data_length)
+ goto out;
+ /*
+ * Immediately exit + release dev->caw_sem if command has already
+ * been failed with a non-zero SCSI status.
+ */
+ if (cmd->scsi_status) {
+ pr_debug("compare_and_write_callback: non zero scsi_status:"
+ " 0x%02x\n", cmd->scsi_status);
+ *post_ret = 1;
+ if (cmd->scsi_status == SAM_STAT_CHECK_CONDITION)
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ goto out;
+ }
+
+ ret = compare_and_write_do_cmp(cmd->t_bidi_data_sg,
+ cmd->t_bidi_data_nents,
+ cmd->t_data_sg,
+ cmd->t_data_nents,
+ compare_len,
+ &miscmp_off);
+ if (ret == TCM_MISCOMPARE_VERIFY) {
+ /*
+ * SBC-4 r15: 5.3 COMPARE AND WRITE command
+ * In the sense data (see 4.18 and SPC-5) the offset from the
+ * start of the Data-Out Buffer to the first byte of data that
+ * was not equal shall be reported in the INFORMATION field.
+ */
+ cmd->sense_info = miscmp_off;
+ goto out;
+ } else if (ret)
+ goto out;
+
+ if (sg_alloc_table(&write_tbl, cmd->t_data_nents, GFP_KERNEL) < 0) {
+ pr_err("Unable to allocate compare_and_write sg\n");
+ ret = TCM_OUT_OF_RESOURCES;
+ goto out;
}
+ write_sg = write_tbl.sgl;
+
+ i = 0;
+ len = compare_len;
+ sg_miter_start(&m, cmd->t_data_sg, cmd->t_data_nents, SG_MITER_TO_SG);
/*
- * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
- * into the locally allocated *buf
+ * Currently assumes NoLB=1 and SGLs are PAGE_SIZE..
*/
- sg_copy_to_buffer(cmd->t_data_sg,
- cmd->t_data_nents,
- buf,
- cmd->data_length);
+ while (len) {
+ sg_miter_next(&m);
+ if (block_size < PAGE_SIZE) {
+ sg_set_page(&write_sg[i], m.page, block_size,
+ m.piter.sg->offset + block_size);
+ } else {
+ sg_miter_next(&m);
+ sg_set_page(&write_sg[i], m.page, block_size,
+ m.piter.sg->offset);
+ }
+ len -= block_size;
+ i++;
+ }
+ sg_miter_stop(&m);
/*
- * Now perform the XOR against the BIDI read memory located at
- * cmd->t_mem_bidi_list
+ * Save the original SGL + nents values before updating to new
+ * assignments, to be released in transport_free_pages() ->
+ * transport_reset_sgl_orig()
*/
+ cmd->t_data_sg_orig = cmd->t_data_sg;
+ cmd->t_data_sg = write_sg;
+ cmd->t_data_nents_orig = cmd->t_data_nents;
+ cmd->t_data_nents = 1;
- offset = 0;
- for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
- addr = kmap_atomic(sg_page(sg));
- if (!addr)
- goto out;
+ cmd->sam_task_attr = TCM_HEAD_TAG;
+ cmd->transport_complete_callback = compare_and_write_post;
+ /*
+ * Now reset ->execute_cmd() to the normal sbc_execute_rw() handler
+ * for submitting the adjusted SGL to write instance user-data.
+ */
+ cmd->execute_cmd = sbc_execute_rw;
- for (i = 0; i < sg->length; i++)
- *(addr + sg->offset + i) ^= *(buf + offset + i);
+ spin_lock_irq(&cmd->t_state_lock);
+ cmd->t_state = TRANSPORT_PROCESSING;
+ cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT;
+ spin_unlock_irq(&cmd->t_state_lock);
- offset += sg->length;
- kunmap_atomic(addr);
- }
+ __target_execute_cmd(cmd, false);
+
+ return ret;
out:
- kfree(buf);
+ /*
+ * In the MISCOMPARE or failure case, unlock ->caw_sem obtained in
+ * sbc_compare_and_write() before the original READ I/O submission.
+ */
+ up(&dev->caw_sem);
+ sg_free_table(&write_tbl);
+ return ret;
+}
+
+static sense_reason_t
+sbc_compare_and_write(struct se_cmd *cmd)
+{
+ struct exec_cmd_ops *ops = cmd->protocol_data;
+ struct se_device *dev = cmd->se_dev;
+ sense_reason_t ret;
+ int rc;
+ /*
+ * Submit the READ first for COMPARE_AND_WRITE to perform the
+	 * comparison using SGLs at cmd->t_bidi_data_sg.
+ */
+ rc = down_interruptible(&dev->caw_sem);
+ if (rc != 0) {
+ cmd->transport_complete_callback = NULL;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+ /*
+ * Reset cmd->data_length to individual block_size in order to not
+ * confuse backend drivers that depend on this value matching the
+ * size of the I/O being submitted.
+ */
+ cmd->data_length = cmd->t_task_nolb * dev->dev_attrib.block_size;
+
+ ret = ops->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents,
+ DMA_FROM_DEVICE);
+ if (ret) {
+ cmd->transport_complete_callback = NULL;
+ up(&dev->caw_sem);
+ return ret;
+ }
+ /*
+ * Unlock of dev->caw_sem to occur in compare_and_write_callback()
+ * upon MISCOMPARE, or in compare_and_write_done() upon completion
+ * of WRITE instance user-data.
+ */
+ return TCM_NO_SENSE;
+}
+
+static int
+sbc_set_prot_op_checks(u8 protect, bool fabric_prot, enum target_prot_type prot_type,
+ bool is_write, struct se_cmd *cmd)
+{
+ if (is_write) {
+ cmd->prot_op = fabric_prot ? TARGET_PROT_DOUT_STRIP :
+ protect ? TARGET_PROT_DOUT_PASS :
+ TARGET_PROT_DOUT_INSERT;
+ switch (protect) {
+ case 0x0:
+ case 0x3:
+ cmd->prot_checks = 0;
+ break;
+ case 0x1:
+ case 0x5:
+ cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
+ if (prot_type == TARGET_DIF_TYPE1_PROT)
+ cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
+ break;
+ case 0x2:
+ if (prot_type == TARGET_DIF_TYPE1_PROT)
+ cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
+ break;
+ case 0x4:
+ cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
+ break;
+ default:
+ pr_err("Unsupported protect field %d\n", protect);
+ return -EINVAL;
+ }
+ } else {
+ cmd->prot_op = fabric_prot ? TARGET_PROT_DIN_INSERT :
+ protect ? TARGET_PROT_DIN_PASS :
+ TARGET_PROT_DIN_STRIP;
+ switch (protect) {
+ case 0x0:
+ case 0x1:
+ case 0x5:
+ cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
+ if (prot_type == TARGET_DIF_TYPE1_PROT)
+ cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
+ break;
+ case 0x2:
+ if (prot_type == TARGET_DIF_TYPE1_PROT)
+ cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
+ break;
+ case 0x3:
+ cmd->prot_checks = 0;
+ break;
+ case 0x4:
+ cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
+ break;
+ default:
+ pr_err("Unsupported protect field %d\n", protect);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static sense_reason_t
+sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char protect,
+ u32 sectors, bool is_write)
+{
+ int sp_ops = cmd->se_sess->sup_prot_ops;
+ int pi_prot_type = dev->dev_attrib.pi_prot_type;
+ bool fabric_prot = false;
+
+ if (!cmd->t_prot_sg || !cmd->t_prot_nents) {
+ if (unlikely(protect &&
+ !dev->dev_attrib.pi_prot_type && !cmd->se_sess->sess_prot_type)) {
+ pr_err("CDB contains protect bit, but device + fabric does"
+ " not advertise PROTECT=1 feature bit\n");
+ return TCM_INVALID_CDB_FIELD;
+ }
+ if (cmd->prot_pto)
+ return TCM_NO_SENSE;
+ }
+
+ switch (dev->dev_attrib.pi_prot_type) {
+ case TARGET_DIF_TYPE3_PROT:
+ cmd->reftag_seed = 0xffffffff;
+ break;
+ case TARGET_DIF_TYPE2_PROT:
+ if (protect)
+ return TCM_INVALID_CDB_FIELD;
+
+ cmd->reftag_seed = cmd->t_task_lba;
+ break;
+ case TARGET_DIF_TYPE1_PROT:
+ cmd->reftag_seed = cmd->t_task_lba;
+ break;
+ case TARGET_DIF_TYPE0_PROT:
+ /*
+ * See if the fabric supports T10-PI, and the session has been
+ * configured to allow export PROTECT=1 feature bit with backend
+ * devices that don't support T10-PI.
+ */
+ fabric_prot = is_write ?
+ !!(sp_ops & (TARGET_PROT_DOUT_PASS | TARGET_PROT_DOUT_STRIP)) :
+ !!(sp_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DIN_INSERT));
+
+ if (fabric_prot && cmd->se_sess->sess_prot_type) {
+ pi_prot_type = cmd->se_sess->sess_prot_type;
+ break;
+ }
+ if (!protect)
+ return TCM_NO_SENSE;
+ fallthrough;
+ default:
+ pr_err("Unable to determine pi_prot_type for CDB: 0x%02x "
+ "PROTECT: 0x%02x\n", cmd->t_task_cdb[0], protect);
+ return TCM_INVALID_CDB_FIELD;
+ }
+
+ if (sbc_set_prot_op_checks(protect, fabric_prot, pi_prot_type, is_write, cmd))
+ return TCM_INVALID_CDB_FIELD;
+
+ cmd->prot_type = pi_prot_type;
+ cmd->prot_length = dev->prot_length * sectors;
+
+ /**
+ * In case protection information exists over the wire
+ * we modify command data length to describe pure data.
+ * The actual transfer length is data length + protection
+ * length
+ **/
+ if (protect)
+ cmd->data_length = sectors * dev->dev_attrib.block_size;
+
+ pr_debug("%s: prot_type=%d, data_length=%d, prot_length=%d "
+ "prot_op=%d prot_checks=%d\n",
+ __func__, cmd->prot_type, cmd->data_length, cmd->prot_length,
+ cmd->prot_op, cmd->prot_checks);
+
+ return TCM_NO_SENSE;
+}
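
Worked numbers for the data_length adjustment above: an 8-sector WRITE(10) with WRPROTECT != 0 on a 512-byte-block DIF device moves 8 * 512 = 4096 data bytes plus 8 * 8 = 64 PI bytes on the wire; cmd->data_length is trimmed to 4096 and cmd->prot_length set to 64, so backends see pure data.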
+
+static int
+sbc_check_dpofua(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb)
+{
+ if (cdb[1] & 0x10) {
+ /* see explanation in spc_emulate_modesense */
+ if (!target_check_fua(dev)) {
+ pr_err("Got CDB: 0x%02x with DPO bit set, but device"
+ " does not advertise support for DPO\n", cdb[0]);
+ return -EINVAL;
+ }
+ }
+ if (cdb[1] & 0x8) {
+ if (!target_check_fua(dev)) {
+ pr_err("Got CDB: 0x%02x with FUA bit set, but device"
+ " does not advertise support for FUA write\n",
+ cdb[0]);
+ return -EINVAL;
+ }
+ cmd->se_cmd_flags |= SCF_FUA;
+ }
+ return 0;
+}
+
+static sense_reason_t
+sbc_check_atomic(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb)
+{
+ struct se_dev_attrib *attrib = &dev->dev_attrib;
+ u16 boundary, transfer_len;
+ u64 lba;
+
+ lba = transport_lba_64(cdb);
+ boundary = get_unaligned_be16(&cdb[10]);
+ transfer_len = get_unaligned_be16(&cdb[12]);
+
+ if (!attrib->atomic_max_len)
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+
+ if (boundary) {
+ if (transfer_len > attrib->atomic_max_with_boundary)
+ return TCM_INVALID_CDB_FIELD;
+
+ if (boundary > attrib->atomic_max_boundary)
+ return TCM_INVALID_CDB_FIELD;
+ } else {
+ if (transfer_len > attrib->atomic_max_len)
+ return TCM_INVALID_CDB_FIELD;
+ }
+
+ if (attrib->atomic_granularity) {
+ if (transfer_len % attrib->atomic_granularity)
+ return TCM_INVALID_CDB_FIELD;
+
+ if (boundary && boundary % attrib->atomic_granularity)
+ return TCM_INVALID_CDB_FIELD;
+ }
+
+ if (dev->dev_attrib.atomic_alignment) {
+ u64 _lba = lba;
+
+ if (do_div(_lba, dev->dev_attrib.atomic_alignment))
+ return TCM_INVALID_CDB_FIELD;
+ }
+
+ return 0;
}
sense_reason_t
-sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
+sbc_parse_cdb(struct se_cmd *cmd, struct exec_cmd_ops *ops)
{
struct se_device *dev = cmd->se_dev;
unsigned char *cdb = cmd->t_task_cdb;
@@ -343,102 +816,119 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
u32 sectors = 0;
sense_reason_t ret;
+ cmd->protocol_data = ops;
+
switch (cdb[0]) {
case READ_6:
sectors = transport_get_sectors_6(cdb);
cmd->t_task_lba = transport_lba_21(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
- cmd->execute_cmd = ops->execute_rw;
+ cmd->execute_cmd = sbc_execute_rw;
break;
case READ_10:
sectors = transport_get_sectors_10(cdb);
cmd->t_task_lba = transport_lba_32(cdb);
+
+ if (sbc_check_dpofua(dev, cmd, cdb))
+ return TCM_INVALID_CDB_FIELD;
+
+ ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, false);
+ if (ret)
+ return ret;
+
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
- cmd->execute_cmd = ops->execute_rw;
+ cmd->execute_cmd = sbc_execute_rw;
break;
case READ_12:
sectors = transport_get_sectors_12(cdb);
cmd->t_task_lba = transport_lba_32(cdb);
+
+ if (sbc_check_dpofua(dev, cmd, cdb))
+ return TCM_INVALID_CDB_FIELD;
+
+ ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, false);
+ if (ret)
+ return ret;
+
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
- cmd->execute_cmd = ops->execute_rw;
+ cmd->execute_cmd = sbc_execute_rw;
break;
case READ_16:
sectors = transport_get_sectors_16(cdb);
cmd->t_task_lba = transport_lba_64(cdb);
+
+ if (sbc_check_dpofua(dev, cmd, cdb))
+ return TCM_INVALID_CDB_FIELD;
+
+ ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, false);
+ if (ret)
+ return ret;
+
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
- cmd->execute_cmd = ops->execute_rw;
+ cmd->execute_cmd = sbc_execute_rw;
break;
case WRITE_6:
sectors = transport_get_sectors_6(cdb);
cmd->t_task_lba = transport_lba_21(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
- cmd->execute_cmd = ops->execute_rw;
+ cmd->execute_cmd = sbc_execute_rw;
break;
case WRITE_10:
case WRITE_VERIFY:
sectors = transport_get_sectors_10(cdb);
cmd->t_task_lba = transport_lba_32(cdb);
- if (cdb[1] & 0x8)
- cmd->se_cmd_flags |= SCF_FUA;
+
+ if (sbc_check_dpofua(dev, cmd, cdb))
+ return TCM_INVALID_CDB_FIELD;
+
+ ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, true);
+ if (ret)
+ return ret;
+
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
- cmd->execute_cmd = ops->execute_rw;
+ cmd->execute_cmd = sbc_execute_rw;
break;
case WRITE_12:
sectors = transport_get_sectors_12(cdb);
cmd->t_task_lba = transport_lba_32(cdb);
- if (cdb[1] & 0x8)
- cmd->se_cmd_flags |= SCF_FUA;
+
+ if (sbc_check_dpofua(dev, cmd, cdb))
+ return TCM_INVALID_CDB_FIELD;
+
+ ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, true);
+ if (ret)
+ return ret;
+
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
- cmd->execute_cmd = ops->execute_rw;
+ cmd->execute_cmd = sbc_execute_rw;
break;
case WRITE_16:
+ case WRITE_VERIFY_16:
+ case WRITE_ATOMIC_16:
sectors = transport_get_sectors_16(cdb);
cmd->t_task_lba = transport_lba_64(cdb);
- if (cdb[1] & 0x8)
- cmd->se_cmd_flags |= SCF_FUA;
- cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
- cmd->execute_cmd = ops->execute_rw;
- break;
- case XDWRITEREAD_10:
- if (cmd->data_direction != DMA_TO_DEVICE ||
- !(cmd->se_cmd_flags & SCF_BIDI))
+
+ if (sbc_check_dpofua(dev, cmd, cdb))
return TCM_INVALID_CDB_FIELD;
- sectors = transport_get_sectors_10(cdb);
- cmd->t_task_lba = transport_lba_32(cdb);
+ ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, true);
+ if (ret)
+ return ret;
+
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+ if (cdb[0] == WRITE_ATOMIC_16) {
+ cmd->se_cmd_flags |= SCF_ATOMIC;
- /*
- * Setup BIDI XOR callback to be run after I/O completion.
- */
- cmd->execute_cmd = ops->execute_rw;
- cmd->transport_complete_callback = &xdreadwrite_callback;
- if (cdb[1] & 0x8)
- cmd->se_cmd_flags |= SCF_FUA;
+ ret = sbc_check_atomic(dev, cmd, cdb);
+ if (ret)
+ return ret;
+ }
+ cmd->execute_cmd = sbc_execute_rw;
break;
case VARIABLE_LENGTH_CMD:
{
u16 service_action = get_unaligned_be16(&cdb[8]);
switch (service_action) {
- case XDWRITEREAD_32:
- sectors = transport_get_sectors_32(cdb);
-
- /*
- * Use WRITE_32 and READ_32 opcodes for the emulated
- * XDWRITE_READ_32 logic.
- */
- cmd->t_task_lba = transport_lba_64_ext(cdb);
- cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
-
- /*
- * Setup BIDI XOR callback to be run during after I/O
- * completion.
- */
- cmd->execute_cmd = ops->execute_rw;
- cmd->transport_complete_callback = &xdreadwrite_callback;
- if (cdb[1] & 0x8)
- cmd->se_cmd_flags |= SCF_FUA;
- break;
case WRITE_SAME_32:
sectors = transport_get_sectors_32(cdb);
if (!sectors) {
@@ -450,7 +940,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
size = sbc_get_size(cmd, 1);
cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
- ret = sbc_setup_write_same(cmd, &cdb[10], ops);
+ ret = sbc_setup_write_same(cmd, cdb[10], ops);
if (ret)
return ret;
break;
@@ -461,34 +951,58 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
}
break;
}
+ case COMPARE_AND_WRITE:
+ if (!dev->dev_attrib.emulate_caw) {
+ pr_err_ratelimited("se_device %s/%s (vpd_unit_serial %s) reject COMPARE_AND_WRITE\n",
+ dev->se_hba->backend->ops->name,
+ config_item_name(&dev->dev_group.cg_item),
+ dev->t10_wwn.unit_serial);
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+ sectors = cdb[13];
+ /*
+ * Currently enforce COMPARE_AND_WRITE for a single sector
+ */
+ if (sectors > 1) {
+ pr_err("COMPARE_AND_WRITE contains NoLB: %u greater"
+ " than 1\n", sectors);
+ return TCM_INVALID_CDB_FIELD;
+ }
+ if (sbc_check_dpofua(dev, cmd, cdb))
+ return TCM_INVALID_CDB_FIELD;
+
+ /*
+ * Double size because we have two buffers, note that
+ * zero is not an error..
+ */
+ size = 2 * sbc_get_size(cmd, sectors);
+ cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
+ cmd->t_task_nolb = sectors;
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB | SCF_COMPARE_AND_WRITE;
+ cmd->execute_cmd = sbc_compare_and_write;
+ cmd->transport_complete_callback = compare_and_write_callback;
+ break;
case READ_CAPACITY:
size = READ_CAP_LEN;
cmd->execute_cmd = sbc_emulate_readcapacity;
break;
- case SERVICE_ACTION_IN:
+ case SERVICE_ACTION_IN_16:
switch (cmd->t_task_cdb[1] & 0x1f) {
case SAI_READ_CAPACITY_16:
cmd->execute_cmd = sbc_emulate_readcapacity_16;
break;
+ case SAI_REPORT_REFERRALS:
+ cmd->execute_cmd = target_emulate_report_referrals;
+ break;
default:
pr_err("Unsupported SA: 0x%02x\n",
cmd->t_task_cdb[1] & 0x1f);
return TCM_INVALID_CDB_FIELD;
}
- size = (cdb[10] << 24) | (cdb[11] << 16) |
- (cdb[12] << 8) | cdb[13];
+ size = get_unaligned_be32(&cdb[10]);
break;
case SYNCHRONIZE_CACHE:
case SYNCHRONIZE_CACHE_16:
- if (!ops->execute_sync_cache) {
- size = 0;
- cmd->execute_cmd = sbc_emulate_noop;
- break;
- }
-
- /*
- * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
- */
if (cdb[0] == SYNCHRONIZE_CACHE) {
sectors = transport_get_sectors_10(cdb);
cmd->t_task_lba = transport_lba_32(cdb);
@@ -496,25 +1010,24 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
sectors = transport_get_sectors_16(cdb);
cmd->t_task_lba = transport_lba_64(cdb);
}
-
- size = sbc_get_size(cmd, sectors);
-
- /*
- * Check to ensure that LBA + Range does not exceed past end of
- * device for IBLOCK and FILEIO ->do_sync_cache() backend calls
- */
- if (cmd->t_task_lba || sectors) {
- if (sbc_check_valid_sectors(cmd) < 0)
- return TCM_ADDRESS_OUT_OF_RANGE;
+ if (ops->execute_sync_cache) {
+ cmd->execute_cmd = ops->execute_sync_cache;
+ goto check_lba;
}
- cmd->execute_cmd = ops->execute_sync_cache;
+ size = 0;
+ cmd->execute_cmd = sbc_emulate_noop;
break;
case UNMAP:
if (!ops->execute_unmap)
return TCM_UNSUPPORTED_SCSI_OPCODE;
+ if (!dev->dev_attrib.emulate_tpu) {
+ pr_err("Got UNMAP, but backend device has"
+ " emulate_tpu disabled\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
size = get_unaligned_be16(&cdb[7]);
- cmd->execute_cmd = ops->execute_unmap;
+ cmd->execute_cmd = sbc_execute_unmap;
break;
case WRITE_SAME_16:
sectors = transport_get_sectors_16(cdb);
@@ -526,7 +1039,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
size = sbc_get_size(cmd, 1);
cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
- ret = sbc_setup_write_same(cmd, &cdb[1], ops);
+ ret = sbc_setup_write_same(cmd, cdb[1], ops);
if (ret)
return ret;
break;
@@ -544,14 +1057,22 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
* Follow sbcr26 with WRITE_SAME (10) and check for the existence
* of byte 1 bit 3 UNMAP instead of original reserved field
*/
- ret = sbc_setup_write_same(cmd, &cdb[1], ops);
+ ret = sbc_setup_write_same(cmd, cdb[1], ops);
if (ret)
return ret;
break;
case VERIFY:
+ case VERIFY_16:
size = 0;
+ if (cdb[0] == VERIFY) {
+ sectors = transport_get_sectors_10(cdb);
+ cmd->t_task_lba = transport_lba_32(cdb);
+ } else {
+ sectors = transport_get_sectors_16(cdb);
+ cmd->t_task_lba = transport_lba_64(cdb);
+ }
cmd->execute_cmd = sbc_emulate_noop;
- break;
+ goto check_lba;
case REZERO_UNIT:
case SEEK_6:
case SEEK_10:
@@ -564,6 +1085,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
size = 0;
cmd->execute_cmd = sbc_emulate_noop;
break;
+ case START_STOP:
+ size = 0;
+ cmd->execute_cmd = sbc_emulate_startstop;
+ break;
default:
ret = spc_parse_cdb(cmd, &size);
if (ret)
@@ -571,36 +1096,23 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
}
/* reject any command that we don't have a handler for */
- if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && !cmd->execute_cmd)
+ if (!cmd->execute_cmd)
return TCM_UNSUPPORTED_SCSI_OPCODE;
if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
unsigned long long end_lba;
-
- if (sectors > dev->dev_attrib.fabric_max_sectors) {
- printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
- " big sectors %u exceeds fabric_max_sectors:"
- " %u\n", cdb[0], sectors,
- dev->dev_attrib.fabric_max_sectors);
- return TCM_INVALID_CDB_FIELD;
- }
- if (sectors > dev->dev_attrib.hw_max_sectors) {
- printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
- " big sectors %u exceeds backend hw_max_sectors:"
- " %u\n", cdb[0], sectors,
- dev->dev_attrib.hw_max_sectors);
- return TCM_INVALID_CDB_FIELD;
- }
-
+check_lba:
end_lba = dev->transport->get_blocks(dev) + 1;
- if (cmd->t_task_lba + sectors > end_lba) {
+ if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
+ ((cmd->t_task_lba + sectors) > end_lba)) {
pr_err("cmd exceeds last lba %llu "
"(lba %llu, sectors %u)\n",
end_lba, cmd->t_task_lba, sectors);
return TCM_ADDRESS_OUT_OF_RANGE;
}
- size = sbc_get_size(cmd, sectors);
+ if (!(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE))
+ size = sbc_get_size(cmd, sectors);
}
return target_cmd_size_check(cmd, size);
@@ -613,12 +1125,10 @@ u32 sbc_get_device_type(struct se_device *dev)
}
EXPORT_SYMBOL(sbc_get_device_type);
-sense_reason_t
-sbc_execute_unmap(struct se_cmd *cmd,
- sense_reason_t (*do_unmap_fn)(struct se_cmd *, void *,
- sector_t, sector_t),
- void *priv)
+static sense_reason_t
+sbc_execute_unmap(struct se_cmd *cmd)
{
+ struct exec_cmd_ops *ops = cmd->protocol_data;
struct se_device *dev = cmd->se_dev;
unsigned char *buf, *ptr = NULL;
sector_t lba;
@@ -682,9 +1192,11 @@ sbc_execute_unmap(struct se_cmd *cmd,
goto err;
}
- ret = do_unmap_fn(cmd, priv, lba, range);
- if (ret)
- goto err;
+ if (range) {
+ ret = ops->execute_unmap(cmd, lba, range);
+ if (ret)
+ goto err;
+ }
ptr += 16;
size -= 16;
@@ -693,7 +1205,244 @@ sbc_execute_unmap(struct se_cmd *cmd,
err:
transport_kunmap_data_sg(cmd);
if (!ret)
- target_complete_cmd(cmd, GOOD);
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
return ret;
}
-EXPORT_SYMBOL(sbc_execute_unmap);
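
sbc_execute_unmap() consumes the UNMAP parameter list as 16-byte block descriptors read with get_unaligned_be64()/be32(); their layout per SBC-3 is (sketch, struct name illustrative):

	#include <linux/types.h>

	/* One UNMAP block descriptor -- 16 bytes on the wire */
	struct demo_unmap_desc {
		__be64 lba;		/* UNMAP LOGICAL BLOCK ADDRESS */
		__be32 nlb;		/* NUMBER OF LOGICAL BLOCKS */
		__be32 reserved;
	} __packed;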
+
+void
+sbc_dif_generate(struct se_cmd *cmd)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct t10_pi_tuple *sdt;
+ struct scatterlist *dsg = cmd->t_data_sg, *psg;
+ sector_t sector = cmd->t_task_lba;
+ void *daddr, *paddr;
+ int i, j, offset = 0;
+ unsigned int block_size = dev->dev_attrib.block_size;
+
+ for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
+ paddr = kmap_atomic(sg_page(psg)) + psg->offset;
+ daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
+
+ for (j = 0; j < psg->length;
+ j += sizeof(*sdt)) {
+ __u16 crc;
+ unsigned int avail;
+
+ if (offset >= dsg->length) {
+ offset -= dsg->length;
+ kunmap_atomic(daddr - dsg->offset);
+ dsg = sg_next(dsg);
+ if (!dsg) {
+ kunmap_atomic(paddr - psg->offset);
+ return;
+ }
+ daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
+ }
+
+ sdt = paddr + j;
+ avail = min(block_size, dsg->length - offset);
+ crc = crc_t10dif(daddr + offset, avail);
+ if (avail < block_size) {
+ kunmap_atomic(daddr - dsg->offset);
+ dsg = sg_next(dsg);
+ if (!dsg) {
+ kunmap_atomic(paddr - psg->offset);
+ return;
+ }
+ daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
+ offset = block_size - avail;
+ crc = crc_t10dif_update(crc, daddr, offset);
+ } else {
+ offset += block_size;
+ }
+
+ sdt->guard_tag = cpu_to_be16(crc);
+ if (cmd->prot_type == TARGET_DIF_TYPE1_PROT)
+ sdt->ref_tag = cpu_to_be32(sector & 0xffffffff);
+ sdt->app_tag = 0;
+
+ pr_debug("DIF %s INSERT sector: %llu guard_tag: 0x%04x"
+ " app_tag: 0x%04x ref_tag: %u\n",
+ (cmd->data_direction == DMA_TO_DEVICE) ?
+ "WRITE" : "READ", (unsigned long long)sector,
+ sdt->guard_tag, sdt->app_tag,
+ be32_to_cpu(sdt->ref_tag));
+
+ sector++;
+ }
+
+ kunmap_atomic(daddr - dsg->offset);
+ kunmap_atomic(paddr - psg->offset);
+ }
+}
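
The tuple filled in above is the 8-byte DIF layout from <linux/t10-pi.h>: a CRC16 guard over the block data, an application tag, and (for Type 1) the low 32 bits of the LBA. A minimal sketch for one 512-byte block, with a hypothetical helper name:

	#include <linux/crc-t10dif.h>
	#include <linux/kernel.h>
	#include <linux/t10-pi.h>

	/* Fill one DIF Type 1 tuple for a 512-byte block. */
	static void demo_fill_pi(struct t10_pi_tuple *sdt, const void *block,
				 sector_t sector)
	{
		sdt->guard_tag = cpu_to_be16(crc_t10dif(block, 512));
		sdt->app_tag = 0;
		sdt->ref_tag = cpu_to_be32(lower_32_bits(sector));
	}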
+
+static sense_reason_t
+sbc_dif_v1_verify(struct se_cmd *cmd, struct t10_pi_tuple *sdt,
+ __u16 crc, sector_t sector, unsigned int ei_lba)
+{
+ __be16 csum;
+
+ if (!(cmd->prot_checks & TARGET_DIF_CHECK_GUARD))
+ goto check_ref;
+
+ csum = cpu_to_be16(crc);
+
+ if (sdt->guard_tag != csum) {
+ pr_err("DIFv1 checksum failed on sector %llu guard tag 0x%04x"
+ " csum 0x%04x\n", (unsigned long long)sector,
+ be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
+ return TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
+ }
+
+check_ref:
+ if (!(cmd->prot_checks & TARGET_DIF_CHECK_REFTAG))
+ return 0;
+
+ if (cmd->prot_type == TARGET_DIF_TYPE1_PROT &&
+ be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
+ pr_err("DIFv1 Type 1 reference failed on sector: %llu tag: 0x%08x"
+			" sector LSB: 0x%08x\n", (unsigned long long)sector,
+ be32_to_cpu(sdt->ref_tag), (u32)(sector & 0xffffffff));
+ return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
+ }
+
+ if (cmd->prot_type == TARGET_DIF_TYPE2_PROT &&
+ be32_to_cpu(sdt->ref_tag) != ei_lba) {
+ pr_err("DIFv1 Type 2 reference failed on sector: %llu tag: 0x%08x"
+ " ei_lba: 0x%08x\n", (unsigned long long)sector,
+ be32_to_cpu(sdt->ref_tag), ei_lba);
+ return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
+ }
+
+ return 0;
+}
+
+void sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
+ struct scatterlist *sg, int sg_off)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct scatterlist *psg;
+ void *paddr, *addr;
+ unsigned int i, len, left;
+ unsigned int offset = sg_off;
+
+ if (!sg)
+ return;
+
+ left = sectors * dev->prot_length;
+
+ for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
+ unsigned int psg_len, copied = 0;
+
+ paddr = kmap_atomic(sg_page(psg)) + psg->offset;
+ psg_len = min(left, psg->length);
+ while (psg_len) {
+ len = min(psg_len, sg->length - offset);
+ addr = kmap_atomic(sg_page(sg)) + sg->offset + offset;
+
+ if (read)
+ memcpy(paddr + copied, addr, len);
+ else
+ memcpy(addr, paddr + copied, len);
+
+ left -= len;
+ offset += len;
+ copied += len;
+ psg_len -= len;
+
+ kunmap_atomic(addr - sg->offset - offset);
+
+ if (offset >= sg->length) {
+ sg = sg_next(sg);
+ offset = 0;
+ }
+ }
+ kunmap_atomic(paddr - psg->offset);
+ }
+}
+EXPORT_SYMBOL(sbc_dif_copy_prot);
+
+sense_reason_t
+sbc_dif_verify(struct se_cmd *cmd, sector_t start, unsigned int sectors,
+ unsigned int ei_lba, struct scatterlist *psg, int psg_off)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct t10_pi_tuple *sdt;
+ struct scatterlist *dsg = cmd->t_data_sg;
+ sector_t sector = start;
+ void *daddr, *paddr;
+ int i;
+ sense_reason_t rc;
+ int dsg_off = 0;
+ unsigned int block_size = dev->dev_attrib.block_size;
+
+ for (; psg && sector < start + sectors; psg = sg_next(psg)) {
+ paddr = kmap_atomic(sg_page(psg)) + psg->offset;
+ daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
+
+ for (i = psg_off; i < psg->length &&
+ sector < start + sectors;
+ i += sizeof(*sdt)) {
+ __u16 crc;
+ unsigned int avail;
+
+ if (dsg_off >= dsg->length) {
+ dsg_off -= dsg->length;
+ kunmap_atomic(daddr - dsg->offset);
+ dsg = sg_next(dsg);
+ if (!dsg) {
+ kunmap_atomic(paddr - psg->offset);
+ return 0;
+ }
+ daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
+ }
+
+ sdt = paddr + i;
+
+ pr_debug("DIF READ sector: %llu guard_tag: 0x%04x"
+ " app_tag: 0x%04x ref_tag: %u\n",
+ (unsigned long long)sector, sdt->guard_tag,
+ sdt->app_tag, be32_to_cpu(sdt->ref_tag));
+
+ if (sdt->app_tag == T10_PI_APP_ESCAPE) {
+ dsg_off += block_size;
+ goto next;
+ }
+
+ avail = min(block_size, dsg->length - dsg_off);
+ crc = crc_t10dif(daddr + dsg_off, avail);
+ if (avail < block_size) {
+ kunmap_atomic(daddr - dsg->offset);
+ dsg = sg_next(dsg);
+ if (!dsg) {
+ kunmap_atomic(paddr - psg->offset);
+ return 0;
+ }
+ daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
+ dsg_off = block_size - avail;
+ crc = crc_t10dif_update(crc, daddr, dsg_off);
+ } else {
+ dsg_off += block_size;
+ }
+
+ rc = sbc_dif_v1_verify(cmd, sdt, crc, sector, ei_lba);
+ if (rc) {
+ kunmap_atomic(daddr - dsg->offset);
+ kunmap_atomic(paddr - psg->offset);
+ cmd->sense_info = sector;
+ return rc;
+ }
+next:
+ sector++;
+ ei_lba++;
+ }
+
+ psg_off = 0;
+ kunmap_atomic(daddr - dsg->offset);
+ kunmap_atomic(paddr - psg->offset);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(sbc_dif_verify);
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 4cb667d720a7..fe2b888bcb43 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -1,30 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* SCSI Primary Commands (SPC) parsing and emulation.
*
- * (c) Copyright 2002-2012 RisingTide Systems LLC.
+ * (c) Copyright 2002-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@kernel.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/kernel.h>
#include <linux/module.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
-#include <scsi/scsi.h>
+#include <scsi/scsi_proto.h>
+#include <scsi/scsi_common.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
@@ -35,12 +23,11 @@
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
+#include "target_core_xcopy.h"
-
-static void spc_fill_alua_data(struct se_port *port, unsigned char *buf)
+static void spc_fill_alua_data(struct se_lun *lun, unsigned char *buf)
{
struct t10_alua_tg_pt_gp *tg_pt_gp;
- struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
/*
* Set SCCS for MAINTENANCE_IN + REPORT_TARGET_PORT_GROUPS.
@@ -48,35 +35,52 @@ static void spc_fill_alua_data(struct se_port *port, unsigned char *buf)
buf[5] = 0x80;
/*
- * Set TPGS field for explict and/or implict ALUA access type
+ * Set TPGS field for explicit and/or implicit ALUA access type
 * and operation.
*
* See spc4r17 section 6.4.2 Table 135
*/
- if (!port)
- return;
- tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
- if (!tg_pt_gp_mem)
- return;
-
- spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
- tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+ rcu_read_lock();
+ tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp);
if (tg_pt_gp)
buf[5] |= tg_pt_gp->tg_pt_gp_alua_access_type;
- spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ rcu_read_unlock();
+}
+
+static u16
+spc_find_scsi_transport_vd(int proto_id)
+{
+ switch (proto_id) {
+ case SCSI_PROTOCOL_FCP:
+ return SCSI_VERSION_DESCRIPTOR_FCP4;
+ case SCSI_PROTOCOL_ISCSI:
+ return SCSI_VERSION_DESCRIPTOR_ISCSI;
+ case SCSI_PROTOCOL_SAS:
+ return SCSI_VERSION_DESCRIPTOR_SAS3;
+ case SCSI_PROTOCOL_SBP:
+ return SCSI_VERSION_DESCRIPTOR_SBP3;
+ case SCSI_PROTOCOL_SRP:
+ return SCSI_VERSION_DESCRIPTOR_SRP;
+ default:
+ pr_warn("Cannot find VERSION DESCRIPTOR value for unknown SCSI"
+ " transport PROTOCOL IDENTIFIER %#x\n", proto_id);
+ return 0;
+ }
}
sense_reason_t
spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
{
struct se_lun *lun = cmd->se_lun;
+ struct se_portal_group *tpg = lun->lun_tpg;
struct se_device *dev = cmd->se_dev;
+ struct se_session *sess = cmd->se_sess;
/* Set RMB (removable media) for tape devices */
if (dev->transport->get_device_type(dev) == TYPE_TAPE)
buf[1] = 0x80;
- buf[2] = 0x05; /* SPC-3 */
+ buf[2] = 0x06; /* SPC-4 */
/*
* NORMACA and HISUP = 0, RESPONSE DATA FORMAT = 2
@@ -93,14 +97,56 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
/*
* Enable SCCS and TPGS fields for Emulated ALUA
*/
- spc_fill_alua_data(lun->lun_sep, buf);
+ spc_fill_alua_data(lun, buf);
+
+ /*
+ * Set Third-Party Copy (3PC) bit to indicate support for EXTENDED_COPY
+ */
+ if (dev->dev_attrib.emulate_3pc)
+ buf[5] |= 0x8;
+ /*
+ * Set Protection (PROTECT) bit when DIF has been enabled on the
+ * device, and the fabric supports VERIFY + PASS. Also report
+ * PROTECT=1 if sess_prot_type has been configured to allow T10-PI
+ * to unprotected devices.
+ */
+ if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
+ if (dev->dev_attrib.pi_prot_type || cmd->se_sess->sess_prot_type)
+ buf[5] |= 0x1;
+ }
+
+ /*
+ * Set MULTIP bit to indicate presence of multiple SCSI target ports
+ */
+ if (dev->export_count > 1)
+ buf[6] |= 0x10;
buf[7] = 0x2; /* CmdQue=1 */
- snprintf(&buf[8], 8, "LIO-ORG");
- snprintf(&buf[16], 16, "%s", dev->t10_wwn.model);
- snprintf(&buf[32], 4, "%s", dev->t10_wwn.revision);
- buf[4] = 31; /* Set additional length to 31 */
+ /*
+ * ASCII data fields described as being left-aligned shall have any
+ * unused bytes at the end of the field (i.e., highest offset) and the
+ * unused bytes shall be filled with ASCII space characters (20h).
+ */
+ memset(&buf[8], 0x20,
+ INQUIRY_VENDOR_LEN + INQUIRY_MODEL_LEN + INQUIRY_REVISION_LEN);
+ memcpy(&buf[8], dev->t10_wwn.vendor,
+ strnlen(dev->t10_wwn.vendor, INQUIRY_VENDOR_LEN));
+ memcpy(&buf[16], dev->t10_wwn.model,
+ strnlen(dev->t10_wwn.model, INQUIRY_MODEL_LEN));
+ memcpy(&buf[32], dev->t10_wwn.revision,
+ strnlen(dev->t10_wwn.revision, INQUIRY_REVISION_LEN));
+
+ /*
+ * Set the VERSION DESCRIPTOR fields
+ */
+ put_unaligned_be16(SCSI_VERSION_DESCRIPTOR_SAM5, &buf[58]);
+ put_unaligned_be16(spc_find_scsi_transport_vd(tpg->proto_id), &buf[60]);
+ put_unaligned_be16(SCSI_VERSION_DESCRIPTOR_SPC4, &buf[62]);
+ if (cmd->se_dev->transport->get_device_type(dev) == TYPE_DISK)
+ put_unaligned_be16(SCSI_VERSION_DESCRIPTOR_SBC3, &buf[64]);
+
+ buf[4] = 91; /* Set additional length to 91 */
return 0;
}
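
The memset()-then-memcpy() pattern above implements the SPC rule quoted in the comment: left-aligned ASCII fields are space-padded on the right and never NUL-terminated. As a sketch, the idiom generalizes to a small helper (hypothetical, not in the patch):

#include <string.h>

/* Fill a left-aligned ASCII field: spaces first, then the string
 * without its NUL terminator, truncated to the field width. */
static void spc_pad_ascii_field(unsigned char *dst, size_t field_len,
				const char *src)
{
	memset(dst, 0x20, field_len);
	memcpy(dst, src, strnlen(src, field_len));
}

With it, the three copies above would reduce to spc_pad_ascii_field(&buf[8], INQUIRY_VENDOR_LEN, dev->t10_wwn.vendor) and friends.
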
@@ -111,29 +157,39 @@ static sense_reason_t
spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
{
struct se_device *dev = cmd->se_dev;
- u16 len = 0;
+ u16 len;
if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
- u32 unit_serial_len;
-
- unit_serial_len = strlen(dev->t10_wwn.unit_serial);
- unit_serial_len++; /* For NULL Terminator */
-
- len += sprintf(&buf[4], "%s", dev->t10_wwn.unit_serial);
+ len = sprintf(&buf[4], "%s", dev->t10_wwn.unit_serial);
len++; /* Extra Byte for NULL Terminator */
buf[3] = len;
}
return 0;
}
-static void spc_parse_naa_6h_vendor_specific(struct se_device *dev,
- unsigned char *buf)
+/*
+ * Generate NAA IEEE Registered Extended designator
+ */
+void spc_gen_naa_6h_vendor_specific(struct se_device *dev,
+ unsigned char *buf)
{
unsigned char *p = &dev->t10_wwn.unit_serial[0];
- int cnt;
+ u32 company_id = dev->t10_wwn.company_id;
+ int cnt, off = 0;
bool next = true;
/*
+ * Start NAA IEEE Registered Extended Identifier/Designator
+ */
+ buf[off] = 0x6 << 4;
+
+ /* IEEE COMPANY_ID */
+ buf[off++] |= (company_id >> 20) & 0xf;
+ buf[off++] = (company_id >> 12) & 0xff;
+ buf[off++] = (company_id >> 4) & 0xff;
+ buf[off] = (company_id & 0xf) << 4;
+
+ /*
* Generate up to 36 bits of VENDOR SPECIFIC IDENTIFIER starting on
* byte 3 bit 3-0 for NAA IEEE Registered Extended DESIGNATOR field
* format, followed by 64 bits of VENDOR SPECIFIC IDENTIFIER EXTENSION
@@ -141,7 +197,7 @@ static void spc_parse_naa_6h_vendor_specific(struct se_device *dev,
* NUMBER set via vpd_unit_serial in target_core_configfs.c to ensure
 * per device uniqueness.
*/
- for (cnt = 0; *p && cnt < 13; p++) {
+ for (cnt = off + 13; *p && off < cnt; p++) {
int val = hex_to_bin(*p);
if (val < 0)
@@ -149,10 +205,10 @@ static void spc_parse_naa_6h_vendor_specific(struct se_device *dev,
if (next) {
next = false;
- buf[cnt++] |= val;
+ buf[off++] |= val;
} else {
next = true;
- buf[cnt] = val << 4;
+ buf[off] = val << 4;
}
}
}
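
A worked example of the header packing in spc_gen_naa_6h_vendor_specific(): with company_id 0x001405 (the OpenFabrics OUI the old code hard-coded, removed in the hunk below), the 4-bit NAA field and the 24-bit IEEE COMPANY_ID straddle the nibble boundaries to yield 60 01 40 50, leaving the low nibble of byte 3 for the first vendor-specific hex digit. A user-space sketch:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t company_id = 0x001405;	/* example OUI */
	uint8_t buf[4] = { 0 };

	buf[0] = 0x6 << 4;			/* NAA = 6h */
	buf[0] |= (company_id >> 20) & 0xf;	/* OUI bits 23..20 */
	buf[1] = (company_id >> 12) & 0xff;	/* OUI bits 19..12 */
	buf[2] = (company_id >> 4) & 0xff;	/* OUI bits 11..4 */
	buf[3] = (company_id & 0xf) << 4;	/* OUI bits 3..0 */

	/* Prints: 60 01 40 50 */
	printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
	return 0;
}
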
@@ -166,14 +222,11 @@ spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
{
struct se_device *dev = cmd->se_dev;
struct se_lun *lun = cmd->se_lun;
- struct se_port *port = NULL;
struct se_portal_group *tpg = NULL;
struct t10_alua_lu_gp_member *lu_gp_mem;
struct t10_alua_tg_pt_gp *tg_pt_gp;
- struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
unsigned char *prod = &dev->t10_wwn.model[0];
- u32 prod_len;
- u32 unit_serial_len, off = 0;
+ u32 off = 0;
u16 len = 0, id_len;
off = 4;
@@ -202,24 +255,8 @@ spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
/* Identifier/Designator length */
buf[off++] = 0x10;
- /*
- * Start NAA IEEE Registered Extended Identifier/Designator
- */
- buf[off++] = (0x6 << 4);
-
- /*
- * Use OpenFabrics IEEE Company ID: 00 14 05
- */
- buf[off++] = 0x01;
- buf[off++] = 0x40;
- buf[off] = (0x5 << 4);
-
- /*
- * Return ConfigFS Unit Serial Number information for
- * VENDOR_SPECIFIC_IDENTIFIER and
- * VENDOR_SPECIFIC_IDENTIFIER_EXTENTION
- */
- spc_parse_naa_6h_vendor_specific(dev, &buf[off]);
+ /* NAA IEEE Registered Extended designator */
+ spc_gen_naa_6h_vendor_specific(dev, &buf[off]);
len = 20;
off = (len + 4);
@@ -229,22 +266,17 @@ check_t10_vend_desc:
* T10 Vendor Identifier Page, see spc4r17 section 7.7.3.4
*/
id_len = 8; /* For Vendor field */
- prod_len = 4; /* For VPD Header */
- prod_len += 8; /* For Vendor field */
- prod_len += strlen(prod);
- prod_len++; /* For : */
-
- if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
- unit_serial_len = strlen(&dev->t10_wwn.unit_serial[0]);
- unit_serial_len++; /* For NULL Terminator */
+ if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL)
id_len += sprintf(&buf[off+12], "%s:%s", prod,
&dev->t10_wwn.unit_serial[0]);
- }
buf[off] = 0x2; /* ASCII */
buf[off+1] = 0x1; /* T10 Vendor ID */
buf[off+2] = 0x0;
- memcpy(&buf[off+4], "LIO-ORG", 8);
+ /* left align Vendor ID and pad with spaces */
+ memset(&buf[off+4], 0x20, INQUIRY_VENDOR_LEN);
+ memcpy(&buf[off+4], dev->t10_wwn.vendor,
+ strnlen(dev->t10_wwn.vendor, INQUIRY_VENDOR_LEN));
/* Extra Byte for NULL Terminator */
id_len++;
/* Identifier Length */
@@ -252,18 +284,15 @@ check_t10_vend_desc:
/* Header size for Designation descriptor */
len += (id_len + 4);
off += (id_len + 4);
- /*
- * struct se_port is only set for INQUIRY VPD=1 through $FABRIC_MOD
- */
- port = lun->lun_sep;
- if (port) {
+
+ if (1) {
struct t10_alua_lu_gp *lu_gp;
- u32 padding, scsi_name_len;
+ u32 padding, scsi_name_len, scsi_target_len;
u16 lu_gp_id = 0;
u16 tg_pt_gp_id = 0;
u16 tpgt;
- tpg = port->sep_tpg;
+ tpg = lun->lun_tpg;
/*
 * Relative target port identifier, see spc4r17
* section 7.7.3.7
@@ -271,8 +300,7 @@ check_t10_vend_desc:
* Get the PROTOCOL IDENTIFIER as defined by spc4r17
* section 7.5.1 Table 362
*/
- buf[off] =
- (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
+ buf[off] = tpg->proto_id << 4;
buf[off++] |= 0x1; /* CODE SET == Binary */
buf[off] = 0x80; /* Set PIV=1 */
/* Set ASSOCIATION == target port: 01b */
@@ -284,8 +312,8 @@ check_t10_vend_desc:
/* Skip over Obsolete field in RTPI payload
* in Table 472 */
off += 2;
- buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
- buf[off++] = (port->sep_rtpi & 0xff);
+ put_unaligned_be16(lun->lun_tpg->tpg_rtpi, &buf[off]);
+ off += 2;
len += 8; /* Header size + Designation descriptor */
/*
* Target port group identifier, see spc4r17
@@ -294,21 +322,16 @@ check_t10_vend_desc:
* Get the PROTOCOL IDENTIFIER as defined by spc4r17
* section 7.5.1 Table 362
*/
- tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
- if (!tg_pt_gp_mem)
- goto check_lu_gp;
-
- spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
- tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
+ rcu_read_lock();
+ tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp);
if (!tg_pt_gp) {
- spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ rcu_read_unlock();
goto check_lu_gp;
}
tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id;
- spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
+ rcu_read_unlock();
- buf[off] =
- (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
+ buf[off] = tpg->proto_id << 4;
buf[off++] |= 0x1; /* CODE SET == Binary */
buf[off] = 0x80; /* Set PIV=1 */
/* Set ASSOCIATION == target port: 01b */
@@ -318,8 +341,8 @@ check_t10_vend_desc:
off++; /* Skip over Reserved */
buf[off++] = 4; /* DESIGNATOR LENGTH */
off += 2; /* Skip over Reserved Field */
- buf[off++] = ((tg_pt_gp_id >> 8) & 0xff);
- buf[off++] = (tg_pt_gp_id & 0xff);
+ put_unaligned_be16(tg_pt_gp_id, &buf[off]);
+ off += 2;
len += 8; /* Header size + Designation descriptor */
/*
* Logical Unit Group identifier, see spc4r17
@@ -345,8 +368,8 @@ check_lu_gp:
off++; /* Skip over Reserved */
buf[off++] = 4; /* DESIGNATOR LENGTH */
off += 2; /* Skip over Reserved Field */
- buf[off++] = ((lu_gp_id >> 8) & 0xff);
- buf[off++] = (lu_gp_id & 0xff);
+ put_unaligned_be16(lu_gp_id, &buf[off]);
+ off += 2;
len += 8; /* Header size + Designation descriptor */
/*
* SCSI name string designator, see spc4r17
@@ -356,18 +379,7 @@ check_lu_gp:
* section 7.5.1 Table 362
*/
check_scsi_name:
- scsi_name_len = strlen(tpg->se_tpg_tfo->tpg_get_wwn(tpg));
- /* UTF-8 ",t,0x<16-bit TPGT>" + NULL Terminator */
- scsi_name_len += 10;
- /* Check for 4-byte padding */
- padding = ((-scsi_name_len) & 3);
- if (padding != 0)
- scsi_name_len += padding;
- /* Header size + Designation descriptor */
- scsi_name_len += 4;
-
- buf[off] =
- (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
+ buf[off] = tpg->proto_id << 4;
buf[off++] |= 0x3; /* CODE SET == UTF-8 */
buf[off] = 0x80; /* Set PIV=1 */
/* Set ASSOCIATION == target port: 01b */
@@ -393,46 +405,101 @@ check_scsi_name:
* shall be no larger than 256 and shall be a multiple
* of four.
*/
+ padding = ((-scsi_name_len) & 3);
if (padding)
scsi_name_len += padding;
+ if (scsi_name_len > 256)
+ scsi_name_len = 256;
buf[off-1] = scsi_name_len;
off += scsi_name_len;
/* Header size + Designation descriptor */
len += (scsi_name_len + 4);
- }
- buf[2] = ((len >> 8) & 0xff);
- buf[3] = (len & 0xff); /* Page Length for VPD 0x83 */
- return 0;
-}
-EXPORT_SYMBOL(spc_emulate_evpd_83);
-static bool
-spc_check_dev_wce(struct se_device *dev)
-{
- bool wce = false;
+ /*
+ * Target device designator
+ */
+ buf[off] = tpg->proto_id << 4;
+ buf[off++] |= 0x3; /* CODE SET == UTF-8 */
+ buf[off] = 0x80; /* Set PIV=1 */
+ /* Set ASSOCIATION == target device: 10b */
+ buf[off] |= 0x20;
+ /* DESIGNATOR TYPE == SCSI name string */
+ buf[off++] |= 0x8;
+ off += 2; /* Skip over Reserved and length */
+ /*
+	 * SCSI name string identifier containing $FABRIC_MOD
+ * dependent information. For LIO-Target and iSCSI
+ * Target Port, this means "<iSCSI name>" in
+ * UTF-8 encoding.
+ */
+ scsi_target_len = sprintf(&buf[off], "%s",
+ tpg->se_tpg_tfo->tpg_get_wwn(tpg));
+ scsi_target_len += 1 /* Include NULL terminator */;
+ /*
+ * The null-terminated, null-padded (see 4.4.2) SCSI
+ * NAME STRING field contains a UTF-8 format string.
+ * The number of bytes in the SCSI NAME STRING field
+ * (i.e., the value in the DESIGNATOR LENGTH field)
+ * shall be no larger than 256 and shall be a multiple
+ * of four.
+ */
+ padding = ((-scsi_target_len) & 3);
+ if (padding)
+ scsi_target_len += padding;
+ if (scsi_target_len > 256)
+ scsi_target_len = 256;
- if (dev->transport->get_write_cache)
- wce = dev->transport->get_write_cache(dev);
- else if (dev->dev_attrib.emulate_write_cache > 0)
- wce = true;
+ buf[off-1] = scsi_target_len;
+ off += scsi_target_len;
- return wce;
+ /* Header size + Designation descriptor */
+ len += (scsi_target_len + 4);
+ }
+ put_unaligned_be16(len, &buf[2]); /* Page Length for VPD 0x83 */
+ return 0;
}
+EXPORT_SYMBOL(spc_emulate_evpd_83);
/* Extended INQUIRY Data VPD Page */
static sense_reason_t
spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
{
struct se_device *dev = cmd->se_dev;
+ struct se_session *sess = cmd->se_sess;
buf[3] = 0x3c;
+ /*
+ * Set GRD_CHK + REF_CHK for TYPE1 protection, or GRD_CHK
+ * only for TYPE3 protection.
+ */
+ if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
+ if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT ||
+ cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE1_PROT)
+ buf[4] = 0x5;
+ else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT ||
+ cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE3_PROT)
+ buf[4] = 0x4;
+ }
+
+ /* logical unit supports type 1 and type 3 protection */
+ if ((dev->transport->get_device_type(dev) == TYPE_DISK) &&
+ (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) &&
+ (dev->dev_attrib.pi_prot_type || cmd->se_sess->sess_prot_type)) {
+ buf[4] |= (0x3 << 3);
+ }
+
/* Set HEADSUP, ORDSUP, SIMPSUP */
buf[5] = 0x07;
/* If WriteCache emulation is enabled, set V_SUP */
- if (spc_check_dev_wce(dev))
+ if (target_check_wce(dev))
buf[6] = 0x01;
+ /* If an LBA map is present set R_SUP */
+ spin_lock(&cmd->se_dev->t10_alua.lba_map_lock);
+ if (!list_empty(&dev->t10_alua.lba_map_list))
+ buf[8] = 0x10;
+ spin_unlock(&cmd->se_dev->t10_alua.lba_map_lock);
return 0;
}
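
Byte 4 of the Extended INQUIRY data packs the protection flags set above: GRD_CHK is bit 2, APP_CHK bit 1, REF_CHK bit 0, and SPT occupies bits 5..3, where 011b advertises both type 1 and type 3 support -- hence the 0x5, 0x4 and (0x3 << 3) values. A small decode sketch (offsets per spc4r17):

#include <stdio.h>

static void decode_evpd_86_byte4(unsigned char b)
{
	unsigned int spt = (b >> 3) & 0x7;	/* supported protection types */

	printf("SPT=%u GRD_CHK=%u APP_CHK=%u REF_CHK=%u\n",
	       spt, (b >> 2) & 1, (b >> 1) & 1, b & 1);
}

decode_evpd_86_byte4(0x5) prints SPT=0 GRD_CHK=1 APP_CHK=0 REF_CHK=1, matching the TYPE1 case above.
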
@@ -441,8 +508,9 @@ static sense_reason_t
spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
{
struct se_device *dev = cmd->se_dev;
- u32 max_sectors;
- int have_tp = 0;
+ u32 mtl = 0;
+ int have_tp = 0, opt, min;
+ u32 io_max_blocks;
/*
* Following spc3r22 section 6.5.3 Block Limits VPD page, when
@@ -453,33 +521,50 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
have_tp = 1;
buf[0] = dev->transport->get_device_type(dev);
- buf[3] = have_tp ? 0x3c : 0x10;
/* Set WSNZ to 1 */
buf[4] = 0x01;
+ /*
+ * Set MAXIMUM COMPARE AND WRITE LENGTH
+ */
+ if (dev->dev_attrib.emulate_caw)
+ buf[5] = 0x01;
/*
* Set OPTIMAL TRANSFER LENGTH GRANULARITY
*/
- put_unaligned_be16(1, &buf[6]);
+ if (dev->transport->get_io_min && (min = dev->transport->get_io_min(dev)))
+ put_unaligned_be16(min / dev->dev_attrib.block_size, &buf[6]);
+ else
+ put_unaligned_be16(1, &buf[6]);
/*
* Set MAXIMUM TRANSFER LENGTH
+ *
+ * XXX: Currently assumes single PAGE_SIZE per scatterlist for fabrics
+ * enforcing maximum HW scatter-gather-list entry limit
*/
- max_sectors = min(dev->dev_attrib.fabric_max_sectors,
- dev->dev_attrib.hw_max_sectors);
- put_unaligned_be32(max_sectors, &buf[8]);
+ if (cmd->se_tfo->max_data_sg_nents) {
+ mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE) /
+ dev->dev_attrib.block_size;
+ }
+ io_max_blocks = mult_frac(dev->dev_attrib.hw_max_sectors,
+ dev->dev_attrib.hw_block_size,
+ dev->dev_attrib.block_size);
+ put_unaligned_be32(min_not_zero(mtl, io_max_blocks), &buf[8]);
/*
* Set OPTIMAL TRANSFER LENGTH
*/
- put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]);
+ if (dev->transport->get_io_opt && (opt = dev->transport->get_io_opt(dev)))
+ put_unaligned_be32(opt / dev->dev_attrib.block_size, &buf[12]);
+ else
+ put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]);
+
+ put_unaligned_be16(12, &buf[2]);
- /*
- * Exit now if we don't support TP.
- */
if (!have_tp)
- goto max_write_same;
+ goto try_atomic;
/*
* Set MAXIMUM UNMAP LBA COUNT
@@ -508,9 +593,29 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
/*
* MAXIMUM WRITE SAME LENGTH
*/
-max_write_same:
put_unaligned_be64(dev->dev_attrib.max_write_same_len, &buf[36]);
+ put_unaligned_be16(40, &buf[2]);
+
+try_atomic:
+ /*
+ * ATOMIC
+ */
+ if (!dev->dev_attrib.atomic_max_len)
+ goto done;
+
+ if (dev->dev_attrib.atomic_max_len < io_max_blocks)
+ put_unaligned_be32(dev->dev_attrib.atomic_max_len, &buf[44]);
+ else
+ put_unaligned_be32(io_max_blocks, &buf[44]);
+
+ put_unaligned_be32(dev->dev_attrib.atomic_alignment, &buf[48]);
+ put_unaligned_be32(dev->dev_attrib.atomic_granularity, &buf[52]);
+ put_unaligned_be32(dev->dev_attrib.atomic_max_with_boundary, &buf[56]);
+ put_unaligned_be32(dev->dev_attrib.atomic_max_boundary, &buf[60]);
+
+ put_unaligned_be16(60, &buf[2]);
+done:
return 0;
}
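
The MAXIMUM TRANSFER LENGTH logic above folds two limits together: the fabric's scatter-gather entry count converted to blocks (assuming one PAGE_SIZE per entry, per the XXX comment) and hw_max_sectors rescaled from hw_block_size to the exported block_size via mult_frac(); min_not_zero() then picks the smaller non-zero value. A plain-arithmetic user-space sketch with example numbers (4 KiB pages, 512-byte blocks, a hypothetical 168-entry fabric limit):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t page_size = 4096, block_size = 512, hw_block_size = 512;
	uint32_t max_data_sg_nents = 168;	/* example fabric limit */
	uint32_t hw_max_sectors = 16384;
	/* fabric SG limit, expressed in exported blocks */
	uint32_t mtl = max_data_sg_nents * page_size / block_size;
	/* mult_frac(hw_max_sectors, hw_block_size, block_size),
	 * expanded to avoid intermediate overflow */
	uint32_t io_max_blocks =
		hw_max_sectors / block_size * hw_block_size +
		hw_max_sectors % block_size * hw_block_size / block_size;
	/* min_not_zero(): ignore mtl if the fabric sets no SG limit */
	uint32_t mtl_final = mtl && mtl < io_max_blocks ? mtl : io_max_blocks;

	printf("mtl=%u io_max_blocks=%u -> %u blocks\n",
	       mtl, io_max_blocks, mtl_final);
	return 0;
}
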
@@ -574,7 +679,33 @@ spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
* support the use of the WRITE SAME (16) command to unmap LBAs.
*/
if (dev->dev_attrib.emulate_tpws != 0)
- buf[5] |= 0x40;
+ buf[5] |= 0x40 | 0x20;
+
+ /*
+ * The unmap_zeroes_data set means that the underlying device supports
+ * REQ_OP_DISCARD and has the discard_zeroes_data bit set. This
+ * satisfies the SBC requirements for LBPRZ, meaning that a subsequent
+ * read will return zeroes after an UNMAP or WRITE SAME (16) to an LBA.
+ * See sbc4r36 6.6.4.
+ */
+ if (((dev->dev_attrib.emulate_tpu != 0) ||
+ (dev->dev_attrib.emulate_tpws != 0)) &&
+ (dev->dev_attrib.unmap_zeroes_data != 0))
+ buf[5] |= 0x04;
+
+ return 0;
+}
+
+/* Referrals VPD page */
+static sense_reason_t
+spc_emulate_evpd_b3(struct se_cmd *cmd, unsigned char *buf)
+{
+ struct se_device *dev = cmd->se_dev;
+
+ buf[0] = dev->transport->get_device_type(dev);
+ buf[3] = 0x0c;
+ put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[8]);
+ put_unaligned_be32(dev->t10_alua.lba_map_segment_multiplier, &buf[12]);
return 0;
}
@@ -593,6 +724,7 @@ static struct {
{ .page = 0xb0, .emulate = spc_emulate_evpd_b0 },
{ .page = 0xb1, .emulate = spc_emulate_evpd_b1 },
{ .page = 0xb2, .emulate = spc_emulate_evpd_b2 },
+ { .page = 0xb3, .emulate = spc_emulate_evpd_b3 },
};
/* supported vital product data pages */
@@ -619,19 +751,20 @@ static sense_reason_t
spc_emulate_inquiry(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
- struct se_portal_group *tpg = cmd->se_lun->lun_sep->sep_tpg;
unsigned char *rbuf;
unsigned char *cdb = cmd->t_task_cdb;
- unsigned char buf[SE_INQUIRY_BUF];
+ unsigned char *buf;
sense_reason_t ret;
int p;
+ int len = 0;
- memset(buf, 0, SE_INQUIRY_BUF);
+ buf = kzalloc(SE_INQUIRY_BUF, GFP_KERNEL);
+ if (!buf) {
+ pr_err("Unable to allocate response buffer for INQUIRY\n");
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
- if (dev == tpg->tpg_virt_lun0.lun_se_dev)
- buf[0] = 0x3f; /* Not connected */
- else
- buf[0] = dev->transport->get_device_type(dev);
+ buf[0] = dev->transport->get_device_type(dev);
if (!(cdb[1] & 0x1)) {
if (cdb[2]) {
@@ -642,6 +775,7 @@ spc_emulate_inquiry(struct se_cmd *cmd)
}
ret = spc_emulate_inquiry_std(cmd, buf);
+ len = buf[4] + 5;
goto out;
}
@@ -649,26 +783,28 @@ spc_emulate_inquiry(struct se_cmd *cmd)
if (cdb[2] == evpd_handlers[p].page) {
buf[1] = cdb[2];
ret = evpd_handlers[p].emulate(cmd, buf);
+ len = get_unaligned_be16(&buf[2]) + 4;
goto out;
}
}
- pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
+ pr_debug("Unknown VPD Code: 0x%02x\n", cdb[2]);
ret = TCM_INVALID_CDB_FIELD;
out:
rbuf = transport_kmap_data_sg(cmd);
if (rbuf) {
- memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
+ memcpy(rbuf, buf, min_t(u32, SE_INQUIRY_BUF, cmd->data_length));
transport_kunmap_data_sg(cmd);
}
+ kfree(buf);
if (!ret)
- target_complete_cmd(cmd, GOOD);
+ target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, len);
return ret;
}
-static int spc_modesense_rwrecovery(struct se_device *dev, u8 pc, u8 *p)
+static int spc_modesense_rwrecovery(struct se_cmd *cmd, u8 pc, u8 *p)
{
p[0] = 0x01;
p[1] = 0x0a;
@@ -681,8 +817,11 @@ out:
return 12;
}
-static int spc_modesense_control(struct se_device *dev, u8 pc, u8 *p)
+static int spc_modesense_control(struct se_cmd *cmd, u8 pc, u8 *p)
{
+ struct se_device *dev = cmd->se_dev;
+ struct se_session *sess = cmd->se_sess;
+
p[0] = 0x0a;
p[1] = 0x0a;
@@ -690,7 +829,12 @@ static int spc_modesense_control(struct se_device *dev, u8 pc, u8 *p)
if (pc == 1)
goto out;
- p[2] = 2;
+ /* GLTSD: No implicit save of log parameters */
+ p[2] = (1 << 1);
+ if (target_sense_desc_format(dev))
+ /* D_SENSE: Descriptor format sense data for 64bit sectors */
+ p[2] |= (1 << 2);
+
/*
* From spc4r23, 7.4.7 Control mode page
*
@@ -749,8 +893,17 @@ static int spc_modesense_control(struct se_device *dev, u8 pc, u8 *p)
* for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless
* to the number of commands completed with one of those status codes.
*/
- p[4] = (dev->dev_attrib.emulate_ua_intlck_ctrl == 2) ? 0x30 :
- (dev->dev_attrib.emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00;
+ switch (dev->dev_attrib.emulate_ua_intlck_ctrl) {
+ case TARGET_UA_INTLCK_CTRL_ESTABLISH_UA:
+ p[4] = 0x30;
+ break;
+ case TARGET_UA_INTLCK_CTRL_NO_CLEAR:
+ p[4] = 0x20;
+ break;
+ default: /* TARGET_UA_INTLCK_CTRL_CLEAR */
+ p[4] = 0x00;
+ break;
+ }
/*
* From spc4r17, section 7.4.6 Control mode Page
*
@@ -764,6 +917,21 @@ static int spc_modesense_control(struct se_device *dev, u8 pc, u8 *p)
* status (see SAM-4).
*/
p[5] = (dev->dev_attrib.emulate_tas) ? 0x40 : 0x00;
+ /*
+ * From spc4r30, section 7.5.7 Control mode page
+ *
+ * Application Tag Owner (ATO) bit set to one.
+ *
+ * If the ATO bit is set to one the device server shall not modify the
+ * LOGICAL BLOCK APPLICATION TAG field and, depending on the protection
+ * type, shall not modify the contents of the LOGICAL BLOCK REFERENCE
+ * TAG field.
+ */
+ if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
+ if (dev->dev_attrib.pi_prot_type || sess->sess_prot_type)
+ p[5] |= 0x80;
+ }
+
p[8] = 0xff;
p[9] = 0xff;
p[11] = 30;
@@ -772,8 +940,10 @@ out:
return 12;
}
-static int spc_modesense_caching(struct se_device *dev, u8 pc, u8 *p)
+static int spc_modesense_caching(struct se_cmd *cmd, u8 pc, u8 *p)
{
+ struct se_device *dev = cmd->se_dev;
+
p[0] = 0x08;
p[1] = 0x12;
@@ -781,7 +951,7 @@ static int spc_modesense_caching(struct se_device *dev, u8 pc, u8 *p)
if (pc == 1)
goto out;
- if (spc_check_dev_wce(dev))
+ if (target_check_wce(dev))
p[2] = 0x04; /* Write Cache Enable */
p[12] = 0x20; /* Disabled Read Ahead */
@@ -789,7 +959,7 @@ out:
return 20;
}
-static int spc_modesense_informational_exceptions(struct se_device *dev, u8 pc, unsigned char *p)
+static int spc_modesense_informational_exceptions(struct se_cmd *cmd, u8 pc, unsigned char *p)
{
p[0] = 0x1c;
p[1] = 0x0a;
@@ -805,7 +975,7 @@ out:
static struct {
uint8_t page;
uint8_t subpage;
- int (*emulate)(struct se_device *, u8, unsigned char *);
+ int (*emulate)(struct se_cmd *, u8, unsigned char *);
} modesense_handlers[] = {
{ .page = 0x01, .subpage = 0x00, .emulate = spc_modesense_rwrecovery },
{ .page = 0x08, .subpage = 0x00, .emulate = spc_modesense_caching },
@@ -888,13 +1058,15 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
length = ten ? 3 : 2;
/* DEVICE-SPECIFIC PARAMETER */
- if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
- (cmd->se_deve &&
- (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
+ if (cmd->se_lun->lun_access_ro || target_lun_is_rdonly(cmd))
spc_modesense_write_protect(&buf[length], type);
- if ((spc_check_dev_wce(dev)) &&
- (dev->dev_attrib.emulate_fua_write > 0))
+ /*
+ * SBC only allows us to enable FUA and DPO together. Fortunately
+ * DPO is explicitly specified as a hint, so a noop is a perfectly
+ * valid implementation.
+ */
+ if (target_check_fua(dev))
spc_modesense_dpofua(&buf[length], type);
++length;
@@ -943,7 +1115,7 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
* the only two possibilities).
*/
if ((modesense_handlers[i].subpage & ~subpage) == 0) {
- ret = modesense_handlers[i].emulate(dev, pc, &buf[length]);
+ ret = modesense_handlers[i].emulate(cmd, pc, &buf[length]);
if (!ten && length + ret >= 255)
break;
length += ret;
@@ -956,7 +1128,7 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i)
if (modesense_handlers[i].page == page &&
modesense_handlers[i].subpage == subpage) {
- length += modesense_handlers[i].emulate(dev, pc, &buf[length]);
+ length += modesense_handlers[i].emulate(cmd, pc, &buf[length]);
goto set_length;
}
@@ -982,13 +1154,12 @@ set_length:
transport_kunmap_data_sg(cmd);
}
- target_complete_cmd(cmd, GOOD);
+ target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, length);
return 0;
}
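
The `ten ? 3 : 2` offset above comes from the two mode parameter header layouts in spc4r17: MODE SENSE(6) uses a 4-byte header with a one-byte MODE DATA LENGTH, MODE SENSE(10) an 8-byte header with a two-byte length, so the DEVICE-SPECIFIC PARAMETER byte lands at offset 2 and 3 respectively. Sketched as structs for illustration (not kernel definitions):

struct mode_hdr6 {
	unsigned char mode_data_length;		/* byte 0 */
	unsigned char medium_type;		/* byte 1 */
	unsigned char dev_specific;		/* byte 2: WP/DPOFUA */
	unsigned char block_desc_length;	/* byte 3 */
};

struct mode_hdr10 {
	unsigned char mode_data_length[2];	/* bytes 0..1, big-endian */
	unsigned char medium_type;		/* byte 2 */
	unsigned char dev_specific;		/* byte 3: WP/DPOFUA */
	unsigned char longlba;			/* byte 4, bit 0 */
	unsigned char reserved;			/* byte 5 */
	unsigned char block_desc_length[2];	/* bytes 6..7, big-endian */
};
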
static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd)
{
- struct se_device *dev = cmd->se_dev;
char *cdb = cmd->t_task_cdb;
bool ten = cdb[0] == MODE_SELECT_10;
int off = ten ? 8 : 4;
@@ -997,11 +1168,11 @@ static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd)
unsigned char *buf;
unsigned char tbuf[SE_MODE_PAGE_BUF];
int length;
- int ret = 0;
+ sense_reason_t ret = 0;
int i;
if (!cmd->data_length) {
- target_complete_cmd(cmd, GOOD);
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
return 0;
}
@@ -1024,7 +1195,7 @@ static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd)
if (modesense_handlers[i].page == page &&
modesense_handlers[i].subpage == subpage) {
memset(tbuf, 0, SE_MODE_PAGE_BUF);
- length = modesense_handlers[i].emulate(dev, 0, tbuf);
+ length = modesense_handlers[i].emulate(cmd, 0, tbuf);
goto check_contents;
}
@@ -1044,7 +1215,7 @@ out:
transport_kunmap_data_sg(cmd);
if (!ret)
- target_complete_cmd(cmd, GOOD);
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
return ret;
}
@@ -1054,6 +1225,7 @@ static sense_reason_t spc_emulate_request_sense(struct se_cmd *cmd)
unsigned char *rbuf;
u8 ua_asc = 0, ua_ascq = 0;
unsigned char buf[SE_SENSE_BUF];
+ bool desc_format = target_sense_desc_format(cmd->se_dev);
memset(buf, 0, SE_SENSE_BUF);
@@ -1067,37 +1239,16 @@ static sense_reason_t spc_emulate_request_sense(struct se_cmd *cmd)
if (!rbuf)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) {
- /*
- * CURRENT ERROR, UNIT ATTENTION
- */
- buf[0] = 0x70;
- buf[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
-
- /*
- * The Additional Sense Code (ASC) from the UNIT ATTENTION
- */
- buf[SPC_ASC_KEY_OFFSET] = ua_asc;
- buf[SPC_ASCQ_KEY_OFFSET] = ua_ascq;
- buf[7] = 0x0A;
- } else {
- /*
- * CURRENT ERROR, NO SENSE
- */
- buf[0] = 0x70;
- buf[SPC_SENSE_KEY_OFFSET] = NO_SENSE;
-
- /*
- * NO ADDITIONAL SENSE INFORMATION
- */
- buf[SPC_ASC_KEY_OFFSET] = 0x00;
- buf[7] = 0x0A;
- }
+ if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq))
+ scsi_build_sense_buffer(desc_format, buf, UNIT_ATTENTION,
+ ua_asc, ua_ascq);
+ else
+ scsi_build_sense_buffer(desc_format, buf, NO_SENSE, 0x0, 0x0);
memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
transport_kunmap_data_sg(cmd);
- target_complete_cmd(cmd, GOOD);
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
return 0;
}
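
scsi_build_sense_buffer() from scsi_common replaces the hand-rolled fixed-format code deleted above and handles both sense layouts. Roughly what it does, as a simplified sketch (see drivers/scsi/scsi_common.c for the real implementation):

static void build_sense(int desc, unsigned char *buf,
			unsigned char key, unsigned char asc,
			unsigned char ascq)
{
	if (desc) {
		buf[0] = 0x72;	/* descriptor format, current error */
		buf[1] = key;
		buf[2] = asc;
		buf[3] = ascq;
		buf[7] = 0;	/* no additional sense descriptors */
	} else {
		buf[0] = 0x70;	/* fixed format, current error */
		buf[2] = key;
		buf[7] = 0xa;	/* ADDITIONAL SENSE LENGTH */
		buf[12] = asc;
		buf[13] = ascq;
	}
}
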
@@ -1105,17 +1256,14 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
{
struct se_dev_entry *deve;
struct se_session *sess = cmd->se_sess;
+ struct se_node_acl *nacl;
+ struct scsi_lun slun;
unsigned char *buf;
- u32 lun_count = 0, offset = 8, i;
-
- if (cmd->data_length < 16) {
- pr_warn("REPORT LUNS allocation length %u too small\n",
- cmd->data_length);
- return TCM_INVALID_CDB_FIELD;
- }
+ u32 lun_count = 0, offset = 8;
+ __be32 len;
buf = transport_kmap_data_sg(cmd);
- if (!buf)
+ if (cmd->data_length && !buf)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
/*
@@ -1123,43 +1271,51 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
* coming via a target_core_mod PASSTHROUGH op, and not through
* a $FABRIC_MOD. In that case, report LUN=0 only.
*/
- if (!sess) {
- int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
- lun_count = 1;
+ if (!sess)
goto done;
- }
- spin_lock_irq(&sess->se_node_acl->device_list_lock);
- for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
- deve = sess->se_node_acl->device_list[i];
- if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
- continue;
+ nacl = sess->se_node_acl;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
/*
* We determine the correct LUN LIST LENGTH even once we
* have reached the initial allocation length.
* See SPC2-R20 7.19.
*/
lun_count++;
- if ((offset + 8) > cmd->data_length)
+ if (offset >= cmd->data_length)
continue;
- int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
+ int_to_scsilun(deve->mapped_lun, &slun);
+ memcpy(buf + offset, &slun,
+ min(8u, cmd->data_length - offset));
offset += 8;
}
- spin_unlock_irq(&sess->se_node_acl->device_list_lock);
+ rcu_read_unlock();
/*
* See SPC3 r07, page 159.
*/
done:
- lun_count *= 8;
- buf[0] = ((lun_count >> 24) & 0xff);
- buf[1] = ((lun_count >> 16) & 0xff);
- buf[2] = ((lun_count >> 8) & 0xff);
- buf[3] = (lun_count & 0xff);
- transport_kunmap_data_sg(cmd);
+ /*
+ * If no LUNs are accessible, report virtual LUN 0.
+ */
+ if (lun_count == 0) {
+ int_to_scsilun(0, &slun);
+ if (cmd->data_length > 8)
+ memcpy(buf + offset, &slun,
+ min(8u, cmd->data_length - offset));
+ lun_count = 1;
+ }
- target_complete_cmd(cmd, GOOD);
+ if (buf) {
+ len = cpu_to_be32(lun_count * 8);
+ memcpy(buf, &len, min_t(int, sizeof len, cmd->data_length));
+ transport_kunmap_data_sg(cmd);
+ }
+
+ target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, 8 + lun_count * 8);
return 0;
}
EXPORT_SYMBOL(spc_emulate_report_luns);
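
Each 8-byte entry written via int_to_scsilun() above is a SAM-5 LUN packed 16 bits per addressing level, most significant byte first within each level. A user-space sketch mirroring the kernel helper in drivers/scsi/scsi_common.c:

#include <string.h>
#include <stdint.h>

static void pack_scsilun(uint64_t lun, uint8_t out[8])
{
	int i;

	memset(out, 0, 8);
	for (i = 0; i < 8; i += 2) {
		/* one 16-bit addressing level per iteration */
		out[i] = (lun >> 8) & 0xff;
		out[i + 1] = lun & 0xff;
		lun >>= 16;
	}
}

For LUNs below 256 this degenerates to { 0x00, lun, 0, ... }, which is why the virtual LUN 0 entry reported above is all zeroes.
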
@@ -1167,10 +1323,986 @@ EXPORT_SYMBOL(spc_emulate_report_luns);
static sense_reason_t
spc_emulate_testunitready(struct se_cmd *cmd)
{
- target_complete_cmd(cmd, GOOD);
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
return 0;
}
+static void set_dpofua_usage_bits(u8 *usage_bits, struct se_device *dev)
+{
+ if (!target_check_fua(dev))
+ usage_bits[1] &= ~0x18;
+ else
+ usage_bits[1] |= 0x18;
+}
+
+static void set_dpofua_usage_bits32(u8 *usage_bits, struct se_device *dev)
+{
+ if (!target_check_fua(dev))
+ usage_bits[10] &= ~0x18;
+ else
+ usage_bits[10] |= 0x18;
+}
+
+static const struct target_opcode_descriptor tcm_opcode_read6 = {
+ .support = SCSI_SUPPORT_FULL,
+ .opcode = READ_6,
+ .cdb_size = 6,
+ .usage_bits = {READ_6, 0x1f, 0xff, 0xff,
+ 0xff, SCSI_CONTROL_MASK},
+};
+
+static const struct target_opcode_descriptor tcm_opcode_read10 = {
+ .support = SCSI_SUPPORT_FULL,
+ .opcode = READ_10,
+ .cdb_size = 10,
+ .usage_bits = {READ_10, 0xf8, 0xff, 0xff,
+ 0xff, 0xff, SCSI_GROUP_NUMBER_MASK, 0xff,
+ 0xff, SCSI_CONTROL_MASK},
+ .update_usage_bits = set_dpofua_usage_bits,
+};
+
+static const struct target_opcode_descriptor tcm_opcode_read12 = {
+ .support = SCSI_SUPPORT_FULL,
+ .opcode = READ_12,
+ .cdb_size = 12,
+ .usage_bits = {READ_12, 0xf8, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
+ .update_usage_bits = set_dpofua_usage_bits,
+};
+
+static const struct target_opcode_descriptor tcm_opcode_read16 = {
+ .support = SCSI_SUPPORT_FULL,
+ .opcode = READ_16,
+ .cdb_size = 16,
+ .usage_bits = {READ_16, 0xf8, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
+ .update_usage_bits = set_dpofua_usage_bits,
+};
+
+static const struct target_opcode_descriptor tcm_opcode_write6 = {
+ .support = SCSI_SUPPORT_FULL,
+ .opcode = WRITE_6,
+ .cdb_size = 6,
+ .usage_bits = {WRITE_6, 0x1f, 0xff, 0xff,
+ 0xff, SCSI_CONTROL_MASK},
+};
+
+static const struct target_opcode_descriptor tcm_opcode_write10 = {
+ .support = SCSI_SUPPORT_FULL,
+ .opcode = WRITE_10,
+ .cdb_size = 10,
+ .usage_bits = {WRITE_10, 0xf8, 0xff, 0xff,
+ 0xff, 0xff, SCSI_GROUP_NUMBER_MASK, 0xff,
+ 0xff, SCSI_CONTROL_MASK},
+ .update_usage_bits = set_dpofua_usage_bits,
+};
+
+static const struct target_opcode_descriptor tcm_opcode_write_verify10 = {
+ .support = SCSI_SUPPORT_FULL,
+ .opcode = WRITE_VERIFY,
+ .cdb_size = 10,
+ .usage_bits = {WRITE_VERIFY, 0xf0, 0xff, 0xff,
+ 0xff, 0xff, SCSI_GROUP_NUMBER_MASK, 0xff,
+ 0xff, SCSI_CONTROL_MASK},
+ .update_usage_bits = set_dpofua_usage_bits,
+};
+
+static const struct target_opcode_descriptor tcm_opcode_write12 = {
+ .support = SCSI_SUPPORT_FULL,
+ .opcode = WRITE_12,
+ .cdb_size = 12,
+ .usage_bits = {WRITE_12, 0xf8, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
+ .update_usage_bits = set_dpofua_usage_bits,
+};
+
+static const struct target_opcode_descriptor tcm_opcode_write16 = {
+ .support = SCSI_SUPPORT_FULL,
+ .opcode = WRITE_16,
+ .cdb_size = 16,
+ .usage_bits = {WRITE_16, 0xf8, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
+ .update_usage_bits = set_dpofua_usage_bits,
+};
+
+static const struct target_opcode_descriptor tcm_opcode_write_verify16 = {
+ .support = SCSI_SUPPORT_FULL,
+ .opcode = WRITE_VERIFY_16,
+ .cdb_size = 16,
+ .usage_bits = {WRITE_VERIFY_16, 0xf0, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
+ .update_usage_bits = set_dpofua_usage_bits,
+};
+
+static bool tcm_is_ws_enabled(const struct target_opcode_descriptor *descr,
+ struct se_cmd *cmd)
+{
+ struct exec_cmd_ops *ops = cmd->protocol_data;
+ struct se_device *dev = cmd->se_dev;
+
+ return (dev->dev_attrib.emulate_tpws && !!ops->execute_unmap) ||
+ !!ops->execute_write_same;
+}
+
+static const struct target_opcode_descriptor tcm_opcode_write_same32 = {
+ .support = SCSI_SUPPORT_FULL,
+ .serv_action_valid = 1,
+ .opcode = VARIABLE_LENGTH_CMD,
+ .service_action = WRITE_SAME_32,
+ .cdb_size = 32,
+ .usage_bits = {VARIABLE_LENGTH_CMD, SCSI_CONTROL_MASK, 0x00, 0x00,
+ 0x00, 0x00, SCSI_GROUP_NUMBER_MASK, 0x18,
+ 0x00, WRITE_SAME_32, 0xe8, 0x00,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff},
+ .enabled = tcm_is_ws_enabled,
+ .update_usage_bits = set_dpofua_usage_bits32,
+};
+
+static bool tcm_is_atomic_enabled(const struct target_opcode_descriptor *descr,
+ struct se_cmd *cmd)
+{
+ return cmd->se_dev->dev_attrib.atomic_max_len;
+}
+
+static const struct target_opcode_descriptor tcm_opcode_write_atomic16 = {
+ .support = SCSI_SUPPORT_FULL,
+ .opcode = WRITE_ATOMIC_16,
+ .cdb_size = 16,
+ .usage_bits = {WRITE_ATOMIC_16, 0xf8, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
+ .enabled = tcm_is_atomic_enabled,
+ .update_usage_bits = set_dpofua_usage_bits,
+};
+
+static bool tcm_is_caw_enabled(const struct target_opcode_descriptor *descr,
+ struct se_cmd *cmd)
+{
+ struct se_device *dev = cmd->se_dev;
+
+ return dev->dev_attrib.emulate_caw;
+}
+
+static const struct target_opcode_descriptor tcm_opcode_compare_write = {
+ .support = SCSI_SUPPORT_FULL,
+ .opcode = COMPARE_AND_WRITE,
+ .cdb_size = 16,
+ .usage_bits = {COMPARE_AND_WRITE, 0x18, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0x00, 0x00,
+ 0x00, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
+ .enabled = tcm_is_caw_enabled,
+ .update_usage_bits = set_dpofua_usage_bits,
+};
+
+static const struct target_opcode_descriptor tcm_opcode_read_capacity = {
+ .support = SCSI_SUPPORT_FULL,
+ .opcode = READ_CAPACITY,
+ .cdb_size = 10,
+ .usage_bits = {READ_CAPACITY, 0x00, 0xff, 0xff,
+ 0xff, 0xff, 0x00, 0x00,
+ 0x01, SCSI_CONTROL_MASK},
+};
+
+static const struct target_opcode_descriptor tcm_opcode_read_capacity16 = {
+ .support = SCSI_SUPPORT_FULL,
+ .serv_action_valid = 1,
+ .opcode = SERVICE_ACTION_IN_16,
+ .service_action = SAI_READ_CAPACITY_16,
+ .cdb_size = 16,
+ .usage_bits = {SERVICE_ACTION_IN_16, SAI_READ_CAPACITY_16, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xff, 0xff,
+ 0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
+};
+
+static bool tcm_is_rep_ref_enabled(const struct target_opcode_descriptor *descr,
+ struct se_cmd *cmd)
+{
+ struct se_device *dev = cmd->se_dev;
+
+ spin_lock(&dev->t10_alua.lba_map_lock);
+ if (list_empty(&dev->t10_alua.lba_map_list)) {
+ spin_unlock(&dev->t10_alua.lba_map_lock);
+ return false;
+ }
+ spin_unlock(&dev->t10_alua.lba_map_lock);
+ return true;
+}
+
+static const struct target_opcode_descriptor tcm_opcode_read_report_refferals = {
+ .support = SCSI_SUPPORT_FULL,
+ .serv_action_valid = 1,
+ .opcode = SERVICE_ACTION_IN_16,
+ .service_action = SAI_REPORT_REFERRALS,
+ .cdb_size = 16,
+ .usage_bits = {SERVICE_ACTION_IN_16, SAI_REPORT_REFERRALS, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xff, 0xff,
+ 0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
+ .enabled = tcm_is_rep_ref_enabled,
+};
+
+static const struct target_opcode_descriptor tcm_opcode_sync_cache = {
+ .support = SCSI_SUPPORT_FULL,
+ .opcode = SYNCHRONIZE_CACHE,
+ .cdb_size = 10,
+ .usage_bits = {SYNCHRONIZE_CACHE, 0x02, 0xff, 0xff,
+ 0xff, 0xff, SCSI_GROUP_NUMBER_MASK, 0xff,
+ 0xff, SCSI_CONTROL_MASK},
+};
+
+static const struct target_opcode_descriptor tcm_opcode_sync_cache16 = {
+ .support = SCSI_SUPPORT_FULL,
+ .opcode = SYNCHRONIZE_CACHE_16,
+ .cdb_size = 16,
+ .usage_bits = {SYNCHRONIZE_CACHE_16, 0x02, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
+};
+
+static bool tcm_is_unmap_enabled(const struct target_opcode_descriptor *descr,
+ struct se_cmd *cmd)
+{
+ struct exec_cmd_ops *ops = cmd->protocol_data;
+ struct se_device *dev = cmd->se_dev;
+
+ return ops->execute_unmap && dev->dev_attrib.emulate_tpu;
+}
+
+static const struct target_opcode_descriptor tcm_opcode_unmap = {
+ .support = SCSI_SUPPORT_FULL,
+ .opcode = UNMAP,
+ .cdb_size = 10,
+ .usage_bits = {UNMAP, 0x00, 0x00, 0x00,
+ 0x00, 0x00, SCSI_GROUP_NUMBER_MASK, 0xff,
+ 0xff, SCSI_CONTROL_MASK},
+ .enabled = tcm_is_unmap_enabled,
+};
+
+static const struct target_opcode_descriptor tcm_opcode_write_same = {
+ .support = SCSI_SUPPORT_FULL,
+ .opcode = WRITE_SAME,
+ .cdb_size = 10,
+ .usage_bits = {WRITE_SAME, 0xe8, 0xff, 0xff,
+ 0xff, 0xff, SCSI_GROUP_NUMBER_MASK, 0xff,
+ 0xff, SCSI_CONTROL_MASK},
+ .enabled = tcm_is_ws_enabled,
+};
+
+static const struct target_opcode_descriptor tcm_opcode_write_same16 = {
+ .support = SCSI_SUPPORT_FULL,
+ .opcode = WRITE_SAME_16,
+ .cdb_size = 16,
+ .usage_bits = {WRITE_SAME_16, 0xe8, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
+ .enabled = tcm_is_ws_enabled,
+};
+
+static const struct target_opcode_descriptor tcm_opcode_verify = {
+ .support = SCSI_SUPPORT_FULL,
+ .opcode = VERIFY,
+ .cdb_size = 10,
+ .usage_bits = {VERIFY, 0x00, 0xff, 0xff,
+ 0xff, 0xff, SCSI_GROUP_NUMBER_MASK, 0xff,
+ 0xff, SCSI_CONTROL_MASK},
+};
+
+static const struct target_opcode_descriptor tcm_opcode_verify16 = {
+ .support = SCSI_SUPPORT_FULL,
+ .opcode = VERIFY_16,
+ .cdb_size = 16,
+ .usage_bits = {VERIFY_16, 0x00, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
+};
+
+static const struct target_opcode_descriptor tcm_opcode_start_stop = {
+ .support = SCSI_SUPPORT_FULL,
+ .opcode = START_STOP,
+ .cdb_size = 6,
+ .usage_bits = {START_STOP, 0x01, 0x00, 0x00,
+ 0x01, SCSI_CONTROL_MASK},
+};
+
+static const struct target_opcode_descriptor tcm_opcode_mode_select = {
+ .support = SCSI_SUPPORT_FULL,
+ .opcode = MODE_SELECT,
+ .cdb_size = 6,
+ .usage_bits = {MODE_SELECT, 0x10, 0x00, 0x00,
+ 0xff, SCSI_CONTROL_MASK},
+};
+
+static const struct target_opcode_descriptor tcm_opcode_mode_select10 = {
+ .support = SCSI_SUPPORT_FULL,
+ .opcode = MODE_SELECT_10,
+ .cdb_size = 10,
+ .usage_bits = {MODE_SELECT_10, 0x10, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0xff,
+ 0xff, SCSI_CONTROL_MASK},
+};
+
+static const struct target_opcode_descriptor tcm_opcode_mode_sense = {
+ .support = SCSI_SUPPORT_FULL,
+ .opcode = MODE_SENSE,
+ .cdb_size = 6,
+ .usage_bits = {MODE_SENSE, 0x08, 0xff, 0xff,
+ 0xff, SCSI_CONTROL_MASK},
+};
+
+static const struct target_opcode_descriptor tcm_opcode_mode_sense10 = {
+ .support = SCSI_SUPPORT_FULL,
+ .opcode = MODE_SENSE_10,
+ .cdb_size = 10,
+ .usage_bits = {MODE_SENSE_10, 0x18, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0xff,
+ 0xff, SCSI_CONTROL_MASK},
+};
+
+static const struct target_opcode_descriptor tcm_opcode_pri_read_keys = {
+ .support = SCSI_SUPPORT_FULL,
+ .serv_action_valid = 1,
+ .opcode = PERSISTENT_RESERVE_IN,
+ .service_action = PRI_READ_KEYS,
+ .cdb_size = 10,
+ .usage_bits = {PERSISTENT_RESERVE_IN, PRI_READ_KEYS, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0xff,
+ 0xff, SCSI_CONTROL_MASK},
+};
+
+static const struct target_opcode_descriptor tcm_opcode_pri_read_resrv = {
+ .support = SCSI_SUPPORT_FULL,
+ .serv_action_valid = 1,
+ .opcode = PERSISTENT_RESERVE_IN,
+ .service_action = PRI_READ_RESERVATION,
+ .cdb_size = 10,
+ .usage_bits = {PERSISTENT_RESERVE_IN, PRI_READ_RESERVATION, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0xff,
+ 0xff, SCSI_CONTROL_MASK},
+};
+
+static bool tcm_is_pr_enabled(const struct target_opcode_descriptor *descr,
+ struct se_cmd *cmd)
+{
+ struct se_device *dev = cmd->se_dev;
+
+ if (!dev->dev_attrib.emulate_pr)
+ return false;
+
+ if (!(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
+ return true;
+
+ switch (descr->opcode) {
+ case RESERVE_6:
+ case RESERVE_10:
+ case RELEASE_6:
+ case RELEASE_10:
+ /*
+ * The pr_ops which are used by the backend modules don't
+ * support these commands.
+ */
+ return false;
+ case PERSISTENT_RESERVE_OUT:
+ switch (descr->service_action) {
+ case PRO_REGISTER_AND_MOVE:
+ case PRO_REPLACE_LOST_RESERVATION:
+ /*
+ * The backend modules don't have access to ports and
+		 * I_T nexuses so they can't handle these types of
+ * requests.
+ */
+ return false;
+ }
+ break;
+ case PERSISTENT_RESERVE_IN:
+ if (descr->service_action == PRI_READ_FULL_STATUS)
+ return false;
+ break;
+ }
+
+ return true;
+}
+
+static const struct target_opcode_descriptor tcm_opcode_pri_read_caps = {
+ .support = SCSI_SUPPORT_FULL,
+ .serv_action_valid = 1,
+ .opcode = PERSISTENT_RESERVE_IN,
+ .service_action = PRI_REPORT_CAPABILITIES,
+ .cdb_size = 10,
+ .usage_bits = {PERSISTENT_RESERVE_IN, PRI_REPORT_CAPABILITIES, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0xff,
+ 0xff, SCSI_CONTROL_MASK},
+ .enabled = tcm_is_pr_enabled,
+};
+
+static const struct target_opcode_descriptor tcm_opcode_pri_read_full_status = {
+ .support = SCSI_SUPPORT_FULL,
+ .serv_action_valid = 1,
+ .opcode = PERSISTENT_RESERVE_IN,
+ .service_action = PRI_READ_FULL_STATUS,
+ .cdb_size = 10,
+ .usage_bits = {PERSISTENT_RESERVE_IN, PRI_READ_FULL_STATUS, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0xff,
+ 0xff, SCSI_CONTROL_MASK},
+ .enabled = tcm_is_pr_enabled,
+};
+
+static const struct target_opcode_descriptor tcm_opcode_pro_register = {
+ .support = SCSI_SUPPORT_FULL,
+ .serv_action_valid = 1,
+ .opcode = PERSISTENT_RESERVE_OUT,
+ .service_action = PRO_REGISTER,
+ .cdb_size = 10,
+ .usage_bits = {PERSISTENT_RESERVE_OUT, PRO_REGISTER, 0xff, 0x00,
+ 0x00, 0xff, 0xff, 0xff,
+ 0xff, SCSI_CONTROL_MASK},
+ .enabled = tcm_is_pr_enabled,
+};
+
+static const struct target_opcode_descriptor tcm_opcode_pro_reserve = {
+ .support = SCSI_SUPPORT_FULL,
+ .serv_action_valid = 1,
+ .opcode = PERSISTENT_RESERVE_OUT,
+ .service_action = PRO_RESERVE,
+ .cdb_size = 10,
+ .usage_bits = {PERSISTENT_RESERVE_OUT, PRO_RESERVE, 0xff, 0x00,
+ 0x00, 0xff, 0xff, 0xff,
+ 0xff, SCSI_CONTROL_MASK},
+ .enabled = tcm_is_pr_enabled,
+};
+
+static const struct target_opcode_descriptor tcm_opcode_pro_release = {
+ .support = SCSI_SUPPORT_FULL,
+ .serv_action_valid = 1,
+ .opcode = PERSISTENT_RESERVE_OUT,
+ .service_action = PRO_RELEASE,
+ .cdb_size = 10,
+ .usage_bits = {PERSISTENT_RESERVE_OUT, PRO_RELEASE, 0xff, 0x00,
+ 0x00, 0xff, 0xff, 0xff,
+ 0xff, SCSI_CONTROL_MASK},
+ .enabled = tcm_is_pr_enabled,
+};
+
+static const struct target_opcode_descriptor tcm_opcode_pro_clear = {
+ .support = SCSI_SUPPORT_FULL,
+ .serv_action_valid = 1,
+ .opcode = PERSISTENT_RESERVE_OUT,
+ .service_action = PRO_CLEAR,
+ .cdb_size = 10,
+ .usage_bits = {PERSISTENT_RESERVE_OUT, PRO_CLEAR, 0xff, 0x00,
+ 0x00, 0xff, 0xff, 0xff,
+ 0xff, SCSI_CONTROL_MASK},
+ .enabled = tcm_is_pr_enabled,
+};
+
+static const struct target_opcode_descriptor tcm_opcode_pro_preempt = {
+ .support = SCSI_SUPPORT_FULL,
+ .serv_action_valid = 1,
+ .opcode = PERSISTENT_RESERVE_OUT,
+ .service_action = PRO_PREEMPT,
+ .cdb_size = 10,
+ .usage_bits = {PERSISTENT_RESERVE_OUT, PRO_PREEMPT, 0xff, 0x00,
+ 0x00, 0xff, 0xff, 0xff,
+ 0xff, SCSI_CONTROL_MASK},
+ .enabled = tcm_is_pr_enabled,
+};
+
+static const struct target_opcode_descriptor tcm_opcode_pro_preempt_abort = {
+ .support = SCSI_SUPPORT_FULL,
+ .serv_action_valid = 1,
+ .opcode = PERSISTENT_RESERVE_OUT,
+ .service_action = PRO_PREEMPT_AND_ABORT,
+ .cdb_size = 10,
+ .usage_bits = {PERSISTENT_RESERVE_OUT, PRO_PREEMPT_AND_ABORT, 0xff, 0x00,
+ 0x00, 0xff, 0xff, 0xff,
+ 0xff, SCSI_CONTROL_MASK},
+ .enabled = tcm_is_pr_enabled,
+};
+
+static const struct target_opcode_descriptor tcm_opcode_pro_reg_ign_exist = {
+ .support = SCSI_SUPPORT_FULL,
+ .serv_action_valid = 1,
+ .opcode = PERSISTENT_RESERVE_OUT,
+ .service_action = PRO_REGISTER_AND_IGNORE_EXISTING_KEY,
+ .cdb_size = 10,
+ .usage_bits = {
+ PERSISTENT_RESERVE_OUT, PRO_REGISTER_AND_IGNORE_EXISTING_KEY,
+ 0xff, 0x00,
+ 0x00, 0xff, 0xff, 0xff,
+ 0xff, SCSI_CONTROL_MASK},
+ .enabled = tcm_is_pr_enabled,
+};
+
+static const struct target_opcode_descriptor tcm_opcode_pro_register_move = {
+ .support = SCSI_SUPPORT_FULL,
+ .serv_action_valid = 1,
+ .opcode = PERSISTENT_RESERVE_OUT,
+ .service_action = PRO_REGISTER_AND_MOVE,
+ .cdb_size = 10,
+ .usage_bits = {PERSISTENT_RESERVE_OUT, PRO_REGISTER_AND_MOVE, 0xff, 0x00,
+ 0x00, 0xff, 0xff, 0xff,
+ 0xff, SCSI_CONTROL_MASK},
+ .enabled = tcm_is_pr_enabled,
+};
+
+static const struct target_opcode_descriptor tcm_opcode_release = {
+ .support = SCSI_SUPPORT_FULL,
+ .opcode = RELEASE_6,
+ .cdb_size = 6,
+ .usage_bits = {RELEASE_6, 0x00, 0x00, 0x00,
+ 0x00, SCSI_CONTROL_MASK},
+ .enabled = tcm_is_pr_enabled,
+};
+
+static const struct target_opcode_descriptor tcm_opcode_release10 = {
+ .support = SCSI_SUPPORT_FULL,
+ .opcode = RELEASE_10,
+ .cdb_size = 10,
+ .usage_bits = {RELEASE_10, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0xff,
+ 0xff, SCSI_CONTROL_MASK},
+ .enabled = tcm_is_pr_enabled,
+};
+
+static const struct target_opcode_descriptor tcm_opcode_reserve = {
+ .support = SCSI_SUPPORT_FULL,
+ .opcode = RESERVE_6,
+ .cdb_size = 6,
+ .usage_bits = {RESERVE_6, 0x00, 0x00, 0x00,
+ 0x00, SCSI_CONTROL_MASK},
+ .enabled = tcm_is_pr_enabled,
+};
+
+static const struct target_opcode_descriptor tcm_opcode_reserve10 = {
+ .support = SCSI_SUPPORT_FULL,
+ .opcode = RESERVE_10,
+ .cdb_size = 10,
+ .usage_bits = {RESERVE_10, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0xff,
+ 0xff, SCSI_CONTROL_MASK},
+ .enabled = tcm_is_pr_enabled,
+};
+
+static const struct target_opcode_descriptor tcm_opcode_request_sense = {
+ .support = SCSI_SUPPORT_FULL,
+ .opcode = REQUEST_SENSE,
+ .cdb_size = 6,
+ .usage_bits = {REQUEST_SENSE, 0x00, 0x00, 0x00,
+ 0xff, SCSI_CONTROL_MASK},
+};
+
+static const struct target_opcode_descriptor tcm_opcode_inquiry = {
+ .support = SCSI_SUPPORT_FULL,
+ .opcode = INQUIRY,
+ .cdb_size = 6,
+ .usage_bits = {INQUIRY, 0x01, 0xff, 0xff,
+ 0xff, SCSI_CONTROL_MASK},
+};
+
+static bool tcm_is_3pc_enabled(const struct target_opcode_descriptor *descr,
+ struct se_cmd *cmd)
+{
+ struct se_device *dev = cmd->se_dev;
+
+ return dev->dev_attrib.emulate_3pc;
+}
+
+static const struct target_opcode_descriptor tcm_opcode_extended_copy_lid1 = {
+ .support = SCSI_SUPPORT_FULL,
+ .serv_action_valid = 1,
+ .opcode = EXTENDED_COPY,
+ .cdb_size = 16,
+ .usage_bits = {EXTENDED_COPY, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xff, 0xff,
+ 0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
+ .enabled = tcm_is_3pc_enabled,
+};
+
+static const struct target_opcode_descriptor tcm_opcode_rcv_copy_res_op_params = {
+ .support = SCSI_SUPPORT_FULL,
+ .serv_action_valid = 1,
+ .opcode = RECEIVE_COPY_RESULTS,
+ .service_action = RCR_SA_OPERATING_PARAMETERS,
+ .cdb_size = 16,
+ .usage_bits = {RECEIVE_COPY_RESULTS, RCR_SA_OPERATING_PARAMETERS,
+ 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xff, 0xff,
+ 0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
+ .enabled = tcm_is_3pc_enabled,
+};
+
+static const struct target_opcode_descriptor tcm_opcode_report_luns = {
+ .support = SCSI_SUPPORT_FULL,
+ .opcode = REPORT_LUNS,
+ .cdb_size = 12,
+ .usage_bits = {REPORT_LUNS, 0x00, 0xff, 0x00,
+ 0x00, 0x00, 0xff, 0xff,
+ 0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
+};
+
+static const struct target_opcode_descriptor tcm_opcode_test_unit_ready = {
+ .support = SCSI_SUPPORT_FULL,
+ .opcode = TEST_UNIT_READY,
+ .cdb_size = 6,
+ .usage_bits = {TEST_UNIT_READY, 0x00, 0x00, 0x00,
+ 0x00, SCSI_CONTROL_MASK},
+};
+
+static const struct target_opcode_descriptor tcm_opcode_report_target_pgs = {
+ .support = SCSI_SUPPORT_FULL,
+ .serv_action_valid = 1,
+ .opcode = MAINTENANCE_IN,
+ .service_action = MI_REPORT_TARGET_PGS,
+ .cdb_size = 12,
+ .usage_bits = {MAINTENANCE_IN, 0xE0 | MI_REPORT_TARGET_PGS, 0x00, 0x00,
+ 0x00, 0x00, 0xff, 0xff,
+ 0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
+};
+
+static bool spc_rsoc_enabled(const struct target_opcode_descriptor *descr,
+ struct se_cmd *cmd)
+{
+ struct se_device *dev = cmd->se_dev;
+
+ return dev->dev_attrib.emulate_rsoc;
+}
+
+static const struct target_opcode_descriptor tcm_opcode_report_supp_opcodes = {
+ .support = SCSI_SUPPORT_FULL,
+ .serv_action_valid = 1,
+ .opcode = MAINTENANCE_IN,
+ .service_action = MI_REPORT_SUPPORTED_OPERATION_CODES,
+ .cdb_size = 12,
+ .usage_bits = {MAINTENANCE_IN, MI_REPORT_SUPPORTED_OPERATION_CODES,
+ 0x87, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
+ .enabled = spc_rsoc_enabled,
+};
+
+static bool tcm_is_set_tpg_enabled(const struct target_opcode_descriptor *descr,
+ struct se_cmd *cmd)
+{
+ struct t10_alua_tg_pt_gp *l_tg_pt_gp;
+ struct se_lun *l_lun = cmd->se_lun;
+
+ rcu_read_lock();
+ l_tg_pt_gp = rcu_dereference(l_lun->lun_tg_pt_gp);
+ if (!l_tg_pt_gp) {
+ rcu_read_unlock();
+ return false;
+ }
+ if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)) {
+ rcu_read_unlock();
+ return false;
+ }
+ rcu_read_unlock();
+
+ return true;
+}
+
+static const struct target_opcode_descriptor tcm_opcode_set_tpg = {
+ .support = SCSI_SUPPORT_FULL,
+ .serv_action_valid = 1,
+ .opcode = MAINTENANCE_OUT,
+ .service_action = MO_SET_TARGET_PGS,
+ .cdb_size = 12,
+ .usage_bits = {MAINTENANCE_OUT, MO_SET_TARGET_PGS, 0x00, 0x00,
+ 0x00, 0x00, 0xff, 0xff,
+ 0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
+ .enabled = tcm_is_set_tpg_enabled,
+};
+
+static const struct target_opcode_descriptor *tcm_supported_opcodes[] = {
+ &tcm_opcode_read6,
+ &tcm_opcode_read10,
+ &tcm_opcode_read12,
+ &tcm_opcode_read16,
+ &tcm_opcode_write6,
+ &tcm_opcode_write10,
+ &tcm_opcode_write_verify10,
+ &tcm_opcode_write12,
+ &tcm_opcode_write16,
+ &tcm_opcode_write_verify16,
+ &tcm_opcode_write_same32,
+ &tcm_opcode_write_atomic16,
+ &tcm_opcode_compare_write,
+ &tcm_opcode_read_capacity,
+ &tcm_opcode_read_capacity16,
+ &tcm_opcode_read_report_refferals,
+ &tcm_opcode_sync_cache,
+ &tcm_opcode_sync_cache16,
+ &tcm_opcode_unmap,
+ &tcm_opcode_write_same,
+ &tcm_opcode_write_same16,
+ &tcm_opcode_verify,
+ &tcm_opcode_verify16,
+ &tcm_opcode_start_stop,
+ &tcm_opcode_mode_select,
+ &tcm_opcode_mode_select10,
+ &tcm_opcode_mode_sense,
+ &tcm_opcode_mode_sense10,
+ &tcm_opcode_pri_read_keys,
+ &tcm_opcode_pri_read_resrv,
+ &tcm_opcode_pri_read_caps,
+ &tcm_opcode_pri_read_full_status,
+ &tcm_opcode_pro_register,
+ &tcm_opcode_pro_reserve,
+ &tcm_opcode_pro_release,
+ &tcm_opcode_pro_clear,
+ &tcm_opcode_pro_preempt,
+ &tcm_opcode_pro_preempt_abort,
+ &tcm_opcode_pro_reg_ign_exist,
+ &tcm_opcode_pro_register_move,
+ &tcm_opcode_release,
+ &tcm_opcode_release10,
+ &tcm_opcode_reserve,
+ &tcm_opcode_reserve10,
+ &tcm_opcode_request_sense,
+ &tcm_opcode_inquiry,
+ &tcm_opcode_extended_copy_lid1,
+ &tcm_opcode_rcv_copy_res_op_params,
+ &tcm_opcode_report_luns,
+ &tcm_opcode_test_unit_ready,
+ &tcm_opcode_report_target_pgs,
+ &tcm_opcode_report_supp_opcodes,
+ &tcm_opcode_set_tpg,
+};
+
+static int
+spc_rsoc_encode_command_timeouts_descriptor(unsigned char *buf, u8 ctdp,
+ const struct target_opcode_descriptor *descr)
+{
+ if (!ctdp)
+ return 0;
+
+ put_unaligned_be16(0xa, buf);
+ buf[3] = descr->specific_timeout;
+ put_unaligned_be32(descr->nominal_timeout, &buf[4]);
+ put_unaligned_be32(descr->recommended_timeout, &buf[8]);
+
+ return 12;
+}
+
+static int
+spc_rsoc_encode_command_descriptor(unsigned char *buf, u8 ctdp,
+ const struct target_opcode_descriptor *descr)
+{
+ int td_size = 0;
+
+ buf[0] = descr->opcode;
+
+ put_unaligned_be16(descr->service_action, &buf[2]);
+
+ buf[5] = (ctdp << 1) | descr->serv_action_valid;
+ put_unaligned_be16(descr->cdb_size, &buf[6]);
+
+ td_size = spc_rsoc_encode_command_timeouts_descriptor(&buf[8], ctdp,
+ descr);
+
+ return 8 + td_size;
+}
+
+static int
+spc_rsoc_encode_one_command_descriptor(unsigned char *buf, u8 ctdp,
+ const struct target_opcode_descriptor *descr,
+ struct se_device *dev)
+{
+ int td_size = 0;
+
+ if (!descr) {
+ buf[1] = (ctdp << 7) | SCSI_SUPPORT_NOT_SUPPORTED;
+ return 2;
+ }
+
+ buf[1] = (ctdp << 7) | SCSI_SUPPORT_FULL;
+ put_unaligned_be16(descr->cdb_size, &buf[2]);
+ memcpy(&buf[4], descr->usage_bits, descr->cdb_size);
+ if (descr->update_usage_bits)
+ descr->update_usage_bits(&buf[4], dev);
+
+ td_size = spc_rsoc_encode_command_timeouts_descriptor(
+ &buf[4 + descr->cdb_size], ctdp, descr);
+
+ return 4 + descr->cdb_size + td_size;
+}
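
The one_command parameter data emitted above consists of byte 1 carrying CTDP in bit 7 and SUPPORT in bits 2..0, CDB SIZE in bytes 2..3, the CDB USAGE DATA, then the optional 12-byte command timeouts descriptor. A decode sketch (offsets per the SPC-4 one_command format; assumes the buffer holds the 4-byte header plus the advertised usage data):

#include <stdio.h>
#include <stdint.h>

static void dump_one_command(const uint8_t *buf)
{
	unsigned int ctdp = (buf[1] >> 7) & 1;
	unsigned int support = buf[1] & 0x7;
	unsigned int cdb_size = (buf[2] << 8) | buf[3];
	unsigned int i;

	printf("ctdp=%u support=%u cdb_size=%u usage:", ctdp, support,
	       cdb_size);
	for (i = 0; i < cdb_size; i++)
		printf(" %02x", buf[4 + i]);
	printf("\n");
}
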
+
+static sense_reason_t
+spc_rsoc_get_descr(struct se_cmd *cmd, const struct target_opcode_descriptor **opcode)
+{
+ const struct target_opcode_descriptor *descr;
+ struct se_session *sess = cmd->se_sess;
+ unsigned char *cdb = cmd->t_task_cdb;
+ u8 opts = cdb[2] & 0x3;
+ u8 requested_opcode;
+ u16 requested_sa;
+ int i;
+
+ requested_opcode = cdb[3];
+ requested_sa = ((u16)cdb[4]) << 8 | cdb[5];
+ *opcode = NULL;
+
+ if (opts > 3) {
+ pr_debug("TARGET_CORE[%s]: Invalid REPORT SUPPORTED OPERATION CODES"
+ " with unsupported REPORTING OPTIONS %#x for 0x%08llx from %s\n",
+ cmd->se_tfo->fabric_name, opts,
+ cmd->se_lun->unpacked_lun,
+ sess->se_node_acl->initiatorname);
+ return TCM_INVALID_CDB_FIELD;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(tcm_supported_opcodes); i++) {
+ descr = tcm_supported_opcodes[i];
+ if (descr->opcode != requested_opcode)
+ continue;
+
+ switch (opts) {
+ case 0x1:
+ /*
+ * If the REQUESTED OPERATION CODE field specifies an
+ * operation code for which the device server implements
+ * service actions, then the device server shall
+ * terminate the command with CHECK CONDITION status,
+ * with the sense key set to ILLEGAL REQUEST, and the
+ * additional sense code set to INVALID FIELD IN CDB
+ */
+ if (descr->serv_action_valid)
+ return TCM_INVALID_CDB_FIELD;
+
+ if (!descr->enabled || descr->enabled(descr, cmd)) {
+ *opcode = descr;
+ return TCM_NO_SENSE;
+ }
+ break;
+ case 0x2:
+ /*
+ * If the REQUESTED OPERATION CODE field specifies an
+ * operation code for which the device server does not
+ * implement service actions, then the device server
+ * shall terminate the command with CHECK CONDITION
+ * status, with the sense key set to ILLEGAL REQUEST,
+ * and the additional sense code set to INVALID FIELD IN CDB.
+ */
+ if (descr->serv_action_valid &&
+ descr->service_action == requested_sa) {
+ if (!descr->enabled || descr->enabled(descr,
+ cmd)) {
+ *opcode = descr;
+ return TCM_NO_SENSE;
+ }
+ } else if (!descr->serv_action_valid)
+ return TCM_INVALID_CDB_FIELD;
+ break;
+ case 0x3:
+ /*
+ * The command support data for the operation code and
+ * service action a specified in the REQUESTED OPERATION
+ * CODE field and REQUESTED SERVICE ACTION field shall
+ * be returned in the one_command parameter data format.
+ */
+ if (descr->service_action == requested_sa)
+ if (!descr->enabled || descr->enabled(descr,
+ cmd)) {
+ *opcode = descr;
+ return TCM_NO_SENSE;
+ }
+ break;
+ }
+ }
+
+ return TCM_NO_SENSE;
+}
+
+static sense_reason_t
+spc_emulate_report_supp_op_codes(struct se_cmd *cmd)
+{
+ int descr_num = ARRAY_SIZE(tcm_supported_opcodes);
+ const struct target_opcode_descriptor *descr = NULL;
+ unsigned char *cdb = cmd->t_task_cdb;
+ u8 rctd = (cdb[2] >> 7) & 0x1;
+ unsigned char *buf = NULL;
+ int response_length = 0;
+ u8 opts = cdb[2] & 0x3;
+ unsigned char *rbuf;
+ sense_reason_t ret = 0;
+ int i;
+
+ if (!cmd->se_dev->dev_attrib.emulate_rsoc)
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+
+ rbuf = transport_kmap_data_sg(cmd);
+ if (cmd->data_length && !rbuf) {
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ goto out;
+ }
+
+ if (opts == 0)
+ response_length = 4 + (8 + rctd * 12) * descr_num;
+ else {
+ ret = spc_rsoc_get_descr(cmd, &descr);
+ if (ret)
+ goto out;
+
+ if (descr)
+ response_length = 4 + descr->cdb_size + rctd * 12;
+ else
+ response_length = 2;
+ }
+
+ buf = kzalloc(response_length, GFP_KERNEL);
+ if (!buf) {
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ goto out;
+ }
+ response_length = 0;
+
+ if (opts == 0) {
+ response_length += 4;
+
+ for (i = 0; i < ARRAY_SIZE(tcm_supported_opcodes); i++) {
+ descr = tcm_supported_opcodes[i];
+ if (descr->enabled && !descr->enabled(descr, cmd))
+ continue;
+
+ response_length += spc_rsoc_encode_command_descriptor(
+ &buf[response_length], rctd, descr);
+ }
+ put_unaligned_be32(response_length - 4, buf);
+ } else {
+ response_length = spc_rsoc_encode_one_command_descriptor(
+ &buf[response_length], rctd, descr,
+ cmd->se_dev);
+ }
+
+ memcpy(rbuf, buf, min_t(u32, response_length, cmd->data_length));
+out:
+ kfree(buf);
+ transport_kunmap_data_sg(cmd);
+
+ if (!ret)
+ target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, response_length);
+ return ret;
+}
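
To exercise this handler from user space, one can issue MAINTENANCE_IN (0xa3) with service action 0x0c through the SG_IO ioctl, which is what sg3_utils' sg_opcodes utility does. A trimmed sketch (error handling mostly elided; device path taken from argv):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

int main(int argc, char **argv)
{
	/* REPORTING OPTIONS 0 in cdb[2]: return the all_commands list */
	unsigned char cdb[12] = { 0xa3, 0x0c, 0x00 };
	unsigned char resp[4096], sense[32];
	struct sg_io_hdr hdr;
	int fd;

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_RDONLY | O_NONBLOCK);
	if (fd < 0)
		return 1;

	cdb[6] = (sizeof(resp) >> 24) & 0xff;	/* ALLOCATION LENGTH */
	cdb[7] = (sizeof(resp) >> 16) & 0xff;
	cdb[8] = (sizeof(resp) >> 8) & 0xff;
	cdb[9] = sizeof(resp) & 0xff;

	memset(&hdr, 0, sizeof(hdr));
	hdr.interface_id = 'S';
	hdr.dxfer_direction = SG_DXFER_FROM_DEV;
	hdr.cmdp = cdb;
	hdr.cmd_len = sizeof(cdb);
	hdr.dxferp = resp;
	hdr.dxfer_len = sizeof(resp);
	hdr.sbp = sense;
	hdr.mx_sb_len = sizeof(sense);
	hdr.timeout = 5000;	/* milliseconds */

	if (ioctl(fd, SG_IO, &hdr) == 0 && hdr.status == 0) {
		/* bytes 0..3: COMMAND DATA LENGTH, big-endian */
		unsigned int len = (resp[0] << 24) | (resp[1] << 16) |
				   (resp[2] << 8) | resp[3];
		printf("COMMAND DATA LENGTH: %u bytes\n", len);
	}
	return 0;
}
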
+
sense_reason_t
spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
{
@@ -1178,12 +2310,30 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
unsigned char *cdb = cmd->t_task_cdb;
switch (cdb[0]) {
+ case RESERVE_6:
+ case RESERVE_10:
+ case RELEASE_6:
+ case RELEASE_10:
+ if (!dev->dev_attrib.emulate_pr)
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+
+ if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ break;
+ case PERSISTENT_RESERVE_IN:
+ case PERSISTENT_RESERVE_OUT:
+ if (!dev->dev_attrib.emulate_pr)
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ break;
+ }
+
+ switch (cdb[0]) {
case MODE_SELECT:
*size = cdb[4];
cmd->execute_cmd = spc_emulate_modeselect;
break;
case MODE_SELECT_10:
- *size = (cdb[7] << 8) + cdb[8];
+ *size = get_unaligned_be16(&cdb[7]);
cmd->execute_cmd = spc_emulate_modeselect;
break;
case MODE_SENSE:
@@ -1191,38 +2341,38 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
cmd->execute_cmd = spc_emulate_modesense;
break;
case MODE_SENSE_10:
- *size = (cdb[7] << 8) + cdb[8];
+ *size = get_unaligned_be16(&cdb[7]);
cmd->execute_cmd = spc_emulate_modesense;
break;
case LOG_SELECT:
case LOG_SENSE:
- *size = (cdb[7] << 8) + cdb[8];
+ *size = get_unaligned_be16(&cdb[7]);
break;
case PERSISTENT_RESERVE_IN:
- *size = (cdb[7] << 8) + cdb[8];
+ *size = get_unaligned_be16(&cdb[7]);
cmd->execute_cmd = target_scsi3_emulate_pr_in;
break;
case PERSISTENT_RESERVE_OUT:
- *size = (cdb[7] << 8) + cdb[8];
+ *size = get_unaligned_be32(&cdb[5]);
cmd->execute_cmd = target_scsi3_emulate_pr_out;
break;
- case RELEASE:
+ case RELEASE_6:
case RELEASE_10:
if (cdb[0] == RELEASE_10)
- *size = (cdb[7] << 8) | cdb[8];
+ *size = get_unaligned_be16(&cdb[7]);
else
*size = cmd->data_length;
cmd->execute_cmd = target_scsi2_reservation_release;
break;
- case RESERVE:
+ case RESERVE_6:
case RESERVE_10:
/*
* The SPC-2 RESERVE does not contain a size in the SCSI CDB.
* Assume the passthrough or $FABRIC_MOD will tell us about it.
*/
if (cdb[0] == RESERVE_10)
- *size = (cdb[7] << 8) | cdb[8];
+ *size = get_unaligned_be16(&cdb[7]);
else
*size = cmd->data_length;
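
The size conversions in these hunks are behavior-preserving: get_unaligned_be16()/get_unaligned_be32() read a big-endian value from a possibly unaligned CDB offset, which is exactly what the open-coded shift-and-add did. A minimal equivalence sketch:

	#include <linux/types.h>
	#include <asm/unaligned.h>

	static inline u32 allocation_length_old(const unsigned char *cdb)
	{
		return (cdb[7] << 8) + cdb[8];		/* open-coded big-endian read */
	}

	static inline u32 allocation_length_new(const unsigned char *cdb)
	{
		return get_unaligned_be16(&cdb[7]);	/* same value, clearer intent */
	}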
@@ -1233,41 +2383,46 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
cmd->execute_cmd = spc_emulate_request_sense;
break;
case INQUIRY:
- *size = (cdb[3] << 8) + cdb[4];
+ *size = get_unaligned_be16(&cdb[3]);
/*
- * Do implict HEAD_OF_QUEUE processing for INQUIRY.
+ * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
* See spc4r17 section 5.3
*/
- cmd->sam_task_attr = MSG_HEAD_TAG;
+ cmd->sam_task_attr = TCM_HEAD_TAG;
cmd->execute_cmd = spc_emulate_inquiry;
break;
case SECURITY_PROTOCOL_IN:
case SECURITY_PROTOCOL_OUT:
- *size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
+ *size = get_unaligned_be32(&cdb[6]);
break;
case EXTENDED_COPY:
- case READ_ATTRIBUTE:
+ *size = get_unaligned_be32(&cdb[10]);
+ cmd->execute_cmd = target_do_xcopy;
+ break;
case RECEIVE_COPY_RESULTS:
+ *size = get_unaligned_be32(&cdb[10]);
+ cmd->execute_cmd = target_do_receive_copy_results;
+ break;
+ case READ_ATTRIBUTE:
case WRITE_ATTRIBUTE:
- *size = (cdb[10] << 24) | (cdb[11] << 16) |
- (cdb[12] << 8) | cdb[13];
+ *size = get_unaligned_be32(&cdb[10]);
break;
case RECEIVE_DIAGNOSTIC:
case SEND_DIAGNOSTIC:
- *size = (cdb[3] << 8) | cdb[4];
+ *size = get_unaligned_be16(&cdb[3]);
break;
case WRITE_BUFFER:
- *size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
+ *size = get_unaligned_be24(&cdb[6]);
break;
case REPORT_LUNS:
cmd->execute_cmd = spc_emulate_report_luns;
- *size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
+ *size = get_unaligned_be32(&cdb[6]);
/*
- * Do implict HEAD_OF_QUEUE processing for REPORT_LUNS
+ * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
* See spc4r17 section 5.3
*/
- cmd->sam_task_attr = MSG_HEAD_TAG;
+ cmd->sam_task_attr = TCM_HEAD_TAG;
break;
case TEST_UNIT_READY:
cmd->execute_cmd = spc_emulate_testunitready;
@@ -1283,6 +2438,10 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
cmd->execute_cmd =
target_emulate_report_target_port_groups;
}
+ if ((cdb[1] & 0x1f) ==
+ MI_REPORT_SUPPORTED_OPERATION_CODES)
+ cmd->execute_cmd =
+ spc_emulate_report_supp_op_codes;
*size = get_unaligned_be32(&cdb[6]);
} else {
/*
@@ -1310,9 +2469,6 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
}
break;
default:
- pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
- " 0x%02x, sending CHECK_CONDITION.\n",
- cmd->se_tfo->get_fabric_name(), cdb[0]);
return TCM_UNSUPPORTED_SCSI_OPCODE;
}
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c
index d154ce797180..083205052be2 100644
--- a/drivers/target/target_core_stat.c
+++ b/drivers/target/target_core_stat.c
@@ -1,27 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* Filename: target_core_stat.c
*
* Modern ConfigFS group context specific statistics based on original
* target_core_mib.c code
*
- * (c) Copyright 2006-2012 RisingTide Systems LLC.
+ * (c) Copyright 2006-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@linux-iscsi.org>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
******************************************************************************/
#include <linux/kernel.h>
@@ -32,17 +19,11 @@
#include <linux/utsname.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
-#include <linux/blkdev.h>
#include <linux/configfs.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_host.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
-#include <target/target_core_configfs.h>
-#include <target/configfs_macros.h>
#include "target_core_internal.h"
@@ -50,9 +31,6 @@
#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
#endif
-#define NONE "None"
-#define ISPRINT(a) ((a >= ' ') && (a <= '~'))
-
#define SCSI_LU_INDEX 1
#define LU_COUNT 1
@@ -60,75 +38,49 @@
* SCSI Device Table
*/
-CONFIGFS_EATTR_STRUCT(target_stat_scsi_dev, se_dev_stat_grps);
-#define DEV_STAT_SCSI_DEV_ATTR(_name, _mode) \
-static struct target_stat_scsi_dev_attribute \
- target_stat_scsi_dev_##_name = \
- __CONFIGFS_EATTR(_name, _mode, \
- target_stat_scsi_dev_show_attr_##_name, \
- target_stat_scsi_dev_store_attr_##_name);
-
-#define DEV_STAT_SCSI_DEV_ATTR_RO(_name) \
-static struct target_stat_scsi_dev_attribute \
- target_stat_scsi_dev_##_name = \
- __CONFIGFS_EATTR_RO(_name, \
- target_stat_scsi_dev_show_attr_##_name);
+static struct se_device *to_stat_dev(struct config_item *item)
+{
+ struct se_dev_stat_grps *sgrps = container_of(to_config_group(item),
+ struct se_dev_stat_grps, scsi_dev_group);
+ return container_of(sgrps, struct se_device, dev_stat_grps);
+}
-static ssize_t target_stat_scsi_dev_show_attr_inst(
- struct se_dev_stat_grps *sgrps, char *page)
+static ssize_t target_stat_inst_show(struct config_item *item, char *page)
{
- struct se_device *dev =
- container_of(sgrps, struct se_device, dev_stat_grps);
- struct se_hba *hba = dev->se_hba;
+ struct se_hba *hba = to_stat_dev(item)->se_hba;
return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
}
-DEV_STAT_SCSI_DEV_ATTR_RO(inst);
-static ssize_t target_stat_scsi_dev_show_attr_indx(
- struct se_dev_stat_grps *sgrps, char *page)
+static ssize_t target_stat_indx_show(struct config_item *item, char *page)
{
- struct se_device *dev =
- container_of(sgrps, struct se_device, dev_stat_grps);
-
- return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
+ return snprintf(page, PAGE_SIZE, "%u\n", to_stat_dev(item)->dev_index);
}
-DEV_STAT_SCSI_DEV_ATTR_RO(indx);
-static ssize_t target_stat_scsi_dev_show_attr_role(
- struct se_dev_stat_grps *sgrps, char *page)
+static ssize_t target_stat_role_show(struct config_item *item, char *page)
{
return snprintf(page, PAGE_SIZE, "Target\n");
}
-DEV_STAT_SCSI_DEV_ATTR_RO(role);
-static ssize_t target_stat_scsi_dev_show_attr_ports(
- struct se_dev_stat_grps *sgrps, char *page)
+static ssize_t target_stat_ports_show(struct config_item *item, char *page)
{
- struct se_device *dev =
- container_of(sgrps, struct se_device, dev_stat_grps);
-
- return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_port_count);
+ return snprintf(page, PAGE_SIZE, "%u\n", to_stat_dev(item)->export_count);
}
-DEV_STAT_SCSI_DEV_ATTR_RO(ports);
-CONFIGFS_EATTR_OPS(target_stat_scsi_dev, se_dev_stat_grps, scsi_dev_group);
+CONFIGFS_ATTR_RO(target_stat_, inst);
+CONFIGFS_ATTR_RO(target_stat_, indx);
+CONFIGFS_ATTR_RO(target_stat_, role);
+CONFIGFS_ATTR_RO(target_stat_, ports);
static struct configfs_attribute *target_stat_scsi_dev_attrs[] = {
- &target_stat_scsi_dev_inst.attr,
- &target_stat_scsi_dev_indx.attr,
- &target_stat_scsi_dev_role.attr,
- &target_stat_scsi_dev_ports.attr,
+ &target_stat_attr_inst,
+ &target_stat_attr_indx,
+ &target_stat_attr_role,
+ &target_stat_attr_ports,
NULL,
};
-static struct configfs_item_operations target_stat_scsi_dev_attrib_ops = {
- .show_attribute = target_stat_scsi_dev_attr_show,
- .store_attribute = target_stat_scsi_dev_attr_store,
-};
-
-static struct config_item_type target_stat_scsi_dev_cit = {
- .ct_item_ops = &target_stat_scsi_dev_attrib_ops,
+static const struct config_item_type target_stat_scsi_dev_cit = {
.ct_attrs = target_stat_scsi_dev_attrs,
.ct_owner = THIS_MODULE,
};
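
CONFIGFS_ATTR_RO() generates the struct configfs_attribute that the removed CONFIGFS_EATTR boilerplate spelled out by hand; the generated names (target_stat_attr_inst, paired with target_stat_inst_show) are what the attribute array above references. The expansion of CONFIGFS_ATTR_RO(target_stat_, inst), paraphrased from include/linux/configfs.h of this era:

	static struct configfs_attribute target_stat_attr_inst = {
		.ca_name	= "inst",
		.ca_mode	= S_IRUGO,
		.ca_owner	= THIS_MODULE,
		.show		= target_stat_inst_show,
	};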
@@ -136,108 +88,96 @@ static struct config_item_type target_stat_scsi_dev_cit = {
/*
* SCSI Target Device Table
*/
+static struct se_device *to_stat_tgt_dev(struct config_item *item)
+{
+ struct se_dev_stat_grps *sgrps = container_of(to_config_group(item),
+ struct se_dev_stat_grps, scsi_tgt_dev_group);
+ return container_of(sgrps, struct se_device, dev_stat_grps);
+}
-CONFIGFS_EATTR_STRUCT(target_stat_scsi_tgt_dev, se_dev_stat_grps);
-#define DEV_STAT_SCSI_TGT_DEV_ATTR(_name, _mode) \
-static struct target_stat_scsi_tgt_dev_attribute \
- target_stat_scsi_tgt_dev_##_name = \
- __CONFIGFS_EATTR(_name, _mode, \
- target_stat_scsi_tgt_dev_show_attr_##_name, \
- target_stat_scsi_tgt_dev_store_attr_##_name);
-
-#define DEV_STAT_SCSI_TGT_DEV_ATTR_RO(_name) \
-static struct target_stat_scsi_tgt_dev_attribute \
- target_stat_scsi_tgt_dev_##_name = \
- __CONFIGFS_EATTR_RO(_name, \
- target_stat_scsi_tgt_dev_show_attr_##_name);
-
-static ssize_t target_stat_scsi_tgt_dev_show_attr_inst(
- struct se_dev_stat_grps *sgrps, char *page)
+static ssize_t target_stat_tgt_inst_show(struct config_item *item, char *page)
{
- struct se_device *dev =
- container_of(sgrps, struct se_device, dev_stat_grps);
- struct se_hba *hba = dev->se_hba;
+ struct se_hba *hba = to_stat_tgt_dev(item)->se_hba;
return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
}
-DEV_STAT_SCSI_TGT_DEV_ATTR_RO(inst);
-static ssize_t target_stat_scsi_tgt_dev_show_attr_indx(
- struct se_dev_stat_grps *sgrps, char *page)
+static ssize_t target_stat_tgt_indx_show(struct config_item *item, char *page)
{
- struct se_device *dev =
- container_of(sgrps, struct se_device, dev_stat_grps);
-
- return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
+ return snprintf(page, PAGE_SIZE, "%u\n", to_stat_tgt_dev(item)->dev_index);
}
-DEV_STAT_SCSI_TGT_DEV_ATTR_RO(indx);
-static ssize_t target_stat_scsi_tgt_dev_show_attr_num_lus(
- struct se_dev_stat_grps *sgrps, char *page)
+static ssize_t target_stat_tgt_num_lus_show(struct config_item *item,
+ char *page)
{
return snprintf(page, PAGE_SIZE, "%u\n", LU_COUNT);
}
-DEV_STAT_SCSI_TGT_DEV_ATTR_RO(num_lus);
-static ssize_t target_stat_scsi_tgt_dev_show_attr_status(
- struct se_dev_stat_grps *sgrps, char *page)
+static ssize_t target_stat_tgt_status_show(struct config_item *item,
+ char *page)
{
- struct se_device *dev =
- container_of(sgrps, struct se_device, dev_stat_grps);
-
- if (dev->export_count)
- return snprintf(page, PAGE_SIZE, "activated");
+ if (to_stat_tgt_dev(item)->export_count)
+ return snprintf(page, PAGE_SIZE, "activated\n");
else
- return snprintf(page, PAGE_SIZE, "deactivated");
+ return snprintf(page, PAGE_SIZE, "deactivated\n");
}
-DEV_STAT_SCSI_TGT_DEV_ATTR_RO(status);
-static ssize_t target_stat_scsi_tgt_dev_show_attr_non_access_lus(
- struct se_dev_stat_grps *sgrps, char *page)
+static ssize_t target_stat_tgt_non_access_lus_show(struct config_item *item,
+ char *page)
{
- struct se_device *dev =
- container_of(sgrps, struct se_device, dev_stat_grps);
int non_accessible_lus;
- if (dev->export_count)
+ if (to_stat_tgt_dev(item)->export_count)
non_accessible_lus = 0;
else
non_accessible_lus = 1;
return snprintf(page, PAGE_SIZE, "%u\n", non_accessible_lus);
}
-DEV_STAT_SCSI_TGT_DEV_ATTR_RO(non_access_lus);
-static ssize_t target_stat_scsi_tgt_dev_show_attr_resets(
- struct se_dev_stat_grps *sgrps, char *page)
+static ssize_t target_stat_tgt_resets_show(struct config_item *item,
+ char *page)
{
- struct se_device *dev =
- container_of(sgrps, struct se_device, dev_stat_grps);
+ return snprintf(page, PAGE_SIZE, "%lu\n",
+ atomic_long_read(&to_stat_tgt_dev(item)->num_resets));
+}
- return snprintf(page, PAGE_SIZE, "%u\n", dev->num_resets);
+static ssize_t target_stat_tgt_aborts_complete_show(struct config_item *item,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%lu\n",
+ atomic_long_read(&to_stat_tgt_dev(item)->aborts_complete));
}
-DEV_STAT_SCSI_TGT_DEV_ATTR_RO(resets);
+static ssize_t target_stat_tgt_aborts_no_task_show(struct config_item *item,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%lu\n",
+ atomic_long_read(&to_stat_tgt_dev(item)->aborts_no_task));
+}
-CONFIGFS_EATTR_OPS(target_stat_scsi_tgt_dev, se_dev_stat_grps, scsi_tgt_dev_group);
+CONFIGFS_ATTR_RO(target_stat_tgt_, inst);
+CONFIGFS_ATTR_RO(target_stat_tgt_, indx);
+CONFIGFS_ATTR_RO(target_stat_tgt_, num_lus);
+CONFIGFS_ATTR_RO(target_stat_tgt_, status);
+CONFIGFS_ATTR_RO(target_stat_tgt_, non_access_lus);
+CONFIGFS_ATTR_RO(target_stat_tgt_, resets);
+CONFIGFS_ATTR_RO(target_stat_tgt_, aborts_complete);
+CONFIGFS_ATTR_RO(target_stat_tgt_, aborts_no_task);
static struct configfs_attribute *target_stat_scsi_tgt_dev_attrs[] = {
- &target_stat_scsi_tgt_dev_inst.attr,
- &target_stat_scsi_tgt_dev_indx.attr,
- &target_stat_scsi_tgt_dev_num_lus.attr,
- &target_stat_scsi_tgt_dev_status.attr,
- &target_stat_scsi_tgt_dev_non_access_lus.attr,
- &target_stat_scsi_tgt_dev_resets.attr,
+ &target_stat_tgt_attr_inst,
+ &target_stat_tgt_attr_indx,
+ &target_stat_tgt_attr_num_lus,
+ &target_stat_tgt_attr_status,
+ &target_stat_tgt_attr_non_access_lus,
+ &target_stat_tgt_attr_resets,
+ &target_stat_tgt_attr_aborts_complete,
+ &target_stat_tgt_attr_aborts_no_task,
NULL,
};
-static struct configfs_item_operations target_stat_scsi_tgt_dev_attrib_ops = {
- .show_attribute = target_stat_scsi_tgt_dev_attr_show,
- .store_attribute = target_stat_scsi_tgt_dev_attr_store,
-};
-
-static struct config_item_type target_stat_scsi_tgt_dev_cit = {
- .ct_item_ops = &target_stat_scsi_tgt_dev_attrib_ops,
+static const struct config_item_type target_stat_scsi_tgt_dev_cit = {
.ct_attrs = target_stat_scsi_tgt_dev_attrs,
.ct_owner = THIS_MODULE,
};
@@ -246,254 +186,205 @@ static struct config_item_type target_stat_scsi_tgt_dev_cit = {
* SCSI Logical Unit Table
*/
-CONFIGFS_EATTR_STRUCT(target_stat_scsi_lu, se_dev_stat_grps);
-#define DEV_STAT_SCSI_LU_ATTR(_name, _mode) \
-static struct target_stat_scsi_lu_attribute target_stat_scsi_lu_##_name = \
- __CONFIGFS_EATTR(_name, _mode, \
- target_stat_scsi_lu_show_attr_##_name, \
- target_stat_scsi_lu_store_attr_##_name);
-
-#define DEV_STAT_SCSI_LU_ATTR_RO(_name) \
-static struct target_stat_scsi_lu_attribute target_stat_scsi_lu_##_name = \
- __CONFIGFS_EATTR_RO(_name, \
- target_stat_scsi_lu_show_attr_##_name);
+static struct se_device *to_stat_lu_dev(struct config_item *item)
+{
+ struct se_dev_stat_grps *sgrps = container_of(to_config_group(item),
+ struct se_dev_stat_grps, scsi_lu_group);
+ return container_of(sgrps, struct se_device, dev_stat_grps);
+}
-static ssize_t target_stat_scsi_lu_show_attr_inst(
- struct se_dev_stat_grps *sgrps, char *page)
+static ssize_t target_stat_lu_inst_show(struct config_item *item, char *page)
{
- struct se_device *dev =
- container_of(sgrps, struct se_device, dev_stat_grps);
- struct se_hba *hba = dev->se_hba;
+ struct se_hba *hba = to_stat_lu_dev(item)->se_hba;
return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
}
-DEV_STAT_SCSI_LU_ATTR_RO(inst);
-static ssize_t target_stat_scsi_lu_show_attr_dev(
- struct se_dev_stat_grps *sgrps, char *page)
+static ssize_t target_stat_lu_dev_show(struct config_item *item, char *page)
{
- struct se_device *dev =
- container_of(sgrps, struct se_device, dev_stat_grps);
-
- return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
+ return snprintf(page, PAGE_SIZE, "%u\n",
+ to_stat_lu_dev(item)->dev_index);
}
-DEV_STAT_SCSI_LU_ATTR_RO(dev);
-static ssize_t target_stat_scsi_lu_show_attr_indx(
- struct se_dev_stat_grps *sgrps, char *page)
+static ssize_t target_stat_lu_indx_show(struct config_item *item, char *page)
{
return snprintf(page, PAGE_SIZE, "%u\n", SCSI_LU_INDEX);
}
-DEV_STAT_SCSI_LU_ATTR_RO(indx);
-static ssize_t target_stat_scsi_lu_show_attr_lun(
- struct se_dev_stat_grps *sgrps, char *page)
+static ssize_t target_stat_lu_lun_show(struct config_item *item, char *page)
{
/* FIXME: scsiLuDefaultLun */
return snprintf(page, PAGE_SIZE, "%llu\n", (unsigned long long)0);
}
-DEV_STAT_SCSI_LU_ATTR_RO(lun);
-static ssize_t target_stat_scsi_lu_show_attr_lu_name(
- struct se_dev_stat_grps *sgrps, char *page)
+static ssize_t target_stat_lu_lu_name_show(struct config_item *item, char *page)
{
- struct se_device *dev =
- container_of(sgrps, struct se_device, dev_stat_grps);
+ struct se_device *dev = to_stat_lu_dev(item);
/* scsiLuWwnName */
return snprintf(page, PAGE_SIZE, "%s\n",
(strlen(dev->t10_wwn.unit_serial)) ?
dev->t10_wwn.unit_serial : "None");
}
-DEV_STAT_SCSI_LU_ATTR_RO(lu_name);
-static ssize_t target_stat_scsi_lu_show_attr_vend(
- struct se_dev_stat_grps *sgrps, char *page)
+static ssize_t target_stat_lu_vend_show(struct config_item *item, char *page)
{
- struct se_device *dev =
- container_of(sgrps, struct se_device, dev_stat_grps);
- int i;
- char str[sizeof(dev->t10_wwn.vendor)+1];
+ struct se_device *dev = to_stat_lu_dev(item);
- /* scsiLuVendorId */
- for (i = 0; i < sizeof(dev->t10_wwn.vendor); i++)
- str[i] = ISPRINT(dev->t10_wwn.vendor[i]) ?
- dev->t10_wwn.vendor[i] : ' ';
- str[i] = '\0';
- return snprintf(page, PAGE_SIZE, "%s\n", str);
+ return snprintf(page, PAGE_SIZE, "%-" __stringify(INQUIRY_VENDOR_LEN)
+ "s\n", dev->t10_wwn.vendor);
}
-DEV_STAT_SCSI_LU_ATTR_RO(vend);
-static ssize_t target_stat_scsi_lu_show_attr_prod(
- struct se_dev_stat_grps *sgrps, char *page)
+static ssize_t target_stat_lu_prod_show(struct config_item *item, char *page)
{
- struct se_device *dev =
- container_of(sgrps, struct se_device, dev_stat_grps);
- int i;
- char str[sizeof(dev->t10_wwn.model)+1];
+ struct se_device *dev = to_stat_lu_dev(item);
- /* scsiLuProductId */
- for (i = 0; i < sizeof(dev->t10_wwn.vendor); i++)
- str[i] = ISPRINT(dev->t10_wwn.model[i]) ?
- dev->t10_wwn.model[i] : ' ';
- str[i] = '\0';
- return snprintf(page, PAGE_SIZE, "%s\n", str);
+ return snprintf(page, PAGE_SIZE, "%-" __stringify(INQUIRY_MODEL_LEN)
+ "s\n", dev->t10_wwn.model);
}
-DEV_STAT_SCSI_LU_ATTR_RO(prod);
-static ssize_t target_stat_scsi_lu_show_attr_rev(
- struct se_dev_stat_grps *sgrps, char *page)
+static ssize_t target_stat_lu_rev_show(struct config_item *item, char *page)
{
- struct se_device *dev =
- container_of(sgrps, struct se_device, dev_stat_grps);
- int i;
- char str[sizeof(dev->t10_wwn.revision)+1];
+ struct se_device *dev = to_stat_lu_dev(item);
- /* scsiLuRevisionId */
- for (i = 0; i < sizeof(dev->t10_wwn.revision); i++)
- str[i] = ISPRINT(dev->t10_wwn.revision[i]) ?
- dev->t10_wwn.revision[i] : ' ';
- str[i] = '\0';
- return snprintf(page, PAGE_SIZE, "%s\n", str);
+ return snprintf(page, PAGE_SIZE, "%-" __stringify(INQUIRY_REVISION_LEN)
+ "s\n", dev->t10_wwn.revision);
}
-DEV_STAT_SCSI_LU_ATTR_RO(rev);
-static ssize_t target_stat_scsi_lu_show_attr_dev_type(
- struct se_dev_stat_grps *sgrps, char *page)
+static ssize_t target_stat_lu_dev_type_show(struct config_item *item, char *page)
{
- struct se_device *dev =
- container_of(sgrps, struct se_device, dev_stat_grps);
+ struct se_device *dev = to_stat_lu_dev(item);
/* scsiLuPeripheralType */
return snprintf(page, PAGE_SIZE, "%u\n",
dev->transport->get_device_type(dev));
}
-DEV_STAT_SCSI_LU_ATTR_RO(dev_type);
-static ssize_t target_stat_scsi_lu_show_attr_status(
- struct se_dev_stat_grps *sgrps, char *page)
+static ssize_t target_stat_lu_status_show(struct config_item *item, char *page)
{
- struct se_device *dev =
- container_of(sgrps, struct se_device, dev_stat_grps);
+ struct se_device *dev = to_stat_lu_dev(item);
/* scsiLuStatus */
return snprintf(page, PAGE_SIZE, "%s\n",
(dev->export_count) ? "available" : "notavailable");
}
-DEV_STAT_SCSI_LU_ATTR_RO(status);
-static ssize_t target_stat_scsi_lu_show_attr_state_bit(
- struct se_dev_stat_grps *sgrps, char *page)
+static ssize_t target_stat_lu_state_bit_show(struct config_item *item,
+ char *page)
{
/* scsiLuState */
return snprintf(page, PAGE_SIZE, "exposed\n");
}
-DEV_STAT_SCSI_LU_ATTR_RO(state_bit);
-
-static ssize_t target_stat_scsi_lu_show_attr_num_cmds(
- struct se_dev_stat_grps *sgrps, char *page)
-{
- struct se_device *dev =
- container_of(sgrps, struct se_device, dev_stat_grps);
- /* scsiLuNumCommands */
- return snprintf(page, PAGE_SIZE, "%llu\n",
- (unsigned long long)dev->num_cmds);
-}
-DEV_STAT_SCSI_LU_ATTR_RO(num_cmds);
-
-static ssize_t target_stat_scsi_lu_show_attr_read_mbytes(
- struct se_dev_stat_grps *sgrps, char *page)
-{
- struct se_device *dev =
- container_of(sgrps, struct se_device, dev_stat_grps);
-
- /* scsiLuReadMegaBytes */
- return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->read_bytes >> 20));
-}
-DEV_STAT_SCSI_LU_ATTR_RO(read_mbytes);
-
-static ssize_t target_stat_scsi_lu_show_attr_write_mbytes(
- struct se_dev_stat_grps *sgrps, char *page)
-{
- struct se_device *dev =
- container_of(sgrps, struct se_device, dev_stat_grps);
-
- /* scsiLuWrittenMegaBytes */
- return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->write_bytes >> 20));
-}
-DEV_STAT_SCSI_LU_ATTR_RO(write_mbytes);
-
-static ssize_t target_stat_scsi_lu_show_attr_resets(
- struct se_dev_stat_grps *sgrps, char *page)
-{
- struct se_device *dev =
- container_of(sgrps, struct se_device, dev_stat_grps);
+#define per_cpu_stat_snprintf(stats_struct, prefix, field, shift) \
+static ssize_t \
+per_cpu_stat_##prefix##_snprintf(struct stats_struct __percpu *per_cpu_stats, \
+ char *page) \
+{ \
+ struct stats_struct *stats; \
+ unsigned int cpu; \
+ u64 sum = 0; \
+ \
+ for_each_possible_cpu(cpu) { \
+ stats = per_cpu_ptr(per_cpu_stats, cpu); \
+ sum += stats->field; \
+ } \
+ \
+ return snprintf(page, PAGE_SIZE, "%llu\n", sum >> shift); \
+}
+
+#define lu_show_per_cpu_stat(prefix, field, shift) \
+per_cpu_stat_snprintf(se_dev_io_stats, prefix, field, shift); \
+static ssize_t \
+target_stat_##prefix##_show(struct config_item *item, char *page) \
+{ \
+ struct se_device *dev = to_stat_lu_dev(item); \
+ \
+ return per_cpu_stat_##prefix##_snprintf(dev->stats, page); \
+} \
+
+/* scsiLuNumCommands */
+lu_show_per_cpu_stat(lu_num_cmds, total_cmds, 0);
+/* scsiLuReadMegaBytes */
+lu_show_per_cpu_stat(lu_read_mbytes, read_bytes, 20);
+/* scsiLuWrittenMegaBytes */
+lu_show_per_cpu_stat(lu_write_mbytes, write_bytes, 20);
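
The per_cpu_stat_snprintf() macro above sums one counter field across every possible CPU and shifts the total (a shift of 20 converts bytes to megabytes). The same aggregation pattern reduced to a plain function, over a hypothetical stats struct:

	#include <linux/percpu.h>
	#include <linux/types.h>

	struct demo_io_stats {
		u64 total_cmds;
		u64 read_bytes;
	};

	static u64 demo_sum_read_mbytes(struct demo_io_stats __percpu *stats)
	{
		unsigned int cpu;
		u64 sum = 0;

		for_each_possible_cpu(cpu)
			sum += per_cpu_ptr(stats, cpu)->read_bytes;

		return sum >> 20;	/* bytes -> megabytes */
	}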
+
+static ssize_t target_stat_lu_resets_show(struct config_item *item, char *page)
+{
+ struct se_device *dev = to_stat_lu_dev(item);
/* scsiLuInResets */
- return snprintf(page, PAGE_SIZE, "%u\n", dev->num_resets);
+ return snprintf(page, PAGE_SIZE, "%lu\n",
+ atomic_long_read(&dev->num_resets));
}
-DEV_STAT_SCSI_LU_ATTR_RO(resets);
-static ssize_t target_stat_scsi_lu_show_attr_full_stat(
- struct se_dev_stat_grps *sgrps, char *page)
+static ssize_t target_stat_lu_full_stat_show(struct config_item *item,
+ char *page)
{
/* FIXME: scsiLuOutTaskSetFullStatus */
return snprintf(page, PAGE_SIZE, "%u\n", 0);
}
-DEV_STAT_SCSI_LU_ATTR_RO(full_stat);
-static ssize_t target_stat_scsi_lu_show_attr_hs_num_cmds(
- struct se_dev_stat_grps *sgrps, char *page)
+static ssize_t target_stat_lu_hs_num_cmds_show(struct config_item *item,
+ char *page)
{
/* FIXME: scsiLuHSInCommands */
return snprintf(page, PAGE_SIZE, "%u\n", 0);
}
-DEV_STAT_SCSI_LU_ATTR_RO(hs_num_cmds);
-static ssize_t target_stat_scsi_lu_show_attr_creation_time(
- struct se_dev_stat_grps *sgrps, char *page)
+static ssize_t target_stat_lu_creation_time_show(struct config_item *item,
+ char *page)
{
- struct se_device *dev =
- container_of(sgrps, struct se_device, dev_stat_grps);
+ struct se_device *dev = to_stat_lu_dev(item);
/* scsiLuCreationTime */
return snprintf(page, PAGE_SIZE, "%u\n", (u32)(((u32)dev->creation_time -
INITIAL_JIFFIES) * 100 / HZ));
}
-DEV_STAT_SCSI_LU_ATTR_RO(creation_time);
-CONFIGFS_EATTR_OPS(target_stat_scsi_lu, se_dev_stat_grps, scsi_lu_group);
+CONFIGFS_ATTR_RO(target_stat_lu_, inst);
+CONFIGFS_ATTR_RO(target_stat_lu_, dev);
+CONFIGFS_ATTR_RO(target_stat_lu_, indx);
+CONFIGFS_ATTR_RO(target_stat_lu_, lun);
+CONFIGFS_ATTR_RO(target_stat_lu_, lu_name);
+CONFIGFS_ATTR_RO(target_stat_lu_, vend);
+CONFIGFS_ATTR_RO(target_stat_lu_, prod);
+CONFIGFS_ATTR_RO(target_stat_lu_, rev);
+CONFIGFS_ATTR_RO(target_stat_lu_, dev_type);
+CONFIGFS_ATTR_RO(target_stat_lu_, status);
+CONFIGFS_ATTR_RO(target_stat_lu_, state_bit);
+CONFIGFS_ATTR_RO(target_stat_lu_, num_cmds);
+CONFIGFS_ATTR_RO(target_stat_lu_, read_mbytes);
+CONFIGFS_ATTR_RO(target_stat_lu_, write_mbytes);
+CONFIGFS_ATTR_RO(target_stat_lu_, resets);
+CONFIGFS_ATTR_RO(target_stat_lu_, full_stat);
+CONFIGFS_ATTR_RO(target_stat_lu_, hs_num_cmds);
+CONFIGFS_ATTR_RO(target_stat_lu_, creation_time);
static struct configfs_attribute *target_stat_scsi_lu_attrs[] = {
- &target_stat_scsi_lu_inst.attr,
- &target_stat_scsi_lu_dev.attr,
- &target_stat_scsi_lu_indx.attr,
- &target_stat_scsi_lu_lun.attr,
- &target_stat_scsi_lu_lu_name.attr,
- &target_stat_scsi_lu_vend.attr,
- &target_stat_scsi_lu_prod.attr,
- &target_stat_scsi_lu_rev.attr,
- &target_stat_scsi_lu_dev_type.attr,
- &target_stat_scsi_lu_status.attr,
- &target_stat_scsi_lu_state_bit.attr,
- &target_stat_scsi_lu_num_cmds.attr,
- &target_stat_scsi_lu_read_mbytes.attr,
- &target_stat_scsi_lu_write_mbytes.attr,
- &target_stat_scsi_lu_resets.attr,
- &target_stat_scsi_lu_full_stat.attr,
- &target_stat_scsi_lu_hs_num_cmds.attr,
- &target_stat_scsi_lu_creation_time.attr,
+ &target_stat_lu_attr_inst,
+ &target_stat_lu_attr_dev,
+ &target_stat_lu_attr_indx,
+ &target_stat_lu_attr_lun,
+ &target_stat_lu_attr_lu_name,
+ &target_stat_lu_attr_vend,
+ &target_stat_lu_attr_prod,
+ &target_stat_lu_attr_rev,
+ &target_stat_lu_attr_dev_type,
+ &target_stat_lu_attr_status,
+ &target_stat_lu_attr_state_bit,
+ &target_stat_lu_attr_num_cmds,
+ &target_stat_lu_attr_read_mbytes,
+ &target_stat_lu_attr_write_mbytes,
+ &target_stat_lu_attr_resets,
+ &target_stat_lu_attr_full_stat,
+ &target_stat_lu_attr_hs_num_cmds,
+ &target_stat_lu_attr_creation_time,
NULL,
};
-static struct configfs_item_operations target_stat_scsi_lu_attrib_ops = {
- .show_attribute = target_stat_scsi_lu_attr_show,
- .store_attribute = target_stat_scsi_lu_attr_store,
-};
-
-static struct config_item_type target_stat_scsi_lu_cit = {
- .ct_item_ops = &target_stat_scsi_lu_attrib_ops,
+static const struct config_item_type target_stat_scsi_lu_cit = {
.ct_attrs = target_stat_scsi_lu_attrs,
.ct_owner = THIS_MODULE,
};
@@ -504,161 +395,122 @@ static struct config_item_type target_stat_scsi_lu_cit = {
*/
void target_stat_setup_dev_default_groups(struct se_device *dev)
{
- struct config_group *dev_stat_grp = &dev->dev_stat_grps.stat_group;
-
config_group_init_type_name(&dev->dev_stat_grps.scsi_dev_group,
"scsi_dev", &target_stat_scsi_dev_cit);
+ configfs_add_default_group(&dev->dev_stat_grps.scsi_dev_group,
+ &dev->dev_stat_grps.stat_group);
+
config_group_init_type_name(&dev->dev_stat_grps.scsi_tgt_dev_group,
"scsi_tgt_dev", &target_stat_scsi_tgt_dev_cit);
+ configfs_add_default_group(&dev->dev_stat_grps.scsi_tgt_dev_group,
+ &dev->dev_stat_grps.stat_group);
+
config_group_init_type_name(&dev->dev_stat_grps.scsi_lu_group,
"scsi_lu", &target_stat_scsi_lu_cit);
-
- dev_stat_grp->default_groups[0] = &dev->dev_stat_grps.scsi_dev_group;
- dev_stat_grp->default_groups[1] = &dev->dev_stat_grps.scsi_tgt_dev_group;
- dev_stat_grp->default_groups[2] = &dev->dev_stat_grps.scsi_lu_group;
- dev_stat_grp->default_groups[3] = NULL;
+ configfs_add_default_group(&dev->dev_stat_grps.scsi_lu_group,
+ &dev->dev_stat_grps.stat_group);
}
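
configfs_add_default_group() links each child group onto the parent's default-group list at init time, replacing the fixed default_groups[] array that the old code filled and NULL-terminated by hand. The matching teardown, sketched under the assumption that the same-era configfs API is in use (the real call site is outside this hunk):

	static void demo_stat_teardown(struct se_device *dev)
	{
		/* drops every group added via configfs_add_default_group() */
		configfs_remove_default_groups(&dev->dev_stat_grps.stat_group);
	}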
/*
* SCSI Port Table
*/
-CONFIGFS_EATTR_STRUCT(target_stat_scsi_port, se_port_stat_grps);
-#define DEV_STAT_SCSI_PORT_ATTR(_name, _mode) \
-static struct target_stat_scsi_port_attribute \
- target_stat_scsi_port_##_name = \
- __CONFIGFS_EATTR(_name, _mode, \
- target_stat_scsi_port_show_attr_##_name, \
- target_stat_scsi_port_store_attr_##_name);
-
-#define DEV_STAT_SCSI_PORT_ATTR_RO(_name) \
-static struct target_stat_scsi_port_attribute \
- target_stat_scsi_port_##_name = \
- __CONFIGFS_EATTR_RO(_name, \
- target_stat_scsi_port_show_attr_##_name);
-
-static ssize_t target_stat_scsi_port_show_attr_inst(
- struct se_port_stat_grps *pgrps, char *page)
-{
- struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
- struct se_port *sep;
- struct se_device *dev = lun->lun_se_dev;
- struct se_hba *hba;
- ssize_t ret;
+static struct se_lun *to_stat_port(struct config_item *item)
+{
+ struct se_port_stat_grps *pgrps = container_of(to_config_group(item),
+ struct se_port_stat_grps, scsi_port_group);
+ return container_of(pgrps, struct se_lun, port_stat_grps);
+}
- spin_lock(&lun->lun_sep_lock);
- sep = lun->lun_sep;
- if (!sep) {
- spin_unlock(&lun->lun_sep_lock);
- return -ENODEV;
- }
- hba = dev->se_hba;
- ret = snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
- spin_unlock(&lun->lun_sep_lock);
+static ssize_t target_stat_port_inst_show(struct config_item *item, char *page)
+{
+ struct se_lun *lun = to_stat_port(item);
+ struct se_device *dev;
+ ssize_t ret = -ENODEV;
+
+ rcu_read_lock();
+ dev = rcu_dereference(lun->lun_se_dev);
+ if (dev)
+ ret = snprintf(page, PAGE_SIZE, "%u\n", dev->hba_index);
+ rcu_read_unlock();
return ret;
}
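
Throughout this file the pattern above replaces the old lun_sep_lock spinlock: enter an RCU read-side critical section, rcu_dereference() lun->lun_se_dev, and treat a NULL result as -ENODEV. The pattern in isolation (function name hypothetical):

	#include <linux/rcupdate.h>

	static ssize_t demo_show(struct se_lun *lun, char *page)
	{
		struct se_device *dev;
		ssize_t ret = -ENODEV;

		rcu_read_lock();			/* pin the RCU grace period */
		dev = rcu_dereference(lun->lun_se_dev);	/* NULL once the dev is unlinked */
		if (dev)
			ret = snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
		rcu_read_unlock();			/* dev must not be used past here */
		return ret;
	}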
-DEV_STAT_SCSI_PORT_ATTR_RO(inst);
-static ssize_t target_stat_scsi_port_show_attr_dev(
- struct se_port_stat_grps *pgrps, char *page)
+static ssize_t target_stat_port_dev_show(struct config_item *item, char *page)
{
- struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
- struct se_port *sep;
- struct se_device *dev = lun->lun_se_dev;
- ssize_t ret;
+ struct se_lun *lun = to_stat_port(item);
+ struct se_device *dev;
+ ssize_t ret = -ENODEV;
- spin_lock(&lun->lun_sep_lock);
- sep = lun->lun_sep;
- if (!sep) {
- spin_unlock(&lun->lun_sep_lock);
- return -ENODEV;
- }
- ret = snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
- spin_unlock(&lun->lun_sep_lock);
+ rcu_read_lock();
+ dev = rcu_dereference(lun->lun_se_dev);
+ if (dev)
+ ret = snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
+ rcu_read_unlock();
return ret;
}
-DEV_STAT_SCSI_PORT_ATTR_RO(dev);
-static ssize_t target_stat_scsi_port_show_attr_indx(
- struct se_port_stat_grps *pgrps, char *page)
+static ssize_t target_stat_port_indx_show(struct config_item *item, char *page)
{
- struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
- struct se_port *sep;
- ssize_t ret;
+ struct se_lun *lun = to_stat_port(item);
+ struct se_device *dev;
+ ssize_t ret = -ENODEV;
- spin_lock(&lun->lun_sep_lock);
- sep = lun->lun_sep;
- if (!sep) {
- spin_unlock(&lun->lun_sep_lock);
- return -ENODEV;
- }
- ret = snprintf(page, PAGE_SIZE, "%u\n", sep->sep_index);
- spin_unlock(&lun->lun_sep_lock);
+ rcu_read_lock();
+ dev = rcu_dereference(lun->lun_se_dev);
+ if (dev)
+ ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_tpg->tpg_rtpi);
+ rcu_read_unlock();
return ret;
}
-DEV_STAT_SCSI_PORT_ATTR_RO(indx);
-static ssize_t target_stat_scsi_port_show_attr_role(
- struct se_port_stat_grps *pgrps, char *page)
+static ssize_t target_stat_port_role_show(struct config_item *item, char *page)
{
- struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
- struct se_device *dev = lun->lun_se_dev;
- struct se_port *sep;
- ssize_t ret;
+ struct se_lun *lun = to_stat_port(item);
+ struct se_device *dev;
+ ssize_t ret = -ENODEV;
- if (!dev)
- return -ENODEV;
-
- spin_lock(&lun->lun_sep_lock);
- sep = lun->lun_sep;
- if (!sep) {
- spin_unlock(&lun->lun_sep_lock);
- return -ENODEV;
- }
- ret = snprintf(page, PAGE_SIZE, "%s%u\n", "Device", dev->dev_index);
- spin_unlock(&lun->lun_sep_lock);
+ rcu_read_lock();
+ dev = rcu_dereference(lun->lun_se_dev);
+ if (dev)
+ ret = snprintf(page, PAGE_SIZE, "%s%u\n", "Device", dev->dev_index);
+ rcu_read_unlock();
return ret;
}
-DEV_STAT_SCSI_PORT_ATTR_RO(role);
-static ssize_t target_stat_scsi_port_show_attr_busy_count(
- struct se_port_stat_grps *pgrps, char *page)
+static ssize_t target_stat_port_busy_count_show(struct config_item *item,
+ char *page)
{
- struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
- struct se_port *sep;
- ssize_t ret;
+ struct se_lun *lun = to_stat_port(item);
+ struct se_device *dev;
+ ssize_t ret = -ENODEV;
- spin_lock(&lun->lun_sep_lock);
- sep = lun->lun_sep;
- if (!sep) {
- spin_unlock(&lun->lun_sep_lock);
- return -ENODEV;
+ rcu_read_lock();
+ dev = rcu_dereference(lun->lun_se_dev);
+ if (dev) {
+ /* FIXME: scsiPortBusyStatuses */
+ ret = snprintf(page, PAGE_SIZE, "%u\n", 0);
}
- /* FIXME: scsiPortBusyStatuses */
- ret = snprintf(page, PAGE_SIZE, "%u\n", 0);
- spin_unlock(&lun->lun_sep_lock);
+ rcu_read_unlock();
return ret;
}
-DEV_STAT_SCSI_PORT_ATTR_RO(busy_count);
-CONFIGFS_EATTR_OPS(target_stat_scsi_port, se_port_stat_grps, scsi_port_group);
+CONFIGFS_ATTR_RO(target_stat_port_, inst);
+CONFIGFS_ATTR_RO(target_stat_port_, dev);
+CONFIGFS_ATTR_RO(target_stat_port_, indx);
+CONFIGFS_ATTR_RO(target_stat_port_, role);
+CONFIGFS_ATTR_RO(target_stat_port_, busy_count);
static struct configfs_attribute *target_stat_scsi_port_attrs[] = {
- &target_stat_scsi_port_inst.attr,
- &target_stat_scsi_port_dev.attr,
- &target_stat_scsi_port_indx.attr,
- &target_stat_scsi_port_role.attr,
- &target_stat_scsi_port_busy_count.attr,
+ &target_stat_port_attr_inst,
+ &target_stat_port_attr_dev,
+ &target_stat_port_attr_indx,
+ &target_stat_port_attr_role,
+ &target_stat_port_attr_busy_count,
NULL,
};
-static struct configfs_item_operations target_stat_scsi_port_attrib_ops = {
- .show_attribute = target_stat_scsi_port_attr_show,
- .store_attribute = target_stat_scsi_port_attr_store,
-};
-
-static struct config_item_type target_stat_scsi_port_cit = {
- .ct_item_ops = &target_stat_scsi_port_attrib_ops,
+static const struct config_item_type target_stat_scsi_port_cit = {
.ct_attrs = target_stat_scsi_port_attrs,
.ct_owner = THIS_MODULE,
};
@@ -666,370 +518,280 @@ static struct config_item_type target_stat_scsi_port_cit = {
/*
* SCSI Target Port Table
*/
-CONFIGFS_EATTR_STRUCT(target_stat_scsi_tgt_port, se_port_stat_grps);
-#define DEV_STAT_SCSI_TGT_PORT_ATTR(_name, _mode) \
-static struct target_stat_scsi_tgt_port_attribute \
- target_stat_scsi_tgt_port_##_name = \
- __CONFIGFS_EATTR(_name, _mode, \
- target_stat_scsi_tgt_port_show_attr_##_name, \
- target_stat_scsi_tgt_port_store_attr_##_name);
-
-#define DEV_STAT_SCSI_TGT_PORT_ATTR_RO(_name) \
-static struct target_stat_scsi_tgt_port_attribute \
- target_stat_scsi_tgt_port_##_name = \
- __CONFIGFS_EATTR_RO(_name, \
- target_stat_scsi_tgt_port_show_attr_##_name);
-
-static ssize_t target_stat_scsi_tgt_port_show_attr_inst(
- struct se_port_stat_grps *pgrps, char *page)
-{
- struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
- struct se_device *dev = lun->lun_se_dev;
- struct se_port *sep;
- struct se_hba *hba;
- ssize_t ret;
-
- spin_lock(&lun->lun_sep_lock);
- sep = lun->lun_sep;
- if (!sep) {
- spin_unlock(&lun->lun_sep_lock);
- return -ENODEV;
- }
- hba = dev->se_hba;
- ret = snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
- spin_unlock(&lun->lun_sep_lock);
- return ret;
-}
-DEV_STAT_SCSI_TGT_PORT_ATTR_RO(inst);
-
-static ssize_t target_stat_scsi_tgt_port_show_attr_dev(
- struct se_port_stat_grps *pgrps, char *page)
+static struct se_lun *to_stat_tgt_port(struct config_item *item)
{
- struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
- struct se_device *dev = lun->lun_se_dev;
- struct se_port *sep;
- ssize_t ret;
-
- spin_lock(&lun->lun_sep_lock);
- sep = lun->lun_sep;
- if (!sep) {
- spin_unlock(&lun->lun_sep_lock);
- return -ENODEV;
- }
- ret = snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
- spin_unlock(&lun->lun_sep_lock);
- return ret;
+ struct se_port_stat_grps *pgrps = container_of(to_config_group(item),
+ struct se_port_stat_grps, scsi_tgt_port_group);
+ return container_of(pgrps, struct se_lun, port_stat_grps);
}
-DEV_STAT_SCSI_TGT_PORT_ATTR_RO(dev);
-static ssize_t target_stat_scsi_tgt_port_show_attr_indx(
- struct se_port_stat_grps *pgrps, char *page)
+static ssize_t target_stat_tgt_port_inst_show(struct config_item *item,
+ char *page)
{
- struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
- struct se_port *sep;
- ssize_t ret;
+ struct se_lun *lun = to_stat_tgt_port(item);
+ struct se_device *dev;
+ ssize_t ret = -ENODEV;
- spin_lock(&lun->lun_sep_lock);
- sep = lun->lun_sep;
- if (!sep) {
- spin_unlock(&lun->lun_sep_lock);
- return -ENODEV;
- }
- ret = snprintf(page, PAGE_SIZE, "%u\n", sep->sep_index);
- spin_unlock(&lun->lun_sep_lock);
+ rcu_read_lock();
+ dev = rcu_dereference(lun->lun_se_dev);
+ if (dev)
+ ret = snprintf(page, PAGE_SIZE, "%u\n", dev->hba_index);
+ rcu_read_unlock();
return ret;
}
-DEV_STAT_SCSI_TGT_PORT_ATTR_RO(indx);
-static ssize_t target_stat_scsi_tgt_port_show_attr_name(
- struct se_port_stat_grps *pgrps, char *page)
+static ssize_t target_stat_tgt_port_dev_show(struct config_item *item,
+ char *page)
{
- struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
- struct se_port *sep;
- struct se_portal_group *tpg;
- ssize_t ret;
+ struct se_lun *lun = to_stat_tgt_port(item);
+ struct se_device *dev;
+ ssize_t ret = -ENODEV;
- spin_lock(&lun->lun_sep_lock);
- sep = lun->lun_sep;
- if (!sep) {
- spin_unlock(&lun->lun_sep_lock);
- return -ENODEV;
- }
- tpg = sep->sep_tpg;
-
- ret = snprintf(page, PAGE_SIZE, "%sPort#%u\n",
- tpg->se_tpg_tfo->get_fabric_name(), sep->sep_index);
- spin_unlock(&lun->lun_sep_lock);
+ rcu_read_lock();
+ dev = rcu_dereference(lun->lun_se_dev);
+ if (dev)
+ ret = snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
+ rcu_read_unlock();
return ret;
}
-DEV_STAT_SCSI_TGT_PORT_ATTR_RO(name);
-static ssize_t target_stat_scsi_tgt_port_show_attr_port_index(
- struct se_port_stat_grps *pgrps, char *page)
+static ssize_t target_stat_tgt_port_indx_show(struct config_item *item,
+ char *page)
{
- struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
- struct se_port *sep;
- struct se_portal_group *tpg;
- ssize_t ret;
-
- spin_lock(&lun->lun_sep_lock);
- sep = lun->lun_sep;
- if (!sep) {
- spin_unlock(&lun->lun_sep_lock);
- return -ENODEV;
- }
- tpg = sep->sep_tpg;
+ struct se_lun *lun = to_stat_tgt_port(item);
+ struct se_device *dev;
+ ssize_t ret = -ENODEV;
- ret = snprintf(page, PAGE_SIZE, "%s%s%d\n",
- tpg->se_tpg_tfo->tpg_get_wwn(tpg), "+t+",
- tpg->se_tpg_tfo->tpg_get_tag(tpg));
- spin_unlock(&lun->lun_sep_lock);
+ rcu_read_lock();
+ dev = rcu_dereference(lun->lun_se_dev);
+ if (dev)
+ ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_tpg->tpg_rtpi);
+ rcu_read_unlock();
return ret;
}
-DEV_STAT_SCSI_TGT_PORT_ATTR_RO(port_index);
-static ssize_t target_stat_scsi_tgt_port_show_attr_in_cmds(
- struct se_port_stat_grps *pgrps, char *page)
+static ssize_t target_stat_tgt_port_name_show(struct config_item *item,
+ char *page)
{
- struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
- struct se_port *sep;
- ssize_t ret;
-
- spin_lock(&lun->lun_sep_lock);
- sep = lun->lun_sep;
- if (!sep) {
- spin_unlock(&lun->lun_sep_lock);
- return -ENODEV;
- }
+ struct se_lun *lun = to_stat_tgt_port(item);
+ struct se_portal_group *tpg = lun->lun_tpg;
+ struct se_device *dev;
+ ssize_t ret = -ENODEV;
- ret = snprintf(page, PAGE_SIZE, "%llu\n", sep->sep_stats.cmd_pdus);
- spin_unlock(&lun->lun_sep_lock);
+ rcu_read_lock();
+ dev = rcu_dereference(lun->lun_se_dev);
+ if (dev)
+ ret = snprintf(page, PAGE_SIZE, "%sPort#%u\n",
+ tpg->se_tpg_tfo->fabric_name,
+ lun->lun_tpg->tpg_rtpi);
+ rcu_read_unlock();
return ret;
}
-DEV_STAT_SCSI_TGT_PORT_ATTR_RO(in_cmds);
-static ssize_t target_stat_scsi_tgt_port_show_attr_write_mbytes(
- struct se_port_stat_grps *pgrps, char *page)
+static ssize_t target_stat_tgt_port_port_index_show(struct config_item *item,
+ char *page)
{
- struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
- struct se_port *sep;
- ssize_t ret;
+ struct se_lun *lun = to_stat_tgt_port(item);
+ struct se_portal_group *tpg = lun->lun_tpg;
+ struct se_device *dev;
+ ssize_t ret = -ENODEV;
- spin_lock(&lun->lun_sep_lock);
- sep = lun->lun_sep;
- if (!sep) {
- spin_unlock(&lun->lun_sep_lock);
- return -ENODEV;
- }
-
- ret = snprintf(page, PAGE_SIZE, "%u\n",
- (u32)(sep->sep_stats.rx_data_octets >> 20));
- spin_unlock(&lun->lun_sep_lock);
+ rcu_read_lock();
+ dev = rcu_dereference(lun->lun_se_dev);
+ if (dev)
+ ret = snprintf(page, PAGE_SIZE, "%s%s%d\n",
+ tpg->se_tpg_tfo->tpg_get_wwn(tpg), "+t+",
+ tpg->se_tpg_tfo->tpg_get_tag(tpg));
+ rcu_read_unlock();
return ret;
}
-DEV_STAT_SCSI_TGT_PORT_ATTR_RO(write_mbytes);
-static ssize_t target_stat_scsi_tgt_port_show_attr_read_mbytes(
- struct se_port_stat_grps *pgrps, char *page)
-{
- struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
- struct se_port *sep;
- ssize_t ret;
-
- spin_lock(&lun->lun_sep_lock);
- sep = lun->lun_sep;
- if (!sep) {
- spin_unlock(&lun->lun_sep_lock);
- return -ENODEV;
+#define tgt_port_show_per_cpu_stat(prefix, field, shift) \
+per_cpu_stat_snprintf(scsi_port_stats, prefix, field, shift); \
+static ssize_t \
+target_stat_##prefix##_show(struct config_item *item, char *page) \
+{ \
+ struct se_lun *lun = to_stat_tgt_port(item); \
+ struct se_device *dev; \
+ int ret; \
+ \
+ rcu_read_lock(); \
+ dev = rcu_dereference(lun->lun_se_dev); \
+ if (!dev) { \
+ rcu_read_unlock(); \
+ return -ENODEV; \
+ } \
+ \
+ ret = per_cpu_stat_##prefix##_snprintf(lun->lun_stats, page); \
+ rcu_read_unlock(); \
+ return ret; \
+}
+
+tgt_port_show_per_cpu_stat(tgt_port_in_cmds, cmd_pdus, 0);
+tgt_port_show_per_cpu_stat(tgt_port_write_mbytes, rx_data_octets, 20);
+tgt_port_show_per_cpu_stat(tgt_port_read_mbytes, tx_data_octets, 20);
+
+static ssize_t target_stat_tgt_port_hs_in_cmds_show(struct config_item *item,
+ char *page)
+{
+ struct se_lun *lun = to_stat_tgt_port(item);
+ struct se_device *dev;
+ ssize_t ret = -ENODEV;
+
+ rcu_read_lock();
+ dev = rcu_dereference(lun->lun_se_dev);
+ if (dev) {
+ /* FIXME: scsiTgtPortHsInCommands */
+ ret = snprintf(page, PAGE_SIZE, "%u\n", 0);
}
-
- ret = snprintf(page, PAGE_SIZE, "%u\n",
- (u32)(sep->sep_stats.tx_data_octets >> 20));
- spin_unlock(&lun->lun_sep_lock);
+ rcu_read_unlock();
return ret;
}
-DEV_STAT_SCSI_TGT_PORT_ATTR_RO(read_mbytes);
-static ssize_t target_stat_scsi_tgt_port_show_attr_hs_in_cmds(
- struct se_port_stat_grps *pgrps, char *page)
-{
- struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
- struct se_port *sep;
- ssize_t ret;
-
- spin_lock(&lun->lun_sep_lock);
- sep = lun->lun_sep;
- if (!sep) {
- spin_unlock(&lun->lun_sep_lock);
- return -ENODEV;
- }
-
- /* FIXME: scsiTgtPortHsInCommands */
- ret = snprintf(page, PAGE_SIZE, "%u\n", 0);
- spin_unlock(&lun->lun_sep_lock);
- return ret;
-}
-DEV_STAT_SCSI_TGT_PORT_ATTR_RO(hs_in_cmds);
-
-CONFIGFS_EATTR_OPS(target_stat_scsi_tgt_port, se_port_stat_grps,
- scsi_tgt_port_group);
+CONFIGFS_ATTR_RO(target_stat_tgt_port_, inst);
+CONFIGFS_ATTR_RO(target_stat_tgt_port_, dev);
+CONFIGFS_ATTR_RO(target_stat_tgt_port_, indx);
+CONFIGFS_ATTR_RO(target_stat_tgt_port_, name);
+CONFIGFS_ATTR_RO(target_stat_tgt_port_, port_index);
+CONFIGFS_ATTR_RO(target_stat_tgt_port_, in_cmds);
+CONFIGFS_ATTR_RO(target_stat_tgt_port_, write_mbytes);
+CONFIGFS_ATTR_RO(target_stat_tgt_port_, read_mbytes);
+CONFIGFS_ATTR_RO(target_stat_tgt_port_, hs_in_cmds);
static struct configfs_attribute *target_stat_scsi_tgt_port_attrs[] = {
- &target_stat_scsi_tgt_port_inst.attr,
- &target_stat_scsi_tgt_port_dev.attr,
- &target_stat_scsi_tgt_port_indx.attr,
- &target_stat_scsi_tgt_port_name.attr,
- &target_stat_scsi_tgt_port_port_index.attr,
- &target_stat_scsi_tgt_port_in_cmds.attr,
- &target_stat_scsi_tgt_port_write_mbytes.attr,
- &target_stat_scsi_tgt_port_read_mbytes.attr,
- &target_stat_scsi_tgt_port_hs_in_cmds.attr,
+ &target_stat_tgt_port_attr_inst,
+ &target_stat_tgt_port_attr_dev,
+ &target_stat_tgt_port_attr_indx,
+ &target_stat_tgt_port_attr_name,
+ &target_stat_tgt_port_attr_port_index,
+ &target_stat_tgt_port_attr_in_cmds,
+ &target_stat_tgt_port_attr_write_mbytes,
+ &target_stat_tgt_port_attr_read_mbytes,
+ &target_stat_tgt_port_attr_hs_in_cmds,
NULL,
};
-static struct configfs_item_operations target_stat_scsi_tgt_port_attrib_ops = {
- .show_attribute = target_stat_scsi_tgt_port_attr_show,
- .store_attribute = target_stat_scsi_tgt_port_attr_store,
-};
-
-static struct config_item_type target_stat_scsi_tgt_port_cit = {
- .ct_item_ops = &target_stat_scsi_tgt_port_attrib_ops,
+static const struct config_item_type target_stat_scsi_tgt_port_cit = {
.ct_attrs = target_stat_scsi_tgt_port_attrs,
.ct_owner = THIS_MODULE,
};
/*
* SCSI Transport Table
-o */
-
-CONFIGFS_EATTR_STRUCT(target_stat_scsi_transport, se_port_stat_grps);
-#define DEV_STAT_SCSI_TRANSPORT_ATTR(_name, _mode) \
-static struct target_stat_scsi_transport_attribute \
- target_stat_scsi_transport_##_name = \
- __CONFIGFS_EATTR(_name, _mode, \
- target_stat_scsi_transport_show_attr_##_name, \
- target_stat_scsi_transport_store_attr_##_name);
-
-#define DEV_STAT_SCSI_TRANSPORT_ATTR_RO(_name) \
-static struct target_stat_scsi_transport_attribute \
- target_stat_scsi_transport_##_name = \
- __CONFIGFS_EATTR_RO(_name, \
- target_stat_scsi_transport_show_attr_##_name);
-
-static ssize_t target_stat_scsi_transport_show_attr_inst(
- struct se_port_stat_grps *pgrps, char *page)
-{
- struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
- struct se_device *dev = lun->lun_se_dev;
- struct se_port *sep;
- struct se_hba *hba;
- ssize_t ret;
+ */
+static struct se_lun *to_transport_stat(struct config_item *item)
+{
+ struct se_port_stat_grps *pgrps = container_of(to_config_group(item),
+ struct se_port_stat_grps, scsi_transport_group);
+ return container_of(pgrps, struct se_lun, port_stat_grps);
+}
- spin_lock(&lun->lun_sep_lock);
- sep = lun->lun_sep;
- if (!sep) {
- spin_unlock(&lun->lun_sep_lock);
- return -ENODEV;
- }
+static ssize_t target_stat_transport_inst_show(struct config_item *item,
+ char *page)
+{
+ struct se_lun *lun = to_transport_stat(item);
+ struct se_device *dev;
+ ssize_t ret = -ENODEV;
- hba = dev->se_hba;
- ret = snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
- spin_unlock(&lun->lun_sep_lock);
+ rcu_read_lock();
+ dev = rcu_dereference(lun->lun_se_dev);
+ if (dev)
+ ret = snprintf(page, PAGE_SIZE, "%u\n", dev->hba_index);
+ rcu_read_unlock();
return ret;
}
-DEV_STAT_SCSI_TRANSPORT_ATTR_RO(inst);
-static ssize_t target_stat_scsi_transport_show_attr_device(
- struct se_port_stat_grps *pgrps, char *page)
+static ssize_t target_stat_transport_device_show(struct config_item *item,
+ char *page)
{
- struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
- struct se_port *sep;
- struct se_portal_group *tpg;
- ssize_t ret;
+ struct se_lun *lun = to_transport_stat(item);
+ struct se_device *dev;
+ struct se_portal_group *tpg = lun->lun_tpg;
+ ssize_t ret = -ENODEV;
- spin_lock(&lun->lun_sep_lock);
- sep = lun->lun_sep;
- if (!sep) {
- spin_unlock(&lun->lun_sep_lock);
- return -ENODEV;
+ rcu_read_lock();
+ dev = rcu_dereference(lun->lun_se_dev);
+ if (dev) {
+ /* scsiTransportType */
+ ret = snprintf(page, PAGE_SIZE, "scsiTransport%s\n",
+ tpg->se_tpg_tfo->fabric_name);
}
- tpg = sep->sep_tpg;
- /* scsiTransportType */
- ret = snprintf(page, PAGE_SIZE, "scsiTransport%s\n",
- tpg->se_tpg_tfo->get_fabric_name());
- spin_unlock(&lun->lun_sep_lock);
+ rcu_read_unlock();
return ret;
}
-DEV_STAT_SCSI_TRANSPORT_ATTR_RO(device);
-static ssize_t target_stat_scsi_transport_show_attr_indx(
- struct se_port_stat_grps *pgrps, char *page)
+static ssize_t target_stat_transport_indx_show(struct config_item *item,
+ char *page)
{
- struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
- struct se_port *sep;
- struct se_portal_group *tpg;
- ssize_t ret;
+ struct se_lun *lun = to_transport_stat(item);
+ struct se_device *dev;
+ struct se_portal_group *tpg = lun->lun_tpg;
+ ssize_t ret = -ENODEV;
- spin_lock(&lun->lun_sep_lock);
- sep = lun->lun_sep;
- if (!sep) {
- spin_unlock(&lun->lun_sep_lock);
- return -ENODEV;
- }
- tpg = sep->sep_tpg;
- ret = snprintf(page, PAGE_SIZE, "%u\n",
- tpg->se_tpg_tfo->tpg_get_inst_index(tpg));
- spin_unlock(&lun->lun_sep_lock);
+ rcu_read_lock();
+ dev = rcu_dereference(lun->lun_se_dev);
+ if (dev)
+ ret = snprintf(page, PAGE_SIZE, "%u\n",
+ tpg->se_tpg_tfo->tpg_get_inst_index(tpg));
+ rcu_read_unlock();
return ret;
}
-DEV_STAT_SCSI_TRANSPORT_ATTR_RO(indx);
-static ssize_t target_stat_scsi_transport_show_attr_dev_name(
- struct se_port_stat_grps *pgrps, char *page)
+static ssize_t target_stat_transport_dev_name_show(struct config_item *item,
+ char *page)
{
- struct se_lun *lun = container_of(pgrps, struct se_lun, port_stat_grps);
- struct se_device *dev = lun->lun_se_dev;
- struct se_port *sep;
- struct se_portal_group *tpg;
+ struct se_lun *lun = to_transport_stat(item);
+ struct se_device *dev;
+ struct se_portal_group *tpg = lun->lun_tpg;
struct t10_wwn *wwn;
- ssize_t ret;
-
- spin_lock(&lun->lun_sep_lock);
- sep = lun->lun_sep;
- if (!sep) {
- spin_unlock(&lun->lun_sep_lock);
- return -ENODEV;
+ ssize_t ret = -ENODEV;
+
+ rcu_read_lock();
+ dev = rcu_dereference(lun->lun_se_dev);
+ if (dev) {
+ wwn = &dev->t10_wwn;
+ /* scsiTransportDevName */
+ ret = snprintf(page, PAGE_SIZE, "%s+%s\n",
+ tpg->se_tpg_tfo->tpg_get_wwn(tpg),
+ (strlen(wwn->unit_serial)) ? wwn->unit_serial :
+ wwn->vendor);
}
- tpg = sep->sep_tpg;
- wwn = &dev->t10_wwn;
- /* scsiTransportDevName */
- ret = snprintf(page, PAGE_SIZE, "%s+%s\n",
- tpg->se_tpg_tfo->tpg_get_wwn(tpg),
- (strlen(wwn->unit_serial)) ? wwn->unit_serial :
- wwn->vendor);
- spin_unlock(&lun->lun_sep_lock);
+ rcu_read_unlock();
+ return ret;
+}
+
+static ssize_t target_stat_transport_proto_id_show(struct config_item *item,
+ char *page)
+{
+ struct se_lun *lun = to_transport_stat(item);
+ struct se_device *dev;
+ struct se_portal_group *tpg = lun->lun_tpg;
+ ssize_t ret = -ENODEV;
+
+ rcu_read_lock();
+ dev = rcu_dereference(lun->lun_se_dev);
+ if (dev)
+ ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->proto_id);
+ rcu_read_unlock();
return ret;
}
-DEV_STAT_SCSI_TRANSPORT_ATTR_RO(dev_name);
-CONFIGFS_EATTR_OPS(target_stat_scsi_transport, se_port_stat_grps,
- scsi_transport_group);
+CONFIGFS_ATTR_RO(target_stat_transport_, inst);
+CONFIGFS_ATTR_RO(target_stat_transport_, device);
+CONFIGFS_ATTR_RO(target_stat_transport_, indx);
+CONFIGFS_ATTR_RO(target_stat_transport_, dev_name);
+CONFIGFS_ATTR_RO(target_stat_transport_, proto_id);
static struct configfs_attribute *target_stat_scsi_transport_attrs[] = {
- &target_stat_scsi_transport_inst.attr,
- &target_stat_scsi_transport_device.attr,
- &target_stat_scsi_transport_indx.attr,
- &target_stat_scsi_transport_dev_name.attr,
+ &target_stat_transport_attr_inst,
+ &target_stat_transport_attr_device,
+ &target_stat_transport_attr_indx,
+ &target_stat_transport_attr_dev_name,
+ &target_stat_transport_attr_proto_id,
NULL,
};
-static struct configfs_item_operations target_stat_scsi_transport_attrib_ops = {
- .show_attribute = target_stat_scsi_transport_attr_show,
- .store_attribute = target_stat_scsi_transport_attr_store,
-};
-
-static struct config_item_type target_stat_scsi_transport_cit = {
- .ct_item_ops = &target_stat_scsi_transport_attrib_ops,
+static const struct config_item_type target_stat_scsi_transport_cit = {
.ct_attrs = target_stat_scsi_transport_attrs,
.ct_owner = THIS_MODULE,
};
@@ -1040,383 +802,323 @@ static struct config_item_type target_stat_scsi_transport_cit = {
*/
void target_stat_setup_port_default_groups(struct se_lun *lun)
{
- struct config_group *port_stat_grp = &lun->port_stat_grps.stat_group;
-
config_group_init_type_name(&lun->port_stat_grps.scsi_port_group,
"scsi_port", &target_stat_scsi_port_cit);
+ configfs_add_default_group(&lun->port_stat_grps.scsi_port_group,
+ &lun->port_stat_grps.stat_group);
+
config_group_init_type_name(&lun->port_stat_grps.scsi_tgt_port_group,
"scsi_tgt_port", &target_stat_scsi_tgt_port_cit);
+ configfs_add_default_group(&lun->port_stat_grps.scsi_tgt_port_group,
+ &lun->port_stat_grps.stat_group);
+
config_group_init_type_name(&lun->port_stat_grps.scsi_transport_group,
"scsi_transport", &target_stat_scsi_transport_cit);
-
- port_stat_grp->default_groups[0] = &lun->port_stat_grps.scsi_port_group;
- port_stat_grp->default_groups[1] = &lun->port_stat_grps.scsi_tgt_port_group;
- port_stat_grp->default_groups[2] = &lun->port_stat_grps.scsi_transport_group;
- port_stat_grp->default_groups[3] = NULL;
+ configfs_add_default_group(&lun->port_stat_grps.scsi_transport_group,
+ &lun->port_stat_grps.stat_group);
}
/*
* SCSI Authorized Initiator Table
*/
-CONFIGFS_EATTR_STRUCT(target_stat_scsi_auth_intr, se_ml_stat_grps);
-#define DEV_STAT_SCSI_AUTH_INTR_ATTR(_name, _mode) \
-static struct target_stat_scsi_auth_intr_attribute \
- target_stat_scsi_auth_intr_##_name = \
- __CONFIGFS_EATTR(_name, _mode, \
- target_stat_scsi_auth_intr_show_attr_##_name, \
- target_stat_scsi_auth_intr_store_attr_##_name);
-
-#define DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(_name) \
-static struct target_stat_scsi_auth_intr_attribute \
- target_stat_scsi_auth_intr_##_name = \
- __CONFIGFS_EATTR_RO(_name, \
- target_stat_scsi_auth_intr_show_attr_##_name);
-
-static ssize_t target_stat_scsi_auth_intr_show_attr_inst(
- struct se_ml_stat_grps *lgrps, char *page)
-{
- struct se_lun_acl *lacl = container_of(lgrps,
- struct se_lun_acl, ml_stat_grps);
+static struct se_lun_acl *auth_to_lacl(struct config_item *item)
+{
+ struct se_ml_stat_grps *lgrps = container_of(to_config_group(item),
+ struct se_ml_stat_grps, scsi_auth_intr_group);
+ return container_of(lgrps, struct se_lun_acl, ml_stat_grps);
+}
+
+static ssize_t target_stat_auth_inst_show(struct config_item *item,
+ char *page)
+{
+ struct se_lun_acl *lacl = auth_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
struct se_dev_entry *deve;
struct se_portal_group *tpg;
ssize_t ret;
- spin_lock_irq(&nacl->device_list_lock);
- deve = nacl->device_list[lacl->mapped_lun];
- if (!deve->se_lun || !deve->se_lun_acl) {
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_lock();
+ deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+ if (!deve) {
+ rcu_read_unlock();
return -ENODEV;
}
tpg = nacl->se_tpg;
/* scsiInstIndex */
ret = snprintf(page, PAGE_SIZE, "%u\n",
tpg->se_tpg_tfo->tpg_get_inst_index(tpg));
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_unlock();
return ret;
}
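
target_nacl_find_deve() replaces indexing the old fixed device_list[] under device_list_lock with an RCU-protected lookup keyed by mapped_lun; its body lives in target_core_device.c, outside this diff. A sketch of the shape such a lookup takes (list head and member names are assumptions, and the result is valid only inside rcu_read_lock()):

	#include <linux/rculist.h>

	static struct se_dev_entry *demo_find_deve(struct se_node_acl *nacl,
						   u64 mapped_lun)
	{
		struct se_dev_entry *deve;

		hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
			if (deve->mapped_lun == mapped_lun)
				return deve;
		return NULL;
	}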
-DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(inst);
-static ssize_t target_stat_scsi_auth_intr_show_attr_dev(
- struct se_ml_stat_grps *lgrps, char *page)
+static ssize_t target_stat_auth_dev_show(struct config_item *item,
+ char *page)
{
- struct se_lun_acl *lacl = container_of(lgrps,
- struct se_lun_acl, ml_stat_grps);
+ struct se_lun_acl *lacl = auth_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
struct se_dev_entry *deve;
- struct se_lun *lun;
ssize_t ret;
- spin_lock_irq(&nacl->device_list_lock);
- deve = nacl->device_list[lacl->mapped_lun];
- if (!deve->se_lun || !deve->se_lun_acl) {
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_lock();
+ deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+ if (!deve) {
+ rcu_read_unlock();
return -ENODEV;
}
- lun = deve->se_lun;
+
/* scsiDeviceIndex */
- ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_se_dev->dev_index);
- spin_unlock_irq(&nacl->device_list_lock);
+ ret = snprintf(page, PAGE_SIZE, "%u\n", deve->se_lun->lun_index);
+ rcu_read_unlock();
return ret;
}
-DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(dev);
-static ssize_t target_stat_scsi_auth_intr_show_attr_port(
- struct se_ml_stat_grps *lgrps, char *page)
+static ssize_t target_stat_auth_port_show(struct config_item *item,
+ char *page)
{
- struct se_lun_acl *lacl = container_of(lgrps,
- struct se_lun_acl, ml_stat_grps);
+ struct se_lun_acl *lacl = auth_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
struct se_dev_entry *deve;
struct se_portal_group *tpg;
ssize_t ret;
- spin_lock_irq(&nacl->device_list_lock);
- deve = nacl->device_list[lacl->mapped_lun];
- if (!deve->se_lun || !deve->se_lun_acl) {
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_lock();
+ deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+ if (!deve) {
+ rcu_read_unlock();
return -ENODEV;
}
tpg = nacl->se_tpg;
/* scsiAuthIntrTgtPortIndex */
ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->se_tpg_tfo->tpg_get_tag(tpg));
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_unlock();
return ret;
}
-DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(port);
-static ssize_t target_stat_scsi_auth_intr_show_attr_indx(
- struct se_ml_stat_grps *lgrps, char *page)
+static ssize_t target_stat_auth_indx_show(struct config_item *item,
+ char *page)
{
- struct se_lun_acl *lacl = container_of(lgrps,
- struct se_lun_acl, ml_stat_grps);
+ struct se_lun_acl *lacl = auth_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
struct se_dev_entry *deve;
ssize_t ret;
- spin_lock_irq(&nacl->device_list_lock);
- deve = nacl->device_list[lacl->mapped_lun];
- if (!deve->se_lun || !deve->se_lun_acl) {
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_lock();
+ deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+ if (!deve) {
+ rcu_read_unlock();
return -ENODEV;
}
/* scsiAuthIntrIndex */
ret = snprintf(page, PAGE_SIZE, "%u\n", nacl->acl_index);
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_unlock();
return ret;
}
-DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(indx);
-static ssize_t target_stat_scsi_auth_intr_show_attr_dev_or_port(
- struct se_ml_stat_grps *lgrps, char *page)
+static ssize_t target_stat_auth_dev_or_port_show(struct config_item *item,
+ char *page)
{
- struct se_lun_acl *lacl = container_of(lgrps,
- struct se_lun_acl, ml_stat_grps);
+ struct se_lun_acl *lacl = auth_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
struct se_dev_entry *deve;
ssize_t ret;
- spin_lock_irq(&nacl->device_list_lock);
- deve = nacl->device_list[lacl->mapped_lun];
- if (!deve->se_lun || !deve->se_lun_acl) {
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_lock();
+ deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+ if (!deve) {
+ rcu_read_unlock();
return -ENODEV;
}
/* scsiAuthIntrDevOrPort */
ret = snprintf(page, PAGE_SIZE, "%u\n", 1);
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_unlock();
return ret;
}
-DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(dev_or_port);
-static ssize_t target_stat_scsi_auth_intr_show_attr_intr_name(
- struct se_ml_stat_grps *lgrps, char *page)
+static ssize_t target_stat_auth_intr_name_show(struct config_item *item,
+ char *page)
{
- struct se_lun_acl *lacl = container_of(lgrps,
- struct se_lun_acl, ml_stat_grps);
+ struct se_lun_acl *lacl = auth_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
struct se_dev_entry *deve;
ssize_t ret;
- spin_lock_irq(&nacl->device_list_lock);
- deve = nacl->device_list[lacl->mapped_lun];
- if (!deve->se_lun || !deve->se_lun_acl) {
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_lock();
+ deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+ if (!deve) {
+ rcu_read_unlock();
return -ENODEV;
}
/* scsiAuthIntrName */
ret = snprintf(page, PAGE_SIZE, "%s\n", nacl->initiatorname);
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_unlock();
return ret;
}
-DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(intr_name);
-static ssize_t target_stat_scsi_auth_intr_show_attr_map_indx(
- struct se_ml_stat_grps *lgrps, char *page)
+static ssize_t target_stat_auth_map_indx_show(struct config_item *item,
+ char *page)
{
- struct se_lun_acl *lacl = container_of(lgrps,
- struct se_lun_acl, ml_stat_grps);
+ struct se_lun_acl *lacl = auth_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
struct se_dev_entry *deve;
ssize_t ret;
- spin_lock_irq(&nacl->device_list_lock);
- deve = nacl->device_list[lacl->mapped_lun];
- if (!deve->se_lun || !deve->se_lun_acl) {
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_lock();
+ deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+ if (!deve) {
+ rcu_read_unlock();
return -ENODEV;
}
/* FIXME: scsiAuthIntrLunMapIndex */
ret = snprintf(page, PAGE_SIZE, "%u\n", 0);
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_unlock();
return ret;
}
-DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(map_indx);
-static ssize_t target_stat_scsi_auth_intr_show_attr_att_count(
- struct se_ml_stat_grps *lgrps, char *page)
+static ssize_t target_stat_auth_att_count_show(struct config_item *item,
+ char *page)
{
- struct se_lun_acl *lacl = container_of(lgrps,
- struct se_lun_acl, ml_stat_grps);
+ struct se_lun_acl *lacl = auth_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
struct se_dev_entry *deve;
ssize_t ret;
- spin_lock_irq(&nacl->device_list_lock);
- deve = nacl->device_list[lacl->mapped_lun];
- if (!deve->se_lun || !deve->se_lun_acl) {
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_lock();
+ deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+ if (!deve) {
+ rcu_read_unlock();
return -ENODEV;
}
/* scsiAuthIntrAttachedTimes */
ret = snprintf(page, PAGE_SIZE, "%u\n", deve->attach_count);
- spin_unlock_irq(&nacl->device_list_lock);
- return ret;
-}
-DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(att_count);
-
-static ssize_t target_stat_scsi_auth_intr_show_attr_num_cmds(
- struct se_ml_stat_grps *lgrps, char *page)
-{
- struct se_lun_acl *lacl = container_of(lgrps,
- struct se_lun_acl, ml_stat_grps);
- struct se_node_acl *nacl = lacl->se_lun_nacl;
- struct se_dev_entry *deve;
- ssize_t ret;
-
- spin_lock_irq(&nacl->device_list_lock);
- deve = nacl->device_list[lacl->mapped_lun];
- if (!deve->se_lun || !deve->se_lun_acl) {
- spin_unlock_irq(&nacl->device_list_lock);
- return -ENODEV;
- }
- /* scsiAuthIntrOutCommands */
- ret = snprintf(page, PAGE_SIZE, "%u\n", deve->total_cmds);
- spin_unlock_irq(&nacl->device_list_lock);
- return ret;
-}
-DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(num_cmds);
-
-static ssize_t target_stat_scsi_auth_intr_show_attr_read_mbytes(
- struct se_ml_stat_grps *lgrps, char *page)
-{
- struct se_lun_acl *lacl = container_of(lgrps,
- struct se_lun_acl, ml_stat_grps);
- struct se_node_acl *nacl = lacl->se_lun_nacl;
- struct se_dev_entry *deve;
- ssize_t ret;
-
- spin_lock_irq(&nacl->device_list_lock);
- deve = nacl->device_list[lacl->mapped_lun];
- if (!deve->se_lun || !deve->se_lun_acl) {
- spin_unlock_irq(&nacl->device_list_lock);
- return -ENODEV;
- }
- /* scsiAuthIntrReadMegaBytes */
- ret = snprintf(page, PAGE_SIZE, "%u\n", (u32)(deve->read_bytes >> 20));
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_unlock();
return ret;
}
-DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(read_mbytes);
-
-static ssize_t target_stat_scsi_auth_intr_show_attr_write_mbytes(
- struct se_ml_stat_grps *lgrps, char *page)
-{
- struct se_lun_acl *lacl = container_of(lgrps,
- struct se_lun_acl, ml_stat_grps);
- struct se_node_acl *nacl = lacl->se_lun_nacl;
- struct se_dev_entry *deve;
- ssize_t ret;
- spin_lock_irq(&nacl->device_list_lock);
- deve = nacl->device_list[lacl->mapped_lun];
- if (!deve->se_lun || !deve->se_lun_acl) {
- spin_unlock_irq(&nacl->device_list_lock);
- return -ENODEV;
- }
- /* scsiAuthIntrWrittenMegaBytes */
- ret = snprintf(page, PAGE_SIZE, "%u\n", (u32)(deve->write_bytes >> 20));
- spin_unlock_irq(&nacl->device_list_lock);
- return ret;
-}
-DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(write_mbytes);
-
-static ssize_t target_stat_scsi_auth_intr_show_attr_hs_num_cmds(
- struct se_ml_stat_grps *lgrps, char *page)
-{
- struct se_lun_acl *lacl = container_of(lgrps,
- struct se_lun_acl, ml_stat_grps);
+#define auth_show_per_cpu_stat(prefix, field, shift) \
+per_cpu_stat_snprintf(se_dev_entry_io_stats, prefix, field, shift); \
+static ssize_t \
+target_stat_##prefix##_show(struct config_item *item, char *page) \
+{ \
+ struct se_lun_acl *lacl = auth_to_lacl(item); \
+ struct se_node_acl *nacl = lacl->se_lun_nacl; \
+ struct se_dev_entry *deve; \
+ int ret; \
+ \
+ rcu_read_lock(); \
+ deve = target_nacl_find_deve(nacl, lacl->mapped_lun); \
+ if (!deve) { \
+ rcu_read_unlock(); \
+ return -ENODEV; \
+ } \
+ \
+ ret = per_cpu_stat_##prefix##_snprintf(deve->stats, page); \
+ rcu_read_unlock(); \
+ return ret; \
+}
+
+/* scsiAuthIntrOutCommands */
+auth_show_per_cpu_stat(auth_num_cmds, total_cmds, 0);
+/* scsiAuthIntrReadMegaBytes */
+auth_show_per_cpu_stat(auth_read_mbytes, read_bytes, 20);
+/* scsiAuthIntrWrittenMegaBytes */
+auth_show_per_cpu_stat(auth_write_mbytes, write_bytes, 20);
+
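
The per_cpu_stat_snprintf() macro referenced above is defined elsewhere in this file. As a hedged sketch, the helper it generates for auth_show_per_cpu_stat(auth_read_mbytes, read_bytes, 20) plausibly sums the per-CPU counters and shifts the total down to megabytes, roughly like this (details may differ from the real macro):

static int example_per_cpu_read_mbytes_snprintf(
		struct se_dev_entry_io_stats __percpu *stats, char *page)
{
	u64 total = 0;
	int cpu;

	/* sum the lock-free per-CPU read_bytes counters */
	for_each_possible_cpu(cpu)
		total += per_cpu_ptr(stats, cpu)->read_bytes;

	return snprintf(page, PAGE_SIZE, "%llu\n", total >> 20);
}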
+static ssize_t target_stat_auth_hs_num_cmds_show(struct config_item *item,
+ char *page)
+{
+ struct se_lun_acl *lacl = auth_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
struct se_dev_entry *deve;
ssize_t ret;
- spin_lock_irq(&nacl->device_list_lock);
- deve = nacl->device_list[lacl->mapped_lun];
- if (!deve->se_lun || !deve->se_lun_acl) {
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_lock();
+ deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+ if (!deve) {
+ rcu_read_unlock();
return -ENODEV;
}
/* FIXME: scsiAuthIntrHSOutCommands */
ret = snprintf(page, PAGE_SIZE, "%u\n", 0);
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_unlock();
return ret;
}
-DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(hs_num_cmds);
-static ssize_t target_stat_scsi_auth_intr_show_attr_creation_time(
- struct se_ml_stat_grps *lgrps, char *page)
+static ssize_t target_stat_auth_creation_time_show(struct config_item *item,
+ char *page)
{
- struct se_lun_acl *lacl = container_of(lgrps,
- struct se_lun_acl, ml_stat_grps);
+ struct se_lun_acl *lacl = auth_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
struct se_dev_entry *deve;
ssize_t ret;
- spin_lock_irq(&nacl->device_list_lock);
- deve = nacl->device_list[lacl->mapped_lun];
- if (!deve->se_lun || !deve->se_lun_acl) {
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_lock();
+ deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+ if (!deve) {
+ rcu_read_unlock();
return -ENODEV;
}
/* scsiAuthIntrLastCreation */
ret = snprintf(page, PAGE_SIZE, "%u\n", (u32)(((u32)deve->creation_time -
INITIAL_JIFFIES) * 100 / HZ));
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_unlock();
return ret;
}
-DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(creation_time);
-static ssize_t target_stat_scsi_auth_intr_show_attr_row_status(
- struct se_ml_stat_grps *lgrps, char *page)
+static ssize_t target_stat_auth_row_status_show(struct config_item *item,
+ char *page)
{
- struct se_lun_acl *lacl = container_of(lgrps,
- struct se_lun_acl, ml_stat_grps);
+ struct se_lun_acl *lacl = auth_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
struct se_dev_entry *deve;
ssize_t ret;
- spin_lock_irq(&nacl->device_list_lock);
- deve = nacl->device_list[lacl->mapped_lun];
- if (!deve->se_lun || !deve->se_lun_acl) {
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_lock();
+ deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+ if (!deve) {
+ rcu_read_unlock();
return -ENODEV;
}
/* FIXME: scsiAuthIntrRowStatus */
ret = snprintf(page, PAGE_SIZE, "Ready\n");
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_unlock();
return ret;
}
-DEV_STAT_SCSI_AUTH_INTR_ATTR_RO(row_status);
-CONFIGFS_EATTR_OPS(target_stat_scsi_auth_intr, se_ml_stat_grps,
- scsi_auth_intr_group);
+CONFIGFS_ATTR_RO(target_stat_auth_, inst);
+CONFIGFS_ATTR_RO(target_stat_auth_, dev);
+CONFIGFS_ATTR_RO(target_stat_auth_, port);
+CONFIGFS_ATTR_RO(target_stat_auth_, indx);
+CONFIGFS_ATTR_RO(target_stat_auth_, dev_or_port);
+CONFIGFS_ATTR_RO(target_stat_auth_, intr_name);
+CONFIGFS_ATTR_RO(target_stat_auth_, map_indx);
+CONFIGFS_ATTR_RO(target_stat_auth_, att_count);
+CONFIGFS_ATTR_RO(target_stat_auth_, num_cmds);
+CONFIGFS_ATTR_RO(target_stat_auth_, read_mbytes);
+CONFIGFS_ATTR_RO(target_stat_auth_, write_mbytes);
+CONFIGFS_ATTR_RO(target_stat_auth_, hs_num_cmds);
+CONFIGFS_ATTR_RO(target_stat_auth_, creation_time);
+CONFIGFS_ATTR_RO(target_stat_auth_, row_status);
static struct configfs_attribute *target_stat_scsi_auth_intr_attrs[] = {
- &target_stat_scsi_auth_intr_inst.attr,
- &target_stat_scsi_auth_intr_dev.attr,
- &target_stat_scsi_auth_intr_port.attr,
- &target_stat_scsi_auth_intr_indx.attr,
- &target_stat_scsi_auth_intr_dev_or_port.attr,
- &target_stat_scsi_auth_intr_intr_name.attr,
- &target_stat_scsi_auth_intr_map_indx.attr,
- &target_stat_scsi_auth_intr_att_count.attr,
- &target_stat_scsi_auth_intr_num_cmds.attr,
- &target_stat_scsi_auth_intr_read_mbytes.attr,
- &target_stat_scsi_auth_intr_write_mbytes.attr,
- &target_stat_scsi_auth_intr_hs_num_cmds.attr,
- &target_stat_scsi_auth_intr_creation_time.attr,
- &target_stat_scsi_auth_intr_row_status.attr,
+ &target_stat_auth_attr_inst,
+ &target_stat_auth_attr_dev,
+ &target_stat_auth_attr_port,
+ &target_stat_auth_attr_indx,
+ &target_stat_auth_attr_dev_or_port,
+ &target_stat_auth_attr_intr_name,
+ &target_stat_auth_attr_map_indx,
+ &target_stat_auth_attr_att_count,
+ &target_stat_auth_attr_num_cmds,
+ &target_stat_auth_attr_read_mbytes,
+ &target_stat_auth_attr_write_mbytes,
+ &target_stat_auth_attr_hs_num_cmds,
+ &target_stat_auth_attr_creation_time,
+ &target_stat_auth_attr_row_status,
NULL,
};
-static struct configfs_item_operations target_stat_scsi_auth_intr_attrib_ops = {
- .show_attribute = target_stat_scsi_auth_intr_attr_show,
- .store_attribute = target_stat_scsi_auth_intr_attr_store,
-};
-
-static struct config_item_type target_stat_scsi_auth_intr_cit = {
- .ct_item_ops = &target_stat_scsi_auth_intr_attrib_ops,
+static const struct config_item_type target_stat_scsi_auth_intr_cit = {
.ct_attrs = target_stat_scsi_auth_intr_attrs,
.ct_owner = THIS_MODULE,
};
@@ -1425,98 +1127,83 @@ static struct config_item_type target_stat_scsi_auth_intr_cit = {
* SCSI Attached Initiator Port Table
*/
-CONFIGFS_EATTR_STRUCT(target_stat_scsi_att_intr_port, se_ml_stat_grps);
-#define DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR(_name, _mode) \
-static struct target_stat_scsi_att_intr_port_attribute \
- target_stat_scsi_att_intr_port_##_name = \
- __CONFIGFS_EATTR(_name, _mode, \
- target_stat_scsi_att_intr_port_show_attr_##_name, \
- target_stat_scsi_att_intr_port_store_attr_##_name);
-
-#define DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(_name) \
-static struct target_stat_scsi_att_intr_port_attribute \
- target_stat_scsi_att_intr_port_##_name = \
- __CONFIGFS_EATTR_RO(_name, \
- target_stat_scsi_att_intr_port_show_attr_##_name);
-
-static ssize_t target_stat_scsi_att_intr_port_show_attr_inst(
- struct se_ml_stat_grps *lgrps, char *page)
-{
- struct se_lun_acl *lacl = container_of(lgrps,
- struct se_lun_acl, ml_stat_grps);
+static struct se_lun_acl *iport_to_lacl(struct config_item *item)
+{
+ struct se_ml_stat_grps *lgrps = container_of(to_config_group(item),
+ struct se_ml_stat_grps, scsi_att_intr_port_group);
+ return container_of(lgrps, struct se_lun_acl, ml_stat_grps);
+}
+
+static ssize_t target_stat_iport_inst_show(struct config_item *item,
+ char *page)
+{
+ struct se_lun_acl *lacl = iport_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
struct se_dev_entry *deve;
struct se_portal_group *tpg;
ssize_t ret;
- spin_lock_irq(&nacl->device_list_lock);
- deve = nacl->device_list[lacl->mapped_lun];
- if (!deve->se_lun || !deve->se_lun_acl) {
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_lock();
+ deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+ if (!deve) {
+ rcu_read_unlock();
return -ENODEV;
}
tpg = nacl->se_tpg;
/* scsiInstIndex */
ret = snprintf(page, PAGE_SIZE, "%u\n",
tpg->se_tpg_tfo->tpg_get_inst_index(tpg));
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_unlock();
return ret;
}
-DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(inst);
-static ssize_t target_stat_scsi_att_intr_port_show_attr_dev(
- struct se_ml_stat_grps *lgrps, char *page)
+static ssize_t target_stat_iport_dev_show(struct config_item *item,
+ char *page)
{
- struct se_lun_acl *lacl = container_of(lgrps,
- struct se_lun_acl, ml_stat_grps);
+ struct se_lun_acl *lacl = iport_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
struct se_dev_entry *deve;
- struct se_lun *lun;
ssize_t ret;
- spin_lock_irq(&nacl->device_list_lock);
- deve = nacl->device_list[lacl->mapped_lun];
- if (!deve->se_lun || !deve->se_lun_acl) {
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_lock();
+ deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+ if (!deve) {
+ rcu_read_unlock();
return -ENODEV;
}
- lun = deve->se_lun;
+
/* scsiDeviceIndex */
- ret = snprintf(page, PAGE_SIZE, "%u\n", lun->lun_se_dev->dev_index);
- spin_unlock_irq(&nacl->device_list_lock);
+ ret = snprintf(page, PAGE_SIZE, "%u\n", deve->se_lun->lun_index);
+ rcu_read_unlock();
return ret;
}
-DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(dev);
-static ssize_t target_stat_scsi_att_intr_port_show_attr_port(
- struct se_ml_stat_grps *lgrps, char *page)
+static ssize_t target_stat_iport_port_show(struct config_item *item,
+ char *page)
{
- struct se_lun_acl *lacl = container_of(lgrps,
- struct se_lun_acl, ml_stat_grps);
+ struct se_lun_acl *lacl = iport_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
struct se_dev_entry *deve;
struct se_portal_group *tpg;
ssize_t ret;
- spin_lock_irq(&nacl->device_list_lock);
- deve = nacl->device_list[lacl->mapped_lun];
- if (!deve->se_lun || !deve->se_lun_acl) {
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_lock();
+ deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+ if (!deve) {
+ rcu_read_unlock();
return -ENODEV;
}
tpg = nacl->se_tpg;
/* scsiPortIndex */
ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->se_tpg_tfo->tpg_get_tag(tpg));
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_unlock();
return ret;
}
-DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(port);
-static ssize_t target_stat_scsi_att_intr_port_show_attr_indx(
- struct se_ml_stat_grps *lgrps, char *page)
+static ssize_t target_stat_iport_indx_show(struct config_item *item,
+ char *page)
{
- struct se_lun_acl *lacl = container_of(lgrps,
- struct se_lun_acl, ml_stat_grps);
+ struct se_lun_acl *lacl = iport_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
struct se_session *se_sess;
struct se_portal_group *tpg;
@@ -1536,35 +1223,31 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_indx(
spin_unlock_irq(&nacl->nacl_sess_lock);
return ret;
}
-DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(indx);
-static ssize_t target_stat_scsi_att_intr_port_show_attr_port_auth_indx(
- struct se_ml_stat_grps *lgrps, char *page)
+static ssize_t target_stat_iport_port_auth_indx_show(struct config_item *item,
+ char *page)
{
- struct se_lun_acl *lacl = container_of(lgrps,
- struct se_lun_acl, ml_stat_grps);
+ struct se_lun_acl *lacl = iport_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
struct se_dev_entry *deve;
ssize_t ret;
- spin_lock_irq(&nacl->device_list_lock);
- deve = nacl->device_list[lacl->mapped_lun];
- if (!deve->se_lun || !deve->se_lun_acl) {
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_lock();
+ deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
+ if (!deve) {
+ rcu_read_unlock();
return -ENODEV;
}
/* scsiAttIntrPortAuthIntrIdx */
ret = snprintf(page, PAGE_SIZE, "%u\n", nacl->acl_index);
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_unlock();
return ret;
}
-DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(port_auth_indx);
-static ssize_t target_stat_scsi_att_intr_port_show_attr_port_ident(
- struct se_ml_stat_grps *lgrps, char *page)
+static ssize_t target_stat_iport_port_ident_show(struct config_item *item,
+ char *page)
{
- struct se_lun_acl *lacl = container_of(lgrps,
- struct se_lun_acl, ml_stat_grps);
+ struct se_lun_acl *lacl = iport_to_lacl(item);
struct se_node_acl *nacl = lacl->se_lun_nacl;
struct se_session *se_sess;
struct se_portal_group *tpg;
@@ -1588,28 +1271,25 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_port_ident(
spin_unlock_irq(&nacl->nacl_sess_lock);
return ret;
}
-DEV_STAT_SCSI_ATTR_INTR_PORT_ATTR_RO(port_ident);
-CONFIGFS_EATTR_OPS(target_stat_scsi_att_intr_port, se_ml_stat_grps,
- scsi_att_intr_port_group);
+CONFIGFS_ATTR_RO(target_stat_iport_, inst);
+CONFIGFS_ATTR_RO(target_stat_iport_, dev);
+CONFIGFS_ATTR_RO(target_stat_iport_, port);
+CONFIGFS_ATTR_RO(target_stat_iport_, indx);
+CONFIGFS_ATTR_RO(target_stat_iport_, port_auth_indx);
+CONFIGFS_ATTR_RO(target_stat_iport_, port_ident);
static struct configfs_attribute *target_stat_scsi_ath_intr_port_attrs[] = {
- &target_stat_scsi_att_intr_port_inst.attr,
- &target_stat_scsi_att_intr_port_dev.attr,
- &target_stat_scsi_att_intr_port_port.attr,
- &target_stat_scsi_att_intr_port_indx.attr,
- &target_stat_scsi_att_intr_port_port_auth_indx.attr,
- &target_stat_scsi_att_intr_port_port_ident.attr,
+ &target_stat_iport_attr_inst,
+ &target_stat_iport_attr_dev,
+ &target_stat_iport_attr_port,
+ &target_stat_iport_attr_indx,
+ &target_stat_iport_attr_port_auth_indx,
+ &target_stat_iport_attr_port_ident,
NULL,
};
-static struct configfs_item_operations target_stat_scsi_att_intr_port_attrib_ops = {
- .show_attribute = target_stat_scsi_att_intr_port_attr_show,
- .store_attribute = target_stat_scsi_att_intr_port_attr_store,
-};
-
-static struct config_item_type target_stat_scsi_att_intr_port_cit = {
- .ct_item_ops = &target_stat_scsi_att_intr_port_attrib_ops,
+static const struct config_item_type target_stat_scsi_att_intr_port_cit = {
.ct_attrs = target_stat_scsi_ath_intr_port_attrs,
.ct_owner = THIS_MODULE,
};
@@ -1620,14 +1300,13 @@ static struct config_item_type target_stat_scsi_att_intr_port_cit = {
*/
void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *lacl)
{
- struct config_group *ml_stat_grp = &lacl->ml_stat_grps.stat_group;
-
config_group_init_type_name(&lacl->ml_stat_grps.scsi_auth_intr_group,
"scsi_auth_intr", &target_stat_scsi_auth_intr_cit);
+ configfs_add_default_group(&lacl->ml_stat_grps.scsi_auth_intr_group,
+ &lacl->ml_stat_grps.stat_group);
+
config_group_init_type_name(&lacl->ml_stat_grps.scsi_att_intr_port_group,
"scsi_att_intr_port", &target_stat_scsi_att_intr_port_cit);
-
- ml_stat_grp->default_groups[0] = &lacl->ml_stat_grps.scsi_auth_intr_group;
- ml_stat_grp->default_groups[1] = &lacl->ml_stat_grps.scsi_att_intr_port_group;
- ml_stat_grp->default_groups[2] = NULL;
+ configfs_add_default_group(&lacl->ml_stat_grps.scsi_att_intr_port_group,
+ &lacl->ml_stat_grps.stat_group);
}
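
With both groups registered as defaults, each MappedLUN directory gains its statistics subtree automatically when the parent group is registered; the resulting configfs layout looks roughly like this (fabric and names are illustrative):

/sys/kernel/config/target/iscsi/$TARGET_IQN/tpgt_1/acls/$INITIATOR_IQN/lun_0/statistics/
	scsi_auth_intr/
	scsi_att_intr_port/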
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 0d7cacb91107..4718db628222 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -1,39 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* Filename: target_core_tmr.c
*
* This file contains SPC-3 task management infrastructure
*
- * (c) Copyright 2009-2012 RisingTide Systems LLC.
+ * (c) Copyright 2009-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
******************************************************************************/
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/export.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
-#include <target/target_core_configfs.h>
#include "target_core_internal.h"
#include "target_core_alua.h"
@@ -64,39 +48,11 @@ int core_tmr_alloc_req(
}
EXPORT_SYMBOL(core_tmr_alloc_req);
-void core_tmr_release_req(
- struct se_tmr_req *tmr)
+void core_tmr_release_req(struct se_tmr_req *tmr)
{
- struct se_device *dev = tmr->tmr_dev;
- unsigned long flags;
-
- if (!dev) {
- kfree(tmr);
- return;
- }
-
- spin_lock_irqsave(&dev->se_tmr_lock, flags);
- list_del(&tmr->tmr_list);
- spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
-
kfree(tmr);
}
-static void core_tmr_handle_tas_abort(
- struct se_node_acl *tmr_nacl,
- struct se_cmd *cmd,
- int tas)
-{
- /*
- * TASK ABORTED status (TAS) bit support
- */
- if ((tmr_nacl &&
- (tmr_nacl == cmd->se_sess->se_node_acl)) || tas)
- transport_send_task_abort(cmd);
-
- transport_cmd_finish_abort(cmd, 0);
-}
-
static int target_check_cdb_and_preempt(struct list_head *list,
struct se_cmd *cmd)
{
@@ -112,68 +68,108 @@ static int target_check_cdb_and_preempt(struct list_head *list,
return 1;
}
-void core_tmr_abort_task(
- struct se_device *dev,
- struct se_tmr_req *tmr,
- struct se_session *se_sess)
+static bool __target_check_io_state(struct se_cmd *se_cmd,
+ struct se_session *tmr_sess, bool tas)
{
- struct se_cmd *se_cmd, *tmp_cmd;
- unsigned long flags;
- int ref_tag;
-
- spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
- list_for_each_entry_safe(se_cmd, tmp_cmd,
- &se_sess->sess_cmd_list, se_cmd_list) {
-
- if (dev != se_cmd->se_dev)
- continue;
- ref_tag = se_cmd->se_tfo->get_task_tag(se_cmd);
- if (tmr->ref_task_tag != ref_tag)
- continue;
+ struct se_session *sess = se_cmd->se_sess;
- printk("ABORT_TASK: Found referenced %s task_tag: %u\n",
- se_cmd->se_tfo->get_fabric_name(), ref_tag);
+ lockdep_assert_held(&sess->sess_cmd_lock);
- spin_lock(&se_cmd->t_state_lock);
- if (se_cmd->transport_state & CMD_T_COMPLETE) {
- printk("ABORT_TASK: ref_tag: %u already complete, skipping\n", ref_tag);
- spin_unlock(&se_cmd->t_state_lock);
- spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
- goto out;
- }
- se_cmd->transport_state |= CMD_T_ABORTED;
+ /*
+ * If command already reached CMD_T_COMPLETE state within
+ * target_complete_cmd() or CMD_T_FABRIC_STOP due to shutdown,
+ * this se_cmd has been passed to fabric driver and will
+ * not be aborted.
+ *
+ * Otherwise, obtain a local se_cmd->cmd_kref now for TMR
+ * ABORT_TASK + LUN_RESET for CMD_T_ABORTED processing as
+	 * long as se_cmd->cmd_kref is still non-zero.
+ */
+ spin_lock(&se_cmd->t_state_lock);
+ if (se_cmd->transport_state & (CMD_T_COMPLETE | CMD_T_FABRIC_STOP)) {
+ pr_debug("Attempted to abort io tag: %llu already complete or"
+ " fabric stop, skipping\n", se_cmd->tag);
spin_unlock(&se_cmd->t_state_lock);
+ return false;
+ }
+ se_cmd->transport_state |= CMD_T_ABORTED;
- list_del_init(&se_cmd->se_cmd_list);
- kref_get(&se_cmd->cmd_kref);
- spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+ if ((tmr_sess != se_cmd->se_sess) && tas)
+ se_cmd->transport_state |= CMD_T_TAS;
- cancel_work_sync(&se_cmd->work);
- transport_wait_for_tasks(se_cmd);
- /*
- * Now send SAM_STAT_TASK_ABORTED status for the referenced
- * se_cmd descriptor..
- */
- transport_send_task_abort(se_cmd);
- /*
- * Also deal with possible extra acknowledge reference..
- */
- if (se_cmd->se_cmd_flags & SCF_ACK_KREF)
- target_put_sess_cmd(se_sess, se_cmd);
+ spin_unlock(&se_cmd->t_state_lock);
- target_put_sess_cmd(se_sess, se_cmd);
+ return kref_get_unless_zero(&se_cmd->cmd_kref);
+}
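
The kref_get_unless_zero() at the end is the crux: it refuses to take a reference once the final put has already run, which is exactly the race a TMR can lose against normal command completion. Reduced to its essentials (example type, not from this patch):

struct example_obj {
	struct kref kref;
};

/* Returns the object with a new reference held, or NULL if it is dying */
static struct example_obj *example_tryget(struct example_obj *obj)
{
	return kref_get_unless_zero(&obj->kref) ? obj : NULL;
}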
- printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
- " ref_tag: %d\n", ref_tag);
- tmr->response = TMR_FUNCTION_COMPLETE;
- return;
+void core_tmr_abort_task(
+ struct se_device *dev,
+ struct se_tmr_req *tmr,
+ struct se_session *se_sess)
+{
+ LIST_HEAD(aborted_list);
+ struct se_cmd *se_cmd, *next;
+ unsigned long flags;
+ bool rc;
+ u64 ref_tag;
+ int i;
+
+ for (i = 0; i < dev->queue_cnt; i++) {
+ flush_work(&dev->queues[i].sq.work);
+
+ spin_lock_irqsave(&dev->queues[i].lock, flags);
+ list_for_each_entry_safe(se_cmd, next, &dev->queues[i].state_list,
+ state_list) {
+ if (se_sess != se_cmd->se_sess)
+ continue;
+
+ /*
+ * skip task management functions, including
+ * tmr->task_cmd
+ */
+ if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
+ continue;
+
+ ref_tag = se_cmd->tag;
+ if (tmr->ref_task_tag != ref_tag)
+ continue;
+
+ pr_err("ABORT_TASK: Found referenced %s task_tag: %llu\n",
+ se_cmd->se_tfo->fabric_name, ref_tag);
+
+ spin_lock(&se_sess->sess_cmd_lock);
+ rc = __target_check_io_state(se_cmd, se_sess, 0);
+ spin_unlock(&se_sess->sess_cmd_lock);
+ if (!rc)
+ continue;
+
+ list_move_tail(&se_cmd->state_list, &aborted_list);
+ se_cmd->state_active = false;
+ spin_unlock_irqrestore(&dev->queues[i].lock, flags);
+
+ if (dev->transport->tmr_notify)
+ dev->transport->tmr_notify(dev, TMR_ABORT_TASK,
+ &aborted_list);
+
+ list_del_init(&se_cmd->state_list);
+ target_put_cmd_and_wait(se_cmd);
+
+ pr_err("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for ref_tag: %llu\n",
+ ref_tag);
+ tmr->response = TMR_FUNCTION_COMPLETE;
+ atomic_long_inc(&dev->aborts_complete);
+ return;
+ }
+ spin_unlock_irqrestore(&dev->queues[i].lock, flags);
}
- spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
-out:
- printk("ABORT_TASK: Sending TMR_TASK_DOES_NOT_EXIST for ref_tag: %d\n",
+ if (dev->transport->tmr_notify)
+ dev->transport->tmr_notify(dev, TMR_ABORT_TASK, &aborted_list);
+
+ printk("ABORT_TASK: Sending TMR_TASK_DOES_NOT_EXIST for ref_tag: %lld\n",
tmr->ref_task_tag);
tmr->response = TMR_TASK_DOES_NOT_EXIST;
+ atomic_long_inc(&dev->aborts_no_task);
}
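
Note how the rewritten abort path walks per-queue state lists under dev->queues[i].lock, after flushing each queue's submission work item, instead of scanning a single session-wide command list. Stripped of the abort logic, the traversal pattern looks like this (illustrative helper, not part of the patch):

static int example_count_session_cmds(struct se_device *dev,
				      struct se_session *sess)
{
	struct se_cmd *cmd;
	unsigned long flags;
	int i, count = 0;

	for (i = 0; i < dev->queue_cnt; i++) {
		/* let queued submissions reach the state_list first */
		flush_work(&dev->queues[i].sq.work);

		spin_lock_irqsave(&dev->queues[i].lock, flags);
		list_for_each_entry(cmd, &dev->queues[i].state_list,
				    state_list)
			if (cmd->se_sess == sess)
				count++;
		spin_unlock_irqrestore(&dev->queues[i].lock, flags);
	}
	return count;
}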
static void core_tmr_drain_tmr_list(
@@ -182,18 +178,17 @@ static void core_tmr_drain_tmr_list(
struct list_head *preempt_and_abort_list)
{
LIST_HEAD(drain_tmr_list);
+ struct se_session *sess;
struct se_tmr_req *tmr_p, *tmr_pp;
struct se_cmd *cmd;
unsigned long flags;
+ bool rc;
/*
* Release all pending and outgoing TMRs aside from the received
* LUN_RESET tmr..
*/
spin_lock_irqsave(&dev->se_tmr_lock, flags);
list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {
- /*
- * Allow the received TMR to return with FUNCTION_COMPLETE.
- */
if (tmr_p == tmr)
continue;
@@ -202,26 +197,37 @@ static void core_tmr_drain_tmr_list(
pr_err("Unable to locate struct se_cmd for TMR\n");
continue;
}
+
+ /*
+ * We only execute one LUN_RESET at a time so we can't wait
+ * on them below.
+ */
+ if (tmr_p->function == TMR_LUN_RESET)
+ continue;
+
/*
* If this function was called with a valid pr_res_key
* parameter (eg: for PROUT PREEMPT_AND_ABORT service action
- * skip non regisration key matching TMRs.
+ * skip non registration key matching TMRs.
*/
if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
continue;
- spin_lock(&cmd->t_state_lock);
- if (!(cmd->transport_state & CMD_T_ACTIVE)) {
- spin_unlock(&cmd->t_state_lock);
+ sess = cmd->se_sess;
+ if (WARN_ON_ONCE(!sess))
continue;
- }
- if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
- spin_unlock(&cmd->t_state_lock);
+
+ spin_lock(&sess->sess_cmd_lock);
+ rc = __target_check_io_state(cmd, sess, 0);
+ spin_unlock(&sess->sess_cmd_lock);
+
+ if (!rc) {
+			printk("LUN_RESET TMR: kref_get_unless_zero failed, skipping\n");
continue;
}
- spin_unlock(&cmd->t_state_lock);
list_move_tail(&tmr_p->tmr_list, &drain_tmr_list);
+ tmr_p->tmr_dev = NULL;
}
spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
@@ -234,20 +240,40 @@ static void core_tmr_drain_tmr_list(
(preempt_and_abort_list) ? "Preempt" : "", tmr_p,
tmr_p->function, tmr_p->response, cmd->t_state);
- transport_cmd_finish_abort(cmd, 1);
+ target_put_cmd_and_wait(cmd);
}
}
+/**
+ * core_tmr_drain_state_list() - abort SCSI commands associated with a device
+ *
+ * @dev: Device for which to abort outstanding SCSI commands.
+ * @prout_cmd: Pointer to the SCSI PREEMPT AND ABORT if this function is called
+ * to realize the PREEMPT AND ABORT functionality.
+ * @tmr_sess: Session through which the LUN RESET has been received.
+ * @tas: Task Aborted Status (TAS) bit from the SCSI control mode page.
+ * A quote from SPC-4, paragraph "7.5.10 Control mode page":
+ * "A task aborted status (TAS) bit set to zero specifies that
+ * aborted commands shall be terminated by the device server
+ * without any response to the application client. A TAS bit set
+ * to one specifies that commands aborted by the actions of an I_T
+ * nexus other than the I_T nexus on which the command was
+ * received shall be completed with TASK ABORTED status."
+ * @preempt_and_abort_list: For the PREEMPT AND ABORT functionality, a list
+ * with registrations that will be preempted.
+ */
static void core_tmr_drain_state_list(
struct se_device *dev,
struct se_cmd *prout_cmd,
- struct se_node_acl *tmr_nacl,
- int tas,
+ struct se_session *tmr_sess,
+ bool tas,
struct list_head *preempt_and_abort_list)
{
LIST_HEAD(drain_task_list);
+ struct se_session *sess;
struct se_cmd *cmd, *next;
unsigned long flags;
+ int rc, i;
/*
* Complete outstanding commands with TASK_ABORTED SAM status.
@@ -271,62 +297,57 @@ static void core_tmr_drain_state_list(
* Note that this seems to be independent of TAS (Task Aborted Status)
* in the Control Mode Page.
*/
- spin_lock_irqsave(&dev->execute_task_lock, flags);
- list_for_each_entry_safe(cmd, next, &dev->state_list, state_list) {
- /*
- * For PREEMPT_AND_ABORT usage, only process commands
- * with a matching reservation key.
- */
- if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
- continue;
-
- /*
- * Not aborting PROUT PREEMPT_AND_ABORT CDB..
- */
- if (prout_cmd == cmd)
- continue;
-
- list_move_tail(&cmd->state_list, &drain_task_list);
- cmd->state_active = false;
+ for (i = 0; i < dev->queue_cnt; i++) {
+ flush_work(&dev->queues[i].sq.work);
+
+ spin_lock_irqsave(&dev->queues[i].lock, flags);
+ list_for_each_entry_safe(cmd, next, &dev->queues[i].state_list,
+ state_list) {
+ /*
+ * For PREEMPT_AND_ABORT usage, only process commands
+ * with a matching reservation key.
+ */
+ if (target_check_cdb_and_preempt(preempt_and_abort_list,
+ cmd))
+ continue;
+
+ /*
+ * Not aborting PROUT PREEMPT_AND_ABORT CDB..
+ */
+ if (prout_cmd == cmd)
+ continue;
+
+ sess = cmd->se_sess;
+ if (WARN_ON_ONCE(!sess))
+ continue;
+
+ spin_lock(&sess->sess_cmd_lock);
+ rc = __target_check_io_state(cmd, tmr_sess, tas);
+ spin_unlock(&sess->sess_cmd_lock);
+ if (!rc)
+ continue;
+
+ list_move_tail(&cmd->state_list, &drain_task_list);
+ cmd->state_active = false;
+ }
+ spin_unlock_irqrestore(&dev->queues[i].lock, flags);
}
- spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+
+ if (dev->transport->tmr_notify)
+ dev->transport->tmr_notify(dev, preempt_and_abort_list ?
+ TMR_LUN_RESET_PRO : TMR_LUN_RESET,
+ &drain_task_list);
while (!list_empty(&drain_task_list)) {
cmd = list_entry(drain_task_list.next, struct se_cmd, state_list);
- list_del(&cmd->state_list);
-
- pr_debug("LUN_RESET: %s cmd: %p"
- " ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state: %d"
- "cdb: 0x%02x\n",
- (preempt_and_abort_list) ? "Preempt" : "", cmd,
- cmd->se_tfo->get_task_tag(cmd), 0,
- cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
- cmd->t_task_cdb[0]);
- pr_debug("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx"
- " -- CMD_T_ACTIVE: %d"
- " CMD_T_STOP: %d CMD_T_SENT: %d\n",
- cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
- (cmd->transport_state & CMD_T_ACTIVE) != 0,
- (cmd->transport_state & CMD_T_STOP) != 0,
- (cmd->transport_state & CMD_T_SENT) != 0);
-
- /*
- * If the command may be queued onto a workqueue cancel it now.
- *
- * This is equivalent to removal from the execute queue in the
- * loop above, but we do it down here given that
- * cancel_work_sync may block.
- */
- if (cmd->t_state == TRANSPORT_COMPLETE)
- cancel_work_sync(&cmd->work);
-
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- target_stop_cmd(cmd, &flags);
+ list_del_init(&cmd->state_list);
- cmd->transport_state |= CMD_T_ABORTED;
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ target_show_cmd("LUN_RESET: ", cmd);
+ pr_debug("LUN_RESET: ITT[0x%08llx] - %s pr_res_key: 0x%016Lx\n",
+ cmd->tag, (preempt_and_abort_list) ? "preempt" : "",
+ cmd->pr_res_key);
- core_tmr_handle_tas_abort(tmr_nacl, cmd, tas);
+ target_put_cmd_and_wait(cmd);
}
}
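
The TAS handling documented above boils down to one predicate, applied when __target_check_io_state() sets CMD_T_TAS: a command is completed with TASK ABORTED status only when it was aborted from a different I_T nexus and the TAS bit is set. As a one-line restatement:

/* Condensed restatement of the SPC-4 TAS rule used above */
static bool example_should_set_tas(struct se_session *cmd_sess,
				   struct se_session *tmr_sess, bool tas)
{
	return tmr_sess != cmd_sess && tas;
}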
@@ -338,7 +359,8 @@ int core_tmr_lun_reset(
{
struct se_node_acl *tmr_nacl = NULL;
struct se_portal_group *tmr_tpg = NULL;
- int tas;
+ struct se_session *tmr_sess = NULL;
+ bool tas;
/*
* TASK_ABORTED status bit, this is configurable via ConfigFS
* struct se_device attributes. spc4r17 section 7.4.6 Control mode page
@@ -356,23 +378,35 @@ int core_tmr_lun_reset(
* or struct se_device passthrough..
*/
if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
- tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
- tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
+ tmr_sess = tmr->task_cmd->se_sess;
+ tmr_nacl = tmr_sess->se_node_acl;
+ tmr_tpg = tmr_sess->se_tpg;
if (tmr_nacl && tmr_tpg) {
pr_debug("LUN_RESET: TMR caller fabric: %s"
" initiator port %s\n",
- tmr_tpg->se_tpg_tfo->get_fabric_name(),
+ tmr_tpg->se_tpg_tfo->fabric_name,
tmr_nacl->initiatorname);
}
}
+
+
+ * We only allow one reset or preempt and abort to execute at a time
+ * to prevent one call from claiming all the cmds causing a second
+	 * to prevent one call from claiming all the cmds, which would let a
+	 * second call return while cmds it should have waited on are still
+	 * running.
+ mutex_lock(&dev->lun_reset_mutex);
+
pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n",
(preempt_and_abort_list) ? "Preempt" : "TMR",
dev->transport->name, tas);
-
core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list);
- core_tmr_drain_state_list(dev, prout_cmd, tmr_nacl, tas,
+ core_tmr_drain_state_list(dev, prout_cmd, tmr_sess, tas,
preempt_and_abort_list);
+ mutex_unlock(&dev->lun_reset_mutex);
+
/*
* Clear any legacy SPC-2 reservation when called during
* LOGICAL UNIT RESET
@@ -380,15 +414,13 @@ int core_tmr_lun_reset(
if (!preempt_and_abort_list &&
(dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)) {
spin_lock(&dev->dev_reservation_lock);
- dev->dev_reserved_node_acl = NULL;
+ dev->reservation_holder = NULL;
dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS;
spin_unlock(&dev->dev_reservation_lock);
pr_debug("LUN_RESET: SCSI-2 Released reservation\n");
}
- spin_lock_irq(&dev->stats_lock);
- dev->num_resets++;
- spin_unlock_irq(&dev->stats_lock);
+ atomic_long_inc(&dev->num_resets);
pr_debug("LUN_RESET: %s for [%s] Complete\n",
(preempt_and_abort_list) ? "Preempt" : "TMR",
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index aac9d2727e3c..8b5ad50baa43 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -1,26 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* Filename: target_core_tpg.c
*
* This file contains generic Target Portal Group related functions.
*
- * (c) Copyright 2002-2012 RisingTide Systems LLC.
+ * (c) Copyright 2002-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
******************************************************************************/
#include <linux/net.h>
@@ -32,59 +19,23 @@
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include "target_core_internal.h"
+#include "target_core_alua.h"
+#include "target_core_pr.h"
+#include "target_core_ua.h"
extern struct se_device *g_lun0_dev;
-
-static DEFINE_SPINLOCK(tpg_lock);
-static LIST_HEAD(tpg_list);
-
-/* core_clear_initiator_node_from_tpg():
- *
- *
- */
-static void core_clear_initiator_node_from_tpg(
- struct se_node_acl *nacl,
- struct se_portal_group *tpg)
-{
- int i;
- struct se_dev_entry *deve;
- struct se_lun *lun;
-
- spin_lock_irq(&nacl->device_list_lock);
- for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
- deve = nacl->device_list[i];
-
- if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
- continue;
-
- if (!deve->se_lun) {
- pr_err("%s device entries device pointer is"
- " NULL, but Initiator has access.\n",
- tpg->se_tpg_tfo->get_fabric_name());
- continue;
- }
-
- lun = deve->se_lun;
- spin_unlock_irq(&nacl->device_list_lock);
- core_disable_device_list_for_node(lun, NULL, deve->mapped_lun,
- TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);
-
- spin_lock_irq(&nacl->device_list_lock);
- }
- spin_unlock_irq(&nacl->device_list_lock);
-}
+static DEFINE_XARRAY_ALLOC(tpg_xa);
/* __core_tpg_get_initiator_node_acl():
*
- * spin_lock_bh(&tpg->acl_node_lock); must be held when calling
+ * mutex_lock(&tpg->acl_node_mutex); must be held when calling
*/
struct se_node_acl *__core_tpg_get_initiator_node_acl(
struct se_portal_group *tpg,
@@ -109,13 +60,42 @@ struct se_node_acl *core_tpg_get_initiator_node_acl(
unsigned char *initiatorname)
{
struct se_node_acl *acl;
-
- spin_lock_irq(&tpg->acl_node_lock);
+ /*
+ * Obtain se_node_acl->acl_kref using fabric driver provided
+ * initiatorname[] during node acl endpoint lookup driven by
+ * new se_session login.
+ *
+ * The reference is held until se_session shutdown -> release
+ * occurs via fabric driver invoked transport_deregister_session()
+ * or transport_free_session() code.
+ */
+ mutex_lock(&tpg->acl_node_mutex);
acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
- spin_unlock_irq(&tpg->acl_node_lock);
+ if (acl) {
+ if (!kref_get_unless_zero(&acl->acl_kref))
+ acl = NULL;
+ }
+ mutex_unlock(&tpg->acl_node_mutex);
return acl;
}
+EXPORT_SYMBOL(core_tpg_get_initiator_node_acl);
+
+void core_allocate_nexus_loss_ua(
+ struct se_node_acl *nacl)
+{
+ struct se_dev_entry *deve;
+
+ if (!nacl)
+ return;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
+ core_scsi3_ua_allocate(deve, 0x29,
+ ASCQ_29H_NEXUS_LOSS_OCCURRED);
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL(core_allocate_nexus_loss_ua);
/* core_tpg_add_node_to_devs():
*
@@ -123,132 +103,142 @@ struct se_node_acl *core_tpg_get_initiator_node_acl(
*/
void core_tpg_add_node_to_devs(
struct se_node_acl *acl,
- struct se_portal_group *tpg)
+ struct se_portal_group *tpg,
+ struct se_lun *lun_orig)
{
- int i = 0;
- u32 lun_access = 0;
+ bool lun_access_ro = true;
struct se_lun *lun;
struct se_device *dev;
- spin_lock(&tpg->tpg_lun_lock);
- for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
- lun = tpg->tpg_lun_list[i];
- if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
+ mutex_lock(&tpg->tpg_lun_mutex);
+ hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link) {
+ if (lun_orig && lun != lun_orig)
continue;
- spin_unlock(&tpg->tpg_lun_lock);
-
- dev = lun->lun_se_dev;
+ dev = rcu_dereference_check(lun->lun_se_dev,
+ lockdep_is_held(&tpg->tpg_lun_mutex));
/*
* By default in LIO-Target $FABRIC_MOD,
* demo_mode_write_protect is ON, or READ_ONLY;
*/
if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
- lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
+ lun_access_ro = false;
} else {
/*
* Allow only optical drives to issue R/W in default RO
* demo mode.
*/
if (dev->transport->get_device_type(dev) == TYPE_DISK)
- lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
+ lun_access_ro = true;
else
- lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
+ lun_access_ro = false;
}
- pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
+ pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%llu] - Adding %s"
" access for LUN in Demo Mode\n",
- tpg->se_tpg_tfo->get_fabric_name(),
+ tpg->se_tpg_tfo->fabric_name,
tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
- (lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
- "READ-WRITE" : "READ-ONLY");
+ lun_access_ro ? "READ-ONLY" : "READ-WRITE");
core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
- lun_access, acl, tpg);
- spin_lock(&tpg->tpg_lun_lock);
+ lun_access_ro, acl, tpg);
+ /*
+ * Check to see if there are any existing persistent reservation
+ * APTPL pre-registrations that need to be enabled for this dynamic
+ * LUN ACL now..
+ */
+ core_scsi3_check_aptpl_registration(dev, tpg, lun, acl,
+ lun->unpacked_lun);
}
- spin_unlock(&tpg->tpg_lun_lock);
+ mutex_unlock(&tpg->tpg_lun_mutex);
}
-/* core_set_queue_depth_for_node():
- *
- *
- */
-static int core_set_queue_depth_for_node(
- struct se_portal_group *tpg,
- struct se_node_acl *acl)
+static void
+target_set_nacl_queue_depth(struct se_portal_group *tpg,
+ struct se_node_acl *acl, u32 queue_depth)
{
+ acl->queue_depth = queue_depth;
+
if (!acl->queue_depth) {
- pr_err("Queue depth for %s Initiator Node: %s is 0,"
- "defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
+		pr_warn("Queue depth for %s Initiator Node: %s is 0, "
+ "defaulting to 1.\n", tpg->se_tpg_tfo->fabric_name,
acl->initiatorname);
acl->queue_depth = 1;
}
-
- return 0;
}
-void array_free(void *array, int n)
+static struct se_node_acl *target_alloc_node_acl(struct se_portal_group *tpg,
+ const unsigned char *initiatorname)
{
- void **a = array;
- int i;
+ struct se_node_acl *acl;
+ u32 queue_depth;
+
+ acl = kzalloc(max(sizeof(*acl), tpg->se_tpg_tfo->node_acl_size),
+ GFP_KERNEL);
+ if (!acl)
+ return NULL;
+
+ INIT_LIST_HEAD(&acl->acl_list);
+ INIT_LIST_HEAD(&acl->acl_sess_list);
+ INIT_HLIST_HEAD(&acl->lun_entry_hlist);
+ kref_init(&acl->acl_kref);
+ init_completion(&acl->acl_free_comp);
+ spin_lock_init(&acl->nacl_sess_lock);
+ mutex_init(&acl->lun_entry_mutex);
+ atomic_set(&acl->acl_pr_ref_count, 0);
- for (i = 0; i < n; i++)
- kfree(a[i]);
- kfree(a);
+ if (tpg->se_tpg_tfo->tpg_get_default_depth)
+ queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
+ else
+ queue_depth = 1;
+ target_set_nacl_queue_depth(tpg, acl, queue_depth);
+
+ snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
+ acl->se_tpg = tpg;
+ acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
+
+ tpg->se_tpg_tfo->set_default_node_attributes(acl);
+
+ return acl;
}
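
The kzalloc(max(sizeof(*acl), tpg->se_tpg_tfo->node_acl_size), ...) above lets a fabric driver embed private per-ACL state without a second allocation: the driver declares a wrapper whose first member is the se_node_acl and reports the wrapper's size via node_acl_size. A hypothetical sketch:

struct example_fabric_nacl {
	struct se_node_acl se_node_acl;	/* must be the first member */
	u32 fabric_private_flags;	/* fabric-specific state */
};

/* in the fabric's target_core_fabric_ops:
 *	.node_acl_size = sizeof(struct example_fabric_nacl),
 * then container_of(se_nacl, struct example_fabric_nacl, se_node_acl)
 * recovers the wrapper from the core's pointer.
 */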
-static void *array_zalloc(int n, size_t size, gfp_t flags)
+static void target_add_node_acl(struct se_node_acl *acl)
{
- void **a;
- int i;
+ struct se_portal_group *tpg = acl->se_tpg;
- a = kzalloc(n * sizeof(void*), flags);
- if (!a)
- return NULL;
- for (i = 0; i < n; i++) {
- a[i] = kzalloc(size, flags);
- if (!a[i]) {
- array_free(a, n);
- return NULL;
- }
- }
- return a;
+ mutex_lock(&tpg->acl_node_mutex);
+ list_add_tail(&acl->acl_list, &tpg->acl_node_list);
+ mutex_unlock(&tpg->acl_node_mutex);
+
+ pr_debug("%s_TPG[%hu] - Added %s ACL with TCQ Depth: %d for %s"
+ " Initiator Node: %s\n",
+ tpg->se_tpg_tfo->fabric_name,
+ tpg->se_tpg_tfo->tpg_get_tag(tpg),
+ acl->dynamic_node_acl ? "DYNAMIC" : "",
+ acl->queue_depth,
+ tpg->se_tpg_tfo->fabric_name,
+ acl->initiatorname);
}
-/* core_create_device_list_for_node():
- *
- *
- */
-static int core_create_device_list_for_node(struct se_node_acl *nacl)
+bool target_tpg_has_node_acl(struct se_portal_group *tpg,
+ const char *initiatorname)
{
- struct se_dev_entry *deve;
- int i;
-
- nacl->device_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
- sizeof(struct se_dev_entry), GFP_KERNEL);
- if (!nacl->device_list) {
- pr_err("Unable to allocate memory for"
- " struct se_node_acl->device_list\n");
- return -ENOMEM;
- }
- for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
- deve = nacl->device_list[i];
-
- atomic_set(&deve->ua_count, 0);
- atomic_set(&deve->pr_ref_count, 0);
- spin_lock_init(&deve->ua_lock);
- INIT_LIST_HEAD(&deve->alua_port_list);
- INIT_LIST_HEAD(&deve->ua_list);
+ struct se_node_acl *acl;
+ bool found = false;
+
+ mutex_lock(&tpg->acl_node_mutex);
+ list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
+ if (!strcmp(acl->initiatorname, initiatorname)) {
+ found = true;
+ break;
+ }
}
+ mutex_unlock(&tpg->acl_node_mutex);
- return 0;
+ return found;
}
+EXPORT_SYMBOL(target_tpg_has_node_acl);
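
A hypothetical fabric-driver use of this export (illustrative only): reject a login early when no explicit ACL exists and demo mode is disabled, before bothering to build a session.

static int example_check_login(struct se_portal_group *se_tpg,
			       const char *initiatorname)
{
	if (!target_tpg_has_node_acl(se_tpg, initiatorname) &&
	    !se_tpg->se_tpg_tfo->tpg_check_demo_mode(se_tpg))
		return -EACCES;
	return 0;
}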
-/* core_tpg_check_initiator_node_acl()
- *
- *
- */
struct se_node_acl *core_tpg_check_initiator_node_acl(
struct se_portal_group *tpg,
unsigned char *initiatorname)
@@ -262,36 +252,20 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
return NULL;
- acl = tpg->se_tpg_tfo->tpg_alloc_fabric_acl(tpg);
+ acl = target_alloc_node_acl(tpg, initiatorname);
if (!acl)
return NULL;
-
- INIT_LIST_HEAD(&acl->acl_list);
- INIT_LIST_HEAD(&acl->acl_sess_list);
- kref_init(&acl->acl_kref);
- init_completion(&acl->acl_free_comp);
- spin_lock_init(&acl->device_list_lock);
- spin_lock_init(&acl->nacl_sess_lock);
- atomic_set(&acl->acl_pr_ref_count, 0);
- acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
- snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
- acl->se_tpg = tpg;
- acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
- spin_lock_init(&acl->stats_lock);
+ /*
+ * When allocating a dynamically generated node_acl, go ahead
+ * and take the extra kref now before returning to the fabric
+ * driver caller.
+ *
+ * Note this reference will be released at session shutdown
+ * time within transport_free_session() code.
+ */
+ kref_get(&acl->acl_kref);
acl->dynamic_node_acl = 1;
- tpg->se_tpg_tfo->set_default_node_attributes(acl);
-
- if (core_create_device_list_for_node(acl) < 0) {
- tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
- return NULL;
- }
-
- if (core_set_queue_depth_for_node(tpg, acl) < 0) {
- core_free_device_list_for_node(acl, tpg);
- tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
- return NULL;
- }
/*
* Here we only create demo-mode MappedLUNs from the active
* TPG LUNs if the fabric is not explicitly asking for
@@ -299,18 +273,9 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
*/
if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
(tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
- core_tpg_add_node_to_devs(acl, tpg);
-
- spin_lock_irq(&tpg->acl_node_lock);
- list_add_tail(&acl->acl_list, &tpg->acl_node_list);
- tpg->num_node_acls++;
- spin_unlock_irq(&tpg->acl_node_lock);
-
- pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
- " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
- tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
- tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
+ core_tpg_add_node_to_devs(acl, tpg, NULL);
+ target_add_node_acl(acl);
return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
@@ -321,164 +286,74 @@ void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
cpu_relax();
}
-void core_tpg_clear_object_luns(struct se_portal_group *tpg)
-{
- int i;
- struct se_lun *lun;
-
- spin_lock(&tpg->tpg_lun_lock);
- for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
- lun = tpg->tpg_lun_list[i];
-
- if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) ||
- (lun->lun_se_dev == NULL))
- continue;
-
- spin_unlock(&tpg->tpg_lun_lock);
- core_dev_del_lun(tpg, lun->unpacked_lun);
- spin_lock(&tpg->tpg_lun_lock);
- }
- spin_unlock(&tpg->tpg_lun_lock);
-}
-EXPORT_SYMBOL(core_tpg_clear_object_luns);
-
-/* core_tpg_add_initiator_node_acl():
- *
- *
- */
struct se_node_acl *core_tpg_add_initiator_node_acl(
struct se_portal_group *tpg,
- struct se_node_acl *se_nacl,
- const char *initiatorname,
- u32 queue_depth)
+ const char *initiatorname)
{
- struct se_node_acl *acl = NULL;
+ struct se_node_acl *acl;
- spin_lock_irq(&tpg->acl_node_lock);
+ mutex_lock(&tpg->acl_node_mutex);
acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
if (acl) {
if (acl->dynamic_node_acl) {
acl->dynamic_node_acl = 0;
pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
- " for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
+ " for %s\n", tpg->se_tpg_tfo->fabric_name,
tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
- spin_unlock_irq(&tpg->acl_node_lock);
- /*
- * Release the locally allocated struct se_node_acl
- * because * core_tpg_add_initiator_node_acl() returned
- * a pointer to an existing demo mode node ACL.
- */
- if (se_nacl)
- tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg,
- se_nacl);
- goto done;
+ mutex_unlock(&tpg->acl_node_mutex);
+ return acl;
}
pr_err("ACL entry for %s Initiator"
" Node %s already exists for TPG %u, ignoring"
- " request.\n", tpg->se_tpg_tfo->get_fabric_name(),
+ " request.\n", tpg->se_tpg_tfo->fabric_name,
initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
- spin_unlock_irq(&tpg->acl_node_lock);
+ mutex_unlock(&tpg->acl_node_mutex);
return ERR_PTR(-EEXIST);
}
- spin_unlock_irq(&tpg->acl_node_lock);
-
- if (!se_nacl) {
- pr_err("struct se_node_acl pointer is NULL\n");
- return ERR_PTR(-EINVAL);
- }
- /*
- * For v4.x logic the se_node_acl_s is hanging off a fabric
- * dependent structure allocated via
- * struct target_core_fabric_ops->fabric_make_nodeacl()
- */
- acl = se_nacl;
-
- INIT_LIST_HEAD(&acl->acl_list);
- INIT_LIST_HEAD(&acl->acl_sess_list);
- kref_init(&acl->acl_kref);
- init_completion(&acl->acl_free_comp);
- spin_lock_init(&acl->device_list_lock);
- spin_lock_init(&acl->nacl_sess_lock);
- atomic_set(&acl->acl_pr_ref_count, 0);
- acl->queue_depth = queue_depth;
- snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
- acl->se_tpg = tpg;
- acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
- spin_lock_init(&acl->stats_lock);
-
- tpg->se_tpg_tfo->set_default_node_attributes(acl);
+ mutex_unlock(&tpg->acl_node_mutex);
- if (core_create_device_list_for_node(acl) < 0) {
- tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
+ acl = target_alloc_node_acl(tpg, initiatorname);
+ if (!acl)
return ERR_PTR(-ENOMEM);
- }
-
- if (core_set_queue_depth_for_node(tpg, acl) < 0) {
- core_free_device_list_for_node(acl, tpg);
- tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
- return ERR_PTR(-EINVAL);
- }
-
- spin_lock_irq(&tpg->acl_node_lock);
- list_add_tail(&acl->acl_list, &tpg->acl_node_list);
- tpg->num_node_acls++;
- spin_unlock_irq(&tpg->acl_node_lock);
-
-done:
- pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
- " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
- tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
- tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
+ target_add_node_acl(acl);
return acl;
}
-EXPORT_SYMBOL(core_tpg_add_initiator_node_acl);
-/* core_tpg_del_initiator_node_acl():
- *
- *
- */
-int core_tpg_del_initiator_node_acl(
- struct se_portal_group *tpg,
- struct se_node_acl *acl,
- int force)
+static void target_shutdown_sessions(struct se_node_acl *acl)
{
- LIST_HEAD(sess_list);
- struct se_session *sess, *sess_tmp;
+ struct se_session *sess;
unsigned long flags;
- int rc;
-
- spin_lock_irq(&tpg->acl_node_lock);
- if (acl->dynamic_node_acl) {
- acl->dynamic_node_acl = 0;
- }
- list_del(&acl->acl_list);
- tpg->num_node_acls--;
- spin_unlock_irq(&tpg->acl_node_lock);
+restart:
spin_lock_irqsave(&acl->nacl_sess_lock, flags);
- acl->acl_stop = 1;
-
- list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list,
- sess_acl_list) {
- if (sess->sess_tearing_down != 0)
+ list_for_each_entry(sess, &acl->acl_sess_list, sess_acl_list) {
+ if (sess->cmd_cnt && atomic_read(&sess->cmd_cnt->stopped))
continue;
- target_get_session(sess);
- list_move(&sess->sess_acl_list, &sess_list);
+ list_del_init(&sess->sess_acl_list);
+ spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
+
+ if (acl->se_tpg->se_tpg_tfo->close_session)
+ acl->se_tpg->se_tpg_tfo->close_session(sess);
+ goto restart;
}
spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
+}
- list_for_each_entry_safe(sess, sess_tmp, &sess_list, sess_acl_list) {
- list_del(&sess->sess_acl_list);
+void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
+{
+ struct se_portal_group *tpg = acl->se_tpg;
+
+ mutex_lock(&tpg->acl_node_mutex);
+ if (acl->dynamic_node_acl)
+ acl->dynamic_node_acl = 0;
+ list_del_init(&acl->acl_list);
+ mutex_unlock(&tpg->acl_node_mutex);
+
+ target_shutdown_sessions(acl);
- rc = tpg->se_tpg_tfo->shutdown_session(sess);
- target_put_session(sess);
- if (!rc)
- continue;
- target_put_session(sess);
- }
target_put_nacl(acl);
/*
* Wait for last target_put_nacl() to complete in target_complete_nacl()
@@ -487,125 +362,50 @@ int core_tpg_del_initiator_node_acl(
wait_for_completion(&acl->acl_free_comp);
core_tpg_wait_for_nacl_pr_ref(acl);
- core_clear_initiator_node_from_tpg(acl, tpg);
core_free_device_list_for_node(acl, tpg);
pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
- " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
+ " Initiator Node: %s\n", tpg->se_tpg_tfo->fabric_name,
tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
- tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);
+ tpg->se_tpg_tfo->fabric_name, acl->initiatorname);
- return 0;
+ kfree(acl);
}
-EXPORT_SYMBOL(core_tpg_del_initiator_node_acl);
/* core_tpg_set_initiator_node_queue_depth():
*
*
*/
int core_tpg_set_initiator_node_queue_depth(
- struct se_portal_group *tpg,
- unsigned char *initiatorname,
- u32 queue_depth,
- int force)
+ struct se_node_acl *acl,
+ u32 queue_depth)
{
- struct se_session *sess, *init_sess = NULL;
- struct se_node_acl *acl;
- unsigned long flags;
- int dynamic_acl = 0;
-
- spin_lock_irq(&tpg->acl_node_lock);
- acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
- if (!acl) {
- pr_err("Access Control List entry for %s Initiator"
- " Node %s does not exists for TPG %hu, ignoring"
- " request.\n", tpg->se_tpg_tfo->get_fabric_name(),
- initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
- spin_unlock_irq(&tpg->acl_node_lock);
- return -ENODEV;
- }
- if (acl->dynamic_node_acl) {
- acl->dynamic_node_acl = 0;
- dynamic_acl = 1;
- }
- spin_unlock_irq(&tpg->acl_node_lock);
-
- spin_lock_irqsave(&tpg->session_lock, flags);
- list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
- if (sess->se_node_acl != acl)
- continue;
-
- if (!force) {
- pr_err("Unable to change queue depth for %s"
- " Initiator Node: %s while session is"
- " operational. To forcefully change the queue"
- " depth and force session reinstatement"
- " use the \"force=1\" parameter.\n",
- tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
- spin_unlock_irqrestore(&tpg->session_lock, flags);
-
- spin_lock_irq(&tpg->acl_node_lock);
- if (dynamic_acl)
- acl->dynamic_node_acl = 1;
- spin_unlock_irq(&tpg->acl_node_lock);
- return -EEXIST;
- }
- /*
- * Determine if the session needs to be closed by our context.
- */
- if (!tpg->se_tpg_tfo->shutdown_session(sess))
- continue;
-
- init_sess = sess;
- break;
- }
+ struct se_portal_group *tpg = acl->se_tpg;
/*
+ * Allow the setting of se_node_acl queue_depth to be idempotent,
+ * and not force a session shutdown event if the value is not
+ * changing.
+ */
+ if (acl->queue_depth == queue_depth)
+ return 0;
+ /*
	 * User has requested to change the queue depth for an Initiator Node.
* Change the value in the Node's struct se_node_acl, and call
- * core_set_queue_depth_for_node() to add the requested queue depth.
- *
- * Finally call tpg->se_tpg_tfo->close_session() to force session
- * reinstatement to occur if there is an active session for the
- * $FABRIC_MOD Initiator Node in question.
+ * target_set_nacl_queue_depth() to set the new queue depth.
*/
- acl->queue_depth = queue_depth;
+ target_set_nacl_queue_depth(tpg, acl, queue_depth);
- if (core_set_queue_depth_for_node(tpg, acl) < 0) {
- spin_unlock_irqrestore(&tpg->session_lock, flags);
- /*
- * Force session reinstatement if
- * core_set_queue_depth_for_node() failed, because we assume
- * the $FABRIC_MOD has already the set session reinstatement
- * bit from tpg->se_tpg_tfo->shutdown_session() called above.
- */
- if (init_sess)
- tpg->se_tpg_tfo->close_session(init_sess);
-
- spin_lock_irq(&tpg->acl_node_lock);
- if (dynamic_acl)
- acl->dynamic_node_acl = 1;
- spin_unlock_irq(&tpg->acl_node_lock);
- return -EINVAL;
- }
- spin_unlock_irqrestore(&tpg->session_lock, flags);
/*
- * If the $FABRIC_MOD session for the Initiator Node ACL exists,
- * forcefully shutdown the $FABRIC_MOD session/nexus.
+ * Shutdown all pending sessions to force session reinstatement.
*/
- if (init_sess)
- tpg->se_tpg_tfo->close_session(init_sess);
+ target_shutdown_sessions(acl);
pr_debug("Successfully changed queue depth to: %d for Initiator"
- " Node: %s on %s Target Portal Group: %u\n", queue_depth,
- initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
+ " Node: %s on %s Target Portal Group: %u\n", acl->queue_depth,
+ acl->initiatorname, tpg->se_tpg_tfo->fabric_name,
tpg->se_tpg_tfo->tpg_get_tag(tpg));
- spin_lock_irq(&tpg->acl_node_lock);
- if (dynamic_acl)
- acl->dynamic_node_acl = 1;
- spin_unlock_irq(&tpg->acl_node_lock);
-
return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
@@ -633,99 +433,131 @@ int core_tpg_set_initiator_node_tag(
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_tag);
-static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
+static void core_tpg_lun_ref_release(struct percpu_ref *ref)
+{
+ struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);
+
+ complete(&lun->lun_shutdown_comp);
+}
+
+static int target_tpg_register_rtpi(struct se_portal_group *se_tpg)
{
- /* Set in core_dev_setup_virtual_lun0() */
- struct se_device *dev = g_lun0_dev;
- struct se_lun *lun = &se_tpg->tpg_virt_lun0;
- u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
+ u32 val;
int ret;
- lun->unpacked_lun = 0;
- lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
- atomic_set(&lun->lun_acl_count, 0);
- init_completion(&lun->lun_shutdown_comp);
- INIT_LIST_HEAD(&lun->lun_acl_list);
- INIT_LIST_HEAD(&lun->lun_cmd_list);
- spin_lock_init(&lun->lun_acl_lock);
- spin_lock_init(&lun->lun_cmd_lock);
- spin_lock_init(&lun->lun_sep_lock);
+ if (se_tpg->rtpi_manual) {
+ ret = xa_insert(&tpg_xa, se_tpg->tpg_rtpi, se_tpg, GFP_KERNEL);
+ if (ret) {
+			pr_info("%s_TPG[%hu] - Cannot set RTPI %#x, it is already busy\n",
+ se_tpg->se_tpg_tfo->fabric_name,
+ se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg),
+ se_tpg->tpg_rtpi);
+ return -EINVAL;
+ }
+ } else {
+ ret = xa_alloc(&tpg_xa, &val, se_tpg,
+ XA_LIMIT(1, USHRT_MAX), GFP_KERNEL);
+ if (!ret)
+ se_tpg->tpg_rtpi = val;
+ }
- ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
- if (ret < 0)
+ return ret;
+}
+
+static void target_tpg_deregister_rtpi(struct se_portal_group *se_tpg)
+{
+ if (se_tpg->tpg_rtpi && se_tpg->enabled)
+ xa_erase(&tpg_xa, se_tpg->tpg_rtpi);
+}
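The RTPI bookkeeping is built on the XArray API: xa_insert() returns -EBUSY when a user-chosen index is occupied, while xa_alloc() picks the first free index in the given range and writes it back through its id argument. A minimal standalone sketch of the two modes (the example_* names are illustrative):

static DEFINE_XARRAY_ALLOC(example_tpg_xa);

static int example_assign_rtpi(void *tpg, u32 *id, bool manual)
{
	if (manual)
		/* Fails with -EBUSY if *id is already taken. */
		return xa_insert(&example_tpg_xa, *id, tpg, GFP_KERNEL);

	/* Allocates the lowest free index in 1..USHRT_MAX into *id. */
	return xa_alloc(&example_tpg_xa, id, tpg,
			XA_LIMIT(1, USHRT_MAX), GFP_KERNEL);
}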
+
+int target_tpg_enable(struct se_portal_group *se_tpg)
+{
+ int ret;
+
+ ret = target_tpg_register_rtpi(se_tpg);
+ if (ret)
+ return ret;
+
+ ret = se_tpg->se_tpg_tfo->fabric_enable_tpg(se_tpg, true);
+ if (ret) {
+ target_tpg_deregister_rtpi(se_tpg);
return ret;
+ }
+
+ se_tpg->enabled = true;
return 0;
}
-static void core_tpg_release_virtual_lun0(struct se_portal_group *se_tpg)
+int target_tpg_disable(struct se_portal_group *se_tpg)
{
- struct se_lun *lun = &se_tpg->tpg_virt_lun0;
+ int ret;
+
+ target_tpg_deregister_rtpi(se_tpg);
- core_tpg_post_dellun(se_tpg, lun);
+ ret = se_tpg->se_tpg_tfo->fabric_enable_tpg(se_tpg, false);
+ if (!ret)
+ se_tpg->enabled = false;
+
+ return ret;
}
+/* Does not change se_wwn->priv. */
int core_tpg_register(
- struct target_core_fabric_ops *tfo,
struct se_wwn *se_wwn,
struct se_portal_group *se_tpg,
- void *tpg_fabric_ptr,
- int se_tpg_type)
+ int proto_id)
{
- struct se_lun *lun;
- u32 i;
-
- se_tpg->tpg_lun_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
- sizeof(struct se_lun), GFP_KERNEL);
- if (!se_tpg->tpg_lun_list) {
- pr_err("Unable to allocate struct se_portal_group->"
- "tpg_lun_list\n");
- return -ENOMEM;
- }
+ int ret;
+
+ if (!se_tpg)
+ return -EINVAL;
+ /*
+ * For the typical case where core_tpg_register() is called by a
+ * fabric driver from target_core_fabric_ops->fabric_make_tpg()
+ * configfs context, use the original tf_ops pointer already saved
+ * by target-core in target_fabric_make_wwn().
+ *
+ * Otherwise, for special cases like iscsi-target discovery TPGs
+ * the caller is responsible for setting ->se_tpg_tfo ahead of
+ * calling core_tpg_register().
+ */
+ if (se_wwn)
+ se_tpg->se_tpg_tfo = se_wwn->wwn_tf->tf_ops;
- for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
- lun = se_tpg->tpg_lun_list[i];
- lun->unpacked_lun = i;
- lun->lun_link_magic = SE_LUN_LINK_MAGIC;
- lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
- atomic_set(&lun->lun_acl_count, 0);
- init_completion(&lun->lun_shutdown_comp);
- INIT_LIST_HEAD(&lun->lun_acl_list);
- INIT_LIST_HEAD(&lun->lun_cmd_list);
- spin_lock_init(&lun->lun_acl_lock);
- spin_lock_init(&lun->lun_cmd_lock);
- spin_lock_init(&lun->lun_sep_lock);
+ if (!se_tpg->se_tpg_tfo) {
+ pr_err("Unable to locate se_tpg->se_tpg_tfo pointer\n");
+ return -EINVAL;
}
- se_tpg->se_tpg_type = se_tpg_type;
- se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr;
- se_tpg->se_tpg_tfo = tfo;
+ INIT_HLIST_HEAD(&se_tpg->tpg_lun_hlist);
+ se_tpg->proto_id = proto_id;
se_tpg->se_tpg_wwn = se_wwn;
atomic_set(&se_tpg->tpg_pr_ref_count, 0);
INIT_LIST_HEAD(&se_tpg->acl_node_list);
- INIT_LIST_HEAD(&se_tpg->se_tpg_node);
INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
- spin_lock_init(&se_tpg->acl_node_lock);
spin_lock_init(&se_tpg->session_lock);
- spin_lock_init(&se_tpg->tpg_lun_lock);
-
- if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
- if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
- array_free(se_tpg->tpg_lun_list,
- TRANSPORT_MAX_LUNS_PER_TPG);
- return -ENOMEM;
+ mutex_init(&se_tpg->tpg_lun_mutex);
+ mutex_init(&se_tpg->acl_node_mutex);
+
+ if (se_tpg->proto_id >= 0) {
+ se_tpg->tpg_virt_lun0 = core_tpg_alloc_lun(se_tpg, 0);
+ if (IS_ERR(se_tpg->tpg_virt_lun0))
+ return PTR_ERR(se_tpg->tpg_virt_lun0);
+
+ ret = core_tpg_add_lun(se_tpg, se_tpg->tpg_virt_lun0,
+ true, g_lun0_dev);
+ if (ret < 0) {
+ target_tpg_free_lun(&se_tpg->tpg_virt_lun0->rcu_head);
+ return ret;
}
}
- spin_lock_bh(&tpg_lock);
- list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
- spin_unlock_bh(&tpg_lock);
-
- pr_debug("TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
- " endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
- (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
- "Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
- "None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg));
+ pr_debug("TARGET_CORE[%s]: Allocated portal_group for endpoint: %s, "
+ "Proto: %d, Portal Tag: %u\n", se_tpg->se_tpg_tfo->fabric_name,
+ se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) ?
+ se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) : NULL,
+ se_tpg->proto_id, se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
return 0;
}
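With the reworked signature, a typical fabric driver call from ->fabric_make_tpg() context reduces to passing the wwn and a SCSI protocol identifier. A hedged example (my_tpg and the choice of the SAS protocol id are illustrative):

struct my_tpg {
	struct se_portal_group se_tpg;
};

static int my_fabric_register_tpg(struct se_wwn *wwn, struct my_tpg *tpg)
{
	/* ->se_tpg_tfo is resolved from wwn->wwn_tf->tf_ops internally. */
	return core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SAS);
}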
@@ -733,149 +565,166 @@ EXPORT_SYMBOL(core_tpg_register);
int core_tpg_deregister(struct se_portal_group *se_tpg)
{
+ const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
struct se_node_acl *nacl, *nacl_tmp;
+ LIST_HEAD(node_list);
- pr_debug("TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
- " for endpoint: %s Portal Tag %u\n",
- (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
- "Normal" : "Discovery", se_tpg->se_tpg_tfo->get_fabric_name(),
- se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
- se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
-
- spin_lock_bh(&tpg_lock);
- list_del(&se_tpg->se_tpg_node);
- spin_unlock_bh(&tpg_lock);
+ pr_debug("TARGET_CORE[%s]: Deallocating portal_group for endpoint: %s, "
+ "Proto: %d, Portal Tag: %u\n", tfo->fabric_name,
+ tfo->tpg_get_wwn(se_tpg) ? tfo->tpg_get_wwn(se_tpg) : NULL,
+ se_tpg->proto_id, tfo->tpg_get_tag(se_tpg));
while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
cpu_relax();
+
+ mutex_lock(&se_tpg->acl_node_mutex);
+ list_splice_init(&se_tpg->acl_node_list, &node_list);
+ mutex_unlock(&se_tpg->acl_node_mutex);
/*
* Release any remaining demo-mode generated se_node_acl that have
* not been released because of TFO->tpg_check_demo_mode_cache() == 1
* in transport_deregister_session().
*/
- spin_lock_irq(&se_tpg->acl_node_lock);
- list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
- acl_list) {
- list_del(&nacl->acl_list);
- se_tpg->num_node_acls--;
- spin_unlock_irq(&se_tpg->acl_node_lock);
+ list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) {
+ list_del_init(&nacl->acl_list);
core_tpg_wait_for_nacl_pr_ref(nacl);
core_free_device_list_for_node(nacl, se_tpg);
- se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl);
+ kfree(nacl);
+ }
- spin_lock_irq(&se_tpg->acl_node_lock);
+ if (se_tpg->proto_id >= 0) {
+ core_tpg_remove_lun(se_tpg, se_tpg->tpg_virt_lun0);
+ call_rcu(&se_tpg->tpg_virt_lun0->rcu_head, target_tpg_free_lun);
}
- spin_unlock_irq(&se_tpg->acl_node_lock);
- if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
- core_tpg_release_virtual_lun0(se_tpg);
+ target_tpg_deregister_rtpi(se_tpg);
- se_tpg->se_tpg_fabric_ptr = NULL;
- array_free(se_tpg->tpg_lun_list, TRANSPORT_MAX_LUNS_PER_TPG);
return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);
-struct se_lun *core_tpg_pre_addlun(
+struct se_lun *core_tpg_alloc_lun(
struct se_portal_group *tpg,
- u32 unpacked_lun)
+ u64 unpacked_lun)
{
struct se_lun *lun;
- if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
- pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
- "-1: %u for Target Portal Group: %u\n",
- tpg->se_tpg_tfo->get_fabric_name(),
- unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
- tpg->se_tpg_tfo->tpg_get_tag(tpg));
- return ERR_PTR(-EOVERFLOW);
+ lun = kzalloc(sizeof(*lun), GFP_KERNEL);
+ if (!lun) {
+ pr_err("Unable to allocate se_lun memory\n");
+ return ERR_PTR(-ENOMEM);
}
- spin_lock(&tpg->tpg_lun_lock);
- lun = tpg->tpg_lun_list[unpacked_lun];
- if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
- pr_err("TPG Logical Unit Number: %u is already active"
- " on %s Target Portal Group: %u, ignoring request.\n",
- unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(),
- tpg->se_tpg_tfo->tpg_get_tag(tpg));
- spin_unlock(&tpg->tpg_lun_lock);
- return ERR_PTR(-EINVAL);
+ lun->lun_stats = alloc_percpu(struct scsi_port_stats);
+ if (!lun->lun_stats) {
+ pr_err("Unable to allocate se_lun stats memory\n");
+ goto free_lun;
}
- spin_unlock(&tpg->tpg_lun_lock);
+
+ lun->unpacked_lun = unpacked_lun;
+ atomic_set(&lun->lun_acl_count, 0);
+ init_completion(&lun->lun_shutdown_comp);
+ INIT_LIST_HEAD(&lun->lun_deve_list);
+ INIT_LIST_HEAD(&lun->lun_dev_link);
+ atomic_set(&lun->lun_tg_pt_secondary_offline, 0);
+ spin_lock_init(&lun->lun_deve_lock);
+ mutex_init(&lun->lun_tg_pt_md_mutex);
+ INIT_LIST_HEAD(&lun->lun_tg_pt_gp_link);
+ spin_lock_init(&lun->lun_tg_pt_gp_lock);
+ lun->lun_tpg = tpg;
return lun;
+
+free_lun:
+ kfree(lun);
+ return ERR_PTR(-ENOMEM);
}
-int core_tpg_post_addlun(
+void target_tpg_free_lun(struct rcu_head *head)
+{
+ struct se_lun *lun = container_of(head, struct se_lun, rcu_head);
+
+ free_percpu(lun->lun_stats);
+ kfree(lun);
+}
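target_tpg_free_lun() follows the standard RCU deferred-free pattern: the object is first unpublished from all RCU-visible structures, then reclaimed from a callback that recovers the enclosing object with container_of(). A generic sketch of the pattern (struct obj is illustrative):

struct obj {
	u64 __percpu *stats;
	struct rcu_head rcu_head;
};

static void obj_free_rcu(struct rcu_head *head)
{
	struct obj *o = container_of(head, struct obj, rcu_head);

	free_percpu(o->stats);
	kfree(o);
}

/* After unlinking @o from every RCU-visible list or hash:
 *	call_rcu(&o->rcu_head, obj_free_rcu);
 */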
+
+int core_tpg_add_lun(
struct se_portal_group *tpg,
struct se_lun *lun,
- u32 lun_access,
- void *lun_ptr)
+ bool lun_access_ro,
+ struct se_device *dev)
{
int ret;
- ret = core_dev_export(lun_ptr, tpg, lun);
+ ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release, 0,
+ GFP_KERNEL);
if (ret < 0)
- return ret;
+ goto out;
- spin_lock(&tpg->tpg_lun_lock);
- lun->lun_access = lun_access;
- lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
- spin_unlock(&tpg->tpg_lun_lock);
+ if (!(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA) &&
+ !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
+ target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp);
- return 0;
-}
+ mutex_lock(&tpg->tpg_lun_mutex);
-static void core_tpg_shutdown_lun(
- struct se_portal_group *tpg,
- struct se_lun *lun)
-{
- core_clear_lun_from_tpg(lun, tpg);
- transport_clear_lun_from_sessions(lun);
-}
+ spin_lock(&dev->se_port_lock);
+ lun->lun_index = dev->dev_index;
+ rcu_assign_pointer(lun->lun_se_dev, dev);
+ dev->export_count++;
+ list_add_tail(&lun->lun_dev_link, &dev->dev_sep_list);
+ spin_unlock(&dev->se_port_lock);
-struct se_lun *core_tpg_pre_dellun(
- struct se_portal_group *tpg,
- u32 unpacked_lun)
-{
- struct se_lun *lun;
-
- if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
- pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
- "-1: %u for Target Portal Group: %u\n",
- tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
- TRANSPORT_MAX_LUNS_PER_TPG-1,
- tpg->se_tpg_tfo->tpg_get_tag(tpg));
- return ERR_PTR(-EOVERFLOW);
- }
+ if (dev->dev_flags & DF_READ_ONLY)
+ lun->lun_access_ro = true;
+ else
+ lun->lun_access_ro = lun_access_ro;
+ if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
+ hlist_add_head_rcu(&lun->link, &tpg->tpg_lun_hlist);
+ mutex_unlock(&tpg->tpg_lun_mutex);
- spin_lock(&tpg->tpg_lun_lock);
- lun = tpg->tpg_lun_list[unpacked_lun];
- if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
- pr_err("%s Logical Unit Number: %u is not active on"
- " Target Portal Group: %u, ignoring request.\n",
- tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
- tpg->se_tpg_tfo->tpg_get_tag(tpg));
- spin_unlock(&tpg->tpg_lun_lock);
- return ERR_PTR(-ENODEV);
- }
- spin_unlock(&tpg->tpg_lun_lock);
+ return 0;
- return lun;
+out:
+ return ret;
}
-int core_tpg_post_dellun(
+void core_tpg_remove_lun(
struct se_portal_group *tpg,
struct se_lun *lun)
{
- core_tpg_shutdown_lun(tpg, lun);
+ /*
+ * rcu_dereference_raw protected by se_lun->lun_group symlink
+ * reference to se_device->dev_group.
+ */
+ struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
- core_dev_unexport(lun->lun_se_dev, tpg, lun);
+ lun->lun_shutdown = true;
- spin_lock(&tpg->tpg_lun_lock);
- lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
- spin_unlock(&tpg->tpg_lun_lock);
+ core_clear_lun_from_tpg(lun, tpg);
+ /*
+ * Wait for any active I/O references to percpu se_lun->lun_ref to
+ * be released. Also, se_lun->lun_ref is now used by PR and ALUA
+ * logic when referencing a remote target port during ALL_TGT_PT=1
+ * and generating UNIT_ATTENTIONs for ALUA access state transition.
+ */
+ transport_clear_lun_ref(lun);
- return 0;
+ mutex_lock(&tpg->tpg_lun_mutex);
+ if (lun->lun_se_dev) {
+ target_detach_tg_pt_gp(lun);
+
+ spin_lock(&dev->se_port_lock);
+ list_del(&lun->lun_dev_link);
+ dev->export_count--;
+ rcu_assign_pointer(lun->lun_se_dev, NULL);
+ spin_unlock(&dev->se_port_lock);
+ }
+ if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
+ hlist_del_rcu(&lun->link);
+
+ lun->lun_shutdown = false;
+ mutex_unlock(&tpg->tpg_lun_mutex);
+
+ percpu_ref_exit(&lun->lun_ref);
}
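core_tpg_remove_lun() relies on the percpu_ref shutdown handshake: percpu_ref_kill() switches lun_ref to atomic mode and drops the initial reference, and once the last in-flight I/O reference is put, core_tpg_lun_ref_release() above fires lun_shutdown_comp. transport_clear_lun_ref() presumably pairs the two along these lines (a sketch, not quoted from this patch):

void transport_clear_lun_ref(struct se_lun *lun)
{
	percpu_ref_kill(&lun->lun_ref);
	wait_for_completion(&lun->lun_shutdown_comp);
}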
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 7172d005d063..e8b7955d40f2 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1,26 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* Filename: target_core_transport.c
*
* This file contains the Generic Target Engine Core.
*
- * (c) Copyright 2002-2012 RisingTide Systems LLC.
+ * (c) Copyright 2002-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
******************************************************************************/
#include <linux/net.h>
@@ -28,24 +15,22 @@
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
-#include <linux/blkdev.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
-#include <asm/unaligned.h>
+#include <linux/vmalloc.h>
+#include <linux/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_proto.h>
+#include <scsi/scsi_common.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
-#include <target/target_core_configfs.h>
#include "target_core_internal.h"
#include "target_core_alua.h"
@@ -56,19 +41,20 @@
#include <trace/events/target.h>
static struct workqueue_struct *target_completion_wq;
+static struct workqueue_struct *target_submission_wq;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_ua_cache;
struct kmem_cache *t10_pr_reg_cache;
struct kmem_cache *t10_alua_lu_gp_cache;
struct kmem_cache *t10_alua_lu_gp_mem_cache;
struct kmem_cache *t10_alua_tg_pt_gp_cache;
-struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
+struct kmem_cache *t10_alua_lba_map_cache;
+struct kmem_cache *t10_alua_lba_map_mem_cache;
static void transport_complete_task_attr(struct se_cmd *cmd);
+static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason);
static void transport_handle_queue_full(struct se_cmd *cmd,
- struct se_device *dev);
-static int transport_generic_get_mem(struct se_cmd *cmd);
-static int transport_put_cmd(struct se_cmd *cmd);
+ struct se_device *dev, int err, bool write_pending);
static void target_complete_ok_work(struct work_struct *work);
int init_se_kmem_caches(void)
@@ -120,26 +106,43 @@ int init_se_kmem_caches(void)
"cache failed\n");
goto out_free_lu_gp_mem_cache;
}
- t10_alua_tg_pt_gp_mem_cache = kmem_cache_create(
- "t10_alua_tg_pt_gp_mem_cache",
- sizeof(struct t10_alua_tg_pt_gp_member),
- __alignof__(struct t10_alua_tg_pt_gp_member),
- 0, NULL);
- if (!t10_alua_tg_pt_gp_mem_cache) {
- pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
- "mem_t failed\n");
+ t10_alua_lba_map_cache = kmem_cache_create(
+ "t10_alua_lba_map_cache",
+ sizeof(struct t10_alua_lba_map),
+ __alignof__(struct t10_alua_lba_map), 0, NULL);
+ if (!t10_alua_lba_map_cache) {
+ pr_err("kmem_cache_create() for t10_alua_lba_map_"
+ "cache failed\n");
goto out_free_tg_pt_gp_cache;
}
+ t10_alua_lba_map_mem_cache = kmem_cache_create(
+ "t10_alua_lba_map_mem_cache",
+ sizeof(struct t10_alua_lba_map_member),
+ __alignof__(struct t10_alua_lba_map_member), 0, NULL);
+ if (!t10_alua_lba_map_mem_cache) {
+ pr_err("kmem_cache_create() for t10_alua_lba_map_mem_"
+ "cache failed\n");
+ goto out_free_lba_map_cache;
+ }
target_completion_wq = alloc_workqueue("target_completion",
- WQ_MEM_RECLAIM, 0);
+ WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!target_completion_wq)
- goto out_free_tg_pt_gp_mem_cache;
+ goto out_free_lba_map_mem_cache;
+
+ target_submission_wq = alloc_workqueue("target_submission",
+ WQ_MEM_RECLAIM | WQ_PERCPU, 0);
+ if (!target_submission_wq)
+ goto out_free_completion_wq;
return 0;
-out_free_tg_pt_gp_mem_cache:
- kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
+out_free_completion_wq:
+ destroy_workqueue(target_completion_wq);
+out_free_lba_map_mem_cache:
+ kmem_cache_destroy(t10_alua_lba_map_mem_cache);
+out_free_lba_map_cache:
+ kmem_cache_destroy(t10_alua_lba_map_cache);
out_free_tg_pt_gp_cache:
kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
out_free_lu_gp_mem_cache:
@@ -158,6 +161,7 @@ out:
void release_se_kmem_caches(void)
{
+ destroy_workqueue(target_submission_wq);
destroy_workqueue(target_completion_wq);
kmem_cache_destroy(se_sess_cache);
kmem_cache_destroy(se_ua_cache);
@@ -165,7 +169,8 @@ void release_se_kmem_caches(void)
kmem_cache_destroy(t10_alua_lu_gp_cache);
kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
- kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
+ kmem_cache_destroy(t10_alua_lba_map_cache);
+ kmem_cache_destroy(t10_alua_lba_map_mem_cache);
}
/* This code ensures unique mib indexes are handed out. */
@@ -196,22 +201,92 @@ void transport_subsystem_check_init(void)
if (sub_api_initialized)
return;
- ret = request_module("target_core_iblock");
+ ret = IS_ENABLED(CONFIG_TCM_IBLOCK) && request_module("target_core_iblock");
if (ret != 0)
pr_err("Unable to load target_core_iblock\n");
- ret = request_module("target_core_file");
+ ret = IS_ENABLED(CONFIG_TCM_FILEIO) && request_module("target_core_file");
if (ret != 0)
pr_err("Unable to load target_core_file\n");
- ret = request_module("target_core_pscsi");
+ ret = IS_ENABLED(CONFIG_TCM_PSCSI) && request_module("target_core_pscsi");
if (ret != 0)
pr_err("Unable to load target_core_pscsi\n");
+ ret = IS_ENABLED(CONFIG_TCM_USER2) && request_module("target_core_user");
+ if (ret != 0)
+ pr_err("Unable to load target_core_user\n");
+
sub_api_initialized = 1;
}
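Because the assignments use a boolean &&, ret is 1 only when the backend is actually configured and request_module() failed; with the Kconfig symbol off, IS_ENABLED() folds to 0 at compile time and no modprobe is attempted. An illustration of the guard semantics (not part of the patch):

bool load_failed = IS_ENABLED(CONFIG_TCM_IBLOCK) &&
		   request_module("target_core_iblock") != 0;
if (load_failed)
	pr_err("Unable to load target_core_iblock\n");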
-struct se_session *transport_init_session(void)
+static void target_release_cmd_refcnt(struct percpu_ref *ref)
+{
+ struct target_cmd_counter *cmd_cnt = container_of(ref,
+ typeof(*cmd_cnt),
+ refcnt);
+ wake_up(&cmd_cnt->refcnt_wq);
+}
+
+struct target_cmd_counter *target_alloc_cmd_counter(void)
+{
+ struct target_cmd_counter *cmd_cnt;
+ int rc;
+
+ cmd_cnt = kzalloc(sizeof(*cmd_cnt), GFP_KERNEL);
+ if (!cmd_cnt)
+ return NULL;
+
+ init_completion(&cmd_cnt->stop_done);
+ init_waitqueue_head(&cmd_cnt->refcnt_wq);
+ atomic_set(&cmd_cnt->stopped, 0);
+
+ rc = percpu_ref_init(&cmd_cnt->refcnt, target_release_cmd_refcnt, 0,
+ GFP_KERNEL);
+ if (rc)
+ goto free_cmd_cnt;
+
+ return cmd_cnt;
+
+free_cmd_cnt:
+ kfree(cmd_cnt);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(target_alloc_cmd_counter);
+
+void target_free_cmd_counter(struct target_cmd_counter *cmd_cnt)
+{
+ /*
+ * Drivers like loop do not call target_stop_session during session
+ * shutdown so we have to drop the ref taken at init time here.
+ */
+ if (!atomic_read(&cmd_cnt->stopped))
+ percpu_ref_put(&cmd_cnt->refcnt);
+
+ percpu_ref_exit(&cmd_cnt->refcnt);
+ kfree(cmd_cnt);
+}
+EXPORT_SYMBOL_GPL(target_free_cmd_counter);
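A hedged sketch of how a submission path might use the counter: take a live percpu reference per command and refuse new work once the counter has been stopped, so that target_release_cmd_refcnt() can wake the draining waiter on the final put (the example_* names are illustrative):

static int example_get_cmd_ref(struct target_cmd_counter *cmd_cnt)
{
	/* Fails once percpu_ref_kill() has been called on ->refcnt. */
	if (!percpu_ref_tryget_live(&cmd_cnt->refcnt))
		return -ESHUTDOWN;
	return 0;
}

static void example_put_cmd_ref(struct target_cmd_counter *cmd_cnt)
{
	percpu_ref_put(&cmd_cnt->refcnt);
}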
+
+/**
+ * transport_init_session - initialize a session object
+ * @se_sess: Session object pointer.
+ *
+ * The caller must have zero-initialized @se_sess before calling this function.
+ */
+void transport_init_session(struct se_session *se_sess)
+{
+ INIT_LIST_HEAD(&se_sess->sess_list);
+ INIT_LIST_HEAD(&se_sess->sess_acl_list);
+ spin_lock_init(&se_sess->sess_cmd_lock);
+}
+EXPORT_SYMBOL(transport_init_session);
+
+/**
+ * transport_alloc_session - allocate a session object and initialize it
+ * @sup_prot_ops: bitmask that defines which T10-PI modes are supported.
+ */
+struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops)
{
struct se_session *se_sess;
@@ -221,16 +296,83 @@ struct se_session *transport_init_session(void)
" se_sess_cache\n");
return ERR_PTR(-ENOMEM);
}
- INIT_LIST_HEAD(&se_sess->sess_list);
- INIT_LIST_HEAD(&se_sess->sess_acl_list);
- INIT_LIST_HEAD(&se_sess->sess_cmd_list);
- INIT_LIST_HEAD(&se_sess->sess_wait_list);
- spin_lock_init(&se_sess->sess_cmd_lock);
- kref_init(&se_sess->sess_kref);
+ transport_init_session(se_sess);
+ se_sess->sup_prot_ops = sup_prot_ops;
+
+ return se_sess;
+}
+EXPORT_SYMBOL(transport_alloc_session);
+
+/**
+ * transport_alloc_session_tags - allocate target driver private data
+ * @se_sess: Session pointer.
+ * @tag_num: Maximum number of in-flight commands between initiator and target.
+ * @tag_size: Size in bytes of the private data a target driver associates with
+ * each command.
+ */
+int transport_alloc_session_tags(struct se_session *se_sess,
+ unsigned int tag_num, unsigned int tag_size)
+{
+ int rc;
+
+ se_sess->sess_cmd_map = kvcalloc(tag_size, tag_num,
+ GFP_KERNEL | __GFP_RETRY_MAYFAIL);
+ if (!se_sess->sess_cmd_map) {
+ pr_err("Unable to allocate se_sess->sess_cmd_map\n");
+ return -ENOMEM;
+ }
+
+ rc = sbitmap_queue_init_node(&se_sess->sess_tag_pool, tag_num, -1,
+ false, GFP_KERNEL, NUMA_NO_NODE);
+ if (rc < 0) {
+ pr_err("Unable to init se_sess->sess_tag_pool,"
+ " tag_num: %u\n", tag_num);
+ kvfree(se_sess->sess_cmd_map);
+ se_sess->sess_cmd_map = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(transport_alloc_session_tags);
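A fabric driver consumes the pool by pairing sbitmap_queue_get() with sbitmap_queue_clear() and indexing its private per-command slab through sess_cmd_map; a hedged sketch (struct my_cmd is illustrative):

struct my_cmd {
	int tag;
	unsigned int cpu;
	/* fabric per-command state follows */
};

static struct my_cmd *example_get_cmd(struct se_session *se_sess)
{
	struct my_cmd *cmd;
	unsigned int cpu;
	int tag;

	tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
	if (tag < 0)
		return NULL;

	cmd = &((struct my_cmd *)se_sess->sess_cmd_map)[tag];
	cmd->tag = tag;
	cmd->cpu = cpu;	/* needed later for sbitmap_queue_clear() */
	return cmd;
}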
+
+/**
+ * transport_init_session_tags - allocate a session and target driver private data
+ * @tag_num: Maximum number of in-flight commands between initiator and target.
+ * @tag_size: Size in bytes of the private data a target driver associates with
+ * each command.
+ * @sup_prot_ops: bitmask that defines which T10-PI modes are supported.
+ */
+static struct se_session *
+transport_init_session_tags(unsigned int tag_num, unsigned int tag_size,
+ enum target_prot_op sup_prot_ops)
+{
+ struct se_session *se_sess;
+ int rc;
+
+ if (tag_num != 0 && !tag_size) {
+ pr_err("init_session_tags called with percpu-ida tag_num:"
+ " %u, but zero tag_size\n", tag_num);
+ return ERR_PTR(-EINVAL);
+ }
+ if (!tag_num && tag_size) {
+ pr_err("init_session_tags called with percpu-ida tag_size:"
+ " %u, but zero tag_num\n", tag_size);
+ return ERR_PTR(-EINVAL);
+ }
+
+ se_sess = transport_alloc_session(sup_prot_ops);
+ if (IS_ERR(se_sess))
+ return se_sess;
+
+ rc = transport_alloc_session_tags(se_sess, tag_num, tag_size);
+ if (rc < 0) {
+ transport_free_session(se_sess);
+ return ERR_PTR(-ENOMEM);
+ }
return se_sess;
}
-EXPORT_SYMBOL(transport_init_session);
/*
* Called with spin_lock_irqsave(&struct se_portal_group->session_lock called.
@@ -241,7 +383,9 @@ void __transport_register_session(
struct se_session *se_sess,
void *fabric_sess_ptr)
{
+ const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
unsigned char buf[PR_REG_ISID_LEN];
+ unsigned long flags;
se_sess->se_tpg = se_tpg;
se_sess->fabric_sess_ptr = fabric_sess_ptr;
@@ -253,6 +397,21 @@ void __transport_register_session(
*/
if (se_nacl) {
/*
+ * Determine if fabric allows for T10-PI feature bits exposed to
+ * initiators for device backends with !dev->dev_attrib.pi_prot_type.
+ *
+ * If so, then always save prot_type on a per se_node_acl node
+ * basis and re-instate the previous sess_prot_type to avoid
+ * disabling PI from below any previously initiator side
+ * registered LUNs.
+ */
+ if (se_nacl->saved_prot_type)
+ se_sess->sess_prot_type = se_nacl->saved_prot_type;
+ else if (tfo->tpg_check_prot_fabric_only)
+ se_sess->sess_prot_type = se_nacl->saved_prot_type =
+ tfo->tpg_check_prot_fabric_only(se_tpg);
+ /*
* If the fabric module supports an ISID based TransportID,
* save this value in binary from the fabric I_T Nexus now.
*/
@@ -262,9 +421,8 @@ void __transport_register_session(
&buf[0], PR_REG_ISID_LEN);
se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
}
- kref_get(&se_nacl->acl_kref);
- spin_lock_irq(&se_nacl->nacl_sess_lock);
+ spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
/*
* The se_nacl->nacl_sess pointer will be set to the
* last active I_T Nexus for each struct se_node_acl.
@@ -273,12 +431,12 @@ void __transport_register_session(
list_add_tail(&se_sess->sess_acl_list,
&se_nacl->acl_sess_list);
- spin_unlock_irq(&se_nacl->nacl_sess_lock);
+ spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
}
list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);
pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
- se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr);
+ se_tpg->se_tpg_tfo->fabric_name, se_sess->fabric_sess_ptr);
}
EXPORT_SYMBOL(__transport_register_session);
@@ -296,45 +454,114 @@ void transport_register_session(
}
EXPORT_SYMBOL(transport_register_session);
-static void target_release_session(struct kref *kref)
+struct se_session *
+target_setup_session(struct se_portal_group *tpg,
+ unsigned int tag_num, unsigned int tag_size,
+ enum target_prot_op prot_op,
+ const char *initiatorname, void *private,
+ int (*callback)(struct se_portal_group *,
+ struct se_session *, void *))
{
- struct se_session *se_sess = container_of(kref,
- struct se_session, sess_kref);
- struct se_portal_group *se_tpg = se_sess->se_tpg;
+ struct target_cmd_counter *cmd_cnt;
+ struct se_session *sess;
+ int rc;
- se_tpg->se_tpg_tfo->close_session(se_sess);
-}
+ cmd_cnt = target_alloc_cmd_counter();
+ if (!cmd_cnt)
+ return ERR_PTR(-ENOMEM);
+ /*
+	 * If the fabric driver is using sbitmap-based preallocation
+	 * of I/O descriptor tags, go ahead and perform that setup now.
+ */
+ if (tag_num != 0)
+ sess = transport_init_session_tags(tag_num, tag_size, prot_op);
+ else
+ sess = transport_alloc_session(prot_op);
-void target_get_session(struct se_session *se_sess)
-{
- kref_get(&se_sess->sess_kref);
+ if (IS_ERR(sess)) {
+ rc = PTR_ERR(sess);
+ goto free_cnt;
+ }
+ sess->cmd_cnt = cmd_cnt;
+
+ sess->se_node_acl = core_tpg_check_initiator_node_acl(tpg,
+ (unsigned char *)initiatorname);
+ if (!sess->se_node_acl) {
+ rc = -EACCES;
+ goto free_sess;
+ }
+ /*
+ * Go ahead and perform any remaining fabric setup that is
+ * required before transport_register_session().
+ */
+ if (callback != NULL) {
+ rc = callback(tpg, sess, private);
+ if (rc)
+ goto free_sess;
+ }
+
+ transport_register_session(tpg, sess->se_node_acl, sess, private);
+ return sess;
+
+free_sess:
+ transport_free_session(sess);
+ return ERR_PTR(rc);
+
+free_cnt:
+ target_free_cmd_counter(cmd_cnt);
+ return ERR_PTR(rc);
}
-EXPORT_SYMBOL(target_get_session);
+EXPORT_SYMBOL(target_setup_session);
-void target_put_session(struct se_session *se_sess)
+ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page)
{
- struct se_portal_group *tpg = se_sess->se_tpg;
+ struct se_session *se_sess;
+ ssize_t len = 0;
- if (tpg->se_tpg_tfo->put_session != NULL) {
- tpg->se_tpg_tfo->put_session(se_sess);
- return;
+ spin_lock_bh(&se_tpg->session_lock);
+ list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) {
+ if (!se_sess->se_node_acl)
+ continue;
+ if (!se_sess->se_node_acl->dynamic_node_acl)
+ continue;
+ if (strlen(se_sess->se_node_acl->initiatorname) + 1 + len > PAGE_SIZE)
+ break;
+
+ len += snprintf(page + len, PAGE_SIZE - len, "%s\n",
+ se_sess->se_node_acl->initiatorname);
+ len += 1; /* Include NULL terminator */
}
- kref_put(&se_sess->sess_kref, target_release_session);
+ spin_unlock_bh(&se_tpg->session_lock);
+
+ return len;
}
-EXPORT_SYMBOL(target_put_session);
+EXPORT_SYMBOL(target_show_dynamic_sessions);
static void target_complete_nacl(struct kref *kref)
{
struct se_node_acl *nacl = container_of(kref,
struct se_node_acl, acl_kref);
+ struct se_portal_group *se_tpg = nacl->se_tpg;
+
+ if (!nacl->dynamic_stop) {
+ complete(&nacl->acl_free_comp);
+ return;
+ }
- complete(&nacl->acl_free_comp);
+ mutex_lock(&se_tpg->acl_node_mutex);
+ list_del_init(&nacl->acl_list);
+ mutex_unlock(&se_tpg->acl_node_mutex);
+
+ core_tpg_wait_for_nacl_pr_ref(nacl);
+ core_free_device_list_for_node(nacl, se_tpg);
+ kfree(nacl);
}
void target_put_nacl(struct se_node_acl *nacl)
{
kref_put(&nacl->acl_kref, target_complete_nacl);
}
+EXPORT_SYMBOL(target_put_nacl);
void transport_deregister_session_configfs(struct se_session *se_sess)
{
@@ -346,8 +573,8 @@ void transport_deregister_session_configfs(struct se_session *se_sess)
se_nacl = se_sess->se_node_acl;
if (se_nacl) {
spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
- if (se_nacl->acl_stop == 0)
- list_del(&se_sess->sess_acl_list);
+ if (!list_empty(&se_sess->sess_acl_list))
+ list_del_init(&se_sess->sess_acl_list);
/*
* If the session list is empty, then clear the pointer.
* Otherwise, set the struct se_session pointer from the tail
@@ -367,23 +594,70 @@ EXPORT_SYMBOL(transport_deregister_session_configfs);
void transport_free_session(struct se_session *se_sess)
{
+ struct se_node_acl *se_nacl = se_sess->se_node_acl;
+
+ /*
+ * Drop the se_node_acl->nacl_kref obtained from within
+ * core_tpg_get_initiator_node_acl().
+ */
+ if (se_nacl) {
+ struct se_portal_group *se_tpg = se_nacl->se_tpg;
+ const struct target_core_fabric_ops *se_tfo = se_tpg->se_tpg_tfo;
+ unsigned long flags;
+
+ se_sess->se_node_acl = NULL;
+
+ /*
+ * Also determine if we need to drop the extra ->cmd_kref if
+ * it had been previously dynamically generated, and
+ * the endpoint is not caching dynamic ACLs.
+ */
+ mutex_lock(&se_tpg->acl_node_mutex);
+ if (se_nacl->dynamic_node_acl &&
+ !se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
+ spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
+ if (list_empty(&se_nacl->acl_sess_list))
+ se_nacl->dynamic_stop = true;
+ spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
+
+ if (se_nacl->dynamic_stop)
+ list_del_init(&se_nacl->acl_list);
+ }
+ mutex_unlock(&se_tpg->acl_node_mutex);
+
+ if (se_nacl->dynamic_stop)
+ target_put_nacl(se_nacl);
+
+ target_put_nacl(se_nacl);
+ }
+ if (se_sess->sess_cmd_map) {
+ sbitmap_queue_free(&se_sess->sess_tag_pool);
+ kvfree(se_sess->sess_cmd_map);
+ }
+ if (se_sess->cmd_cnt)
+ target_free_cmd_counter(se_sess->cmd_cnt);
kmem_cache_free(se_sess_cache, se_sess);
}
EXPORT_SYMBOL(transport_free_session);
+static int target_release_res(struct se_device *dev, void *data)
+{
+ struct se_session *sess = data;
+
+ if (dev->reservation_holder == sess)
+ target_release_reservation(dev);
+ return 0;
+}
+
void transport_deregister_session(struct se_session *se_sess)
{
struct se_portal_group *se_tpg = se_sess->se_tpg;
- struct target_core_fabric_ops *se_tfo;
- struct se_node_acl *se_nacl;
unsigned long flags;
- bool comp_nacl = true;
if (!se_tpg) {
transport_free_session(se_sess);
return;
}
- se_tfo = se_tpg->se_tpg_tfo;
spin_lock_irqsave(&se_tpg->session_lock, flags);
list_del(&se_sess->sess_list);
@@ -392,44 +666,33 @@ void transport_deregister_session(struct se_session *se_sess)
spin_unlock_irqrestore(&se_tpg->session_lock, flags);
/*
- * Determine if we need to do extra work for this initiator node's
- * struct se_node_acl if it had been previously dynamically generated.
+ * Since the session is being removed, release SPC-2
+ * reservations held by the session that is disappearing.
*/
- se_nacl = se_sess->se_node_acl;
-
- spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
- if (se_nacl && se_nacl->dynamic_node_acl) {
- if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
- list_del(&se_nacl->acl_list);
- se_tpg->num_node_acls--;
- spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
- core_tpg_wait_for_nacl_pr_ref(se_nacl);
- core_free_device_list_for_node(se_nacl, se_tpg);
- se_tfo->tpg_release_fabric_acl(se_tpg, se_nacl);
-
- comp_nacl = false;
- spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
- }
- }
- spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
+ target_for_each_device(target_release_res, se_sess);
pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
- se_tpg->se_tpg_tfo->get_fabric_name());
+ se_tpg->se_tpg_tfo->fabric_name);
/*
- * If last kref is dropping now for an explict NodeACL, awake sleeping
+ * If last kref is dropping now for an explicit NodeACL, awake sleeping
* ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group
- * removal context.
+ * removal context from within transport_free_session() code.
+ *
+ * For dynamic ACL, target_put_nacl() uses target_complete_nacl()
+ * to release all remaining generate_node_acl=1 created ACL resources.
*/
- if (se_nacl && comp_nacl == true)
- target_put_nacl(se_nacl);
transport_free_session(se_sess);
}
EXPORT_SYMBOL(transport_deregister_session);
-/*
- * Called with cmd->t_state_lock held.
- */
+void target_remove_session(struct se_session *se_sess)
+{
+ transport_deregister_session_configfs(se_sess);
+ transport_deregister_session(se_sess);
+}
+EXPORT_SYMBOL(target_remove_session);
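A hedged example of a fabric ->close_session() built on the new helper (my_close_session is illustrative):

static void my_close_session(struct se_session *se_sess)
{
	/* Unlinks from configfs and the TPG, then frees the session. */
	target_remove_session(se_sess);
}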
+
static void target_remove_from_state_list(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
@@ -438,121 +701,91 @@ static void target_remove_from_state_list(struct se_cmd *cmd)
if (!dev)
return;
- if (cmd->transport_state & CMD_T_BUSY)
- return;
-
- spin_lock_irqsave(&dev->execute_task_lock, flags);
+ spin_lock_irqsave(&dev->queues[cmd->cpuid].lock, flags);
if (cmd->state_active) {
list_del(&cmd->state_list);
cmd->state_active = false;
}
- spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+ spin_unlock_irqrestore(&dev->queues[cmd->cpuid].lock, flags);
}
-static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
- bool write_pending)
+static void target_remove_from_tmr_list(struct se_cmd *cmd)
{
+ struct se_device *dev = NULL;
unsigned long flags;
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- if (write_pending)
- cmd->t_state = TRANSPORT_WRITE_PENDING;
-
- /*
- * Determine if IOCTL context caller in requesting the stopping of this
- * command for LUN shutdown purposes.
- */
- if (cmd->transport_state & CMD_T_LUN_STOP) {
- pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
- __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));
-
- cmd->transport_state &= ~CMD_T_ACTIVE;
- if (remove_from_lists)
- target_remove_from_state_list(cmd);
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
- complete(&cmd->transport_lun_stop_comp);
- return 1;
- }
-
- if (remove_from_lists) {
- target_remove_from_state_list(cmd);
+ if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
+ dev = cmd->se_tmr_req->tmr_dev;
- /*
- * Clear struct se_cmd->se_lun before the handoff to FE.
- */
- cmd->se_lun = NULL;
+ if (dev) {
+ spin_lock_irqsave(&dev->se_tmr_lock, flags);
+ if (cmd->se_tmr_req->tmr_dev)
+ list_del_init(&cmd->se_tmr_req->tmr_list);
+ spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
}
+}
+/*
+ * This function is called by the target core after the target core has
+ * finished processing a SCSI command or SCSI TMF. Both the regular command
+ * processing code and the code for aborting commands can call this
+ * function. CMD_T_STOP is set if and only if another thread is waiting
+ * inside transport_wait_for_tasks() for t_transport_stop_comp.
+ */
+static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
/*
* Determine if frontend context caller is requesting the stopping of
* this command for frontend exceptions.
*/
if (cmd->transport_state & CMD_T_STOP) {
- pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
- __func__, __LINE__,
- cmd->se_tfo->get_task_tag(cmd));
+ pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
+ __func__, __LINE__, cmd->tag);
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- complete(&cmd->t_transport_stop_comp);
+ complete_all(&cmd->t_transport_stop_comp);
return 1;
}
-
cmd->transport_state &= ~CMD_T_ACTIVE;
- if (remove_from_lists) {
- /*
- * Some fabric modules like tcm_loop can release
- * their internally allocated I/O reference now and
- * struct se_cmd now.
- *
- * Fabric modules are expected to return '1' here if the
- * se_cmd being passed is released at this point,
- * or zero if not being released.
- */
- if (cmd->se_tfo->check_stop_free != NULL) {
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- return cmd->se_tfo->check_stop_free(cmd);
- }
- }
-
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- return 0;
-}
-static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
-{
- return transport_cmd_check_stop(cmd, true, false);
+ /*
+ * Some fabric modules like tcm_loop can release their internally
+ * allocated I/O reference and struct se_cmd now.
+ *
+ * Fabric modules are expected to return '1' here if the se_cmd being
+ * passed is released at this point, or zero if not being released.
+ */
+ return cmd->se_tfo->check_stop_free(cmd);
}
static void transport_lun_remove_cmd(struct se_cmd *cmd)
{
struct se_lun *lun = cmd->se_lun;
- unsigned long flags;
if (!lun)
return;
- spin_lock_irqsave(&lun->lun_cmd_lock, flags);
- if (!list_empty(&cmd->se_lun_node))
- list_del_init(&cmd->se_lun_node);
- spin_unlock_irqrestore(&lun->lun_cmd_lock, flags);
-}
+ target_remove_from_state_list(cmd);
+ target_remove_from_tmr_list(cmd);
-void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
-{
- if (transport_cmd_check_stop_to_fabric(cmd))
- return;
- if (remove)
- transport_put_cmd(cmd);
+ if (cmpxchg(&cmd->lun_ref_active, true, false))
+ percpu_ref_put(&lun->lun_ref);
+
+ /*
+ * Clear struct se_cmd->se_lun before the handoff to FE.
+ */
+ cmd->se_lun = NULL;
}
static void target_complete_failure_work(struct work_struct *work)
{
struct se_cmd *cmd = container_of(work, struct se_cmd, work);
- transport_generic_request_failure(cmd,
- TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
+ transport_generic_request_failure(cmd, cmd->sense_reason);
}
/*
@@ -578,72 +811,186 @@ static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd)
return cmd->sense_buffer;
}
-void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
+void transport_copy_sense_to_cmd(struct se_cmd *cmd, unsigned char *sense)
{
- struct se_device *dev = cmd->se_dev;
- int success = scsi_status == GOOD;
+ unsigned char *cmd_sense_buf;
unsigned long flags;
- cmd->scsi_status = scsi_status;
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ cmd_sense_buf = transport_get_sense_buffer(cmd);
+ if (!cmd_sense_buf) {
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ return;
+ }
+ cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
+ memcpy(cmd_sense_buf, sense, cmd->scsi_sense_length);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+}
+EXPORT_SYMBOL(transport_copy_sense_to_cmd);
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- cmd->transport_state &= ~CMD_T_BUSY;
+static void target_handle_abort(struct se_cmd *cmd)
+{
+ bool tas = cmd->transport_state & CMD_T_TAS;
+ bool ack_kref = cmd->se_cmd_flags & SCF_ACK_KREF;
+ int ret;
- if (dev && dev->transport->transport_complete) {
- dev->transport->transport_complete(cmd,
- cmd->t_data_sg,
- transport_get_sense_buffer(cmd));
- if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
- success = 1;
+ pr_debug("tag %#llx: send_abort_response = %d\n", cmd->tag, tas);
+
+ if (tas) {
+ if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
+ cmd->scsi_status = SAM_STAT_TASK_ABORTED;
+ pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n",
+ cmd->t_task_cdb[0], cmd->tag);
+ trace_target_cmd_complete(cmd);
+ ret = cmd->se_tfo->queue_status(cmd);
+ if (ret) {
+ transport_handle_queue_full(cmd, cmd->se_dev,
+ ret, false);
+ return;
+ }
+ } else {
+ cmd->se_tmr_req->response = TMR_FUNCTION_REJECTED;
+ cmd->se_tfo->queue_tm_rsp(cmd);
+ }
+ } else {
+ /*
+ * Allow the fabric driver to unmap any resources before
+ * releasing the descriptor via TFO->release_cmd().
+ */
+ cmd->se_tfo->aborted_task(cmd);
+ if (ack_kref)
+ WARN_ON_ONCE(target_put_sess_cmd(cmd) != 0);
+ /*
+ * To do: establish a unit attention condition on the I_T
+ * nexus associated with cmd. See also the paragraph "Aborting
+ * commands" in SAM.
+ */
}
- /*
- * See if we are waiting to complete for an exception condition.
- */
- if (cmd->transport_state & CMD_T_REQUEST_STOP) {
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- complete(&cmd->task_stop_comp);
- return;
+ WARN_ON_ONCE(kref_read(&cmd->cmd_kref) == 0);
+
+ transport_lun_remove_cmd(cmd);
+
+ transport_cmd_check_stop_to_fabric(cmd);
+}
+
+static void target_abort_work(struct work_struct *work)
+{
+ struct se_cmd *cmd = container_of(work, struct se_cmd, work);
+
+ target_handle_abort(cmd);
+}
+
+static bool target_cmd_interrupted(struct se_cmd *cmd)
+{
+ int post_ret;
+
+ if (cmd->transport_state & CMD_T_ABORTED) {
+ if (cmd->transport_complete_callback)
+ cmd->transport_complete_callback(cmd, false, &post_ret);
+ INIT_WORK(&cmd->work, target_abort_work);
+ queue_work(target_completion_wq, &cmd->work);
+ return true;
+ } else if (cmd->transport_state & CMD_T_STOP) {
+ if (cmd->transport_complete_callback)
+ cmd->transport_complete_callback(cmd, false, &post_ret);
+ complete_all(&cmd->t_transport_stop_comp);
+ return true;
}
- if (!success)
- cmd->transport_state |= CMD_T_FAILED;
+ return false;
+}
+
+/* May be called from interrupt context so must not sleep. */
+void target_complete_cmd_with_sense(struct se_cmd *cmd, u8 scsi_status,
+ sense_reason_t sense_reason)
+{
+ struct se_wwn *wwn = cmd->se_sess->se_tpg->se_tpg_wwn;
+ int success, cpu;
+ unsigned long flags;
- /*
- * Check for case where an explict ABORT_TASK has been received
- * and transport_wait_for_tasks() will be waiting for completion..
- */
- if (cmd->transport_state & CMD_T_ABORTED &&
- cmd->transport_state & CMD_T_STOP) {
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- complete(&cmd->t_transport_stop_comp);
+ if (target_cmd_interrupted(cmd))
return;
- } else if (cmd->transport_state & CMD_T_FAILED) {
- INIT_WORK(&cmd->work, target_complete_failure_work);
- } else {
- INIT_WORK(&cmd->work, target_complete_ok_work);
+
+ cmd->scsi_status = scsi_status;
+ cmd->sense_reason = sense_reason;
+
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ switch (cmd->scsi_status) {
+ case SAM_STAT_CHECK_CONDITION:
+ if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
+ success = 1;
+ else
+ success = 0;
+ break;
+ default:
+ success = 1;
+ break;
}
cmd->t_state = TRANSPORT_COMPLETE;
cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- queue_work(target_completion_wq, &cmd->work);
+ INIT_WORK(&cmd->work, success ? target_complete_ok_work :
+ target_complete_failure_work);
+
+ if (!wwn || wwn->cmd_compl_affinity == SE_COMPL_AFFINITY_CPUID)
+ cpu = cmd->cpuid;
+ else
+ cpu = wwn->cmd_compl_affinity;
+
+ queue_work_on(cpu, target_completion_wq, &cmd->work);
+}
+EXPORT_SYMBOL(target_complete_cmd_with_sense);
+
+void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
+{
+ target_complete_cmd_with_sense(cmd, scsi_status, scsi_status ?
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE :
+ TCM_NO_SENSE);
}
EXPORT_SYMBOL(target_complete_cmd);
+void target_set_cmd_data_length(struct se_cmd *cmd, int length)
+{
+ if (length < cmd->data_length) {
+ if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
+ cmd->residual_count += cmd->data_length - length;
+ } else {
+ cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
+ cmd->residual_count = cmd->data_length - length;
+ }
+
+ cmd->data_length = length;
+ }
+}
+EXPORT_SYMBOL(target_set_cmd_data_length);
+
+void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
+{
+ if (scsi_status == SAM_STAT_GOOD ||
+ cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) {
+ target_set_cmd_data_length(cmd, length);
+ }
+
+ target_complete_cmd(cmd, scsi_status);
+}
+EXPORT_SYMBOL(target_complete_cmd_with_length);
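A worked example for target_set_cmd_data_length() (illustrative numbers): with cmd->data_length = 4096 and a first completion of length = 512, SCF_UNDERFLOW_BIT gets set, residual_count becomes 4096 - 512 = 3584, and data_length is reduced to 512. A later call with length = 256 finds the underflow bit already set and accumulates the new shortfall, giving residual_count = 3584 + (512 - 256) = 3840 and data_length = 256.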
+
static void target_add_to_state_list(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
unsigned long flags;
- spin_lock_irqsave(&dev->execute_task_lock, flags);
+ spin_lock_irqsave(&dev->queues[cmd->cpuid].lock, flags);
if (!cmd->state_active) {
- list_add_tail(&cmd->state_list, &dev->state_list);
+ list_add_tail(&cmd->state_list,
+ &dev->queues[cmd->cpuid].state_list);
cmd->state_active = true;
}
- spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+ spin_unlock_irqrestore(&dev->queues[cmd->cpuid].lock, flags);
}
/*
@@ -665,18 +1012,18 @@ void target_qf_do_work(struct work_struct *work)
list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
list_del(&cmd->se_qf_node);
- atomic_dec(&dev->dev_qf_count);
- smp_mb__after_atomic_dec();
+ atomic_dec_mb(&dev->dev_qf_count);
pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
- " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
+ " context: %s\n", cmd->se_tfo->fabric_name, cmd,
(cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
(cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
: "UNKNOWN");
if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP)
transport_write_pending_qf(cmd);
- else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK)
+ else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK ||
+ cmd->t_state == TRANSPORT_COMPLETE_QF_ERR)
transport_complete_qf(cmd);
}
}
@@ -973,6 +1320,68 @@ transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
}
EXPORT_SYMBOL(transport_set_vpd_ident);
+static sense_reason_t
+target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev,
+ unsigned int size)
+{
+ u32 mtl;
+
+ if (!cmd->se_tfo->max_data_sg_nents)
+ return TCM_NO_SENSE;
+ /*
+	 * Check if se_cmd->data_length exceeds the fabric-enforced maximum
+	 * transfer length, derived from the maximum number of SGL entries
+	 * per I/O descriptor. If so, set SCF_UNDERFLOW_BIT + residual_count
+	 * and reduce cmd->data_length to that maximum, assuming single
+	 * PAGE_SIZE entry scatter-lists.
+ */
+ mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE);
+ if (cmd->data_length > mtl) {
+ /*
+ * If an existing CDB overflow is present, calculate new residual
+ * based on CDB size minus fabric maximum transfer length.
+ *
+ * If an existing CDB underflow is present, calculate new residual
+ * based on original cmd->data_length minus fabric maximum transfer
+ * length.
+ *
+ * Otherwise, set the underflow residual based on cmd->data_length
+ * minus fabric maximum transfer length.
+ */
+ if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
+ cmd->residual_count = (size - mtl);
+ } else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
+ u32 orig_dl = size + cmd->residual_count;
+ cmd->residual_count = (orig_dl - mtl);
+ } else {
+ cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
+ cmd->residual_count = (cmd->data_length - mtl);
+ }
+ cmd->data_length = mtl;
+ /*
+ * Reset sbc_check_prot() calculated protection payload
+ * length based upon the new smaller MTL.
+ */
+ if (cmd->prot_length) {
+ u32 sectors = (mtl / dev->dev_attrib.block_size);
+ cmd->prot_length = dev->prot_length * sectors;
+ }
+ }
+ return TCM_NO_SENSE;
+}
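A worked example of the clamp (illustrative numbers): with max_data_sg_nents = 168 and PAGE_SIZE = 4096, mtl = 168 * 4096 = 688128 bytes. A plain 1 MiB READ (data_length = 1048576, with neither overflow nor underflow set) takes the final branch: SCF_UNDERFLOW_BIT is set, residual_count = 1048576 - 688128 = 360448, and data_length is clamped to 688128.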
+
+/**
+ * target_cmd_size_check - Check whether there will be a residual.
+ * @cmd: SCSI command.
+ * @size: Data buffer size derived from CDB. The data buffer size provided by
+ * the SCSI transport driver is available in @cmd->data_length.
+ *
+ * Compare the data buffer size from the CDB with the data buffer limit from the transport
+ * header. Set @cmd->residual_count and SCF_OVERFLOW_BIT or SCF_UNDERFLOW_BIT if necessary.
+ *
+ * Note: target drivers set @cmd->data_length by calling __target_init_cmd().
+ *
+ * Return: TCM_NO_SENSE
+ */
sense_reason_t
target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
{
@@ -981,32 +1390,15 @@ target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
if (cmd->unknown_data_length) {
cmd->data_length = size;
} else if (size != cmd->data_length) {
- pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
+ pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length:"
" %u does not match SCSI CDB Length: %u for SAM Opcode:"
- " 0x%02x\n", cmd->se_tfo->get_fabric_name(),
+ " 0x%02x\n", cmd->se_tfo->fabric_name,
cmd->data_length, size, cmd->t_task_cdb[0]);
-
- if (cmd->data_direction == DMA_TO_DEVICE) {
- pr_err("Rejecting underflow/overflow"
- " WRITE data\n");
- return TCM_INVALID_CDB_FIELD;
- }
- /*
- * Reject READ_* or WRITE_* with overflow/underflow for
- * type SCF_SCSI_DATA_CDB.
- */
- if (dev->dev_attrib.block_size != 512) {
- pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
- " CDB on non 512-byte sector setup subsystem"
- " plugin: %s\n", dev->transport->name);
- /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
- return TCM_INVALID_CDB_FIELD;
- }
/*
- * For the overflow case keep the existing fabric provided
- * ->data_length. Otherwise for the underflow case, reset
- * ->data_length to the smaller SCSI expected data transfer
- * length.
+	 * For a READ command in the overflow case, keep the existing
+	 * fabric-provided ->data_length. Otherwise, for the underflow
+	 * case, reset ->data_length to the smaller SCSI expected data
+	 * transfer length.
*/
if (size > cmd->data_length) {
cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
@@ -1014,50 +1406,78 @@ target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
} else {
cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
cmd->residual_count = (cmd->data_length - size);
- cmd->data_length = size;
+ /*
+			 * Do not truncate ->data_length for a WRITE command,
+			 * so that the entire payload can still be received.
+ */
+ if (cmd->data_direction == DMA_FROM_DEVICE) {
+ cmd->data_length = size;
+ }
+ }
+
+ if (cmd->data_direction == DMA_TO_DEVICE) {
+ if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
+ pr_err_ratelimited("Rejecting underflow/overflow"
+ " for WRITE data CDB\n");
+ return TCM_INVALID_FIELD_IN_COMMAND_IU;
+ }
+ /*
+ * Some fabric drivers like iscsi-target still expect to
+ * always reject overflow writes. Reject this case until
+ * full fabric driver level support for overflow writes
+ * is introduced tree-wide.
+ */
+ if (size > cmd->data_length) {
+ pr_err_ratelimited("Rejecting overflow for"
+ " WRITE control CDB\n");
+ return TCM_INVALID_CDB_FIELD;
+ }
}
}
- return 0;
+ return target_check_max_data_sg_nents(cmd, dev, size);
}
/*
* Used by fabric modules containing a local struct se_cmd within their
* fabric dependent per I/O descriptor.
+ *
+ * Preserves the value of @cmd->tag.
*/
-void transport_init_se_cmd(
- struct se_cmd *cmd,
- struct target_core_fabric_ops *tfo,
- struct se_session *se_sess,
- u32 data_length,
- int data_direction,
- int task_attr,
- unsigned char *sense_buffer)
+void __target_init_cmd(struct se_cmd *cmd,
+ const struct target_core_fabric_ops *tfo,
+ struct se_session *se_sess, u32 data_length,
+ int data_direction, int task_attr,
+ unsigned char *sense_buffer, u64 unpacked_lun,
+ struct target_cmd_counter *cmd_cnt)
{
- INIT_LIST_HEAD(&cmd->se_lun_node);
INIT_LIST_HEAD(&cmd->se_delayed_node);
INIT_LIST_HEAD(&cmd->se_qf_node);
- INIT_LIST_HEAD(&cmd->se_cmd_list);
INIT_LIST_HEAD(&cmd->state_list);
- init_completion(&cmd->transport_lun_fe_stop_comp);
- init_completion(&cmd->transport_lun_stop_comp);
init_completion(&cmd->t_transport_stop_comp);
- init_completion(&cmd->cmd_wait_comp);
- init_completion(&cmd->task_stop_comp);
+ cmd->free_compl = NULL;
+ cmd->abrt_compl = NULL;
spin_lock_init(&cmd->t_state_lock);
- cmd->transport_state = CMD_T_DEV_ACTIVE;
+ INIT_WORK(&cmd->work, NULL);
+ kref_init(&cmd->cmd_kref);
+ cmd->t_task_cdb = &cmd->__t_task_cdb[0];
cmd->se_tfo = tfo;
cmd->se_sess = se_sess;
cmd->data_length = data_length;
cmd->data_direction = data_direction;
cmd->sam_task_attr = task_attr;
cmd->sense_buffer = sense_buffer;
+ cmd->orig_fe_lun = unpacked_lun;
+ cmd->cmd_cnt = cmd_cnt;
+
+ if (!(cmd->se_cmd_flags & SCF_USE_CPUID))
+ cmd->cpuid = raw_smp_processor_id();
cmd->state_active = false;
}
-EXPORT_SYMBOL(transport_init_se_cmd);
+EXPORT_SYMBOL(__target_init_cmd);
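/*
 * Editor's note: a minimal sketch of the embedded-descriptor pattern the
 * comment above describes, assuming a hypothetical fabric (example_cmd,
 * example_tfo). Most fabrics should call target_init_cmd() below instead,
 * which wraps this initializer.
 */
struct example_cmd {
	struct se_cmd se_cmd;			/* embedded, not pointed-to */
	unsigned char sense[TRANSPORT_SENSE_BUFFER];
};

static void example_init_io(struct example_cmd *ecmd,
			    struct se_session *se_sess,
			    u32 len, u64 unpacked_lun)
{
	/* @cmd->tag is caller-owned and preserved by this call. */
	__target_init_cmd(&ecmd->se_cmd, &example_tfo, se_sess, len,
			  DMA_FROM_DEVICE, TCM_SIMPLE_TAG, ecmd->sense,
			  unpacked_lun, se_sess->cmd_cnt);
}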
static sense_reason_t
transport_check_alloc_task_attr(struct se_cmd *cmd)
@@ -1068,30 +1488,21 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
* Check if SAM Task Attribute emulation is enabled for this
* struct se_device storage object
*/
- if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+ if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
return 0;
- if (cmd->sam_task_attr == MSG_ACA_TAG) {
+ if (cmd->sam_task_attr == TCM_ACA_TAG) {
pr_debug("SAM Task Attribute ACA"
" emulation is not supported\n");
return TCM_INVALID_CDB_FIELD;
}
- /*
- * Used to determine when ORDERED commands should go from
- * Dormant to Active status.
- */
- cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
- smp_mb__after_atomic_inc();
- pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
- cmd->se_ordered_id, cmd->sam_task_attr,
- dev->transport->name);
+
return 0;
}
sense_reason_t
-target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
+target_cmd_init_cdb(struct se_cmd *cmd, unsigned char *cdb, gfp_t gfp)
{
- struct se_device *dev = cmd->se_dev;
sense_reason_t ret;
/*
@@ -1102,7 +1513,8 @@ target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
pr_err("Received SCSI CDB with command_size: %d that"
" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
- return TCM_INVALID_CDB_FIELD;
+ ret = TCM_INVALID_CDB_FIELD;
+ goto err;
}
/*
* If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
@@ -1110,42 +1522,47 @@ target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
* setup the pointer from __t_task_cdb to t_task_cdb.
*/
if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
- cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
- GFP_KERNEL);
+ cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), gfp);
if (!cmd->t_task_cdb) {
pr_err("Unable to allocate cmd->t_task_cdb"
" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
scsi_command_size(cdb),
(unsigned long)sizeof(cmd->__t_task_cdb));
- return TCM_OUT_OF_RESOURCES;
+ ret = TCM_OUT_OF_RESOURCES;
+ goto err;
}
- } else
- cmd->t_task_cdb = &cmd->__t_task_cdb[0];
+ }
/*
* Copy the original CDB into cmd->
*/
memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
trace_target_sequencer_start(cmd);
+ return 0;
+err:
/*
- * Check for an existing UNIT ATTENTION condition
+ * Copy the CDB here to allow trace_target_cmd_complete() to
+ * print the cdb to the trace buffers.
*/
- ret = target_scsi3_ua_check(cmd);
- if (ret)
- return ret;
-
- ret = target_alua_state_check(cmd);
- if (ret)
- return ret;
+ memcpy(cmd->t_task_cdb, cdb, min(scsi_command_size(cdb),
+ (unsigned int)TCM_MAX_COMMAND_SIZE));
+ return ret;
+}
+EXPORT_SYMBOL(target_cmd_init_cdb);
- ret = target_check_reservation(cmd);
- if (ret) {
- cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
- return ret;
- }
+sense_reason_t
+target_cmd_parse_cdb(struct se_cmd *cmd)
+{
+ struct se_device *dev = cmd->se_dev;
+ sense_reason_t ret;
ret = dev->transport->parse_cdb(cmd);
+ if (ret == TCM_UNSUPPORTED_SCSI_OPCODE)
+ pr_debug_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n",
+ cmd->se_tfo->fabric_name,
+ cmd->se_sess->se_node_acl->initiatorname,
+ cmd->t_task_cdb[0]);
if (ret)
return ret;
@@ -1154,35 +1571,55 @@ target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
return ret;
cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
-
- spin_lock(&cmd->se_lun->lun_sep_lock);
- if (cmd->se_lun->lun_sep)
- cmd->se_lun->lun_sep->sep_stats.cmd_pdus++;
- spin_unlock(&cmd->se_lun->lun_sep_lock);
+ /*
+ * If this is the xcopy_lun then we won't have lun_stats since we
+ * can't export them.
+ */
+ if (cmd->se_lun->lun_stats)
+ this_cpu_inc(cmd->se_lun->lun_stats->cmd_pdus);
return 0;
}
-EXPORT_SYMBOL(target_setup_cmd_from_cdb);
+EXPORT_SYMBOL(target_cmd_parse_cdb);
-/*
- * Used by fabric module frontends to queue tasks directly.
- * Many only be used from process context only
- */
-int transport_handle_cdb_direct(
- struct se_cmd *cmd)
+static int __target_submit(struct se_cmd *cmd)
{
sense_reason_t ret;
+ might_sleep();
+
+ /*
+ * Check if we need to delay processing because of ALUA
+	 * Active/NonOptimized primary access state.
+ */
+ core_alua_check_nonop_delay(cmd);
+
+ if (cmd->t_data_nents != 0) {
+ /*
+ * This is primarily a hack for udev and tcm loop which sends
+ * INQUIRYs with a single page and expects the data to be
+ * cleared.
+ */
+ if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
+ cmd->data_direction == DMA_FROM_DEVICE) {
+ struct scatterlist *sgl = cmd->t_data_sg;
+ unsigned char *buf = NULL;
+
+ BUG_ON(!sgl);
+
+ buf = kmap_local_page(sg_page(sgl));
+ if (buf) {
+ memset(buf + sgl->offset, 0, sgl->length);
+ kunmap_local(buf);
+ }
+ }
+ }
+
if (!cmd->se_lun) {
dump_stack();
pr_err("cmd->se_lun is NULL\n");
return -EINVAL;
}
- if (in_interrupt()) {
- dump_stack();
- pr_err("transport_generic_handle_cdb cannot be called"
- " from interrupt context\n");
- return -EINVAL;
- }
+
/*
* Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that
* outstanding descriptors are handled correctly during shutdown via
@@ -1204,9 +1641,8 @@ int transport_handle_cdb_direct(
transport_generic_request_failure(cmd, ret);
return 0;
}
-EXPORT_SYMBOL(transport_handle_cdb_direct);
-static sense_reason_t
+sense_reason_t
transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
{
@@ -1226,92 +1662,124 @@ transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
cmd->t_data_sg = sgl;
cmd->t_data_nents = sgl_count;
+ cmd->t_bidi_data_sg = sgl_bidi;
+ cmd->t_bidi_data_nents = sgl_bidi_count;
- if (sgl_bidi && sgl_bidi_count) {
- cmd->t_bidi_data_sg = sgl_bidi;
- cmd->t_bidi_data_nents = sgl_bidi_count;
- }
cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
return 0;
}
-/*
- * target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized
- * se_cmd + use pre-allocated SGL memory.
- *
- * @se_cmd: command descriptor to submit
+/**
+ * target_init_cmd - initialize se_cmd
+ * @se_cmd: command descriptor to init
* @se_sess: associated se_sess for endpoint
- * @cdb: pointer to SCSI CDB
* @sense: pointer to SCSI sense buffer
* @unpacked_lun: unpacked LUN to reference for struct se_lun
* @data_length: fabric expected data transfer length
- * @task_addr: SAM task attribute
+ * @task_attr: SAM task attribute
* @data_dir: DMA data direction
* @flags: flags for command submission from target_sc_flags_tables
- * @sgl: struct scatterlist memory for unidirectional mapping
- * @sgl_count: scatterlist count for unidirectional mapping
- * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping
- * @sgl_bidi_count: scatterlist count for bidirectional READ mapping
*
- * Returns non zero to signal active I/O shutdown failure. All other
- * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
- * but still return zero here.
+ * Task tags are supported if the caller has set @se_cmd->tag.
*
- * This may only be called from process context, and also currently
- * assumes internal allocation of fabric payload buffer by target-core.
+ * Returns:
+ * - less than zero to signal active I/O shutdown failure.
+ * - zero on success.
+ *
+ * If the fabric driver calls target_stop_session, then it must check the
+ * return code and handle failures. This will never fail for other drivers,
+ * and the return code can be ignored.
*/
-int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess,
- unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
- u32 data_length, int task_attr, int data_dir, int flags,
- struct scatterlist *sgl, u32 sgl_count,
- struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
+int target_init_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
+ unsigned char *sense, u64 unpacked_lun,
+ u32 data_length, int task_attr, int data_dir, int flags)
{
struct se_portal_group *se_tpg;
- sense_reason_t rc;
- int ret;
se_tpg = se_sess->se_tpg;
BUG_ON(!se_tpg);
BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
- BUG_ON(in_interrupt());
+
+ if (flags & TARGET_SCF_USE_CPUID)
+ se_cmd->se_cmd_flags |= SCF_USE_CPUID;
+ /*
+ * Signal bidirectional data payloads to target-core
+ */
+ if (flags & TARGET_SCF_BIDI_OP)
+ se_cmd->se_cmd_flags |= SCF_BIDI;
+
+ if (flags & TARGET_SCF_UNKNOWN_SIZE)
+ se_cmd->unknown_data_length = 1;
/*
* Initialize se_cmd for target operation. From this point
* exceptions are handled by sending exception status via
* target_core_fabric_ops->queue_status() callback
*/
- transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
- data_length, data_dir, task_attr, sense);
- if (flags & TARGET_SCF_UNKNOWN_SIZE)
- se_cmd->unknown_data_length = 1;
+ __target_init_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, data_length,
+ data_dir, task_attr, sense, unpacked_lun,
+ se_sess->cmd_cnt);
+
/*
- * Obtain struct se_cmd->cmd_kref reference and add new cmd to
- * se_sess->sess_cmd_list. A second kref_get here is necessary
- * for fabrics using TARGET_SCF_ACK_KREF that expect a second
+ * Obtain struct se_cmd->cmd_kref reference. A second kref_get here is
+ * necessary for fabrics using TARGET_SCF_ACK_KREF that expect a second
* kref_put() to happen during fabric packet acknowledgement.
*/
- ret = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
- if (ret)
- return ret;
+ return target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
+}
+EXPORT_SYMBOL_GPL(target_init_cmd);
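/*
 * Editor's note: per the kernel-doc above, only fabrics that use
 * target_stop_session() can see a non-zero return here. A hedged caller
 * sketch; example_cmd and example_reject_io() are hypothetical.
 */
static void example_queue_io(struct example_cmd *ecmd,
			     struct se_session *se_sess,
			     u64 unpacked_lun, u32 data_length)
{
	if (target_init_cmd(&ecmd->se_cmd, se_sess, ecmd->sense, unpacked_lun,
			    data_length, TCM_SIMPLE_TAG, DMA_FROM_DEVICE,
			    TARGET_SCF_ACK_KREF) < 0) {
		/* Session teardown in progress: fail the I/O on the wire. */
		example_reject_io(ecmd);	/* hypothetical */
	}
}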
+
+/**
+ * target_submit_prep - prepare cmd for submission
+ * @se_cmd: command descriptor to prep
+ * @cdb: pointer to SCSI CDB
+ * @sgl: struct scatterlist memory for unidirectional mapping
+ * @sgl_count: scatterlist count for unidirectional mapping
+ * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping
+ * @sgl_bidi_count: scatterlist count for bidirectional READ mapping
+ * @sgl_prot: struct scatterlist memory protection information
+ * @sgl_prot_count: scatterlist count for protection information
+ * @gfp: gfp allocation type
+ *
+ * Returns:
+ * - less than zero to signal failure.
+ * - zero on success.
+ *
+ * If failure is returned, LIO will use the caller's queue_status to
+ * complete the cmd.
+ */
+int target_submit_prep(struct se_cmd *se_cmd, unsigned char *cdb,
+ struct scatterlist *sgl, u32 sgl_count,
+ struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
+ struct scatterlist *sgl_prot, u32 sgl_prot_count,
+ gfp_t gfp)
+{
+ sense_reason_t rc;
+
+ rc = target_cmd_init_cdb(se_cmd, cdb, gfp);
+ if (rc)
+ goto send_cc_direct;
+
/*
- * Signal bidirectional data payloads to target-core
+ * Locate se_lun pointer and attach it to struct se_cmd
*/
- if (flags & TARGET_SCF_BIDI_OP)
- se_cmd->se_cmd_flags |= SCF_BIDI;
+ rc = transport_lookup_cmd_lun(se_cmd);
+ if (rc)
+ goto send_cc_direct;
+
+ rc = target_cmd_parse_cdb(se_cmd);
+ if (rc != 0)
+ goto generic_fail;
+
/*
- * Locate se_lun pointer and attach it to struct se_cmd
+ * Save pointers for SGLs containing protection information,
+ * if present.
*/
- rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun);
- if (rc) {
- transport_send_check_condition_and_sense(se_cmd, rc, 0);
- target_put_sess_cmd(se_sess, se_cmd);
- return 0;
+ if (sgl_prot_count) {
+ se_cmd->t_prot_sg = sgl_prot;
+ se_cmd->t_prot_nents = sgl_prot_count;
+ se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC;
}
- rc = target_setup_cmd_from_cdb(se_cmd, cdb);
- if (rc != 0) {
- transport_generic_request_failure(se_cmd, rc);
- return 0;
- }
/*
* When a non zero sgl_count has been passed perform SGL passthrough
* mapping for pre-allocated fabric memory instead of having target
@@ -1320,46 +1788,26 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
if (sgl_count != 0) {
BUG_ON(!sgl);
- /*
- * A work-around for tcm_loop as some userspace code via
- * scsi-generic do not memset their associated read buffers,
- * so go ahead and do that here for type non-data CDBs. Also
- * note that this is currently guaranteed to be a single SGL
- * for this case by target core in target_setup_cmd_from_cdb()
- * -> transport_generic_cmd_sequencer().
- */
- if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
- se_cmd->data_direction == DMA_FROM_DEVICE) {
- unsigned char *buf = NULL;
-
- if (sgl)
- buf = kmap(sg_page(sgl)) + sgl->offset;
-
- if (buf) {
- memset(buf, 0, sgl->length);
- kunmap(sg_page(sgl));
- }
- }
-
rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
sgl_bidi, sgl_bidi_count);
- if (rc != 0) {
- transport_generic_request_failure(se_cmd, rc);
- return 0;
- }
+ if (rc != 0)
+ goto generic_fail;
}
- /*
- * Check if we need to delay processing because of ALUA
- * Active/NonOptimized primary access state..
- */
- core_alua_check_nonop_delay(se_cmd);
- transport_handle_cdb_direct(se_cmd);
return 0;
+
+send_cc_direct:
+ transport_send_check_condition_and_sense(se_cmd, rc, 0);
+ target_put_sess_cmd(se_cmd);
+ return -EIO;
+
+generic_fail:
+ transport_generic_request_failure(se_cmd, rc);
+ return -EIO;
}
-EXPORT_SYMBOL(target_submit_cmd_map_sgls);
+EXPORT_SYMBOL_GPL(target_submit_prep);
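/*
 * Editor's note: combined with target_init_cmd() above and target_submit()
 * below, the split path that replaces the removed
 * target_submit_cmd_map_sgls() looks roughly like this sketch. Error
 * handling follows the kernel-doc; the wrapper itself is hypothetical.
 */
static void example_submit_io(struct se_cmd *se_cmd, struct se_session *se_sess,
			      unsigned char *cdb, unsigned char *sense,
			      u64 unpacked_lun, u32 data_length, int data_dir,
			      struct scatterlist *sgl, u32 sgl_count)
{
	if (target_init_cmd(se_cmd, se_sess, sense, unpacked_lun, data_length,
			    TCM_SIMPLE_TAG, data_dir, TARGET_SCF_ACK_KREF) < 0)
		return;		/* session shutdown in progress */

	if (target_submit_prep(se_cmd, cdb, sgl, sgl_count, NULL, 0, NULL, 0,
			       GFP_KERNEL))
		return;		/* LIO already sent CHECK_CONDITION */

	target_submit(se_cmd);
}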
-/*
+/**
* target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
*
* @se_cmd: command descriptor to submit
@@ -1368,29 +1816,143 @@ EXPORT_SYMBOL(target_submit_cmd_map_sgls);
* @sense: pointer to SCSI sense buffer
* @unpacked_lun: unpacked LUN to reference for struct se_lun
* @data_length: fabric expected data transfer length
- * @task_addr: SAM task attribute
+ * @task_attr: SAM task attribute
* @data_dir: DMA data direction
* @flags: flags for command submission from target_sc_flags_tables
*
- * Returns non zero to signal active I/O shutdown failure. All other
- * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
- * but still return zero here.
+ * Task tags are supported if the caller has set @se_cmd->tag.
*
* This may only be called from process context, and also currently
* assumes internal allocation of fabric payload buffer by target-core.
*
 * It also assumes internal target core SGL memory allocation.
+ *
+ * This function must only be used by drivers that do their own
+ * sync during shutdown and do not use target_stop_session. If there
+ * is a failure this function will call into the fabric driver's
+ * queue_status with a CHECK_CONDITION.
*/
-int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
- unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
+void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
+ unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
u32 data_length, int task_attr, int data_dir, int flags)
{
- return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense,
- unpacked_lun, data_length, task_attr, data_dir,
- flags, NULL, 0, NULL, 0);
+ int rc;
+
+ rc = target_init_cmd(se_cmd, se_sess, sense, unpacked_lun, data_length,
+ task_attr, data_dir, flags);
+ WARN(rc, "Invalid target_submit_cmd use. Driver must not use target_stop_session or call target_init_cmd directly.\n");
+ if (rc)
+ return;
+
+ if (target_submit_prep(se_cmd, cdb, NULL, 0, NULL, 0, NULL, 0,
+ GFP_KERNEL))
+ return;
+
+ target_submit(se_cmd);
}
EXPORT_SYMBOL(target_submit_cmd);
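/*
 * Editor's note: a hedged example of the legacy one-call path for drivers
 * that satisfy the constraints above (process context, core-allocated
 * buffers, no target_stop_session()); scsi_tag is hypothetical.
 */
static void example_legacy_submit(struct se_cmd *se_cmd,
				  struct se_session *se_sess,
				  unsigned char *cdb, unsigned char *sense,
				  u64 unpacked_lun, u32 data_length,
				  u64 scsi_tag)
{
	se_cmd->tag = scsi_tag;		/* caller-owned, preserved by init */
	target_submit_cmd(se_cmd, se_sess, cdb, sense, unpacked_lun,
			  data_length, TCM_SIMPLE_TAG, DMA_FROM_DEVICE,
			  TARGET_SCF_ACK_KREF);
}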
+
+static struct se_dev_plug *target_plug_device(struct se_device *se_dev)
+{
+ struct se_dev_plug *se_plug;
+
+ if (!se_dev->transport->plug_device)
+ return NULL;
+
+ se_plug = se_dev->transport->plug_device(se_dev);
+ if (!se_plug)
+ return NULL;
+
+ se_plug->se_dev = se_dev;
+ /*
+ * We have a ref to the lun at this point, but the cmds could
+ * complete before we unplug, so grab a ref to the se_device so we
+ * can call back into the backend.
+ */
+ config_group_get(&se_dev->dev_group);
+ return se_plug;
+}
+
+static void target_unplug_device(struct se_dev_plug *se_plug)
+{
+ struct se_device *se_dev = se_plug->se_dev;
+
+ se_dev->transport->unplug_device(se_plug);
+ config_group_put(&se_dev->dev_group);
+}
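/*
 * Editor's note: plug_device()/unplug_device() are optional backend
 * batching hooks, bracketing the submissions in target_queued_submit_work()
 * below. A hedged sketch of a backend pairing them around a doorbell write;
 * everything except struct se_dev_plug and struct se_device is hypothetical.
 */
static struct se_dev_plug *example_plug_device(struct se_device *se_dev)
{
	struct example_dev *edev = EXAMPLE_DEV(se_dev);	/* hypothetical */

	if (test_and_set_bit(EXAMPLE_PLUGGED, &edev->flags))
		return NULL;		/* already plugged */
	return &edev->se_plug;
}

static void example_unplug_device(struct se_dev_plug *se_plug)
{
	struct example_dev *edev =
		container_of(se_plug, struct example_dev, se_plug);

	example_ring_doorbell(edev);	/* hypothetical: submit the batch */
	clear_bit(EXAMPLE_PLUGGED, &edev->flags);
}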
+
+void target_queued_submit_work(struct work_struct *work)
+{
+ struct se_cmd_queue *sq = container_of(work, struct se_cmd_queue, work);
+ struct se_cmd *se_cmd, *next_cmd;
+ struct se_dev_plug *se_plug = NULL;
+ struct se_device *se_dev = NULL;
+ struct llist_node *cmd_list;
+
+ cmd_list = llist_del_all(&sq->cmd_list);
+ if (!cmd_list)
+ /* Previous call took what we were queued to submit */
+ return;
+
+ cmd_list = llist_reverse_order(cmd_list);
+ llist_for_each_entry_safe(se_cmd, next_cmd, cmd_list, se_cmd_list) {
+ if (!se_dev) {
+ se_dev = se_cmd->se_dev;
+ se_plug = target_plug_device(se_dev);
+ }
+
+ __target_submit(se_cmd);
+ }
+
+ if (se_plug)
+ target_unplug_device(se_plug);
+}
+
+/**
+ * target_queue_submission - queue the cmd to run on the LIO workqueue
+ * @se_cmd: command descriptor to submit
+ */
+static void target_queue_submission(struct se_cmd *se_cmd)
+{
+ struct se_device *se_dev = se_cmd->se_dev;
+ int cpu = se_cmd->cpuid;
+ struct se_cmd_queue *sq;
+
+ sq = &se_dev->queues[cpu].sq;
+ llist_add(&se_cmd->se_cmd_list, &sq->cmd_list);
+ queue_work_on(cpu, target_submission_wq, &sq->work);
+}
+
+/**
+ * target_submit - perform final initialization and submit cmd to LIO core
+ * @se_cmd: command descriptor to submit
+ *
+ * target_submit_prep or something similar must have been called on the cmd,
+ * and this must be called from process context.
+ */
+int target_submit(struct se_cmd *se_cmd)
+{
+ const struct target_core_fabric_ops *tfo = se_cmd->se_sess->se_tpg->se_tpg_tfo;
+ struct se_dev_attrib *da = &se_cmd->se_dev->dev_attrib;
+ u8 submit_type;
+
+ if (da->submit_type == TARGET_FABRIC_DEFAULT_SUBMIT)
+ submit_type = tfo->default_submit_type;
+ else if (da->submit_type == TARGET_DIRECT_SUBMIT &&
+ tfo->direct_submit_supp)
+ submit_type = TARGET_DIRECT_SUBMIT;
+ else
+ submit_type = TARGET_QUEUE_SUBMIT;
+
+ if (submit_type == TARGET_DIRECT_SUBMIT)
+ return __target_submit(se_cmd);
+
+ target_queue_submission(se_cmd);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(target_submit);
+
static void target_complete_tmr_failure(struct work_struct *work)
{
struct se_cmd *se_cmd = container_of(work, struct se_cmd, work);
@@ -1398,6 +1960,7 @@ static void target_complete_tmr_failure(struct work_struct *work)
se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
se_cmd->se_tfo->queue_tm_rsp(se_cmd);
+ transport_lun_remove_cmd(se_cmd);
transport_cmd_check_stop_to_fabric(se_cmd);
}
@@ -1409,7 +1972,7 @@ static void target_complete_tmr_failure(struct work_struct *work)
* @se_sess: associated se_sess for endpoint
* @sense: pointer to SCSI sense buffer
* @unpacked_lun: unpacked LUN to reference for struct se_lun
- * @fabric_context: fabric context for TMR req
+ * @fabric_tmr_ptr: fabric context for TMR req
* @tm_type: Type of TM request
* @gfp: gfp type for caller
* @tag: referenced task tag for TMR_ABORT_TASK
@@ -1419,9 +1982,9 @@ static void target_complete_tmr_failure(struct work_struct *work)
**/
int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
- unsigned char *sense, u32 unpacked_lun,
+ unsigned char *sense, u64 unpacked_lun,
void *fabric_tmr_ptr, unsigned char tm_type,
- gfp_t gfp, unsigned int tag, int flags)
+ gfp_t gfp, u64 tag, int flags)
{
struct se_portal_group *se_tpg;
int ret;
@@ -1429,8 +1992,9 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
se_tpg = se_sess->se_tpg;
BUG_ON(!se_tpg);
- transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
- 0, DMA_NONE, MSG_SIMPLE_TAG, sense);
+ __target_init_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
+ 0, DMA_NONE, TCM_SIMPLE_TAG, sense, unpacked_lun,
+ se_sess->cmd_cnt);
/*
* FIXME: Currently expect caller to handle se_cmd->se_tmr_req
* allocation failure.
@@ -1443,51 +2007,29 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
se_cmd->se_tmr_req->ref_task_tag = tag;
/* See target_submit_cmd for commentary */
- ret = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
+ ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
if (ret) {
core_tmr_release_req(se_cmd->se_tmr_req);
return ret;
}
- ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun);
- if (ret) {
- /*
- * For callback during failure handling, push this work off
- * to process context with TMR_LUN_DOES_NOT_EXIST status.
- */
- INIT_WORK(&se_cmd->work, target_complete_tmr_failure);
- schedule_work(&se_cmd->work);
- return 0;
- }
+ ret = transport_lookup_tmr_lun(se_cmd);
+ if (ret)
+ goto failure;
+
transport_generic_handle_tmr(se_cmd);
return 0;
-}
-EXPORT_SYMBOL(target_submit_tmr);
-
-/*
- * If the cmd is active, request it to be stopped and sleep until it
- * has completed.
- */
-bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags)
-{
- bool was_active = false;
-
- if (cmd->transport_state & CMD_T_BUSY) {
- cmd->transport_state |= CMD_T_REQUEST_STOP;
- spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
-
- pr_debug("cmd %p waiting to complete\n", cmd);
- wait_for_completion(&cmd->task_stop_comp);
- pr_debug("cmd %p stopped successfully\n", cmd);
- spin_lock_irqsave(&cmd->t_state_lock, *flags);
- cmd->transport_state &= ~CMD_T_REQUEST_STOP;
- cmd->transport_state &= ~CMD_T_BUSY;
- was_active = true;
- }
-
- return was_active;
+ /*
+ * For callback during failure handling, push this work off
+ * to process context with TMR_LUN_DOES_NOT_EXIST status.
+ */
+failure:
+ INIT_WORK(&se_cmd->work, target_complete_tmr_failure);
+ schedule_work(&se_cmd->work);
+ return 0;
}
+EXPORT_SYMBOL(target_submit_tmr);
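/*
 * Editor's note: a hedged sketch of an ABORT_TASK submission using the
 * widened u64 LUN and tag types; the fabric descriptor and failure helper
 * are hypothetical.
 */
static void example_send_abort(struct example_cmd *ecmd,
			       struct se_session *se_sess, u64 unpacked_lun,
			       u64 referenced_tag, void *tmr_ctx)
{
	if (target_submit_tmr(&ecmd->se_cmd, se_sess, ecmd->sense,
			      unpacked_lun, tmr_ctx, TMR_ABORT_TASK,
			      GFP_KERNEL, referenced_tag,
			      TARGET_SCF_ACK_KREF) < 0)
		example_fail_tmr(ecmd);	/* hypothetical: active shutdown */
}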
/*
* Handle SAM-esque emulation for generic transport request failures.
@@ -1495,24 +2037,26 @@ bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags)
void transport_generic_request_failure(struct se_cmd *cmd,
sense_reason_t sense_reason)
{
- int ret = 0;
+ int ret = 0, post_ret;
- pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
- " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
- cmd->t_task_cdb[0]);
- pr_debug("-----[ i_state: %d t_state: %d sense_reason: %d\n",
- cmd->se_tfo->get_cmd_state(cmd),
- cmd->t_state, sense_reason);
- pr_debug("-----[ CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
- (cmd->transport_state & CMD_T_ACTIVE) != 0,
- (cmd->transport_state & CMD_T_STOP) != 0,
- (cmd->transport_state & CMD_T_SENT) != 0);
+ pr_debug("-----[ Storage Engine Exception; sense_reason %d\n",
+ sense_reason);
+ target_show_cmd("-----[ ", cmd);
/*
* For SAM Task Attribute emulation for failed struct se_cmd
*/
transport_complete_task_attr(cmd);
+ if (cmd->transport_complete_callback)
+ cmd->transport_complete_callback(cmd, false, &post_ret);
+
+ if (cmd->transport_state & CMD_T_ABORTED) {
+ INIT_WORK(&cmd->work, target_abort_work);
+ queue_work(target_completion_wq, &cmd->work);
+ return;
+ }
+
switch (sense_reason) {
case TCM_NON_EXISTENT_LUN:
case TCM_UNSUPPORTED_SCSI_OPCODE:
@@ -1525,11 +2069,26 @@ void transport_generic_request_failure(struct se_cmd *cmd,
case TCM_ADDRESS_OUT_OF_RANGE:
case TCM_CHECK_CONDITION_ABORT_CMD:
case TCM_CHECK_CONDITION_UNIT_ATTENTION:
- case TCM_CHECK_CONDITION_NOT_READY:
+ case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
+ case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
+ case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
+ case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE:
+ case TCM_TOO_MANY_TARGET_DESCS:
+ case TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE:
+ case TCM_TOO_MANY_SEGMENT_DESCS:
+ case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE:
+ case TCM_INVALID_FIELD_IN_COMMAND_IU:
+ case TCM_ALUA_TG_PT_STANDBY:
+ case TCM_ALUA_TG_PT_UNAVAILABLE:
+ case TCM_ALUA_STATE_TRANSITION:
+ case TCM_ALUA_OFFLINE:
break;
case TCM_OUT_OF_RESOURCES:
- sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- break;
+ cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
+ goto queue_status;
+ case TCM_LUN_BUSY:
+ cmd->scsi_status = SAM_STAT_BUSY;
+ goto queue_status;
case TCM_RESERVATION_CONFLICT:
/*
* No SENSE Data payload for this case, set SCSI Status
@@ -1546,16 +2105,14 @@ void transport_generic_request_failure(struct se_cmd *cmd,
* See spc4r17, section 7.4.6 Control Mode Page, Table 349
*/
if (cmd->se_sess &&
- cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2)
- core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
- cmd->orig_fe_lun, 0x2C,
- ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
+ cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl
+ == TARGET_UA_INTLCK_CTRL_ESTABLISH_UA) {
+ target_ua_allocate_lun(cmd->se_sess->se_node_acl,
+ cmd->orig_fe_lun, 0x2C,
+ ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
+ }
- trace_target_cmd_complete(cmd);
- ret = cmd->se_tfo-> queue_status(cmd);
- if (ret == -EAGAIN || ret == -ENOMEM)
- goto queue_full;
- goto check_stop;
+ goto queue_status;
default:
pr_err("Unknown transport error for CDB 0x%02x: %d\n",
cmd->t_task_cdb[0], sense_reason);
@@ -1564,142 +2121,185 @@ void transport_generic_request_failure(struct se_cmd *cmd,
}
ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0);
- if (ret == -EAGAIN || ret == -ENOMEM)
+ if (ret)
goto queue_full;
check_stop:
transport_lun_remove_cmd(cmd);
- if (!transport_cmd_check_stop_to_fabric(cmd))
- ;
+ transport_cmd_check_stop_to_fabric(cmd);
return;
+queue_status:
+ trace_target_cmd_complete(cmd);
+ ret = cmd->se_tfo->queue_status(cmd);
+ if (!ret)
+ goto check_stop;
queue_full:
- cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
- transport_handle_queue_full(cmd, cmd->se_dev);
+ transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
}
EXPORT_SYMBOL(transport_generic_request_failure);
-static void __target_execute_cmd(struct se_cmd *cmd)
+void __target_execute_cmd(struct se_cmd *cmd, bool do_checks)
{
sense_reason_t ret;
- if (cmd->execute_cmd) {
- ret = cmd->execute_cmd(cmd);
+ if (!cmd->execute_cmd) {
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ goto err;
+ }
+ if (do_checks) {
+ /*
+ * Check for an existing UNIT ATTENTION condition after
+ * target_handle_task_attr() has done SAM task attr
+	 * checking, and possibly have already deferred execution
+	 * out to target_do_delayed_work() context.
+ */
+ ret = target_scsi3_ua_check(cmd);
+ if (ret)
+ goto err;
+
+ ret = target_alua_state_check(cmd);
+ if (ret)
+ goto err;
+
+ ret = target_check_reservation(cmd);
if (ret) {
+ cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
+ goto err;
+ }
+ }
+
+ ret = cmd->execute_cmd(cmd);
+ if (!ret)
+ return;
+err:
+ spin_lock_irq(&cmd->t_state_lock);
+ cmd->transport_state &= ~CMD_T_SENT;
+ spin_unlock_irq(&cmd->t_state_lock);
+
+ transport_generic_request_failure(cmd, ret);
+}
+
+static int target_write_prot_action(struct se_cmd *cmd)
+{
+ u32 sectors;
+ /*
+ * Perform WRITE_INSERT of PI using software emulation when backend
+ * device has PI enabled, if the transport has not already generated
+ * PI using hardware WRITE_INSERT offload.
+ */
+ switch (cmd->prot_op) {
+ case TARGET_PROT_DOUT_INSERT:
+ if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT))
+ sbc_dif_generate(cmd);
+ break;
+ case TARGET_PROT_DOUT_STRIP:
+ if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_STRIP)
+ break;
+
+ sectors = cmd->data_length >> ilog2(cmd->se_dev->dev_attrib.block_size);
+ cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
+ sectors, 0, cmd->t_prot_sg, 0);
+ if (unlikely(cmd->pi_err)) {
spin_lock_irq(&cmd->t_state_lock);
- cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
+ cmd->transport_state &= ~CMD_T_SENT;
spin_unlock_irq(&cmd->t_state_lock);
-
- transport_generic_request_failure(cmd, ret);
+ transport_generic_request_failure(cmd, cmd->pi_err);
+ return -1;
}
+ break;
+ default:
+ break;
}
+
+ return 0;
}
static bool target_handle_task_attr(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
+ unsigned long flags;
- if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+ if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
return false;
+ cmd->se_cmd_flags |= SCF_TASK_ATTR_SET;
+
/*
* Check for the existence of HEAD_OF_QUEUE, and if true return 1
 * to move the passed struct se_cmd to the front of the list.
*/
switch (cmd->sam_task_attr) {
- case MSG_HEAD_TAG:
- pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x, "
- "se_ordered_id: %u\n",
- cmd->t_task_cdb[0], cmd->se_ordered_id);
+ case TCM_HEAD_TAG:
+ pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n",
+ cmd->t_task_cdb[0]);
return false;
- case MSG_ORDERED_TAG:
- atomic_inc(&dev->dev_ordered_sync);
- smp_mb__after_atomic_inc();
-
- pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, "
- " se_ordered_id: %u\n",
- cmd->t_task_cdb[0], cmd->se_ordered_id);
-
- /*
- * Execute an ORDERED command if no other older commands
- * exist that need to be completed first.
- */
- if (!atomic_read(&dev->simple_cmds))
- return false;
+ case TCM_ORDERED_TAG:
+ pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n",
+ cmd->t_task_cdb[0]);
break;
default:
/*
* For SIMPLE and UNTAGGED Task Attribute commands
*/
- atomic_inc(&dev->simple_cmds);
- smp_mb__after_atomic_inc();
+retry:
+ if (percpu_ref_tryget_live(&dev->non_ordered))
+ return false;
+
break;
}
- if (atomic_read(&dev->dev_ordered_sync) == 0)
- return false;
+ spin_lock_irqsave(&dev->delayed_cmd_lock, flags);
+ if (cmd->sam_task_attr == TCM_SIMPLE_TAG &&
+ !percpu_ref_is_dying(&dev->non_ordered)) {
+ spin_unlock_irqrestore(&dev->delayed_cmd_lock, flags);
+ /* We raced with the last ordered completion so retry. */
+ goto retry;
+ } else if (!percpu_ref_is_dying(&dev->non_ordered)) {
+ percpu_ref_kill(&dev->non_ordered);
+ }
+
+ spin_lock(&cmd->t_state_lock);
+ cmd->transport_state &= ~CMD_T_SENT;
+ spin_unlock(&cmd->t_state_lock);
- spin_lock(&dev->delayed_cmd_lock);
list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
- spin_unlock(&dev->delayed_cmd_lock);
+ spin_unlock_irqrestore(&dev->delayed_cmd_lock, flags);
- pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
- " delayed CMD list, se_ordered_id: %u\n",
- cmd->t_task_cdb[0], cmd->sam_task_attr,
- cmd->se_ordered_id);
+ pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD listn",
+ cmd->t_task_cdb[0], cmd->sam_task_attr);
+ /*
+ * We may have no non ordered cmds when this function started or we
+ * could have raced with the last simple/head cmd completing, so kick
+ * the delayed handler here.
+ */
+ schedule_work(&dev->delayed_cmd_work);
return true;
}
void target_execute_cmd(struct se_cmd *cmd)
{
/*
- * If the received CDB has aleady been aborted stop processing it here.
- */
- if (transport_check_aborted_status(cmd, 1)) {
- complete(&cmd->transport_lun_stop_comp);
- return;
- }
-
- /*
- * Determine if IOCTL context caller in requesting the stopping of this
- * command for LUN shutdown purposes.
- */
- spin_lock_irq(&cmd->t_state_lock);
- if (cmd->transport_state & CMD_T_LUN_STOP) {
- pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
- __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));
-
- cmd->transport_state &= ~CMD_T_ACTIVE;
- spin_unlock_irq(&cmd->t_state_lock);
- complete(&cmd->transport_lun_stop_comp);
- return;
- }
- /*
* Determine if frontend context caller is requesting the stopping of
* this command for frontend exceptions.
+ *
+ * If the received CDB has already been aborted stop processing it here.
*/
- if (cmd->transport_state & CMD_T_STOP) {
- pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
- __func__, __LINE__,
- cmd->se_tfo->get_task_tag(cmd));
-
- spin_unlock_irq(&cmd->t_state_lock);
- complete(&cmd->t_transport_stop_comp);
+ if (target_cmd_interrupted(cmd))
return;
- }
+ spin_lock_irq(&cmd->t_state_lock);
cmd->t_state = TRANSPORT_PROCESSING;
- cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
+ cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT;
spin_unlock_irq(&cmd->t_state_lock);
- if (target_handle_task_attr(cmd)) {
- spin_lock_irq(&cmd->t_state_lock);
- cmd->transport_state &= ~CMD_T_BUSY|CMD_T_SENT;
- spin_unlock_irq(&cmd->t_state_lock);
+ if (target_write_prot_action(cmd))
+ return;
+
+ if (target_handle_task_attr(cmd))
return;
- }
- __target_execute_cmd(cmd);
+ __target_execute_cmd(cmd, true);
}
EXPORT_SYMBOL(target_execute_cmd);
@@ -1707,27 +2307,59 @@ EXPORT_SYMBOL(target_execute_cmd);
* Process all commands up to the last received ORDERED task attribute which
* requires another blocking boundary
*/
-static void target_restart_delayed_cmds(struct se_device *dev)
+void target_do_delayed_work(struct work_struct *work)
{
- for (;;) {
+ struct se_device *dev = container_of(work, struct se_device,
+ delayed_cmd_work);
+
+ spin_lock(&dev->delayed_cmd_lock);
+ while (!dev->ordered_sync_in_progress) {
struct se_cmd *cmd;
- spin_lock(&dev->delayed_cmd_lock);
- if (list_empty(&dev->delayed_cmd_list)) {
- spin_unlock(&dev->delayed_cmd_lock);
+ /*
+ * We can be woken up early/late due to races or the
+ * extra wake up we do when adding commands to the list.
+ * We check for both cases here.
+ */
+ if (list_empty(&dev->delayed_cmd_list) ||
+ !percpu_ref_is_zero(&dev->non_ordered))
break;
- }
cmd = list_entry(dev->delayed_cmd_list.next,
struct se_cmd, se_delayed_node);
+ cmd->se_cmd_flags |= SCF_TASK_ORDERED_SYNC;
+ cmd->transport_state |= CMD_T_SENT;
+
+ dev->ordered_sync_in_progress = true;
+
list_del(&cmd->se_delayed_node);
spin_unlock(&dev->delayed_cmd_lock);
- __target_execute_cmd(cmd);
-
- if (cmd->sam_task_attr == MSG_ORDERED_TAG)
- break;
+ __target_execute_cmd(cmd, true);
+ spin_lock(&dev->delayed_cmd_lock);
}
+ spin_unlock(&dev->delayed_cmd_lock);
+}
+
+static void transport_complete_ordered_sync(struct se_cmd *cmd)
+{
+ struct se_device *dev = cmd->se_dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->delayed_cmd_lock, flags);
+ dev->dev_cur_ordered_id++;
+
+ pr_debug("Incremented dev_cur_ordered_id: %u for type %d\n",
+ dev->dev_cur_ordered_id, cmd->sam_task_attr);
+
+ dev->ordered_sync_in_progress = false;
+
+ if (list_empty(&dev->delayed_cmd_list))
+ percpu_ref_resurrect(&dev->non_ordered);
+ else
+ schedule_work(&dev->delayed_cmd_work);
+
+ spin_unlock_irqrestore(&dev->delayed_cmd_lock, flags);
}
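/*
 * Editor's note: dev->non_ordered acts as a gate: live while SIMPLE/HEAD
 * commands may run, killed while an ORDERED command drains, and resurrected
 * above once the delayed list empties. A generic sketch of that percpu_ref
 * pattern, with hypothetical names (gate, run_now, queue_delayed):
 */
	/* fast path: no ORDERED command pending */
	if (percpu_ref_tryget_live(&gate))
		run_now(cmd);			/* hypothetical */
	else
		queue_delayed(cmd);		/* hypothetical slow path */

	/* an ORDERED command arrives: close the fast path */
	percpu_ref_kill(&gate);
	/* once percpu_ref_is_zero(&gate), drain the list, then reopen: */
	percpu_ref_resurrect(&gate);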
/*
@@ -1738,31 +2370,28 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
- if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+ if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
return;
- if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
- atomic_dec(&dev->simple_cmds);
- smp_mb__after_atomic_dec();
- dev->dev_cur_ordered_id++;
- pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
- " SIMPLE: %u\n", dev->dev_cur_ordered_id,
- cmd->se_ordered_id);
- } else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
- dev->dev_cur_ordered_id++;
- pr_debug("Incremented dev_cur_ordered_id: %u for"
- " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
- cmd->se_ordered_id);
- } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
- atomic_dec(&dev->dev_ordered_sync);
- smp_mb__after_atomic_dec();
+ if (!(cmd->se_cmd_flags & SCF_TASK_ATTR_SET))
+ return;
+
+ cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET;
- dev->dev_cur_ordered_id++;
- pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
- " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
+ if (cmd->se_cmd_flags & SCF_TASK_ORDERED_SYNC) {
+ transport_complete_ordered_sync(cmd);
+ return;
}
- target_restart_delayed_cmds(dev);
+ switch (cmd->sam_task_attr) {
+ case TCM_SIMPLE_TAG:
+ percpu_ref_put(&dev->non_ordered);
+ break;
+ case TCM_ORDERED_TAG:
+ /* All ordered should have been executed as sync */
+ WARN_ON(1);
+ break;
+ }
}
static void transport_complete_qf(struct se_cmd *cmd)
@@ -1770,27 +2399,54 @@ static void transport_complete_qf(struct se_cmd *cmd)
int ret = 0;
transport_complete_task_attr(cmd);
+ /*
+ * If a fabric driver ->write_pending() or ->queue_data_in() callback
+	 * has returned neither -ENOMEM nor -EAGAIN, assume it's fatal and
+	 * the same callbacks should not be retried. Return CHECK_CONDITION
+	 * if a scsi_status is not already set.
+	 *
+	 * If a fabric driver ->queue_status() has returned non-zero, always
+	 * keep retrying no matter what.
+ */
+ if (cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) {
+ if (cmd->scsi_status)
+ goto queue_status;
- if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
- trace_target_cmd_complete(cmd);
- ret = cmd->se_tfo->queue_status(cmd);
- if (ret)
- goto out;
+ translate_sense_reason(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
+ goto queue_status;
}
+ /*
+ * Check if we need to send a sense buffer from
+ * the struct se_cmd in question. We do NOT want
+	 * to take this path if the IO has been marked as
+ * needing to be treated like a "normal read". This
+ * is the case if it's a tape read, and either the
+ * FM, EOM, or ILI bits are set, but there is no
+ * sense data.
+ */
+ if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) &&
+ cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
+ goto queue_status;
+
switch (cmd->data_direction) {
case DMA_FROM_DEVICE:
+ /* queue status if not treating this as a normal read */
+ if (cmd->scsi_status &&
+ !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL))
+ goto queue_status;
+
trace_target_cmd_complete(cmd);
ret = cmd->se_tfo->queue_data_in(cmd);
break;
case DMA_TO_DEVICE:
- if (cmd->t_bidi_data_sg) {
+ if (cmd->se_cmd_flags & SCF_BIDI) {
ret = cmd->se_tfo->queue_data_in(cmd);
- if (ret < 0)
- break;
+ break;
}
- /* Fall through for DMA_TO_DEVICE */
+ fallthrough;
case DMA_NONE:
+queue_status:
trace_target_cmd_complete(cmd);
ret = cmd->se_tfo->queue_status(cmd);
break;
@@ -1798,28 +2454,69 @@ static void transport_complete_qf(struct se_cmd *cmd)
break;
}
-out:
if (ret < 0) {
- transport_handle_queue_full(cmd, cmd->se_dev);
+ transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
return;
}
transport_lun_remove_cmd(cmd);
transport_cmd_check_stop_to_fabric(cmd);
}
-static void transport_handle_queue_full(
- struct se_cmd *cmd,
- struct se_device *dev)
+static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev,
+ int err, bool write_pending)
{
+ /*
+ * -EAGAIN or -ENOMEM signals retry of ->write_pending() and/or
+ * ->queue_data_in() callbacks from new process context.
+ *
+ * Otherwise for other errors, transport_complete_qf() will send
+ * CHECK_CONDITION via ->queue_status() instead of attempting to
+ * retry associated fabric driver data-transfer callbacks.
+ */
+ if (err == -EAGAIN || err == -ENOMEM) {
+ cmd->t_state = (write_pending) ? TRANSPORT_COMPLETE_QF_WP :
+ TRANSPORT_COMPLETE_QF_OK;
+ } else {
+ pr_warn_ratelimited("Got unknown fabric queue status: %d\n", err);
+ cmd->t_state = TRANSPORT_COMPLETE_QF_ERR;
+ }
+
spin_lock_irq(&dev->qf_cmd_lock);
list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
- atomic_inc(&dev->dev_qf_count);
- smp_mb__after_atomic_inc();
+ atomic_inc_mb(&dev->dev_qf_count);
spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);
schedule_work(&cmd->se_dev->qf_work_queue);
}
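/*
 * Editor's note: from the fabric side, -EAGAIN/-ENOMEM from
 * ->write_pending() or ->queue_data_in() now request a retry via the qf
 * worker, while any other non-zero value escalates to CHECK_CONDITION.
 * A hedged sketch; example_hw_queue_space()/example_post_r2t() are
 * hypothetical.
 */
static int example_write_pending(struct se_cmd *se_cmd)
{
	if (!example_hw_queue_space(se_cmd))
		return -EAGAIN;	/* requeued as TRANSPORT_COMPLETE_QF_WP */

	return example_post_r2t(se_cmd);	/* 0 on success */
}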
+static bool target_read_prot_action(struct se_cmd *cmd)
+{
+ switch (cmd->prot_op) {
+ case TARGET_PROT_DIN_STRIP:
+ if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) {
+ u32 sectors = cmd->data_length >>
+ ilog2(cmd->se_dev->dev_attrib.block_size);
+
+ cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
+ sectors, 0, cmd->t_prot_sg,
+ 0);
+ if (cmd->pi_err)
+ return true;
+ }
+ break;
+ case TARGET_PROT_DIN_INSERT:
+ if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_INSERT)
+ break;
+
+ sbc_dif_generate(cmd);
+ break;
+ default:
+ break;
+ }
+
+ return false;
+}
+
static void target_complete_ok_work(struct work_struct *work)
{
struct se_cmd *cmd = container_of(work, struct se_cmd, work);
@@ -1841,13 +2538,19 @@ static void target_complete_ok_work(struct work_struct *work)
/*
* Check if we need to send a sense buffer from
- * the struct se_cmd in question.
+ * the struct se_cmd in question. We do NOT want
+	 * to take this path if the IO has been marked as
+ * needing to be treated like a "normal read". This
+ * is the case if it's a tape read, and either the
+ * FM, EOM, or ILI bits are set, but there is no
+ * sense data.
*/
- if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
+ if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) &&
+ cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
WARN_ON(!cmd->scsi_status);
ret = transport_send_check_condition_and_sense(
cmd, 0, 1);
- if (ret == -EAGAIN || ret == -ENOMEM)
+ if (ret)
goto queue_full;
transport_lun_remove_cmd(cmd);
@@ -1856,52 +2559,95 @@ static void target_complete_ok_work(struct work_struct *work)
}
/*
* Check for a callback, used by amongst other things
- * XDWRITE_READ_10 emulation.
+ * XDWRITE_READ_10 and COMPARE_AND_WRITE emulation.
*/
- if (cmd->transport_complete_callback)
- cmd->transport_complete_callback(cmd);
+ if (cmd->transport_complete_callback) {
+ sense_reason_t rc;
+ bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE);
+ bool zero_dl = !(cmd->data_length);
+ int post_ret = 0;
+
+ rc = cmd->transport_complete_callback(cmd, true, &post_ret);
+ if (!rc && !post_ret) {
+ if (caw && zero_dl)
+ goto queue_rsp;
+
+ return;
+ } else if (rc) {
+ ret = transport_send_check_condition_and_sense(cmd,
+ rc, 0);
+ if (ret)
+ goto queue_full;
+
+ transport_lun_remove_cmd(cmd);
+ transport_cmd_check_stop_to_fabric(cmd);
+ return;
+ }
+ }
+queue_rsp:
switch (cmd->data_direction) {
case DMA_FROM_DEVICE:
- spin_lock(&cmd->se_lun->lun_sep_lock);
- if (cmd->se_lun->lun_sep) {
- cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
- cmd->data_length;
+ /*
+ * if this is a READ-type IO, but SCSI status
+ * is set, then skip returning data and just
+ * return the status -- unless this IO is marked
+ * as needing to be treated as a normal read,
+ * in which case we want to go ahead and return
+ * the data. This happens, for example, for tape
+ * reads with the FM, EOM, or ILI bits set, with
+ * no sense data.
+ */
+ if (cmd->scsi_status &&
+ !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL))
+ goto queue_status;
+
+ if (cmd->se_lun->lun_stats)
+ this_cpu_add(cmd->se_lun->lun_stats->tx_data_octets,
+ cmd->data_length);
+ /*
+ * Perform READ_STRIP of PI using software emulation when
+ * backend had PI enabled, if the transport will not be
+ * performing hardware READ_STRIP offload.
+ */
+ if (target_read_prot_action(cmd)) {
+ ret = transport_send_check_condition_and_sense(cmd,
+ cmd->pi_err, 0);
+ if (ret)
+ goto queue_full;
+
+ transport_lun_remove_cmd(cmd);
+ transport_cmd_check_stop_to_fabric(cmd);
+ return;
}
- spin_unlock(&cmd->se_lun->lun_sep_lock);
trace_target_cmd_complete(cmd);
ret = cmd->se_tfo->queue_data_in(cmd);
- if (ret == -EAGAIN || ret == -ENOMEM)
+ if (ret)
goto queue_full;
break;
case DMA_TO_DEVICE:
- spin_lock(&cmd->se_lun->lun_sep_lock);
- if (cmd->se_lun->lun_sep) {
- cmd->se_lun->lun_sep->sep_stats.rx_data_octets +=
- cmd->data_length;
- }
- spin_unlock(&cmd->se_lun->lun_sep_lock);
+ if (cmd->se_lun->lun_stats)
+ this_cpu_add(cmd->se_lun->lun_stats->rx_data_octets,
+ cmd->data_length);
/*
* Check if we need to send READ payload for BIDI-COMMAND
*/
- if (cmd->t_bidi_data_sg) {
- spin_lock(&cmd->se_lun->lun_sep_lock);
- if (cmd->se_lun->lun_sep) {
- cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
- cmd->data_length;
- }
- spin_unlock(&cmd->se_lun->lun_sep_lock);
+ if (cmd->se_cmd_flags & SCF_BIDI) {
+ if (cmd->se_lun->lun_stats)
+ this_cpu_add(cmd->se_lun->lun_stats->tx_data_octets,
+ cmd->data_length);
ret = cmd->se_tfo->queue_data_in(cmd);
- if (ret == -EAGAIN || ret == -ENOMEM)
+ if (ret)
goto queue_full;
break;
}
- /* Fall through for DMA_TO_DEVICE */
+ fallthrough;
case DMA_NONE:
+queue_status:
trace_target_cmd_complete(cmd);
ret = cmd->se_tfo->queue_status(cmd);
- if (ret == -EAGAIN || ret == -ENOMEM)
+ if (ret)
goto queue_full;
break;
default:
@@ -1915,69 +2661,65 @@ static void target_complete_ok_work(struct work_struct *work)
queue_full:
pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
" data_direction: %d\n", cmd, cmd->data_direction);
- cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
- transport_handle_queue_full(cmd, cmd->se_dev);
+
+ transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
}
-static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
+void target_free_sgl(struct scatterlist *sgl, int nents)
{
- struct scatterlist *sg;
- int count;
+ sgl_free_n_order(sgl, nents, 0);
+}
+EXPORT_SYMBOL(target_free_sgl);
- for_each_sg(sgl, sg, nents, count)
- __free_page(sg_page(sg));
+static inline void transport_reset_sgl_orig(struct se_cmd *cmd)
+{
+ /*
+ * Check for saved t_data_sg that may be used for COMPARE_AND_WRITE
+	 * emulation, and free + reset pointers if necessary.
+ */
+ if (!cmd->t_data_sg_orig)
+ return;
- kfree(sgl);
+ kfree(cmd->t_data_sg);
+ cmd->t_data_sg = cmd->t_data_sg_orig;
+ cmd->t_data_sg_orig = NULL;
+ cmd->t_data_nents = cmd->t_data_nents_orig;
+ cmd->t_data_nents_orig = 0;
}
static inline void transport_free_pages(struct se_cmd *cmd)
{
- if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)
+ if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
+ target_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents);
+ cmd->t_prot_sg = NULL;
+ cmd->t_prot_nents = 0;
+ }
+
+ if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
+ /*
+ * Release special case READ buffer payload required for
+ * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE
+ */
+ if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
+ target_free_sgl(cmd->t_bidi_data_sg,
+ cmd->t_bidi_data_nents);
+ cmd->t_bidi_data_sg = NULL;
+ cmd->t_bidi_data_nents = 0;
+ }
+ transport_reset_sgl_orig(cmd);
return;
+ }
+ transport_reset_sgl_orig(cmd);
- transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
+ target_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
cmd->t_data_sg = NULL;
cmd->t_data_nents = 0;
- transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
+ target_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
cmd->t_bidi_data_sg = NULL;
cmd->t_bidi_data_nents = 0;
}
-/**
- * transport_release_cmd - free a command
- * @cmd: command to free
- *
- * This routine unconditionally frees a command, and reference counting
- * or list removal must be done in the caller.
- */
-static int transport_release_cmd(struct se_cmd *cmd)
-{
- BUG_ON(!cmd->se_tfo);
-
- if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
- core_tmr_release_req(cmd->se_tmr_req);
- if (cmd->t_task_cdb != cmd->__t_task_cdb)
- kfree(cmd->t_task_cdb);
- /*
- * If this cmd has been setup with target_get_sess_cmd(), drop
- * the kref and call ->release_cmd() in kref callback.
- */
- return target_put_sess_cmd(cmd->se_sess, cmd);
-}
-
-/**
- * transport_put_cmd - release a reference to a command
- * @cmd: command to release
- *
- * This routine releases our reference to the command and frees it if possible.
- */
-static int transport_put_cmd(struct se_cmd *cmd)
-{
- transport_free_pages(cmd);
- return transport_release_cmd(cmd);
-}
-
void *transport_kmap_data_sg(struct se_cmd *cmd)
{
struct scatterlist *sg = cmd->t_data_sg;
@@ -1997,7 +2739,7 @@ void *transport_kmap_data_sg(struct se_cmd *cmd)
return kmap(sg_page(sg)) + sg->offset;
/* >1 page. use vmap */
- pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL);
+ pages = kmalloc_array(cmd->t_data_nents, sizeof(*pages), GFP_KERNEL);
if (!pages)
return NULL;
@@ -2029,46 +2771,16 @@ void transport_kunmap_data_sg(struct se_cmd *cmd)
}
EXPORT_SYMBOL(transport_kunmap_data_sg);
-static int
-transport_generic_get_mem(struct se_cmd *cmd)
+int
+target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length,
+ bool zero_page, bool chainable)
{
- u32 length = cmd->data_length;
- unsigned int nents;
- struct page *page;
- gfp_t zero_flag;
- int i = 0;
-
- nents = DIV_ROUND_UP(length, PAGE_SIZE);
- cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL);
- if (!cmd->t_data_sg)
- return -ENOMEM;
-
- cmd->t_data_nents = nents;
- sg_init_table(cmd->t_data_sg, nents);
-
- zero_flag = cmd->se_cmd_flags & SCF_SCSI_DATA_CDB ? 0 : __GFP_ZERO;
-
- while (length) {
- u32 page_len = min_t(u32, length, PAGE_SIZE);
- page = alloc_page(GFP_KERNEL | zero_flag);
- if (!page)
- goto out;
-
- sg_set_page(&cmd->t_data_sg[i], page, page_len, 0);
- length -= page_len;
- i++;
- }
- return 0;
+ gfp_t gfp = GFP_KERNEL | (zero_page ? __GFP_ZERO : 0);
-out:
- while (i > 0) {
- i--;
- __free_page(sg_page(&cmd->t_data_sg[i]));
- }
- kfree(cmd->t_data_sg);
- cmd->t_data_sg = NULL;
- return -ENOMEM;
+ *sgl = sgl_alloc_order(length, 0, chainable, gfp, nents);
+ return *sgl ? 0 : -ENOMEM;
}
+EXPORT_SYMBOL(target_alloc_sgl);
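/*
 * Editor's note: a hedged usage sketch pairing the now-exported allocator
 * with target_free_sgl() above; the wrapper itself is hypothetical.
 */
static sense_reason_t example_alloc_buf(u32 length)
{
	struct scatterlist *sgl;
	unsigned int nents;

	if (target_alloc_sgl(&sgl, &nents, length, true /* zero pages */,
			     false /* not chainable */) < 0)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	/* ... fill or DMA through sgl ... */
	target_free_sgl(sgl, nents);
	return TCM_NO_SENSE;
}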
/*
* Allocate any required resources to execute the command. For writes we
@@ -2078,16 +2790,59 @@ out:
sense_reason_t
transport_generic_new_cmd(struct se_cmd *cmd)
{
+ unsigned long flags;
int ret = 0;
+ bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);
+
+ if (cmd->prot_op != TARGET_PROT_NORMAL &&
+ !(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
+ ret = target_alloc_sgl(&cmd->t_prot_sg, &cmd->t_prot_nents,
+ cmd->prot_length, true, false);
+ if (ret < 0)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
/*
- * Determine is the TCM fabric module has already allocated physical
+ * Determine if the TCM fabric module has already allocated physical
* memory, and is directly calling transport_generic_map_mem_to_cmd()
* beforehand.
*/
if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
cmd->data_length) {
- ret = transport_generic_get_mem(cmd);
+
+ if ((cmd->se_cmd_flags & SCF_BIDI) ||
+ (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) {
+ u32 bidi_length;
+
+ if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)
+ bidi_length = cmd->t_task_nolb *
+ cmd->se_dev->dev_attrib.block_size;
+ else
+ bidi_length = cmd->data_length;
+
+ ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
+ &cmd->t_bidi_data_nents,
+ bidi_length, zero_flag, false);
+ if (ret < 0)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+
+ ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
+ cmd->data_length, zero_flag, false);
+ if (ret < 0)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ } else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
+ cmd->data_length) {
+ /*
+ * Special case for COMPARE_AND_WRITE with fabrics
+ * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC.
+ */
+ u32 caw_length = cmd->t_task_nolb *
+ cmd->se_dev->dev_attrib.block_size;
+
+ ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
+ &cmd->t_bidi_data_nents,
+ caw_length, zero_flag, false);
if (ret < 0)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
@@ -2097,723 +2852,733 @@ transport_generic_new_cmd(struct se_cmd *cmd)
* and let it call back once the write buffers are ready.
*/
target_add_to_state_list(cmd);
- if (cmd->data_direction != DMA_TO_DEVICE) {
+ if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) {
target_execute_cmd(cmd);
return 0;
}
- transport_cmd_check_stop(cmd, false, true);
+
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ cmd->t_state = TRANSPORT_WRITE_PENDING;
+ /*
+ * Determine if frontend context caller is requesting the stopping of
+ * this command for frontend exceptions.
+ */
+ if (cmd->transport_state & CMD_T_STOP &&
+ !cmd->se_tfo->write_pending_must_be_called) {
+ pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
+ __func__, __LINE__, cmd->tag);
+
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+ complete_all(&cmd->t_transport_stop_comp);
+ return 0;
+ }
+ cmd->transport_state &= ~CMD_T_ACTIVE;
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
ret = cmd->se_tfo->write_pending(cmd);
- if (ret == -EAGAIN || ret == -ENOMEM)
+ if (ret)
goto queue_full;
- /* fabric drivers should only return -EAGAIN or -ENOMEM as error */
- WARN_ON(ret);
-
- return (!ret) ? 0 : TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return 0;
queue_full:
pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
- cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
- transport_handle_queue_full(cmd, cmd->se_dev);
+ transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
return 0;
}
EXPORT_SYMBOL(transport_generic_new_cmd);
static void transport_write_pending_qf(struct se_cmd *cmd)
{
+ unsigned long flags;
int ret;
+ bool stop;
+
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ stop = (cmd->transport_state & (CMD_T_STOP | CMD_T_ABORTED));
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+ if (stop) {
+ pr_debug("%s:%d CMD_T_STOP|CMD_T_ABORTED for ITT: 0x%08llx\n",
+ __func__, __LINE__, cmd->tag);
+ complete_all(&cmd->t_transport_stop_comp);
+ return;
+ }
ret = cmd->se_tfo->write_pending(cmd);
- if (ret == -EAGAIN || ret == -ENOMEM) {
+ if (ret) {
pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n",
cmd);
- transport_handle_queue_full(cmd, cmd->se_dev);
+ transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
}
}
+static bool
+__transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *,
+ unsigned long *flags);
+
+static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ __transport_wait_for_tasks(cmd, true, aborted, tas, &flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+}
+
+/*
+ * Call target_put_sess_cmd() and wait until target_release_cmd_kref(@cmd) has
+ * finished.
+ */
+void target_put_cmd_and_wait(struct se_cmd *cmd)
+{
+ DECLARE_COMPLETION_ONSTACK(compl);
+
+ WARN_ON_ONCE(cmd->abrt_compl);
+ cmd->abrt_compl = &compl;
+ target_put_sess_cmd(cmd);
+ wait_for_completion(&compl);
+}
+
+/*
+ * This function is called by frontend drivers after processing of a command
+ * has finished.
+ *
+ * The protocol for ensuring that either the regular frontend command
+ * processing flow or target_handle_abort() code drops one reference is as
+ * follows:
+ * - Calling .queue_data_in(), .queue_status() or queue_tm_rsp() will cause
+ * the frontend driver to call this function synchronously or asynchronously.
+ * That will cause one reference to be dropped.
+ * - During regular command processing the target core sets CMD_T_COMPLETE
+ * before invoking one of the .queue_*() functions.
+ * - The code that aborts commands skips commands and TMFs for which
+ * CMD_T_COMPLETE has been set.
+ * - CMD_T_ABORTED is set atomically after the CMD_T_COMPLETE check for
+ * commands that will be aborted.
+ * - If the CMD_T_ABORTED flag is set but CMD_T_TAS has not been set
+ * transport_generic_free_cmd() skips its call to target_put_sess_cmd().
+ * - For aborted commands for which CMD_T_TAS has been set .queue_status() will
+ * be called and will drop a reference.
+ * - For aborted commands for which CMD_T_TAS has not been set .aborted_task()
+ * will be called. target_handle_abort() will drop the final reference.
+ */
int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
{
+ DECLARE_COMPLETION_ONSTACK(compl);
int ret = 0;
+ bool aborted = false, tas = false;
- if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
- if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
- transport_wait_for_tasks(cmd);
+ if (wait_for_tasks)
+ target_wait_free_cmd(cmd, &aborted, &tas);
- ret = transport_release_cmd(cmd);
- } else {
- if (wait_for_tasks)
- transport_wait_for_tasks(cmd);
+ if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) {
+ /*
+ * Handle WRITE failure case where transport_generic_new_cmd()
+ * has already added se_cmd to state_list, but fabric has
+ * failed command before I/O submission.
+ */
+ if (cmd->state_active)
+ target_remove_from_state_list(cmd);
if (cmd->se_lun)
transport_lun_remove_cmd(cmd);
-
- ret = transport_put_cmd(cmd);
+ }
+ if (aborted)
+ cmd->free_compl = &compl;
+ ret = target_put_sess_cmd(cmd);
+ if (aborted) {
+ pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag);
+ wait_for_completion(&compl);
+ ret = 1;
}
return ret;
}
EXPORT_SYMBOL(transport_generic_free_cmd);
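/*
 * Editor's note: for TARGET_SCF_ACK_KREF fabrics, the reference protocol
 * documented above pairs like this hedged sketch: one put from
 * transport_generic_free_cmd(), one from the fabric's ack path.
 */
	target_get_sess_cmd(se_cmd, true);	/* cmd_kref: 1 -> 2 */
	/* ... I/O runs, status is queued to the fabric ... */
	transport_generic_free_cmd(se_cmd, false);	/* drops ref #1 */
	/* ... initiator acknowledges the response ... */
	target_put_sess_cmd(se_cmd);	/* drops ref #2 -> ->release_cmd() */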
-/* target_get_sess_cmd - Add command to active ->sess_cmd_list
- * @se_sess: session to reference
+/**
+ * target_get_sess_cmd - Verify the session is accepting cmds and take ref
* @se_cmd: command descriptor to add
* @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd()
*/
-int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
- bool ack_kref)
+int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
{
- unsigned long flags;
int ret = 0;
- kref_init(&se_cmd->cmd_kref);
/*
* Add a second kref if the fabric caller is expecting to handle
* fabric acknowledgement that requires two target_put_sess_cmd()
* invocations before se_cmd descriptor release.
*/
- if (ack_kref == true) {
+ if (ack_kref) {
kref_get(&se_cmd->cmd_kref);
se_cmd->se_cmd_flags |= SCF_ACK_KREF;
}
- spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
- if (se_sess->sess_tearing_down) {
- ret = -ESHUTDOWN;
- goto out;
+ /*
+ * Users like xcopy do not use counters since they never do a stop
+ * and wait.
+ */
+ if (se_cmd->cmd_cnt) {
+ if (!percpu_ref_tryget_live(&se_cmd->cmd_cnt->refcnt))
+ ret = -ESHUTDOWN;
}
- list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
-out:
- spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+ if (ret && ack_kref)
+ target_put_sess_cmd(se_cmd);
+
return ret;
}
EXPORT_SYMBOL(target_get_sess_cmd);
+static void target_free_cmd_mem(struct se_cmd *cmd)
+{
+ transport_free_pages(cmd);
+
+ if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
+ core_tmr_release_req(cmd->se_tmr_req);
+ if (cmd->t_task_cdb != cmd->__t_task_cdb)
+ kfree(cmd->t_task_cdb);
+}
+
static void target_release_cmd_kref(struct kref *kref)
{
struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
- struct se_session *se_sess = se_cmd->se_sess;
-
- if (list_empty(&se_cmd->se_cmd_list)) {
- spin_unlock(&se_sess->sess_cmd_lock);
- se_cmd->se_tfo->release_cmd(se_cmd);
- return;
- }
- if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
- spin_unlock(&se_sess->sess_cmd_lock);
- complete(&se_cmd->cmd_wait_comp);
- return;
- }
- list_del(&se_cmd->se_cmd_list);
- spin_unlock(&se_sess->sess_cmd_lock);
+ struct target_cmd_counter *cmd_cnt = se_cmd->cmd_cnt;
+ struct completion *free_compl = se_cmd->free_compl;
+ struct completion *abrt_compl = se_cmd->abrt_compl;
+ target_free_cmd_mem(se_cmd);
se_cmd->se_tfo->release_cmd(se_cmd);
+ if (free_compl)
+ complete(free_compl);
+ if (abrt_compl)
+ complete(abrt_compl);
+
+ if (cmd_cnt)
+ percpu_ref_put(&cmd_cnt->refcnt);
}
-/* target_put_sess_cmd - Check for active I/O shutdown via kref_put
- * @se_sess: session to reference
- * @se_cmd: command descriptor to drop
+/**
+ * target_put_sess_cmd - decrease the command reference count
+ * @se_cmd: command to drop a reference from
+ *
+ * Returns 1 if and only if this target_put_sess_cmd() call caused the
+ * refcount to drop to zero. Returns zero otherwise.
*/
-int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
+int target_put_sess_cmd(struct se_cmd *se_cmd)
{
- return kref_put_spinlock_irqsave(&se_cmd->cmd_kref, target_release_cmd_kref,
- &se_sess->sess_cmd_lock);
+ return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
}
EXPORT_SYMBOL(target_put_sess_cmd);
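With ack_kref set, target_get_sess_cmd() leaves the fabric holding a second reference, so two target_put_sess_cmd() calls must happen before target_release_cmd_kref() runs. An illustrative pairing (the demo_ name is an assumption):

	static void demo_ack_kref_lifetime(struct se_cmd *se_cmd)
	{
		if (target_get_sess_cmd(se_cmd, true))
			return;		/* session is shutting down */

		/* ... core processing and fabric acknowledgement ... */

		target_put_sess_cmd(se_cmd);	/* drops the SCF_ACK_KREF reference */
		target_put_sess_cmd(se_cmd);	/* final put -> release_cmd() */
	}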
-/* target_sess_cmd_list_set_waiting - Flag all commands in
- * sess_cmd_list to complete cmd_wait_comp. Set
- * sess_tearing_down so no more commands are queued.
- * @se_sess: session to flag
- */
-void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
+static const char *data_dir_name(enum dma_data_direction d)
{
- struct se_cmd *se_cmd;
- unsigned long flags;
-
- spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
- if (se_sess->sess_tearing_down) {
- spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
- return;
+ switch (d) {
+ case DMA_BIDIRECTIONAL: return "BIDI";
+ case DMA_TO_DEVICE: return "WRITE";
+ case DMA_FROM_DEVICE: return "READ";
+ case DMA_NONE: return "NONE";
}
- se_sess->sess_tearing_down = 1;
- list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
-
- list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
- se_cmd->cmd_wait_set = 1;
- spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+ return "(?)";
}
-EXPORT_SYMBOL(target_sess_cmd_list_set_waiting);
-/* target_wait_for_sess_cmds - Wait for outstanding descriptors
- * @se_sess: session to wait for active I/O
- */
-void target_wait_for_sess_cmds(struct se_session *se_sess)
+static const char *cmd_state_name(enum transport_state_table t)
{
- struct se_cmd *se_cmd, *tmp_cmd;
- unsigned long flags;
-
- list_for_each_entry_safe(se_cmd, tmp_cmd,
- &se_sess->sess_wait_list, se_cmd_list) {
- list_del(&se_cmd->se_cmd_list);
-
- pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
- " %d\n", se_cmd, se_cmd->t_state,
- se_cmd->se_tfo->get_cmd_state(se_cmd));
-
- wait_for_completion(&se_cmd->cmd_wait_comp);
- pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
- " fabric state: %d\n", se_cmd, se_cmd->t_state,
- se_cmd->se_tfo->get_cmd_state(se_cmd));
-
- se_cmd->se_tfo->release_cmd(se_cmd);
+ switch (t) {
+ case TRANSPORT_NO_STATE: return "NO_STATE";
+ case TRANSPORT_NEW_CMD: return "NEW_CMD";
+ case TRANSPORT_WRITE_PENDING: return "WRITE_PENDING";
+ case TRANSPORT_PROCESSING: return "PROCESSING";
+ case TRANSPORT_COMPLETE: return "COMPLETE";
+ case TRANSPORT_ISTATE_PROCESSING:
+ return "ISTATE_PROCESSING";
+ case TRANSPORT_COMPLETE_QF_WP: return "COMPLETE_QF_WP";
+ case TRANSPORT_COMPLETE_QF_OK: return "COMPLETE_QF_OK";
+ case TRANSPORT_COMPLETE_QF_ERR: return "COMPLETE_QF_ERR";
}
- spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
- WARN_ON(!list_empty(&se_sess->sess_cmd_list));
- spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
-
+ return "(?)";
}
-EXPORT_SYMBOL(target_wait_for_sess_cmds);
-/* transport_lun_wait_for_tasks():
- *
- * Called from ConfigFS context to stop the passed struct se_cmd to allow
- * an struct se_lun to be successfully shutdown.
- */
-static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
+static void target_append_str(char **str, const char *txt)
{
- unsigned long flags;
- int ret = 0;
-
- /*
- * If the frontend has already requested this struct se_cmd to
- * be stopped, we can safely ignore this struct se_cmd.
- */
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- if (cmd->transport_state & CMD_T_STOP) {
- cmd->transport_state &= ~CMD_T_LUN_STOP;
-
- pr_debug("ConfigFS ITT[0x%08x] - CMD_T_STOP, skipping\n",
- cmd->se_tfo->get_task_tag(cmd));
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- transport_cmd_check_stop(cmd, false, false);
- return -EPERM;
- }
- cmd->transport_state |= CMD_T_LUN_FE_STOP;
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ char *prev = *str;
- // XXX: audit task_flags checks.
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- if ((cmd->transport_state & CMD_T_BUSY) &&
- (cmd->transport_state & CMD_T_SENT)) {
- if (!target_stop_cmd(cmd, &flags))
- ret++;
- }
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ *str = *str ? kasprintf(GFP_ATOMIC, "%s,%s", *str, txt) :
+ kstrdup(txt, GFP_ATOMIC);
+ kfree(prev);
+}
- pr_debug("ConfigFS: cmd: %p stop tasks ret:"
- " %d\n", cmd, ret);
- if (!ret) {
- pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
- cmd->se_tfo->get_task_tag(cmd));
- wait_for_completion(&cmd->transport_lun_stop_comp);
- pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
- cmd->se_tfo->get_task_tag(cmd));
+/*
+ * Convert a transport state bitmask into a string. The caller is
+ * responsible for freeing the returned pointer.
+ */
+static char *target_ts_to_str(u32 ts)
+{
+ char *str = NULL;
+
+ if (ts & CMD_T_ABORTED)
+ target_append_str(&str, "aborted");
+ if (ts & CMD_T_ACTIVE)
+ target_append_str(&str, "active");
+ if (ts & CMD_T_COMPLETE)
+ target_append_str(&str, "complete");
+ if (ts & CMD_T_SENT)
+ target_append_str(&str, "sent");
+ if (ts & CMD_T_STOP)
+ target_append_str(&str, "stop");
+ if (ts & CMD_T_FABRIC_STOP)
+ target_append_str(&str, "fabric_stop");
+
+ return str;
+}
+
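For example (illustrative only; the demo_ name is assumed), a command that is both aborted and stopped renders as a comma-separated list, and the caller owns the allocation:

	static void demo_show_ts(void)
	{
		char *s = target_ts_to_str(CMD_T_ABORTED | CMD_T_STOP);

		pr_debug("transport_state: %s\n", s ? s : "");	/* "aborted,stop" */
		kfree(s);
	}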
+static const char *target_tmf_name(enum tcm_tmreq_table tmf)
+{
+ switch (tmf) {
+ case TMR_ABORT_TASK: return "ABORT_TASK";
+ case TMR_ABORT_TASK_SET: return "ABORT_TASK_SET";
+ case TMR_CLEAR_ACA: return "CLEAR_ACA";
+ case TMR_CLEAR_TASK_SET: return "CLEAR_TASK_SET";
+ case TMR_LUN_RESET: return "LUN_RESET";
+ case TMR_TARGET_WARM_RESET: return "TARGET_WARM_RESET";
+ case TMR_TARGET_COLD_RESET: return "TARGET_COLD_RESET";
+ case TMR_LUN_RESET_PRO: return "LUN_RESET_PRO";
+ case TMR_UNKNOWN: break;
+ }
+ return "(?)";
+}
+
+void target_show_cmd(const char *pfx, struct se_cmd *cmd)
+{
+ char *ts_str = target_ts_to_str(cmd->transport_state);
+ const u8 *cdb = cmd->t_task_cdb;
+ struct se_tmr_req *tmf = cmd->se_tmr_req;
+
+ if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
+ pr_debug("%scmd %#02x:%#02x with tag %#llx dir %s i_state %d t_state %s len %d refcnt %d transport_state %s\n",
+ pfx, cdb[0], cdb[1], cmd->tag,
+ data_dir_name(cmd->data_direction),
+ cmd->se_tfo->get_cmd_state(cmd),
+ cmd_state_name(cmd->t_state), cmd->data_length,
+ kref_read(&cmd->cmd_kref), ts_str);
+ } else {
+ pr_debug("%stmf %s with tag %#llx ref_task_tag %#llx i_state %d t_state %s refcnt %d transport_state %s\n",
+ pfx, target_tmf_name(tmf->function), cmd->tag,
+ tmf->ref_task_tag, cmd->se_tfo->get_cmd_state(cmd),
+ cmd_state_name(cmd->t_state),
+ kref_read(&cmd->cmd_kref), ts_str);
}
+ kfree(ts_str);
+}
+EXPORT_SYMBOL(target_show_cmd);
- return 0;
+static void target_stop_cmd_counter_confirm(struct percpu_ref *ref)
+{
+ struct target_cmd_counter *cmd_cnt = container_of(ref,
+ struct target_cmd_counter,
+ refcnt);
+ complete_all(&cmd_cnt->stop_done);
}
-static void __transport_clear_lun_from_sessions(struct se_lun *lun)
+/**
+ * target_stop_cmd_counter - Stop new IO from being added to the counter.
+ * @cmd_cnt: counter to stop
+ */
+void target_stop_cmd_counter(struct target_cmd_counter *cmd_cnt)
{
- struct se_cmd *cmd = NULL;
- unsigned long lun_flags, cmd_flags;
- /*
- * Do exception processing and return CHECK_CONDITION status to the
- * Initiator Port.
- */
- spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
- while (!list_empty(&lun->lun_cmd_list)) {
- cmd = list_first_entry(&lun->lun_cmd_list,
- struct se_cmd, se_lun_node);
- list_del_init(&cmd->se_lun_node);
-
- spin_lock(&cmd->t_state_lock);
- pr_debug("SE_LUN[%d] - Setting cmd->transport"
- "_lun_stop for ITT: 0x%08x\n",
- cmd->se_lun->unpacked_lun,
- cmd->se_tfo->get_task_tag(cmd));
- cmd->transport_state |= CMD_T_LUN_STOP;
- spin_unlock(&cmd->t_state_lock);
-
- spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
-
- if (!cmd->se_lun) {
- pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n",
- cmd->se_tfo->get_task_tag(cmd),
- cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
- BUG();
- }
- /*
- * If the Storage engine still owns the iscsi_cmd_t, determine
- * and/or stop its context.
- */
- pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport"
- "_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun,
- cmd->se_tfo->get_task_tag(cmd));
+ pr_debug("Stopping command counter.\n");
+ if (!atomic_cmpxchg(&cmd_cnt->stopped, 0, 1))
+ percpu_ref_kill_and_confirm(&cmd_cnt->refcnt,
+ target_stop_cmd_counter_confirm);
+}
+EXPORT_SYMBOL_GPL(target_stop_cmd_counter);
- if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) {
- spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
- continue;
- }
+/**
+ * target_stop_session - Stop new IO from being queued on the session.
+ * @se_sess: session to stop
+ */
+void target_stop_session(struct se_session *se_sess)
+{
+ target_stop_cmd_counter(se_sess->cmd_cnt);
+}
+EXPORT_SYMBOL(target_stop_session);
- pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
- "_wait_for_tasks(): SUCCESS\n",
- cmd->se_lun->unpacked_lun,
- cmd->se_tfo->get_task_tag(cmd));
+/**
+ * target_wait_for_cmds - Wait for outstanding cmds.
+ * @cmd_cnt: counter on which to wait for active I/O to complete.
+ */
+void target_wait_for_cmds(struct target_cmd_counter *cmd_cnt)
+{
+ int ret;
- spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
- if (!(cmd->transport_state & CMD_T_DEV_ACTIVE)) {
- spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
- goto check_cond;
- }
- cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
- target_remove_from_state_list(cmd);
- spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
+ WARN_ON_ONCE(!atomic_read(&cmd_cnt->stopped));
- /*
- * The Storage engine stopped this struct se_cmd before it was
- * send to the fabric frontend for delivery back to the
- * Initiator Node. Return this SCSI CDB back with an
- * CHECK_CONDITION status.
- */
-check_cond:
- transport_send_check_condition_and_sense(cmd,
- TCM_NON_EXISTENT_LUN, 0);
- /*
- * If the fabric frontend is waiting for this iscsi_cmd_t to
- * be released, notify the waiting thread now that LU has
- * finished accessing it.
- */
- spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
- if (cmd->transport_state & CMD_T_LUN_FE_STOP) {
- pr_debug("SE_LUN[%d] - Detected FE stop for"
- " struct se_cmd: %p ITT: 0x%08x\n",
- lun->unpacked_lun,
- cmd, cmd->se_tfo->get_task_tag(cmd));
-
- spin_unlock_irqrestore(&cmd->t_state_lock,
- cmd_flags);
- transport_cmd_check_stop(cmd, false, false);
- complete(&cmd->transport_lun_fe_stop_comp);
- spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
- continue;
- }
- pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
- lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd));
+ do {
+ pr_debug("Waiting for running cmds to complete.\n");
+ ret = wait_event_timeout(cmd_cnt->refcnt_wq,
+ percpu_ref_is_zero(&cmd_cnt->refcnt),
+ 180 * HZ);
+ } while (ret <= 0);
- spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
- spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
- }
- spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
+ wait_for_completion(&cmd_cnt->stop_done);
+ pr_debug("Waiting for cmds done.\n");
}
+EXPORT_SYMBOL_GPL(target_wait_for_cmds);
-static int transport_clear_lun_thread(void *p)
+/**
+ * target_wait_for_sess_cmds - Wait for outstanding commands
+ * @se_sess: session to wait for active I/O
+ */
+void target_wait_for_sess_cmds(struct se_session *se_sess)
{
- struct se_lun *lun = p;
-
- __transport_clear_lun_from_sessions(lun);
- complete(&lun->lun_shutdown_comp);
-
- return 0;
+ target_wait_for_cmds(se_sess->cmd_cnt);
}
+EXPORT_SYMBOL(target_wait_for_sess_cmds);
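Taken together, a fabric session teardown is expected to pair the stop and wait calls. A minimal sketch, assuming a fabric that routes all commands through the session counter (the demo_ name is an assumption):

	static void demo_session_shutdown(struct se_session *se_sess)
	{
		target_stop_session(se_sess);	    /* new target_get_sess_cmd() calls fail */
		target_wait_for_sess_cmds(se_sess); /* drain until the refcount is zero */
		/* ... typically followed by transport_deregister_session(se_sess) ... */
	}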
-int transport_clear_lun_from_sessions(struct se_lun *lun)
+/*
+ * Prevent new percpu_ref_tryget_live() calls from succeeding and wait until
+ * all references to the LUN have been released. Called during LUN shutdown.
+ */
+void transport_clear_lun_ref(struct se_lun *lun)
{
- struct task_struct *kt;
-
- kt = kthread_run(transport_clear_lun_thread, lun,
- "tcm_cl_%u", lun->unpacked_lun);
- if (IS_ERR(kt)) {
- pr_err("Unable to start clear_lun thread\n");
- return PTR_ERR(kt);
- }
+ percpu_ref_kill(&lun->lun_ref);
wait_for_completion(&lun->lun_shutdown_comp);
-
- return 0;
}
-/**
- * transport_wait_for_tasks - wait for completion to occur
- * @cmd: command to wait
- *
- * Called from frontend fabric context to wait for storage engine
- * to pause and/or release frontend generated struct se_cmd.
- */
-bool transport_wait_for_tasks(struct se_cmd *cmd)
+static bool
+__transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop,
+ bool *aborted, bool *tas, unsigned long *flags)
+ __releases(&cmd->t_state_lock)
+ __acquires(&cmd->t_state_lock)
{
- unsigned long flags;
+ lockdep_assert_held(&cmd->t_state_lock);
+
+ if (fabric_stop)
+ cmd->transport_state |= CMD_T_FABRIC_STOP;
+
+ if (cmd->transport_state & CMD_T_ABORTED)
+ *aborted = true;
+
+ if (cmd->transport_state & CMD_T_TAS)
+ *tas = true;
- spin_lock_irqsave(&cmd->t_state_lock, flags);
if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
- !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
return false;
- }
if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
- !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
return false;
- }
- /*
- * If we are already stopped due to an external event (ie: LUN shutdown)
- * sleep until the connection can have the passed struct se_cmd back.
- * The cmd->transport_lun_stopped_sem will be upped by
- * transport_clear_lun_from_sessions() once the ConfigFS context caller
- * has completed its operation on the struct se_cmd.
- */
- if (cmd->transport_state & CMD_T_LUN_STOP) {
- pr_debug("wait_for_tasks: Stopping"
- " wait_for_completion(&cmd->t_tasktransport_lun_fe"
- "_stop_comp); for ITT: 0x%08x\n",
- cmd->se_tfo->get_task_tag(cmd));
- /*
- * There is a special case for WRITES where a FE exception +
- * LUN shutdown means ConfigFS context is still sleeping on
- * transport_lun_stop_comp in transport_lun_wait_for_tasks().
- * We go ahead and up transport_lun_stop_comp just to be sure
- * here.
- */
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- complete(&cmd->transport_lun_stop_comp);
- wait_for_completion(&cmd->transport_lun_fe_stop_comp);
- spin_lock_irqsave(&cmd->t_state_lock, flags);
-
- target_remove_from_state_list(cmd);
- /*
- * At this point, the frontend who was the originator of this
- * struct se_cmd, now owns the structure and can be released through
- * normal means below.
- */
- pr_debug("wait_for_tasks: Stopped"
- " wait_for_completion(&cmd->t_tasktransport_lun_fe_"
- "stop_comp); for ITT: 0x%08x\n",
- cmd->se_tfo->get_task_tag(cmd));
- cmd->transport_state &= ~CMD_T_LUN_STOP;
- }
+ if (!(cmd->transport_state & CMD_T_ACTIVE))
+ return false;
- if (!(cmd->transport_state & CMD_T_ACTIVE)) {
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ if (fabric_stop && *aborted)
return false;
- }
cmd->transport_state |= CMD_T_STOP;
- pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x"
- " i_state: %d, t_state: %d, CMD_T_STOP\n",
- cmd, cmd->se_tfo->get_task_tag(cmd),
- cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
+ target_show_cmd("wait_for_tasks: Stopping ", cmd);
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
- wait_for_completion(&cmd->t_transport_stop_comp);
+ while (!wait_for_completion_timeout(&cmd->t_transport_stop_comp,
+ 180 * HZ))
+ target_show_cmd("wait for tasks: ", cmd);
- spin_lock_irqsave(&cmd->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, *flags);
cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);
- pr_debug("wait_for_tasks: Stopped wait_for_completion("
- "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
- cmd->se_tfo->get_task_tag(cmd));
-
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->"
+ "t_transport_stop_comp) for ITT: 0x%08llx\n", cmd->tag);
return true;
}
-EXPORT_SYMBOL(transport_wait_for_tasks);
-static int transport_get_sense_codes(
- struct se_cmd *cmd,
- u8 *asc,
- u8 *ascq)
-{
- *asc = cmd->scsi_asc;
- *ascq = cmd->scsi_ascq;
-
- return 0;
-}
-
-int
-transport_send_check_condition_and_sense(struct se_cmd *cmd,
- sense_reason_t reason, int from_transport)
+/**
+ * transport_wait_for_tasks - set CMD_T_STOP and wait for t_transport_stop_comp
+ * @cmd: command to wait on
+ */
+bool transport_wait_for_tasks(struct se_cmd *cmd)
{
- unsigned char *buffer = cmd->sense_buffer;
unsigned long flags;
- u8 asc = 0, ascq = 0;
+ bool ret, aborted = false, tas = false;
spin_lock_irqsave(&cmd->t_state_lock, flags);
- if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- return 0;
- }
- cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
+ ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags);
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- if (!reason && from_transport)
- goto after_reason;
-
- if (!from_transport)
- cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
+ return ret;
+}
+EXPORT_SYMBOL(transport_wait_for_tasks);
- /*
- * Actual SENSE DATA, see SPC-3 7.23.2 SPC_SENSE_KEY_OFFSET uses
- * SENSE KEY values from include/scsi/scsi.h
- */
- switch (reason) {
- case TCM_NO_SENSE:
- /* CURRENT ERROR */
- buffer[0] = 0x70;
- buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
- /* Not Ready */
- buffer[SPC_SENSE_KEY_OFFSET] = NOT_READY;
- /* NO ADDITIONAL SENSE INFORMATION */
- buffer[SPC_ASC_KEY_OFFSET] = 0;
- buffer[SPC_ASCQ_KEY_OFFSET] = 0;
- break;
- case TCM_NON_EXISTENT_LUN:
- /* CURRENT ERROR */
- buffer[0] = 0x70;
- buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
- /* ILLEGAL REQUEST */
- buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
- /* LOGICAL UNIT NOT SUPPORTED */
- buffer[SPC_ASC_KEY_OFFSET] = 0x25;
- break;
- case TCM_UNSUPPORTED_SCSI_OPCODE:
- case TCM_SECTOR_COUNT_TOO_MANY:
- /* CURRENT ERROR */
- buffer[0] = 0x70;
- buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
- /* ILLEGAL REQUEST */
- buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
- /* INVALID COMMAND OPERATION CODE */
- buffer[SPC_ASC_KEY_OFFSET] = 0x20;
- break;
- case TCM_UNKNOWN_MODE_PAGE:
- /* CURRENT ERROR */
- buffer[0] = 0x70;
- buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
- /* ILLEGAL REQUEST */
- buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
- /* INVALID FIELD IN CDB */
- buffer[SPC_ASC_KEY_OFFSET] = 0x24;
- break;
- case TCM_CHECK_CONDITION_ABORT_CMD:
- /* CURRENT ERROR */
- buffer[0] = 0x70;
- buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
- /* ABORTED COMMAND */
- buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
- /* BUS DEVICE RESET FUNCTION OCCURRED */
- buffer[SPC_ASC_KEY_OFFSET] = 0x29;
- buffer[SPC_ASCQ_KEY_OFFSET] = 0x03;
- break;
- case TCM_INCORRECT_AMOUNT_OF_DATA:
- /* CURRENT ERROR */
- buffer[0] = 0x70;
- buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
- /* ABORTED COMMAND */
- buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
- /* WRITE ERROR */
- buffer[SPC_ASC_KEY_OFFSET] = 0x0c;
- /* NOT ENOUGH UNSOLICITED DATA */
- buffer[SPC_ASCQ_KEY_OFFSET] = 0x0d;
- break;
- case TCM_INVALID_CDB_FIELD:
- /* CURRENT ERROR */
- buffer[0] = 0x70;
- buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
- /* ILLEGAL REQUEST */
- buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
- /* INVALID FIELD IN CDB */
- buffer[SPC_ASC_KEY_OFFSET] = 0x24;
- break;
- case TCM_INVALID_PARAMETER_LIST:
- /* CURRENT ERROR */
- buffer[0] = 0x70;
- buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
- /* ILLEGAL REQUEST */
- buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
- /* INVALID FIELD IN PARAMETER LIST */
- buffer[SPC_ASC_KEY_OFFSET] = 0x26;
- break;
- case TCM_PARAMETER_LIST_LENGTH_ERROR:
- /* CURRENT ERROR */
- buffer[0] = 0x70;
- buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
- /* ILLEGAL REQUEST */
- buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
- /* PARAMETER LIST LENGTH ERROR */
- buffer[SPC_ASC_KEY_OFFSET] = 0x1a;
- break;
- case TCM_UNEXPECTED_UNSOLICITED_DATA:
- /* CURRENT ERROR */
- buffer[0] = 0x70;
- buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
- /* ABORTED COMMAND */
- buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
- /* WRITE ERROR */
- buffer[SPC_ASC_KEY_OFFSET] = 0x0c;
- /* UNEXPECTED_UNSOLICITED_DATA */
- buffer[SPC_ASCQ_KEY_OFFSET] = 0x0c;
- break;
- case TCM_SERVICE_CRC_ERROR:
- /* CURRENT ERROR */
- buffer[0] = 0x70;
- buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
- /* ABORTED COMMAND */
- buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
- /* PROTOCOL SERVICE CRC ERROR */
- buffer[SPC_ASC_KEY_OFFSET] = 0x47;
- /* N/A */
- buffer[SPC_ASCQ_KEY_OFFSET] = 0x05;
- break;
- case TCM_SNACK_REJECTED:
- /* CURRENT ERROR */
- buffer[0] = 0x70;
- buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
- /* ABORTED COMMAND */
- buffer[SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
- /* READ ERROR */
- buffer[SPC_ASC_KEY_OFFSET] = 0x11;
- /* FAILED RETRANSMISSION REQUEST */
- buffer[SPC_ASCQ_KEY_OFFSET] = 0x13;
- break;
- case TCM_WRITE_PROTECTED:
- /* CURRENT ERROR */
- buffer[0] = 0x70;
- buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
- /* DATA PROTECT */
- buffer[SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
- /* WRITE PROTECTED */
- buffer[SPC_ASC_KEY_OFFSET] = 0x27;
- break;
- case TCM_ADDRESS_OUT_OF_RANGE:
- /* CURRENT ERROR */
- buffer[0] = 0x70;
- buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
- /* ILLEGAL REQUEST */
- buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
- /* LOGICAL BLOCK ADDRESS OUT OF RANGE */
- buffer[SPC_ASC_KEY_OFFSET] = 0x21;
- break;
- case TCM_CHECK_CONDITION_UNIT_ATTENTION:
- /* CURRENT ERROR */
- buffer[0] = 0x70;
- buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
- /* UNIT ATTENTION */
- buffer[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
- core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
- buffer[SPC_ASC_KEY_OFFSET] = asc;
- buffer[SPC_ASCQ_KEY_OFFSET] = ascq;
- break;
- case TCM_CHECK_CONDITION_NOT_READY:
- /* CURRENT ERROR */
- buffer[0] = 0x70;
- buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
- /* Not Ready */
- buffer[SPC_SENSE_KEY_OFFSET] = NOT_READY;
- transport_get_sense_codes(cmd, &asc, &ascq);
- buffer[SPC_ASC_KEY_OFFSET] = asc;
- buffer[SPC_ASCQ_KEY_OFFSET] = ascq;
- break;
- case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
- default:
- /* CURRENT ERROR */
- buffer[0] = 0x70;
- buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
+struct sense_detail {
+ u8 key;
+ u8 asc;
+ u8 ascq;
+ bool add_sense_info;
+};
+
+static const struct sense_detail sense_detail_table[] = {
+ [TCM_NO_SENSE] = {
+ .key = NOT_READY
+ },
+ [TCM_NON_EXISTENT_LUN] = {
+ .key = ILLEGAL_REQUEST,
+ .asc = 0x25 /* LOGICAL UNIT NOT SUPPORTED */
+ },
+ [TCM_UNSUPPORTED_SCSI_OPCODE] = {
+ .key = ILLEGAL_REQUEST,
+ .asc = 0x20, /* INVALID COMMAND OPERATION CODE */
+ },
+ [TCM_SECTOR_COUNT_TOO_MANY] = {
+ .key = ILLEGAL_REQUEST,
+ .asc = 0x20, /* INVALID COMMAND OPERATION CODE */
+ },
+ [TCM_UNKNOWN_MODE_PAGE] = {
+ .key = ILLEGAL_REQUEST,
+ .asc = 0x24, /* INVALID FIELD IN CDB */
+ },
+ [TCM_CHECK_CONDITION_ABORT_CMD] = {
+ .key = ABORTED_COMMAND,
+ .asc = 0x29, /* BUS DEVICE RESET FUNCTION OCCURRED */
+ .ascq = 0x03,
+ },
+ [TCM_INCORRECT_AMOUNT_OF_DATA] = {
+ .key = ABORTED_COMMAND,
+ .asc = 0x0c, /* WRITE ERROR */
+ .ascq = 0x0d, /* NOT ENOUGH UNSOLICITED DATA */
+ },
+ [TCM_INVALID_CDB_FIELD] = {
+ .key = ILLEGAL_REQUEST,
+ .asc = 0x24, /* INVALID FIELD IN CDB */
+ },
+ [TCM_INVALID_PARAMETER_LIST] = {
+ .key = ILLEGAL_REQUEST,
+ .asc = 0x26, /* INVALID FIELD IN PARAMETER LIST */
+ },
+ [TCM_TOO_MANY_TARGET_DESCS] = {
+ .key = ILLEGAL_REQUEST,
+ .asc = 0x26,
+ .ascq = 0x06, /* TOO MANY TARGET DESCRIPTORS */
+ },
+ [TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE] = {
+ .key = ILLEGAL_REQUEST,
+ .asc = 0x26,
+ .ascq = 0x07, /* UNSUPPORTED TARGET DESCRIPTOR TYPE CODE */
+ },
+ [TCM_TOO_MANY_SEGMENT_DESCS] = {
+ .key = ILLEGAL_REQUEST,
+ .asc = 0x26,
+ .ascq = 0x08, /* TOO MANY SEGMENT DESCRIPTORS */
+ },
+ [TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE] = {
+ .key = ILLEGAL_REQUEST,
+ .asc = 0x26,
+ .ascq = 0x09, /* UNSUPPORTED SEGMENT DESCRIPTOR TYPE CODE */
+ },
+ [TCM_PARAMETER_LIST_LENGTH_ERROR] = {
+ .key = ILLEGAL_REQUEST,
+ .asc = 0x1a, /* PARAMETER LIST LENGTH ERROR */
+ },
+ [TCM_UNEXPECTED_UNSOLICITED_DATA] = {
+ .key = ILLEGAL_REQUEST,
+ .asc = 0x0c, /* WRITE ERROR */
+ .ascq = 0x0c, /* UNEXPECTED_UNSOLICITED_DATA */
+ },
+ [TCM_SERVICE_CRC_ERROR] = {
+ .key = ABORTED_COMMAND,
+ .asc = 0x47, /* PROTOCOL SERVICE CRC ERROR */
+ .ascq = 0x05, /* N/A */
+ },
+ [TCM_SNACK_REJECTED] = {
+ .key = ABORTED_COMMAND,
+ .asc = 0x11, /* READ ERROR */
+ .ascq = 0x13, /* FAILED RETRANSMISSION REQUEST */
+ },
+ [TCM_WRITE_PROTECTED] = {
+ .key = DATA_PROTECT,
+ .asc = 0x27, /* WRITE PROTECTED */
+ },
+ [TCM_ADDRESS_OUT_OF_RANGE] = {
+ .key = ILLEGAL_REQUEST,
+ .asc = 0x21, /* LOGICAL BLOCK ADDRESS OUT OF RANGE */
+ },
+ [TCM_CHECK_CONDITION_UNIT_ATTENTION] = {
+ .key = UNIT_ATTENTION,
+ },
+ [TCM_MISCOMPARE_VERIFY] = {
+ .key = MISCOMPARE,
+ .asc = 0x1d, /* MISCOMPARE DURING VERIFY OPERATION */
+ .ascq = 0x00,
+ .add_sense_info = true,
+ },
+ [TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED] = {
+ .key = ABORTED_COMMAND,
+ .asc = 0x10,
+ .ascq = 0x01, /* LOGICAL BLOCK GUARD CHECK FAILED */
+ .add_sense_info = true,
+ },
+ [TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED] = {
+ .key = ABORTED_COMMAND,
+ .asc = 0x10,
+ .ascq = 0x02, /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */
+ .add_sense_info = true,
+ },
+ [TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED] = {
+ .key = ABORTED_COMMAND,
+ .asc = 0x10,
+ .ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
+ .add_sense_info = true,
+ },
+ [TCM_COPY_TARGET_DEVICE_NOT_REACHABLE] = {
+ .key = COPY_ABORTED,
+ .asc = 0x0d,
+ .ascq = 0x02, /* COPY TARGET DEVICE NOT REACHABLE */
+ },
+ [TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = {
/*
* Returning ILLEGAL REQUEST would cause immediate IO errors on
* Solaris initiators. Returning NOT READY instead means the
* operations will be retried a finite number of times and we
* can survive intermittent errors.
*/
- buffer[SPC_SENSE_KEY_OFFSET] = NOT_READY;
- /* LOGICAL UNIT COMMUNICATION FAILURE */
- buffer[SPC_ASC_KEY_OFFSET] = 0x08;
- break;
- }
- /*
- * This code uses linux/include/scsi/scsi.h SAM status codes!
- */
- cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
- /*
- * Automatically padded, this value is encoded in the fabric's
- * data_length response PDU containing the SCSI defined sense data.
- */
- cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
-
-after_reason:
- trace_target_cmd_complete(cmd);
- return cmd->se_tfo->queue_status(cmd);
-}
-EXPORT_SYMBOL(transport_send_check_condition_and_sense);
+ .key = NOT_READY,
+ .asc = 0x08, /* LOGICAL UNIT COMMUNICATION FAILURE */
+ },
+ [TCM_INSUFFICIENT_REGISTRATION_RESOURCES] = {
+ /*
+	 * From spc4r22 sections 5.7.7 and 5.7.8:
+	 * If a PERSISTENT RESERVE OUT command with a REGISTER service action
+	 * or a REGISTER AND IGNORE EXISTING KEY service action or
+	 * REGISTER AND MOVE service action is attempted,
+	 * but there are insufficient device server resources to complete the
+	 * operation, then the command shall be terminated with CHECK CONDITION
+	 * status, with the sense key set to ILLEGAL REQUEST, and the additional
+ * sense code set to INSUFFICIENT REGISTRATION RESOURCES.
+ */
+ .key = ILLEGAL_REQUEST,
+ .asc = 0x55,
+ .ascq = 0x04, /* INSUFFICIENT REGISTRATION RESOURCES */
+ },
+ [TCM_INVALID_FIELD_IN_COMMAND_IU] = {
+ .key = ILLEGAL_REQUEST,
+ .asc = 0x0e,
+ .ascq = 0x03, /* INVALID FIELD IN COMMAND INFORMATION UNIT */
+ },
+ [TCM_ALUA_TG_PT_STANDBY] = {
+ .key = NOT_READY,
+ .asc = 0x04,
+ .ascq = ASCQ_04H_ALUA_TG_PT_STANDBY,
+ },
+ [TCM_ALUA_TG_PT_UNAVAILABLE] = {
+ .key = NOT_READY,
+ .asc = 0x04,
+ .ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE,
+ },
+ [TCM_ALUA_STATE_TRANSITION] = {
+ .key = NOT_READY,
+ .asc = 0x04,
+ .ascq = ASCQ_04H_ALUA_STATE_TRANSITION,
+ },
+ [TCM_ALUA_OFFLINE] = {
+ .key = NOT_READY,
+ .asc = 0x04,
+ .ascq = ASCQ_04H_ALUA_OFFLINE,
+ },
+};
-int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
+/**
+ * translate_sense_reason - translate a sense reason into T10 key, asc and ascq
+ * @cmd: SCSI command in which the resulting sense buffer or SCSI status will
+ * be stored.
+ * @reason: LIO sense reason code. If this argument has the value
+ * TCM_CHECK_CONDITION_UNIT_ATTENTION, try to dequeue a unit attention. If
+ * dequeuing a unit attention fails due to multiple commands being processed
+ * concurrently, set the command status to BUSY.
+ */
+static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
{
- if (!(cmd->transport_state & CMD_T_ABORTED))
- return 0;
-
- if (!send_status || (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
- return 1;
-
- pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08x\n",
- cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd));
+ const struct sense_detail *sd;
+ u8 *buffer = cmd->sense_buffer;
+ int r = (__force int)reason;
+ u8 key, asc, ascq;
+ bool desc_format = target_sense_desc_format(cmd->se_dev);
- cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
- trace_target_cmd_complete(cmd);
- cmd->se_tfo->queue_status(cmd);
+ if (r < ARRAY_SIZE(sense_detail_table) && sense_detail_table[r].key)
+ sd = &sense_detail_table[r];
+ else
+ sd = &sense_detail_table[(__force int)
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE];
+
+ key = sd->key;
+ if (reason == TCM_CHECK_CONDITION_UNIT_ATTENTION) {
+ if (!core_scsi3_ua_for_check_condition(cmd, &key, &asc,
+ &ascq)) {
+ cmd->scsi_status = SAM_STAT_BUSY;
+ return;
+ }
+ } else {
+ WARN_ON_ONCE(sd->asc == 0);
+ asc = sd->asc;
+ ascq = sd->ascq;
+ }
- return 1;
+ cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
+ cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
+ cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
+ scsi_build_sense_buffer(desc_format, buffer, key, asc, ascq);
+ if (sd->add_sense_info)
+ WARN_ON_ONCE(scsi_set_sense_information(buffer,
+ cmd->scsi_sense_length,
+ cmd->sense_info) < 0);
}
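A worked example of the table-driven path (fixed-format sense, i.e. desc_format == false; the demo_ wrapper is an assumption): for TCM_WRITE_PROTECTED, scsi_build_sense_buffer() stores the response code, key, ASC and ASCQ at their SPC-defined offsets, replacing the hand-rolled byte stores in the deleted switch above.

	static void demo_write_protected_sense(void)
	{
		u8 buf[TRANSPORT_SENSE_BUFFER] = {};

		scsi_build_sense_buffer(false, buf, DATA_PROTECT, 0x27, 0x00);
		/* buf[0] == 0x70, buf[2] == DATA_PROTECT, buf[12] == 0x27, buf[13] == 0x00 */
	}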
-EXPORT_SYMBOL(transport_check_aborted_status);
-void transport_send_task_abort(struct se_cmd *cmd)
+int
+transport_send_check_condition_and_sense(struct se_cmd *cmd,
+ sense_reason_t reason, int from_transport)
{
unsigned long flags;
+ WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB);
+
spin_lock_irqsave(&cmd->t_state_lock, flags);
- if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION | SCF_SENT_DELAYED_TAS)) {
+ if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- return;
+ return 0;
}
+ cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- /*
- * If there are still expected incoming fabric WRITEs, we wait
- * until until they have completed before sending a TASK_ABORTED
- * response. This response with TASK_ABORTED status will be
- * queued back to fabric module by transport_check_aborted_status().
- */
- if (cmd->data_direction == DMA_TO_DEVICE) {
- if (cmd->se_tfo->write_pending_status(cmd) != 0) {
- cmd->transport_state |= CMD_T_ABORTED;
- smp_mb__after_atomic_inc();
- }
- }
- cmd->scsi_status = SAM_STAT_TASK_ABORTED;
+ if (!from_transport)
+ translate_sense_reason(cmd, reason);
- transport_lun_remove_cmd(cmd);
+ trace_target_cmd_complete(cmd);
+ return cmd->se_tfo->queue_status(cmd);
+}
+EXPORT_SYMBOL(transport_send_check_condition_and_sense);
- pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
- " ITT: 0x%08x\n", cmd->t_task_cdb[0],
- cmd->se_tfo->get_task_tag(cmd));
+/**
+ * target_send_busy - Send SCSI BUSY status back to the initiator
+ * @cmd: SCSI command for which to send a BUSY reply.
+ *
+ * Note: Only call this function if target_submit_cmd*() failed.
+ */
+int target_send_busy(struct se_cmd *cmd)
+{
+ WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB);
+ cmd->scsi_status = SAM_STAT_BUSY;
trace_target_cmd_complete(cmd);
- cmd->se_tfo->queue_status(cmd);
+ return cmd->se_tfo->queue_status(cmd);
}
+EXPORT_SYMBOL(target_send_busy);
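A hedged sketch of the intended call site (the demo_ wrapper, its parameters, and the target_submit_cmd() signature used here are assumptions; that signature has varied across kernel versions): the fabric attempts submission and only on failure falls back to a BUSY reply.

	static void demo_submit_or_busy(struct se_cmd *se_cmd, struct se_session *sess,
					unsigned char *cdb, unsigned char *sense,
					u64 unpacked_lun)
	{
		if (target_submit_cmd(se_cmd, sess, cdb, sense, unpacked_lun, 0,
				      TCM_SIMPLE_TAG, DMA_NONE, 0) < 0)
			target_send_busy(se_cmd);
	}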
static void target_tmr_work(struct work_struct *work)
{
@@ -2822,6 +3587,9 @@ static void target_tmr_work(struct work_struct *work)
struct se_tmr_req *tmr = cmd->se_tmr_req;
int ret;
+ if (cmd->transport_state & CMD_T_ABORTED)
+ goto aborted;
+
switch (tmr->function) {
case TMR_ABORT_TASK:
core_tmr_abort_task(dev, tmr, cmd->se_sess);
@@ -2835,6 +3603,10 @@ static void target_tmr_work(struct work_struct *work)
ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
TMR_FUNCTION_REJECTED;
+ if (tmr->response == TMR_FUNCTION_COMPLETE) {
+ target_dev_ua_allocate(dev, 0x29,
+ ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED);
+ }
break;
case TMR_TARGET_WARM_RESET:
tmr->response = TMR_FUNCTION_REJECTED;
@@ -2843,23 +3615,73 @@ static void target_tmr_work(struct work_struct *work)
tmr->response = TMR_FUNCTION_REJECTED;
break;
default:
- pr_err("Uknown TMR function: 0x%02x.\n",
+ pr_err("Unknown TMR function: 0x%02x.\n",
tmr->function);
tmr->response = TMR_FUNCTION_REJECTED;
break;
}
- cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
+ if (cmd->transport_state & CMD_T_ABORTED)
+ goto aborted;
+
cmd->se_tfo->queue_tm_rsp(cmd);
+ transport_lun_remove_cmd(cmd);
transport_cmd_check_stop_to_fabric(cmd);
+ return;
+
+aborted:
+ target_handle_abort(cmd);
}
int transport_generic_handle_tmr(
struct se_cmd *cmd)
{
+ unsigned long flags;
+ bool aborted = false;
+
+ spin_lock_irqsave(&cmd->se_dev->se_tmr_lock, flags);
+ list_add_tail(&cmd->se_tmr_req->tmr_list, &cmd->se_dev->dev_tmr_list);
+ spin_unlock_irqrestore(&cmd->se_dev->se_tmr_lock, flags);
+
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ if (cmd->transport_state & CMD_T_ABORTED) {
+ aborted = true;
+ } else {
+ cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
+ cmd->transport_state |= CMD_T_ACTIVE;
+ }
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+ if (aborted) {
+ pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d ref_tag: %llu tag: %llu\n",
+ cmd->se_tmr_req->function,
+ cmd->se_tmr_req->ref_task_tag, cmd->tag);
+ target_handle_abort(cmd);
+ return 0;
+ }
+
INIT_WORK(&cmd->work, target_tmr_work);
- queue_work(cmd->se_dev->tmr_wq, &cmd->work);
+ schedule_work(&cmd->work);
return 0;
}
EXPORT_SYMBOL(transport_generic_handle_tmr);
+
+bool
+target_check_wce(struct se_device *dev)
+{
+ bool wce = false;
+
+ if (dev->transport->get_write_cache)
+ wce = dev->transport->get_write_cache(dev);
+ else if (dev->dev_attrib.emulate_write_cache > 0)
+ wce = true;
+
+ return wce;
+}
+
+bool
+target_check_fua(struct se_device *dev)
+{
+ return target_check_wce(dev) && dev->dev_attrib.emulate_fua_write > 0;
+}
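A sketch of how a backend might consume these helpers (the demo_ name is an assumption): a write must reach stable storage synchronously when the device has no volatile write cache, or when the initiator set FUA and the device honours it.

	static bool demo_write_needs_sync(struct se_cmd *cmd)
	{
		struct se_device *dev = cmd->se_dev;

		return !target_check_wce(dev) ||
		       ((cmd->se_cmd_flags & SCF_FUA) && target_check_fua(dev));
	}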
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
index bf0e390ce2d7..4276690fb6cb 100644
--- a/drivers/target/target_core_ua.c
+++ b/drivers/target/target_core_ua.c
@@ -1,36 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* Filename: target_core_ua.c
*
* This file contains logic for SPC-3 Unit Attention emulation
*
- * (c) Copyright 2009-2012 RisingTide Systems LLC.
+ * (c) Copyright 2009-2013 Datera, Inc.
*
* Nicholas A. Bellinger <nab@kernel.org>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
******************************************************************************/
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
-#include <target/target_core_configfs.h>
#include "target_core_internal.h"
#include "target_core_alua.h"
@@ -51,9 +36,17 @@ target_scsi3_ua_check(struct se_cmd *cmd)
if (!nacl)
return 0;
- deve = nacl->device_list[cmd->orig_fe_lun];
- if (!atomic_read(&deve->ua_count))
+ rcu_read_lock();
+ deve = target_nacl_find_deve(nacl, cmd->orig_fe_lun);
+ if (!deve) {
+ rcu_read_unlock();
+ return 0;
+ }
+ if (list_empty_careful(&deve->ua_list)) {
+ rcu_read_unlock();
return 0;
+ }
+ rcu_read_unlock();
/*
* From sam4r14, section 5.14 Unit attention condition:
*
@@ -80,34 +73,22 @@ target_scsi3_ua_check(struct se_cmd *cmd)
}
int core_scsi3_ua_allocate(
- struct se_node_acl *nacl,
- u32 unpacked_lun,
+ struct se_dev_entry *deve,
u8 asc,
u8 ascq)
{
- struct se_dev_entry *deve;
struct se_ua *ua, *ua_p, *ua_tmp;
- /*
- * PASSTHROUGH OPS
- */
- if (!nacl)
- return -EINVAL;
ua = kmem_cache_zalloc(se_ua_cache, GFP_ATOMIC);
if (!ua) {
pr_err("Unable to allocate struct se_ua\n");
return -ENOMEM;
}
- INIT_LIST_HEAD(&ua->ua_dev_list);
INIT_LIST_HEAD(&ua->ua_nacl_list);
- ua->ua_nacl = nacl;
ua->ua_asc = asc;
ua->ua_ascq = ascq;
- spin_lock_irq(&nacl->device_list_lock);
- deve = nacl->device_list[unpacked_lun];
-
spin_lock(&deve->ua_lock);
list_for_each_entry_safe(ua_p, ua_tmp, &deve->ua_list, ua_nacl_list) {
/*
@@ -115,7 +96,6 @@ int core_scsi3_ua_allocate(
*/
if ((ua_p->ua_asc == asc) && (ua_p->ua_ascq == ascq)) {
spin_unlock(&deve->ua_lock);
- spin_unlock_irq(&nacl->device_list_lock);
kmem_cache_free(se_ua_cache, ua);
return 0;
}
@@ -160,26 +140,38 @@ int core_scsi3_ua_allocate(
list_add_tail(&ua->ua_nacl_list,
&deve->ua_list);
spin_unlock(&deve->ua_lock);
- spin_unlock_irq(&nacl->device_list_lock);
- atomic_inc(&deve->ua_count);
- smp_mb__after_atomic_inc();
return 0;
}
list_add_tail(&ua->ua_nacl_list, &deve->ua_list);
spin_unlock(&deve->ua_lock);
- spin_unlock_irq(&nacl->device_list_lock);
- pr_debug("[%s]: Allocated UNIT ATTENTION, mapped LUN: %u, ASC:"
- " 0x%02x, ASCQ: 0x%02x\n",
- nacl->se_tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
+ pr_debug("Allocated UNIT ATTENTION, mapped LUN: %llu, ASC:"
+ " 0x%02x, ASCQ: 0x%02x\n", deve->mapped_lun,
asc, ascq);
- atomic_inc(&deve->ua_count);
- smp_mb__after_atomic_inc();
return 0;
}
+void target_ua_allocate_lun(struct se_node_acl *nacl,
+ u32 unpacked_lun, u8 asc, u8 ascq)
+{
+ struct se_dev_entry *deve;
+
+ if (!nacl)
+ return;
+
+ rcu_read_lock();
+ deve = target_nacl_find_deve(nacl, unpacked_lun);
+ if (!deve) {
+ rcu_read_unlock();
+ return;
+ }
+
+ core_scsi3_ua_allocate(deve, asc, ascq);
+ rcu_read_unlock();
+}
+
void core_scsi3_ua_release_all(
struct se_dev_entry *deve)
{
@@ -189,17 +181,17 @@ void core_scsi3_ua_release_all(
list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) {
list_del(&ua->ua_nacl_list);
kmem_cache_free(se_ua_cache, ua);
-
- atomic_dec(&deve->ua_count);
- smp_mb__after_atomic_dec();
}
spin_unlock(&deve->ua_lock);
}
-void core_scsi3_ua_for_check_condition(
- struct se_cmd *cmd,
- u8 *asc,
- u8 *ascq)
+/*
+ * Dequeue a unit attention from the unit attention list. This function
+ * returns true if the dequeuing succeeded and if *@key, *@asc and *@ascq have
+ * been set.
+ */
+bool core_scsi3_ua_for_check_condition(struct se_cmd *cmd, u8 *key, u8 *asc,
+ u8 *ascq)
{
struct se_device *dev = cmd->se_dev;
struct se_dev_entry *deve;
@@ -207,20 +199,26 @@ void core_scsi3_ua_for_check_condition(
struct se_node_acl *nacl;
struct se_ua *ua = NULL, *ua_p;
int head = 1;
+ bool dev_ua_intlck_clear = (dev->dev_attrib.emulate_ua_intlck_ctrl
+ == TARGET_UA_INTLCK_CTRL_CLEAR);
- if (!sess)
- return;
+ if (WARN_ON_ONCE(!sess))
+ return false;
nacl = sess->se_node_acl;
- if (!nacl)
- return;
+ if (WARN_ON_ONCE(!nacl))
+ return false;
- spin_lock_irq(&nacl->device_list_lock);
- deve = nacl->device_list[cmd->orig_fe_lun];
- if (!atomic_read(&deve->ua_count)) {
- spin_unlock_irq(&nacl->device_list_lock);
- return;
+ rcu_read_lock();
+ deve = target_nacl_find_deve(nacl, cmd->orig_fe_lun);
+ if (!deve) {
+ rcu_read_unlock();
+ *key = ILLEGAL_REQUEST;
+ *asc = 0x25; /* LOGICAL UNIT NOT SUPPORTED */
+ *ascq = 0;
+ return true;
}
+ *key = UNIT_ATTENTION;
/*
* The highest priority Unit Attentions are placed at the head of the
* struct se_dev_entry->ua_list, and will be returned in CHECK_CONDITION +
@@ -233,7 +231,7 @@ void core_scsi3_ua_for_check_condition(
* highest priority UNIT_ATTENTION and ASC/ASCQ without
* clearing it.
*/
- if (dev->dev_attrib.emulate_ua_intlck_ctrl != 0) {
+ if (!dev_ua_intlck_clear) {
*asc = ua->ua_asc;
*ascq = ua->ua_ascq;
break;
@@ -250,20 +248,19 @@ void core_scsi3_ua_for_check_condition(
}
list_del(&ua->ua_nacl_list);
kmem_cache_free(se_ua_cache, ua);
-
- atomic_dec(&deve->ua_count);
- smp_mb__after_atomic_dec();
}
spin_unlock(&deve->ua_lock);
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_unlock();
pr_debug("[%s]: %s UNIT ATTENTION condition with"
- " INTLCK_CTRL: %d, mapped LUN: %u, got CDB: 0x%02x"
+ " INTLCK_CTRL: %d, mapped LUN: %llu, got CDB: 0x%02x"
" reported ASC: 0x%02x, ASCQ: 0x%02x\n",
- nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
- (dev->dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" :
- "Releasing", dev->dev_attrib.emulate_ua_intlck_ctrl,
+ nacl->se_tpg->se_tpg_tfo->fabric_name,
+ dev_ua_intlck_clear ? "Releasing" : "Reporting",
+ dev->dev_attrib.emulate_ua_intlck_ctrl,
cmd->orig_fe_lun, cmd->t_task_cdb[0], *asc, *ascq);
+
+ return head == 0;
}
int core_scsi3_ua_clear_for_request_sense(
@@ -284,10 +281,14 @@ int core_scsi3_ua_clear_for_request_sense(
if (!nacl)
return -EINVAL;
- spin_lock_irq(&nacl->device_list_lock);
- deve = nacl->device_list[cmd->orig_fe_lun];
- if (!atomic_read(&deve->ua_count)) {
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_lock();
+ deve = target_nacl_find_deve(nacl, cmd->orig_fe_lun);
+ if (!deve) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+ if (list_empty_careful(&deve->ua_list)) {
+ rcu_read_unlock();
return -EPERM;
}
/*
@@ -309,16 +310,13 @@ int core_scsi3_ua_clear_for_request_sense(
}
list_del(&ua->ua_nacl_list);
kmem_cache_free(se_ua_cache, ua);
-
- atomic_dec(&deve->ua_count);
- smp_mb__after_atomic_dec();
}
spin_unlock(&deve->ua_lock);
- spin_unlock_irq(&nacl->device_list_lock);
+ rcu_read_unlock();
pr_debug("[%s]: Released UNIT ATTENTION condition, mapped"
- " LUN: %u, got REQUEST_SENSE reported ASC: 0x%02x,"
- " ASCQ: 0x%02x\n", nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
+ " LUN: %llu, got REQUEST_SENSE reported ASC: 0x%02x,"
+ " ASCQ: 0x%02x\n", nacl->se_tpg->se_tpg_tfo->fabric_name,
cmd->orig_fe_lun, *asc, *ascq);
return (head) ? -EPERM : 0;
diff --git a/drivers/target/target_core_ua.h b/drivers/target/target_core_ua.h
index 0204952fe4d3..76487c9be090 100644
--- a/drivers/target/target_core_ua.h
+++ b/drivers/target/target_core_ua.h
@@ -1,4 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef TARGET_CORE_UA_H
+#define TARGET_CORE_UA_H
+
+#include <target/target_core_base.h>
/*
* From spc4r17, Table D.1: ASC and ASCQ Assignment
@@ -19,17 +23,22 @@
#define ASCQ_2AH_RESERVATIONS_RELEASED 0x04
#define ASCQ_2AH_REGISTRATIONS_PREEMPTED 0x05
#define ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED 0x06
-#define ASCQ_2AH_IMPLICT_ASYMMETRIC_ACCESS_STATE_TRANSITION_FAILED 0x07
+#define ASCQ_2AH_IMPLICIT_ASYMMETRIC_ACCESS_STATE_TRANSITION_FAILED 0x07
#define ASCQ_2AH_PRIORITY_CHANGED 0x08
#define ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS 0x09
+#define ASCQ_3FH_INQUIRY_DATA_HAS_CHANGED 0x03
+#define ASCQ_3FH_REPORTED_LUNS_DATA_HAS_CHANGED 0x0E
+
extern struct kmem_cache *se_ua_cache;
extern sense_reason_t target_scsi3_ua_check(struct se_cmd *);
-extern int core_scsi3_ua_allocate(struct se_node_acl *, u32, u8, u8);
+extern int core_scsi3_ua_allocate(struct se_dev_entry *, u8, u8);
+extern void target_ua_allocate_lun(struct se_node_acl *, u32, u8, u8);
extern void core_scsi3_ua_release_all(struct se_dev_entry *);
-extern void core_scsi3_ua_for_check_condition(struct se_cmd *, u8 *, u8 *);
+extern bool core_scsi3_ua_for_check_condition(struct se_cmd *, u8 *, u8 *,
+ u8 *);
extern int core_scsi3_ua_clear_for_request_sense(struct se_cmd *,
u8 *, u8 *);
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
new file mode 100644
index 000000000000..3fd963612775
--- /dev/null
+++ b/drivers/target/target_core_user.c
@@ -0,0 +1,3393 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Shaohua Li <shli@kernel.org>
+ * Copyright (C) 2014 Red Hat, Inc.
+ * Copyright (C) 2015 Arrikto, Inc.
+ * Copyright (C) 2017 Chinamobile, Inc.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/parser.h>
+#include <linux/vmalloc.h>
+#include <linux/uio_driver.h>
+#include <linux/xarray.h>
+#include <linux/stringify.h>
+#include <linux/bitops.h>
+#include <linux/highmem.h>
+#include <linux/configfs.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/pagemap.h>
+#include <net/genetlink.h>
+#include <scsi/scsi_common.h>
+#include <scsi/scsi_proto.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+#include <target/target_core_backend.h>
+
+#include <linux/target_core_user.h>
+
+/**
+ * DOC: Userspace I/O
+ * Userspace I/O
+ * -------------
+ *
+ * Define a shared-memory interface for LIO to pass SCSI commands and
+ * data to userspace for processing. This is to allow backends that
+ * are too complex for in-kernel support to be possible.
+ *
+ * It uses the UIO framework to do a lot of the device-creation and
+ * introspection work for us.
+ *
+ * See the .h file for how the ring is laid out. Note that while the
+ * command ring is defined, the particulars of the data area are
+ * not. Offset values in the command entry point to other locations
+ * internal to the mmap-ed area. There is separate space outside the
+ * command ring for data buffers. This leaves maximum flexibility for
+ * moving buffer allocations, or even page flipping or other
+ * allocation techniques, without altering the command ring layout.
+ *
+ * SECURITY:
+ * The user process must be assumed to be malicious. There's no way to
+ * prevent it breaking the command ring protocol if it wants, but in
+ * order to prevent other issues we must only ever read *data* from
+ * the shared memory area, not offsets or sizes. This applies to
+ * command ring entries as well as the mailbox. Extra code needed for
+ * this may have a 'UAM' comment.
+ */
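The 'UAM' rule amounts to snapshotting anything size- or offset-like out of the shared area exactly once, then validating and using only the private copy. An illustration (the helper name is an assumption; struct tcmu_cmd_entry comes from the uapi header):

	static void demo_uam_snapshot(struct tcmu_cmd_entry *shared,
				      struct tcmu_cmd_entry *priv)
	{
		*priv = *shared;	/* copy once, while userspace may still race */
		/* From here on, validate and use only the fields in *priv. */
	}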
+
+#define TCMU_TIME_OUT (30 * MSEC_PER_SEC)
+
+/* For the mailbox plus command ring, the size is fixed at 8 MB */
+#define MB_CMDR_SIZE_DEF (8 * 1024 * 1024)
+/* Offset of cmd ring is size of mailbox */
+#define CMDR_OFF ((__u32)sizeof(struct tcmu_mailbox))
+#define CMDR_SIZE_DEF (MB_CMDR_SIZE_DEF - CMDR_OFF)
+
+/*
+ * For data area, the default block size is PAGE_SIZE and
+ * the default total size is 256K * PAGE_SIZE.
+ */
+#define DATA_PAGES_PER_BLK_DEF 1
+#define DATA_AREA_PAGES_DEF (256 * 1024)
+
+#define TCMU_MBS_TO_PAGES(_mbs) ((size_t)_mbs << (20 - PAGE_SHIFT))
+#define TCMU_PAGES_TO_MBS(_pages) (_pages >> (20 - PAGE_SHIFT))
+
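Worked example, assuming 4 KiB pages (PAGE_SHIFT == 12): TCMU_MBS_TO_PAGES(2) == 2 << (20 - 12) == 512 pages, and TCMU_PAGES_TO_MBS(512) == 512 >> 8 == 2, so the two macros round-trip for whole-megabyte sizes.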
+/*
+ * Default number of global data pages (512K * PAGE_SIZE)
+ * above which the unmap thread will be started.
+ */
+#define TCMU_GLOBAL_MAX_PAGES_DEF (512 * 1024)
+
+static u8 tcmu_kern_cmd_reply_supported;
+static u8 tcmu_netlink_blocked;
+
+static struct device *tcmu_root_device;
+
+struct tcmu_hba {
+ u32 host_id;
+};
+
+#define TCMU_CONFIG_LEN 256
+
+static DEFINE_MUTEX(tcmu_nl_cmd_mutex);
+static LIST_HEAD(tcmu_nl_cmd_list);
+
+struct tcmu_dev;
+
+struct tcmu_nl_cmd {
+ /* wake up thread waiting for reply */
+ struct completion complete;
+ struct list_head nl_list;
+ struct tcmu_dev *udev;
+ int cmd;
+ int status;
+};
+
+struct tcmu_dev {
+ struct list_head node;
+ struct kref kref;
+
+ struct se_device se_dev;
+ struct se_dev_plug se_plug;
+
+ char *name;
+ struct se_hba *hba;
+
+#define TCMU_DEV_BIT_OPEN 0
+#define TCMU_DEV_BIT_BROKEN 1
+#define TCMU_DEV_BIT_BLOCKED 2
+#define TCMU_DEV_BIT_TMR_NOTIFY 3
+#define TCMU_DEV_BIT_PLUGGED 4
+ unsigned long flags;
+
+ struct uio_info uio_info;
+
+ struct inode *inode;
+
+ uint64_t dev_size;
+
+ struct tcmu_mailbox *mb_addr;
+ void *cmdr;
+ u32 cmdr_size;
+ u32 cmdr_last_cleaned;
+	/*
+	 * Offset of the data area from the start of the mailbox; add
+	 * data_off to mb_addr to get the address.
+	 */
+ size_t data_off;
+ int data_area_mb;
+ uint32_t max_blocks;
+ size_t mmap_pages;
+
+ struct mutex cmdr_lock;
+ struct list_head qfull_queue;
+ struct list_head tmr_queue;
+
+ uint32_t dbi_max;
+ uint32_t dbi_thresh;
+ unsigned long *data_bitmap;
+ struct xarray data_pages;
+ uint32_t data_pages_per_blk;
+ uint32_t data_blk_size;
+
+ struct xarray commands;
+
+ struct timer_list cmd_timer;
+ unsigned int cmd_time_out;
+ struct list_head inflight_queue;
+
+ struct timer_list qfull_timer;
+ int qfull_time_out;
+
+ struct list_head timedout_entry;
+
+ struct tcmu_nl_cmd curr_nl_cmd;
+
+ char dev_config[TCMU_CONFIG_LEN];
+
+ int nl_reply_supported;
+};
+
+#define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev)
+
+struct tcmu_cmd {
+ struct se_cmd *se_cmd;
+ struct tcmu_dev *tcmu_dev;
+ struct list_head queue_entry;
+
+ uint16_t cmd_id;
+
+	/*
+	 * Can't use se_cmd when cleaning up expired cmds, because if the
+	 * cmd has been completed then accessing se_cmd is off limits.
+	 */
+ uint32_t dbi_cnt;
+ uint32_t dbi_bidi_cnt;
+ uint32_t dbi_cur;
+ uint32_t *dbi;
+
+ uint32_t data_len_bidi;
+
+ unsigned long deadline;
+
+#define TCMU_CMD_BIT_EXPIRED 0
+#define TCMU_CMD_BIT_KEEP_BUF 1
+ unsigned long flags;
+};
+
+struct tcmu_tmr {
+ struct list_head queue_entry;
+
+ uint8_t tmr_type;
+ uint32_t tmr_cmd_cnt;
+ int16_t tmr_cmd_ids[] __counted_by(tmr_cmd_cnt);
+};
+
+/*
+ * To avoid deadlock, the mutex lock order should always be:
+ *
+ * mutex_lock(&root_udev_mutex);
+ * ...
+ * mutex_lock(&tcmu_dev->cmdr_lock);
+ * mutex_unlock(&tcmu_dev->cmdr_lock);
+ * ...
+ * mutex_unlock(&root_udev_mutex);
+ */
+static DEFINE_MUTEX(root_udev_mutex);
+static LIST_HEAD(root_udev);
+
+static DEFINE_SPINLOCK(timed_out_udevs_lock);
+static LIST_HEAD(timed_out_udevs);
+
+static struct kmem_cache *tcmu_cmd_cache;
+
+static atomic_t global_page_count = ATOMIC_INIT(0);
+static struct delayed_work tcmu_unmap_work;
+static int tcmu_global_max_pages = TCMU_GLOBAL_MAX_PAGES_DEF;
+
+static int tcmu_set_global_max_data_area(const char *str,
+ const struct kernel_param *kp)
+{
+ int ret, max_area_mb;
+
+ ret = kstrtoint(str, 10, &max_area_mb);
+ if (ret)
+ return -EINVAL;
+
+ if (max_area_mb <= 0) {
+ pr_err("global_max_data_area must be larger than 0.\n");
+ return -EINVAL;
+ }
+
+ tcmu_global_max_pages = TCMU_MBS_TO_PAGES(max_area_mb);
+ if (atomic_read(&global_page_count) > tcmu_global_max_pages)
+ schedule_delayed_work(&tcmu_unmap_work, 0);
+ else
+ cancel_delayed_work_sync(&tcmu_unmap_work);
+
+ return 0;
+}
+
+static int tcmu_get_global_max_data_area(char *buffer,
+ const struct kernel_param *kp)
+{
+ return sprintf(buffer, "%d\n", TCMU_PAGES_TO_MBS(tcmu_global_max_pages));
+}
+
+static const struct kernel_param_ops tcmu_global_max_data_area_op = {
+ .set = tcmu_set_global_max_data_area,
+ .get = tcmu_get_global_max_data_area,
+};
+
+module_param_cb(global_max_data_area_mb, &tcmu_global_max_data_area_op, NULL,
+ S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(global_max_data_area_mb,
+ "Max MBs allowed to be allocated to all the tcmu device's "
+ "data areas.");
+
+static int tcmu_get_block_netlink(char *buffer,
+ const struct kernel_param *kp)
+{
+ return sprintf(buffer, "%s\n", tcmu_netlink_blocked ?
+ "blocked" : "unblocked");
+}
+
+static int tcmu_set_block_netlink(const char *str,
+ const struct kernel_param *kp)
+{
+ int ret;
+ u8 val;
+
+ ret = kstrtou8(str, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val > 1) {
+ pr_err("Invalid block netlink value %u\n", val);
+ return -EINVAL;
+ }
+
+ tcmu_netlink_blocked = val;
+ return 0;
+}
+
+static const struct kernel_param_ops tcmu_block_netlink_op = {
+ .set = tcmu_set_block_netlink,
+ .get = tcmu_get_block_netlink,
+};
+
+module_param_cb(block_netlink, &tcmu_block_netlink_op, NULL, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(block_netlink, "Block new netlink commands.");
+
+static int tcmu_fail_netlink_cmd(struct tcmu_nl_cmd *nl_cmd)
+{
+ struct tcmu_dev *udev = nl_cmd->udev;
+
+ if (!tcmu_netlink_blocked) {
+ pr_err("Could not reset device's netlink interface. Netlink is not blocked.\n");
+ return -EBUSY;
+ }
+
+ if (nl_cmd->cmd != TCMU_CMD_UNSPEC) {
+ pr_debug("Aborting nl cmd %d on %s\n", nl_cmd->cmd, udev->name);
+ nl_cmd->status = -EINTR;
+ list_del(&nl_cmd->nl_list);
+ complete(&nl_cmd->complete);
+ }
+ return 0;
+}
+
+static int tcmu_set_reset_netlink(const char *str,
+ const struct kernel_param *kp)
+{
+ struct tcmu_nl_cmd *nl_cmd, *tmp_cmd;
+ int ret;
+ u8 val;
+
+ ret = kstrtou8(str, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val != 1) {
+ pr_err("Invalid reset netlink value %u\n", val);
+ return -EINVAL;
+ }
+
+ mutex_lock(&tcmu_nl_cmd_mutex);
+ list_for_each_entry_safe(nl_cmd, tmp_cmd, &tcmu_nl_cmd_list, nl_list) {
+ ret = tcmu_fail_netlink_cmd(nl_cmd);
+ if (ret)
+ break;
+ }
+ mutex_unlock(&tcmu_nl_cmd_mutex);
+
+ return ret;
+}
+
+static const struct kernel_param_ops tcmu_reset_netlink_op = {
+ .set = tcmu_set_reset_netlink,
+};
+
+module_param_cb(reset_netlink, &tcmu_reset_netlink_op, NULL, S_IWUSR);
+MODULE_PARM_DESC(reset_netlink, "Reset netlink commands.");
+
+/* multicast group */
+enum tcmu_multicast_groups {
+ TCMU_MCGRP_CONFIG,
+};
+
+static const struct genl_multicast_group tcmu_mcgrps[] = {
+ [TCMU_MCGRP_CONFIG] = { .name = "config", },
+};
+
+static const struct nla_policy tcmu_attr_policy[TCMU_ATTR_MAX + 1] = {
+ [TCMU_ATTR_DEVICE] = { .type = NLA_STRING },
+ [TCMU_ATTR_MINOR] = { .type = NLA_U32 },
+ [TCMU_ATTR_CMD_STATUS] = { .type = NLA_S32 },
+ [TCMU_ATTR_DEVICE_ID] = { .type = NLA_U32 },
+ [TCMU_ATTR_SUPP_KERN_CMD_REPLY] = { .type = NLA_U8 },
+};
+
+static int tcmu_genl_cmd_done(struct genl_info *info, int completed_cmd)
+{
+ struct tcmu_dev *udev = NULL;
+ struct tcmu_nl_cmd *nl_cmd;
+ int dev_id, rc, ret = 0;
+
+ if (!info->attrs[TCMU_ATTR_CMD_STATUS] ||
+ !info->attrs[TCMU_ATTR_DEVICE_ID]) {
+ printk(KERN_ERR "TCMU_ATTR_CMD_STATUS or TCMU_ATTR_DEVICE_ID not set, doing nothing\n");
+ return -EINVAL;
+ }
+
+ dev_id = nla_get_u32(info->attrs[TCMU_ATTR_DEVICE_ID]);
+ rc = nla_get_s32(info->attrs[TCMU_ATTR_CMD_STATUS]);
+
+ mutex_lock(&tcmu_nl_cmd_mutex);
+ list_for_each_entry(nl_cmd, &tcmu_nl_cmd_list, nl_list) {
+ if (nl_cmd->udev->se_dev.dev_index == dev_id) {
+ udev = nl_cmd->udev;
+ break;
+ }
+ }
+
+ if (!udev) {
+ pr_err("tcmu nl cmd %u/%d completion could not find device with dev id %u.\n",
+ completed_cmd, rc, dev_id);
+ ret = -ENODEV;
+ goto unlock;
+ }
+ list_del(&nl_cmd->nl_list);
+
+ pr_debug("%s genl cmd done got id %d curr %d done %d rc %d stat %d\n",
+ udev->name, dev_id, nl_cmd->cmd, completed_cmd, rc,
+ nl_cmd->status);
+
+ if (nl_cmd->cmd != completed_cmd) {
+ pr_err("Mismatched commands on %s (Expecting reply for %d. Current %d).\n",
+ udev->name, completed_cmd, nl_cmd->cmd);
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ nl_cmd->status = rc;
+ complete(&nl_cmd->complete);
+unlock:
+ mutex_unlock(&tcmu_nl_cmd_mutex);
+ return ret;
+}
+
+static int tcmu_genl_rm_dev_done(struct sk_buff *skb, struct genl_info *info)
+{
+ return tcmu_genl_cmd_done(info, TCMU_CMD_REMOVED_DEVICE);
+}
+
+static int tcmu_genl_add_dev_done(struct sk_buff *skb, struct genl_info *info)
+{
+ return tcmu_genl_cmd_done(info, TCMU_CMD_ADDED_DEVICE);
+}
+
+static int tcmu_genl_reconfig_dev_done(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ return tcmu_genl_cmd_done(info, TCMU_CMD_RECONFIG_DEVICE);
+}
+
+static int tcmu_genl_set_features(struct sk_buff *skb, struct genl_info *info)
+{
+ if (info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]) {
+ tcmu_kern_cmd_reply_supported =
+ nla_get_u8(info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]);
+ printk(KERN_INFO "tcmu daemon: command reply support %u.\n",
+ tcmu_kern_cmd_reply_supported);
+ }
+
+ return 0;
+}
+
+static const struct genl_small_ops tcmu_genl_ops[] = {
+ {
+ .cmd = TCMU_CMD_SET_FEATURES,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .flags = GENL_ADMIN_PERM,
+ .doit = tcmu_genl_set_features,
+ },
+ {
+ .cmd = TCMU_CMD_ADDED_DEVICE_DONE,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .flags = GENL_ADMIN_PERM,
+ .doit = tcmu_genl_add_dev_done,
+ },
+ {
+ .cmd = TCMU_CMD_REMOVED_DEVICE_DONE,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .flags = GENL_ADMIN_PERM,
+ .doit = tcmu_genl_rm_dev_done,
+ },
+ {
+ .cmd = TCMU_CMD_RECONFIG_DEVICE_DONE,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .flags = GENL_ADMIN_PERM,
+ .doit = tcmu_genl_reconfig_dev_done,
+ },
+};
+
+/* Our generic netlink family */
+static struct genl_family tcmu_genl_family __ro_after_init = {
+ .module = THIS_MODULE,
+ .hdrsize = 0,
+ .name = "TCM-USER",
+ .version = 2,
+ .maxattr = TCMU_ATTR_MAX,
+ .policy = tcmu_attr_policy,
+ .mcgrps = tcmu_mcgrps,
+ .n_mcgrps = ARRAY_SIZE(tcmu_mcgrps),
+ .netnsok = true,
+ .small_ops = tcmu_genl_ops,
+ .n_small_ops = ARRAY_SIZE(tcmu_genl_ops),
+ .resv_start_op = TCMU_CMD_SET_FEATURES + 1,
+};
+
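+/*
+ * Helpers for walking a command's array of data block indices (dbi);
+ * dbi_cur is the cursor into that array.
+ */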
+#define tcmu_cmd_set_dbi_cur(cmd, index) ((cmd)->dbi_cur = (index))
+#define tcmu_cmd_reset_dbi_cur(cmd) tcmu_cmd_set_dbi_cur(cmd, 0)
+#define tcmu_cmd_set_dbi(cmd, index) ((cmd)->dbi[(cmd)->dbi_cur++] = (index))
+#define tcmu_cmd_get_dbi(cmd) ((cmd)->dbi[(cmd)->dbi_cur++])
+
+static void tcmu_cmd_free_data(struct tcmu_cmd *tcmu_cmd, uint32_t len)
+{
+ struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
+ uint32_t i;
+
+ for (i = 0; i < len; i++)
+ clear_bit(tcmu_cmd->dbi[i], udev->data_bitmap);
+}
+
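+/*
+ * Find a free data block (dbi) below dbi_thresh and make sure all of its
+ * backing pages exist in the data_pages xarray, allocating any missing
+ * ones. dpi is the index of the block's first page in that xarray
+ * (dpi = dbi * data_pages_per_blk). Returns the dbi, or -1 if there is no
+ * free block or not all pages could be allocated.
+ */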
+static inline int tcmu_get_empty_block(struct tcmu_dev *udev,
+ struct tcmu_cmd *tcmu_cmd,
+ int prev_dbi, int length, int *iov_cnt)
+{
+ XA_STATE(xas, &udev->data_pages, 0);
+ struct page *page;
+ int i, cnt, dbi, dpi;
+ int page_cnt = DIV_ROUND_UP(length, PAGE_SIZE);
+
+ dbi = find_first_zero_bit(udev->data_bitmap, udev->dbi_thresh);
+ if (dbi == udev->dbi_thresh)
+ return -1;
+
+ dpi = dbi * udev->data_pages_per_blk;
+ /* Count the number of already allocated pages */
+ xas_set(&xas, dpi);
+ rcu_read_lock();
+ for (cnt = 0; xas_next(&xas) && cnt < page_cnt;)
+ cnt++;
+ rcu_read_unlock();
+
+ for (i = cnt; i < page_cnt; i++) {
+ /* try to get a new zeroed page from the mm */
+ page = alloc_page(GFP_NOIO | __GFP_ZERO);
+ if (!page)
+ break;
+
+ if (xa_store(&udev->data_pages, dpi + i, page, GFP_NOIO)) {
+ __free_page(page);
+ break;
+ }
+ }
+ if (atomic_add_return(i - cnt, &global_page_count) >
+ tcmu_global_max_pages)
+ schedule_delayed_work(&tcmu_unmap_work, 0);
+
+ if (i && dbi > udev->dbi_max)
+ udev->dbi_max = dbi;
+
+ set_bit(dbi, udev->data_bitmap);
+ tcmu_cmd_set_dbi(tcmu_cmd, dbi);
+
+ if (dbi != prev_dbi + 1)
+ *iov_cnt += 1;
+
+ return i == page_cnt ? dbi : -1;
+}
+
+static int tcmu_get_empty_blocks(struct tcmu_dev *udev,
+ struct tcmu_cmd *tcmu_cmd, int length)
+{
+ /*
+ * Start with dbi = -2: prev_dbi + 1 is then never a valid dbi, so
+ * the first block always starts a new iov.
+ */
+ int dbi = -2;
+ int blk_data_len, iov_cnt = 0;
+ uint32_t blk_size = udev->data_blk_size;
+
+ for (; length > 0; length -= blk_size) {
+ blk_data_len = min_t(uint32_t, length, blk_size);
+ dbi = tcmu_get_empty_block(udev, tcmu_cmd, dbi, blk_data_len,
+ &iov_cnt);
+ if (dbi < 0)
+ return -1;
+ }
+ return iov_cnt;
+}
+
+static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd)
+{
+ kfree(tcmu_cmd->dbi);
+ kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
+}
+
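+/*
+ * Compute how many data blocks the command needs: dbi_cnt blocks in total,
+ * of which the last dbi_bidi_cnt cover the Data-In buffer of a BIDI command.
+ */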
+static inline void tcmu_cmd_set_block_cnts(struct tcmu_cmd *cmd)
+{
+ int i, len;
+ struct se_cmd *se_cmd = cmd->se_cmd;
+ uint32_t blk_size = cmd->tcmu_dev->data_blk_size;
+
+ cmd->dbi_cnt = DIV_ROUND_UP(se_cmd->data_length, blk_size);
+
+ if (se_cmd->se_cmd_flags & SCF_BIDI) {
+ BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
+ for (i = 0, len = 0; i < se_cmd->t_bidi_data_nents; i++)
+ len += se_cmd->t_bidi_data_sg[i].length;
+ cmd->dbi_bidi_cnt = DIV_ROUND_UP(len, blk_size);
+ cmd->dbi_cnt += cmd->dbi_bidi_cnt;
+ cmd->data_len_bidi = len;
+ }
+}
+
+static int new_block_to_iov(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
+ struct iovec **iov, int prev_dbi, int len)
+{
+ /* Get the next dbi */
+ int dbi = tcmu_cmd_get_dbi(cmd);
+
+ /* Do not add more than udev->data_blk_size to iov */
+ len = min_t(int, len, udev->data_blk_size);
+
+ /*
+ * The following code will gather and map the blocks to the same iovec
+ * when the blocks are all next to each other.
+ */
+ if (dbi != prev_dbi + 1) {
+ /* dbi is not next to previous dbi, so start new iov */
+ if (prev_dbi >= 0)
+ (*iov)++;
+ /* write offset relative to mb_addr */
+ (*iov)->iov_base = (void __user *)
+ (udev->data_off + dbi * udev->data_blk_size);
+ }
+ (*iov)->iov_len += len;
+
+ return dbi;
+}
+
+static void tcmu_setup_iovs(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
+ struct iovec **iov, int data_length)
+{
+ /* start value of dbi + 1 must not be a valid dbi */
+ int dbi = -2;
+
+ /* We prepare the IOVs for DMA_FROM_DEVICE transfer direction */
+ for (; data_length > 0; data_length -= udev->data_blk_size)
+ dbi = new_block_to_iov(udev, cmd, iov, dbi, data_length);
+}
+
+static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
+{
+ struct se_device *se_dev = se_cmd->se_dev;
+ struct tcmu_dev *udev = TCMU_DEV(se_dev);
+ struct tcmu_cmd *tcmu_cmd;
+
+ tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_NOIO);
+ if (!tcmu_cmd)
+ return NULL;
+
+ INIT_LIST_HEAD(&tcmu_cmd->queue_entry);
+ tcmu_cmd->se_cmd = se_cmd;
+ tcmu_cmd->tcmu_dev = udev;
+
+ tcmu_cmd_set_block_cnts(tcmu_cmd);
+ tcmu_cmd->dbi = kcalloc(tcmu_cmd->dbi_cnt, sizeof(uint32_t),
+ GFP_NOIO);
+ if (!tcmu_cmd->dbi) {
+ kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
+ return NULL;
+ }
+
+ return tcmu_cmd;
+}
+
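+/*
+ * The mailbox and cmd ring are vmalloc()ed, so flush the dcache page by
+ * page via vmalloc_to_page(); size is rounded up to whole pages.
+ */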
+static inline void tcmu_flush_dcache_range(void *vaddr, size_t size)
+{
+ unsigned long offset = offset_in_page(vaddr);
+ void *start = vaddr - offset;
+
+ size = round_up(size+offset, PAGE_SIZE);
+
+ while (size) {
+ flush_dcache_page(vmalloc_to_page(start));
+ start += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+}
+
+/*
+ * Some ring helper functions. We don't assume size is a power of 2 so
+ * we can't use circ_buf.h.
+ */
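+/*
+ * Example (sketch): with size = 10, head = 2, tail = 8, spc_used() returns
+ * 10 + (2 - 8) = 4 and spc_free() returns 10 - 4 - 1 = 5.
+ */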
+static inline size_t spc_used(size_t head, size_t tail, size_t size)
+{
+ int diff = head - tail;
+
+ if (diff >= 0)
+ return diff;
+ else
+ return size + diff;
+}
+
+static inline size_t spc_free(size_t head, size_t tail, size_t size)
+{
+ /* Keep 1 byte unused or we can't tell full from empty */
+ return (size - spc_used(head, tail, size) - 1);
+}
+
+static inline size_t head_to_end(size_t head, size_t size)
+{
+ return size - head;
+}
+
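+/*
+ * Advance a ring index by 'used' bytes, wrapping at 'size'. The
+ * smp_store_release() makes data written before the update visible to the
+ * other side before the new index is seen.
+ */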
+#define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size)
+
+#define TCMU_SG_TO_DATA_AREA 1
+#define TCMU_DATA_AREA_TO_SG 2
+
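+/*
+ * Copy between the sg list and the data area, one data block at a time.
+ * For TCMU_SG_TO_DATA_AREA the data blocks are also added to the iov list
+ * via new_block_to_iov(); for TCMU_DATA_AREA_TO_SG the command's existing
+ * dbi list is walked instead.
+ */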
+static inline void tcmu_copy_data(struct tcmu_dev *udev,
+ struct tcmu_cmd *tcmu_cmd, uint32_t direction,
+ struct scatterlist *sg, unsigned int sg_nents,
+ struct iovec **iov, size_t data_len)
+{
+ /* start value of dbi + 1 must not be a valid dbi */
+ int dbi = -2;
+ size_t page_remaining, cp_len;
+ int page_cnt, page_inx, dpi;
+ struct sg_mapping_iter sg_iter;
+ unsigned int sg_flags;
+ struct page *page;
+ void *data_page_start, *data_addr;
+
+ if (direction == TCMU_SG_TO_DATA_AREA)
+ sg_flags = SG_MITER_ATOMIC | SG_MITER_FROM_SG;
+ else
+ sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;
+ sg_miter_start(&sg_iter, sg, sg_nents, sg_flags);
+
+ while (data_len) {
+ if (direction == TCMU_SG_TO_DATA_AREA)
+ dbi = new_block_to_iov(udev, tcmu_cmd, iov, dbi,
+ data_len);
+ else
+ dbi = tcmu_cmd_get_dbi(tcmu_cmd);
+
+ page_cnt = DIV_ROUND_UP(data_len, PAGE_SIZE);
+ if (page_cnt > udev->data_pages_per_blk)
+ page_cnt = udev->data_pages_per_blk;
+
+ dpi = dbi * udev->data_pages_per_blk;
+ for (page_inx = 0; page_inx < page_cnt && data_len;
+ page_inx++, dpi++) {
+ page = xa_load(&udev->data_pages, dpi);
+
+ if (direction == TCMU_DATA_AREA_TO_SG)
+ flush_dcache_page(page);
+ data_page_start = kmap_atomic(page);
+ page_remaining = PAGE_SIZE;
+
+ while (page_remaining && data_len) {
+ if (!sg_miter_next(&sg_iter)) {
+ /* set length to 0 to abort outer loop */
+ data_len = 0;
+ pr_debug("%s: aborting data copy due to exhausted sg_list\n",
+ __func__);
+ break;
+ }
+ cp_len = min3(sg_iter.length, page_remaining,
+ data_len);
+
+ data_addr = data_page_start +
+ PAGE_SIZE - page_remaining;
+ if (direction == TCMU_SG_TO_DATA_AREA)
+ memcpy(data_addr, sg_iter.addr, cp_len);
+ else
+ memcpy(sg_iter.addr, data_addr, cp_len);
+
+ data_len -= cp_len;
+ page_remaining -= cp_len;
+ sg_iter.consumed = cp_len;
+ }
+ sg_miter_stop(&sg_iter);
+
+ kunmap_atomic(data_page_start);
+ if (direction == TCMU_SG_TO_DATA_AREA)
+ flush_dcache_page(page);
+ }
+ }
+}
+
+static void scatter_data_area(struct tcmu_dev *udev, struct tcmu_cmd *tcmu_cmd,
+ struct iovec **iov)
+{
+ struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
+
+ tcmu_copy_data(udev, tcmu_cmd, TCMU_SG_TO_DATA_AREA, se_cmd->t_data_sg,
+ se_cmd->t_data_nents, iov, se_cmd->data_length);
+}
+
+static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *tcmu_cmd,
+ bool bidi, uint32_t read_len)
+{
+ struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
+ struct scatterlist *data_sg;
+ unsigned int data_nents;
+
+ if (!bidi) {
+ data_sg = se_cmd->t_data_sg;
+ data_nents = se_cmd->t_data_nents;
+ } else {
+ /*
+ * For the bidi case, the first (dbi_cnt - dbi_bidi_cnt) blocks
+ * hold the Data-Out buffer, so skip them before gathering the
+ * Data-In buffer.
+ */
+ tcmu_cmd_set_dbi_cur(tcmu_cmd,
+ tcmu_cmd->dbi_cnt - tcmu_cmd->dbi_bidi_cnt);
+
+ data_sg = se_cmd->t_bidi_data_sg;
+ data_nents = se_cmd->t_bidi_data_nents;
+ }
+
+ tcmu_copy_data(udev, tcmu_cmd, TCMU_DATA_AREA_TO_SG, data_sg,
+ data_nents, NULL, read_len);
+}
+
+static inline size_t spc_bitmap_free(unsigned long *bitmap, uint32_t thresh)
+{
+ return thresh - bitmap_weight(bitmap, thresh);
+}
+
+/*
+ * We can't queue a command until we have space available on the cmd ring.
+ *
+ * Called with ring lock held.
+ */
+static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size)
+{
+ struct tcmu_mailbox *mb = udev->mb_addr;
+ size_t space, cmd_needed;
+ u32 cmd_head;
+
+ tcmu_flush_dcache_range(mb, sizeof(*mb));
+
+ cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
+
+ /*
+ * If cmd end-of-ring space is too small then we need space for a PAD
+ * entry plus the original cmd - cmds are internally contiguous.
+ */
+ if (head_to_end(cmd_head, udev->cmdr_size) >= cmd_size)
+ cmd_needed = cmd_size;
+ else
+ cmd_needed = cmd_size + head_to_end(cmd_head, udev->cmdr_size);
+
+ space = spc_free(cmd_head, udev->cmdr_last_cleaned, udev->cmdr_size);
+ if (space < cmd_needed) {
+ pr_debug("no cmd space: %u %u %u\n", cmd_head,
+ udev->cmdr_last_cleaned, udev->cmdr_size);
+ return false;
+ }
+ return true;
+}
+
+/*
+ * We have to allocate data buffers before we can queue a command.
+ * Returns -1 on error (not enough space) or number of needed iovs on success
+ *
+ * Called with ring lock held.
+ */
+static int tcmu_alloc_data_space(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
+ int *iov_bidi_cnt)
+{
+ int space, iov_cnt = 0, ret = 0;
+
+ if (!cmd->dbi_cnt)
+ goto wr_iov_cnts;
+
+ /* try to check and get the data blocks as needed */
+ space = spc_bitmap_free(udev->data_bitmap, udev->dbi_thresh);
+ if (space < cmd->dbi_cnt) {
+ unsigned long blocks_left =
+ (udev->max_blocks - udev->dbi_thresh) + space;
+
+ if (blocks_left < cmd->dbi_cnt) {
+ pr_debug("no data space: only %lu available, but ask for %u\n",
+ blocks_left * udev->data_blk_size,
+ cmd->dbi_cnt * udev->data_blk_size);
+ return -1;
+ }
+
+ udev->dbi_thresh += cmd->dbi_cnt;
+ if (udev->dbi_thresh > udev->max_blocks)
+ udev->dbi_thresh = udev->max_blocks;
+ }
+
+ iov_cnt = tcmu_get_empty_blocks(udev, cmd, cmd->se_cmd->data_length);
+ if (iov_cnt < 0)
+ return -1;
+
+ if (cmd->dbi_bidi_cnt) {
+ ret = tcmu_get_empty_blocks(udev, cmd, cmd->data_len_bidi);
+ if (ret < 0)
+ return -1;
+ }
+wr_iov_cnts:
+ *iov_bidi_cnt = ret;
+ return iov_cnt + ret;
+}
+
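+/*
+ * The entry must never be smaller than sizeof(struct tcmu_cmd_entry) so
+ * the rsp union member (sense data) always fits, but it can grow beyond
+ * that if the iov array is large.
+ */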
+static inline size_t tcmu_cmd_get_base_cmd_size(size_t iov_cnt)
+{
+ return max(offsetof(struct tcmu_cmd_entry, req.iov[iov_cnt]),
+ sizeof(struct tcmu_cmd_entry));
+}
+
+static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd *tcmu_cmd,
+ size_t base_command_size)
+{
+ struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
+ size_t command_size;
+
+ command_size = base_command_size +
+ round_up(scsi_command_size(se_cmd->t_task_cdb),
+ TCMU_OP_ALIGN_SIZE);
+
+ WARN_ON(command_size & (TCMU_OP_ALIGN_SIZE-1));
+
+ return command_size;
+}
+
+static void tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd, unsigned int tmo,
+ struct timer_list *timer)
+{
+ if (!tmo)
+ return;
+
+ tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
+ if (!timer_pending(timer))
+ mod_timer(timer, tcmu_cmd->deadline);
+
+ pr_debug("Timeout set up for cmd %p, dev = %s, tmo = %lu\n", tcmu_cmd,
+ tcmu_cmd->tcmu_dev->name, tmo / MSEC_PER_SEC);
+}
+
+static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd)
+{
+ struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
+ unsigned int tmo;
+
+ /*
+ * For backwards compat: if qfull_time_out is not set, use
+ * cmd_time_out, and if that's not set, use the default timeout.
+ */
+ if (!udev->qfull_time_out)
+ return -ETIMEDOUT;
+ else if (udev->qfull_time_out > 0)
+ tmo = udev->qfull_time_out;
+ else if (udev->cmd_time_out)
+ tmo = udev->cmd_time_out;
+ else
+ tmo = TCMU_TIME_OUT;
+
+ tcmu_setup_cmd_timer(tcmu_cmd, tmo, &udev->qfull_timer);
+
+ list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue);
+ pr_debug("adding cmd %p on dev %s to ring space wait queue\n",
+ tcmu_cmd, udev->name);
+ return 0;
+}
+
+static uint32_t ring_insert_padding(struct tcmu_dev *udev, size_t cmd_size)
+{
+ struct tcmu_cmd_entry_hdr *hdr;
+ struct tcmu_mailbox *mb = udev->mb_addr;
+ uint32_t cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
+
+ /* Insert a PAD if end-of-ring space is too small */
+ if (head_to_end(cmd_head, udev->cmdr_size) < cmd_size) {
+ size_t pad_size = head_to_end(cmd_head, udev->cmdr_size);
+
+ hdr = udev->cmdr + cmd_head;
+ tcmu_hdr_set_op(&hdr->len_op, TCMU_OP_PAD);
+ tcmu_hdr_set_len(&hdr->len_op, pad_size);
+ hdr->cmd_id = 0; /* not used for PAD */
+ hdr->kflags = 0;
+ hdr->uflags = 0;
+ tcmu_flush_dcache_range(hdr, sizeof(*hdr));
+
+ UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size);
+ tcmu_flush_dcache_range(mb, sizeof(*mb));
+
+ cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
+ WARN_ON(cmd_head != 0);
+ }
+
+ return cmd_head;
+}
+
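+/*
+ * While a device is plugged, queue_cmd_ring() skips the per-command
+ * uio_event_notify(); the single wakeup is sent here on unplug.
+ */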
+static void tcmu_unplug_device(struct se_dev_plug *se_plug)
+{
+ struct se_device *se_dev = se_plug->se_dev;
+ struct tcmu_dev *udev = TCMU_DEV(se_dev);
+
+ clear_bit(TCMU_DEV_BIT_PLUGGED, &udev->flags);
+ uio_event_notify(&udev->uio_info);
+}
+
+static struct se_dev_plug *tcmu_plug_device(struct se_device *se_dev)
+{
+ struct tcmu_dev *udev = TCMU_DEV(se_dev);
+
+ if (!test_and_set_bit(TCMU_DEV_BIT_PLUGGED, &udev->flags))
+ return &udev->se_plug;
+
+ return NULL;
+}
+
+/**
+ * queue_cmd_ring - queue cmd to ring or internally
+ * @tcmu_cmd: cmd to queue
+ * @scsi_err: TCM error code, set if failure (-1) is returned.
+ *
+ * Returns:
+ * -1 we cannot queue internally or to the ring.
+ * 0 success
+ * 1 internally queued to wait for ring memory to free.
+ */
+static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
+{
+ struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
+ struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
+ size_t base_command_size, command_size;
+ struct tcmu_mailbox *mb = udev->mb_addr;
+ struct tcmu_cmd_entry *entry;
+ struct iovec *iov;
+ int iov_cnt, iov_bidi_cnt;
+ uint32_t cmd_id, cmd_head;
+ uint64_t cdb_off;
+ uint32_t blk_size = udev->data_blk_size;
+ /* size of data buffer needed */
+ size_t data_length = (size_t)tcmu_cmd->dbi_cnt * blk_size;
+
+ *scsi_err = TCM_NO_SENSE;
+
+ if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags)) {
+ *scsi_err = TCM_LUN_BUSY;
+ return -1;
+ }
+
+ if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
+ *scsi_err = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -1;
+ }
+
+ if (!list_empty(&udev->qfull_queue))
+ goto queue;
+
+ if (data_length > (size_t)udev->max_blocks * blk_size) {
+ pr_warn("TCMU: Request of size %zu is too big for %zu data area\n",
+ data_length, (size_t)udev->max_blocks * blk_size);
+ *scsi_err = TCM_INVALID_CDB_FIELD;
+ return -1;
+ }
+
+ iov_cnt = tcmu_alloc_data_space(udev, tcmu_cmd, &iov_bidi_cnt);
+ if (iov_cnt < 0)
+ goto free_and_queue;
+
+ /*
+ * Must be a certain minimum size for response sense info, but
+ * also may be larger if the iov array is large.
+ */
+ base_command_size = tcmu_cmd_get_base_cmd_size(iov_cnt);
+ command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);
+
+ if (command_size > (udev->cmdr_size / 2)) {
+ pr_warn("TCMU: Request of size %zu is too big for %u cmd ring\n",
+ command_size, udev->cmdr_size);
+ tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur);
+ *scsi_err = TCM_INVALID_CDB_FIELD;
+ return -1;
+ }
+
+ if (!is_ring_space_avail(udev, command_size))
+ /*
+ * Don't leave commands partially setup because the unmap
+ * thread might need the blocks to make forward progress.
+ */
+ goto free_and_queue;
+
+ if (xa_alloc(&udev->commands, &cmd_id, tcmu_cmd, XA_LIMIT(1, 0xffff),
+ GFP_NOWAIT) < 0) {
+ pr_err("tcmu: Could not allocate cmd id.\n");
+
+ tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
+ *scsi_err = TCM_OUT_OF_RESOURCES;
+ return -1;
+ }
+ tcmu_cmd->cmd_id = cmd_id;
+
+ pr_debug("allocated cmd id %u for cmd %p dev %s\n", tcmu_cmd->cmd_id,
+ tcmu_cmd, udev->name);
+
+ cmd_head = ring_insert_padding(udev, command_size);
+
+ entry = udev->cmdr + cmd_head;
+ memset(entry, 0, command_size);
+ tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);
+
+ /* prepare iov list and copy data to data area if necessary */
+ tcmu_cmd_reset_dbi_cur(tcmu_cmd);
+ iov = &entry->req.iov[0];
+
+ if (se_cmd->data_direction == DMA_TO_DEVICE ||
+ se_cmd->se_cmd_flags & SCF_BIDI)
+ scatter_data_area(udev, tcmu_cmd, &iov);
+ else
+ tcmu_setup_iovs(udev, tcmu_cmd, &iov, se_cmd->data_length);
+
+ entry->req.iov_cnt = iov_cnt - iov_bidi_cnt;
+
+ /* Handle BIDI commands */
+ if (se_cmd->se_cmd_flags & SCF_BIDI) {
+ iov++;
+ tcmu_setup_iovs(udev, tcmu_cmd, &iov, tcmu_cmd->data_len_bidi);
+ entry->req.iov_bidi_cnt = iov_bidi_cnt;
+ }
+
+ tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out, &udev->cmd_timer);
+
+ entry->hdr.cmd_id = tcmu_cmd->cmd_id;
+
+ tcmu_hdr_set_len(&entry->hdr.len_op, command_size);
+
+ /* All offsets relative to mb_addr, not start of entry! */
+ cdb_off = CMDR_OFF + cmd_head + base_command_size;
+ memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb));
+ entry->req.cdb_off = cdb_off;
+ tcmu_flush_dcache_range(entry, command_size);
+
+ UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size);
+ tcmu_flush_dcache_range(mb, sizeof(*mb));
+
+ list_add_tail(&tcmu_cmd->queue_entry, &udev->inflight_queue);
+
+ if (!test_bit(TCMU_DEV_BIT_PLUGGED, &udev->flags))
+ uio_event_notify(&udev->uio_info);
+
+ return 0;
+
+free_and_queue:
+ tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur);
+ tcmu_cmd_reset_dbi_cur(tcmu_cmd);
+
+queue:
+ if (add_to_qfull_queue(tcmu_cmd)) {
+ *scsi_err = TCM_OUT_OF_RESOURCES;
+ return -1;
+ }
+
+ return 1;
+}
+
+/**
+ * queue_tmr_ring - queue tmr info to ring or internally
+ * @udev: related tcmu_dev
+ * @tmr: tcmu_tmr containing tmr info to queue
+ *
+ * Returns:
+ * 0 success
+ * 1 internally queued to wait for ring memory to free.
+ */
+static int
+queue_tmr_ring(struct tcmu_dev *udev, struct tcmu_tmr *tmr)
+{
+ struct tcmu_tmr_entry *entry;
+ int cmd_size;
+ int id_list_sz;
+ struct tcmu_mailbox *mb = udev->mb_addr;
+ uint32_t cmd_head;
+
+ if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
+ goto out_free;
+
+ id_list_sz = sizeof(tmr->tmr_cmd_ids[0]) * tmr->tmr_cmd_cnt;
+ cmd_size = round_up(sizeof(*entry) + id_list_sz, TCMU_OP_ALIGN_SIZE);
+
+ if (!list_empty(&udev->tmr_queue) ||
+ !is_ring_space_avail(udev, cmd_size)) {
+ list_add_tail(&tmr->queue_entry, &udev->tmr_queue);
+ pr_debug("adding tmr %p on dev %s to TMR ring space wait queue\n",
+ tmr, udev->name);
+ return 1;
+ }
+
+ cmd_head = ring_insert_padding(udev, cmd_size);
+
+ entry = udev->cmdr + cmd_head;
+ memset(entry, 0, cmd_size);
+ tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_TMR);
+ tcmu_hdr_set_len(&entry->hdr.len_op, cmd_size);
+ entry->tmr_type = tmr->tmr_type;
+ entry->cmd_cnt = tmr->tmr_cmd_cnt;
+ memcpy(&entry->cmd_ids[0], &tmr->tmr_cmd_ids[0], id_list_sz);
+ tcmu_flush_dcache_range(entry, cmd_size);
+
+ UPDATE_HEAD(mb->cmd_head, cmd_size, udev->cmdr_size);
+ tcmu_flush_dcache_range(mb, sizeof(*mb));
+
+ uio_event_notify(&udev->uio_info);
+
+out_free:
+ kfree(tmr);
+
+ return 0;
+}
+
+static sense_reason_t
+tcmu_queue_cmd(struct se_cmd *se_cmd)
+{
+ struct se_device *se_dev = se_cmd->se_dev;
+ struct tcmu_dev *udev = TCMU_DEV(se_dev);
+ struct tcmu_cmd *tcmu_cmd;
+ sense_reason_t scsi_ret = TCM_CHECK_CONDITION_ABORT_CMD;
+ int ret = -1;
+
+ tcmu_cmd = tcmu_alloc_cmd(se_cmd);
+ if (!tcmu_cmd)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ mutex_lock(&udev->cmdr_lock);
+ if (!(se_cmd->transport_state & CMD_T_ABORTED))
+ ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
+ if (ret < 0)
+ tcmu_free_cmd(tcmu_cmd);
+ else
+ se_cmd->priv = tcmu_cmd;
+ mutex_unlock(&udev->cmdr_lock);
+ return scsi_ret;
+}
+
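+/*
+ * Commands are appended to these queues with non-decreasing deadlines,
+ * so the first entry carries the earliest one.
+ */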
+static void tcmu_set_next_deadline(struct list_head *queue,
+ struct timer_list *timer)
+{
+ struct tcmu_cmd *cmd;
+
+ if (!list_empty(queue)) {
+ cmd = list_first_entry(queue, struct tcmu_cmd, queue_entry);
+ mod_timer(timer, cmd->deadline);
+ } else
+ timer_delete(timer);
+}
+
+static int
+tcmu_tmr_type(enum tcm_tmreq_table tmf)
+{
+ switch (tmf) {
+ case TMR_ABORT_TASK: return TCMU_TMR_ABORT_TASK;
+ case TMR_ABORT_TASK_SET: return TCMU_TMR_ABORT_TASK_SET;
+ case TMR_CLEAR_ACA: return TCMU_TMR_CLEAR_ACA;
+ case TMR_CLEAR_TASK_SET: return TCMU_TMR_CLEAR_TASK_SET;
+ case TMR_LUN_RESET: return TCMU_TMR_LUN_RESET;
+ case TMR_TARGET_WARM_RESET: return TCMU_TMR_TARGET_WARM_RESET;
+ case TMR_TARGET_COLD_RESET: return TCMU_TMR_TARGET_COLD_RESET;
+ case TMR_LUN_RESET_PRO: return TCMU_TMR_LUN_RESET_PRO;
+ default: return TCMU_TMR_UNKNOWN;
+ }
+}
+
+static void
+tcmu_tmr_notify(struct se_device *se_dev, enum tcm_tmreq_table tmf,
+ struct list_head *cmd_list)
+{
+ int i = 0, cmd_cnt = 0;
+ bool unqueued = false;
+ struct tcmu_cmd *cmd;
+ struct se_cmd *se_cmd;
+ struct tcmu_tmr *tmr;
+ struct tcmu_dev *udev = TCMU_DEV(se_dev);
+
+ mutex_lock(&udev->cmdr_lock);
+
+ /* First we check for aborted commands in qfull_queue */
+ list_for_each_entry(se_cmd, cmd_list, state_list) {
+ i++;
+ if (!se_cmd->priv)
+ continue;
+ cmd = se_cmd->priv;
+ /* Commands on qfull queue have no id yet */
+ if (cmd->cmd_id) {
+ cmd_cnt++;
+ continue;
+ }
+ pr_debug("Removing aborted command %p from queue on dev %s.\n",
+ cmd, udev->name);
+
+ list_del_init(&cmd->queue_entry);
+ tcmu_free_cmd(cmd);
+ se_cmd->priv = NULL;
+ target_complete_cmd(se_cmd, SAM_STAT_TASK_ABORTED);
+ unqueued = true;
+ }
+ if (unqueued)
+ tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
+
+ if (!test_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags))
+ goto unlock;
+
+ pr_debug("TMR event %d on dev %s, aborted cmds %d, afflicted cmd_ids %d\n",
+ tcmu_tmr_type(tmf), udev->name, i, cmd_cnt);
+
+ tmr = kmalloc(struct_size(tmr, tmr_cmd_ids, cmd_cnt), GFP_NOIO);
+ if (!tmr)
+ goto unlock;
+
+ tmr->tmr_type = tcmu_tmr_type(tmf);
+ tmr->tmr_cmd_cnt = cmd_cnt;
+
+ if (cmd_cnt != 0) {
+ cmd_cnt = 0;
+ list_for_each_entry(se_cmd, cmd_list, state_list) {
+ if (!se_cmd->priv)
+ continue;
+ cmd = se_cmd->priv;
+ if (cmd->cmd_id)
+ tmr->tmr_cmd_ids[cmd_cnt++] = cmd->cmd_id;
+ }
+ }
+
+ queue_tmr_ring(udev, tmr);
+
+unlock:
+ mutex_unlock(&udev->cmdr_lock);
+}
+
+static bool tcmu_handle_completion(struct tcmu_cmd *cmd,
+ struct tcmu_cmd_entry *entry, bool keep_buf)
+{
+ struct se_cmd *se_cmd = cmd->se_cmd;
+ struct tcmu_dev *udev = cmd->tcmu_dev;
+ bool read_len_valid = false;
+ bool ret = true;
+ uint32_t read_len;
+
+ /*
+ * cmd has been completed already from timeout, just reclaim
+ * data area space and free cmd
+ */
+ if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
+ WARN_ON_ONCE(se_cmd);
+ goto out;
+ }
+ if (test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags)) {
+ pr_err("cmd_id %u already completed with KEEP_BUF, ring is broken\n",
+ entry->hdr.cmd_id);
+ set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
+ ret = false;
+ goto out;
+ }
+
+ list_del_init(&cmd->queue_entry);
+
+ tcmu_cmd_reset_dbi_cur(cmd);
+
+ if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) {
+ pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n",
+ cmd->se_cmd);
+ entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION;
+ goto done;
+ }
+
+ read_len = se_cmd->data_length;
+ if (se_cmd->data_direction == DMA_FROM_DEVICE &&
+ (entry->hdr.uflags & TCMU_UFLAG_READ_LEN) && entry->rsp.read_len) {
+ read_len_valid = true;
+ if (entry->rsp.read_len < read_len)
+ read_len = entry->rsp.read_len;
+ }
+
+ if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
+ transport_copy_sense_to_cmd(se_cmd, entry->rsp.sense_buffer);
+ if (!read_len_valid)
+ goto done;
+ else
+ se_cmd->se_cmd_flags |= SCF_TREAT_READ_AS_NORMAL;
+ }
+ if (se_cmd->se_cmd_flags & SCF_BIDI) {
+ /* Get Data-In buffer before clean up */
+ gather_data_area(udev, cmd, true, read_len);
+ } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
+ gather_data_area(udev, cmd, false, read_len);
+ } else if (se_cmd->data_direction == DMA_TO_DEVICE) {
+ /* TODO: */
+ } else if (se_cmd->data_direction != DMA_NONE) {
+ pr_warn("TCMU: data direction was %d!\n",
+ se_cmd->data_direction);
+ }
+
+done:
+ se_cmd->priv = NULL;
+ if (read_len_valid) {
+ pr_debug("read_len = %d\n", read_len);
+ target_complete_cmd_with_length(cmd->se_cmd,
+ entry->rsp.scsi_status, read_len);
+ } else
+ target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);
+
+out:
+ if (!keep_buf) {
+ tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
+ tcmu_free_cmd(cmd);
+ } else {
+ /*
+ * Keep this command after completion, since userspace still
+ * needs the data buffer. Mark it with TCMU_CMD_BIT_KEEP_BUF
+ * and reset potential TCMU_CMD_BIT_EXPIRED, so we don't accept
+ * a second completion later.
+ * Userspace can free the buffer later by writing the cmd_id
+ * to new action attribute free_kept_buf.
+ */
+ clear_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
+ set_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags);
+ }
+ return ret;
+}
+
+static int tcmu_run_tmr_queue(struct tcmu_dev *udev)
+{
+ struct tcmu_tmr *tmr, *tmp;
+ LIST_HEAD(tmrs);
+
+ if (list_empty(&udev->tmr_queue))
+ return 1;
+
+ pr_debug("running %s's tmr queue\n", udev->name);
+
+ list_splice_init(&udev->tmr_queue, &tmrs);
+
+ list_for_each_entry_safe(tmr, tmp, &tmrs, queue_entry) {
+ list_del_init(&tmr->queue_entry);
+
+ pr_debug("removing tmr %p on dev %s from queue\n",
+ tmr, udev->name);
+
+ if (queue_tmr_ring(udev, tmr)) {
+ pr_debug("ran out of space during tmr queue run\n");
+ /*
+ * tmr was requeued, so just put all tmrs back in
+ * the queue
+ */
+ list_splice_tail(&tmrs, &udev->tmr_queue);
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+static bool tcmu_handle_completions(struct tcmu_dev *udev)
+{
+ struct tcmu_mailbox *mb;
+ struct tcmu_cmd *cmd;
+ bool free_space = false;
+
+ if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
+ pr_err("ring broken, not handling completions\n");
+ return false;
+ }
+
+ mb = udev->mb_addr;
+ tcmu_flush_dcache_range(mb, sizeof(*mb));
+
+ while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) {
+
+ struct tcmu_cmd_entry *entry = udev->cmdr + udev->cmdr_last_cleaned;
+ bool keep_buf;
+
+ /*
+ * Flush at most up to the end of the cmd ring, since the current
+ * entry might be a padding entry shorter than sizeof(*entry).
+ */
+ size_t ring_left = head_to_end(udev->cmdr_last_cleaned,
+ udev->cmdr_size);
+ tcmu_flush_dcache_range(entry, ring_left < sizeof(*entry) ?
+ ring_left : sizeof(*entry));
+
+ free_space = true;
+
+ if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD ||
+ tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_TMR) {
+ UPDATE_HEAD(udev->cmdr_last_cleaned,
+ tcmu_hdr_get_len(entry->hdr.len_op),
+ udev->cmdr_size);
+ continue;
+ }
+ WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD);
+
+ keep_buf = !!(entry->hdr.uflags & TCMU_UFLAG_KEEP_BUF);
+ if (keep_buf)
+ cmd = xa_load(&udev->commands, entry->hdr.cmd_id);
+ else
+ cmd = xa_erase(&udev->commands, entry->hdr.cmd_id);
+ if (!cmd) {
+ pr_err("cmd_id %u not found, ring is broken\n",
+ entry->hdr.cmd_id);
+ set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
+ return false;
+ }
+
+ if (!tcmu_handle_completion(cmd, entry, keep_buf))
+ break;
+
+ UPDATE_HEAD(udev->cmdr_last_cleaned,
+ tcmu_hdr_get_len(entry->hdr.len_op),
+ udev->cmdr_size);
+ }
+ if (free_space)
+ free_space = tcmu_run_tmr_queue(udev);
+
+ if (atomic_read(&global_page_count) > tcmu_global_max_pages &&
+ xa_empty(&udev->commands) && list_empty(&udev->qfull_queue)) {
+ /*
+ * Allocated blocks exceeded the global limit and there are
+ * currently no pending or waiting commands, so try to reclaim
+ * blocks.
+ */
+ schedule_delayed_work(&tcmu_unmap_work, 0);
+ }
+ if (udev->cmd_time_out)
+ tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);
+
+ return free_space;
+}
+
+static void tcmu_check_expired_ring_cmd(struct tcmu_cmd *cmd)
+{
+ struct se_cmd *se_cmd;
+
+ if (!time_after_eq(jiffies, cmd->deadline))
+ return;
+
+ set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
+ list_del_init(&cmd->queue_entry);
+ se_cmd = cmd->se_cmd;
+ se_cmd->priv = NULL;
+ cmd->se_cmd = NULL;
+
+ pr_debug("Timing out inflight cmd %u on dev %s.\n",
+ cmd->cmd_id, cmd->tcmu_dev->name);
+
+ target_complete_cmd(se_cmd, SAM_STAT_CHECK_CONDITION);
+}
+
+static void tcmu_check_expired_queue_cmd(struct tcmu_cmd *cmd)
+{
+ struct se_cmd *se_cmd;
+
+ if (!time_after_eq(jiffies, cmd->deadline))
+ return;
+
+ pr_debug("Timing out queued cmd %p on dev %s.\n",
+ cmd, cmd->tcmu_dev->name);
+
+ list_del_init(&cmd->queue_entry);
+ se_cmd = cmd->se_cmd;
+ tcmu_free_cmd(cmd);
+
+ se_cmd->priv = NULL;
+ target_complete_cmd(se_cmd, SAM_STAT_TASK_SET_FULL);
+}
+
+static void tcmu_device_timedout(struct tcmu_dev *udev)
+{
+ spin_lock(&timed_out_udevs_lock);
+ if (list_empty(&udev->timedout_entry))
+ list_add_tail(&udev->timedout_entry, &timed_out_udevs);
+ spin_unlock(&timed_out_udevs_lock);
+
+ schedule_delayed_work(&tcmu_unmap_work, 0);
+}
+
+static void tcmu_cmd_timedout(struct timer_list *t)
+{
+ struct tcmu_dev *udev = timer_container_of(udev, t, cmd_timer);
+
+ pr_debug("%s cmd timeout has expired\n", udev->name);
+ tcmu_device_timedout(udev);
+}
+
+static void tcmu_qfull_timedout(struct timer_list *t)
+{
+ struct tcmu_dev *udev = timer_container_of(udev, t, qfull_timer);
+
+ pr_debug("%s qfull timeout has expired\n", udev->name);
+ tcmu_device_timedout(udev);
+}
+
+static int tcmu_attach_hba(struct se_hba *hba, u32 host_id)
+{
+ struct tcmu_hba *tcmu_hba;
+
+ tcmu_hba = kzalloc(sizeof(struct tcmu_hba), GFP_KERNEL);
+ if (!tcmu_hba)
+ return -ENOMEM;
+
+ tcmu_hba->host_id = host_id;
+ hba->hba_ptr = tcmu_hba;
+
+ return 0;
+}
+
+static void tcmu_detach_hba(struct se_hba *hba)
+{
+ kfree(hba->hba_ptr);
+ hba->hba_ptr = NULL;
+}
+
+static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
+{
+ struct tcmu_dev *udev;
+
+ udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL);
+ if (!udev)
+ return NULL;
+ kref_init(&udev->kref);
+
+ udev->name = kstrdup(name, GFP_KERNEL);
+ if (!udev->name) {
+ kfree(udev);
+ return NULL;
+ }
+
+ udev->hba = hba;
+ udev->cmd_time_out = TCMU_TIME_OUT;
+ udev->qfull_time_out = -1;
+
+ udev->data_pages_per_blk = DATA_PAGES_PER_BLK_DEF;
+ udev->max_blocks = DATA_AREA_PAGES_DEF / udev->data_pages_per_blk;
+ udev->cmdr_size = CMDR_SIZE_DEF;
+ udev->data_area_mb = TCMU_PAGES_TO_MBS(DATA_AREA_PAGES_DEF);
+
+ mutex_init(&udev->cmdr_lock);
+
+ INIT_LIST_HEAD(&udev->node);
+ INIT_LIST_HEAD(&udev->timedout_entry);
+ INIT_LIST_HEAD(&udev->qfull_queue);
+ INIT_LIST_HEAD(&udev->tmr_queue);
+ INIT_LIST_HEAD(&udev->inflight_queue);
+ xa_init_flags(&udev->commands, XA_FLAGS_ALLOC1);
+
+ timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0);
+ timer_setup(&udev->cmd_timer, tcmu_cmd_timedout, 0);
+
+ xa_init(&udev->data_pages);
+
+ return &udev->se_dev;
+}
+
+static void tcmu_dev_call_rcu(struct rcu_head *p)
+{
+ struct se_device *dev = container_of(p, struct se_device, rcu_head);
+ struct tcmu_dev *udev = TCMU_DEV(dev);
+
+ kfree(udev->uio_info.name);
+ kfree(udev->name);
+ kfree(udev);
+}
+
+static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
+{
+ if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags) ||
+ test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags)) {
+ kmem_cache_free(tcmu_cmd_cache, cmd);
+ return 0;
+ }
+ return -EINVAL;
+}
+
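+/*
+ * Free the data pages backing the given range of data blocks and drop them
+ * from the data_pages xarray. 'first' and 'last' are block indices, which
+ * are converted to page indices below.
+ */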
+static u32 tcmu_blocks_release(struct tcmu_dev *udev, unsigned long first,
+ unsigned long last)
+{
+ struct page *page;
+ unsigned long dpi;
+ u32 pages_freed = 0;
+
+ first = first * udev->data_pages_per_blk;
+ last = (last + 1) * udev->data_pages_per_blk - 1;
+ xa_for_each_range(&udev->data_pages, dpi, page, first, last) {
+ xa_erase(&udev->data_pages, dpi);
+ /*
+ * While reaching here there may be page faults occurring on
+ * the to-be-released pages. A race condition may occur if
+ * unmap_mapping_range() is called before page faults on these
+ * pages have completed; a valid but stale map is created.
+ *
+ * If another command subsequently runs and needs to extend
+ * dbi_thresh, it may reuse the slot corresponding to the
+ * previous page in data_bitmap. Though we will allocate a new
+ * page for the slot in data_area, no page fault will happen
+ * because we have a valid map. Therefore the command's data
+ * will be lost.
+ *
+ * We lock and unlock pages that are to be released to ensure
+ * all page faults have completed. This way
+ * unmap_mapping_range() can ensure stale maps are cleanly
+ * removed.
+ */
+ lock_page(page);
+ unlock_page(page);
+ __free_page(page);
+ pages_freed++;
+ }
+
+ atomic_sub(pages_freed, &global_page_count);
+
+ return pages_freed;
+}
+
+static void tcmu_remove_all_queued_tmr(struct tcmu_dev *udev)
+{
+ struct tcmu_tmr *tmr, *tmp;
+
+ list_for_each_entry_safe(tmr, tmp, &udev->tmr_queue, queue_entry) {
+ list_del_init(&tmr->queue_entry);
+ kfree(tmr);
+ }
+}
+
+static void tcmu_dev_kref_release(struct kref *kref)
+{
+ struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref);
+ struct se_device *dev = &udev->se_dev;
+ struct tcmu_cmd *cmd;
+ bool all_expired = true;
+ unsigned long i;
+
+ vfree(udev->mb_addr);
+ udev->mb_addr = NULL;
+
+ spin_lock_bh(&timed_out_udevs_lock);
+ if (!list_empty(&udev->timedout_entry))
+ list_del(&udev->timedout_entry);
+ spin_unlock_bh(&timed_out_udevs_lock);
+
+ /* Upper layer should drain all requests before calling this */
+ mutex_lock(&udev->cmdr_lock);
+ xa_for_each(&udev->commands, i, cmd) {
+ if (tcmu_check_and_free_pending_cmd(cmd) != 0)
+ all_expired = false;
+ }
+ /* There can be left over TMR cmds. Remove them. */
+ tcmu_remove_all_queued_tmr(udev);
+ if (!list_empty(&udev->qfull_queue))
+ all_expired = false;
+ xa_destroy(&udev->commands);
+ WARN_ON(!all_expired);
+
+ tcmu_blocks_release(udev, 0, udev->dbi_max);
+ bitmap_free(udev->data_bitmap);
+ mutex_unlock(&udev->cmdr_lock);
+
+ pr_debug("dev_kref_release\n");
+
+ call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
+}
+
+static void run_qfull_queue(struct tcmu_dev *udev, bool fail)
+{
+ struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
+ LIST_HEAD(cmds);
+ sense_reason_t scsi_ret;
+ int ret;
+
+ if (list_empty(&udev->qfull_queue))
+ return;
+
+ pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail);
+
+ list_splice_init(&udev->qfull_queue, &cmds);
+
+ list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, queue_entry) {
+ list_del_init(&tcmu_cmd->queue_entry);
+
+ pr_debug("removing cmd %p on dev %s from queue\n",
+ tcmu_cmd, udev->name);
+
+ if (fail) {
+ /*
+ * We were not able to even start the command, so
+ * fail with busy to allow a retry in case runner
+ * was only temporarily down. If the device is being
+ * removed then LIO core will do the right thing and
+ * fail the retry.
+ */
+ tcmu_cmd->se_cmd->priv = NULL;
+ target_complete_cmd(tcmu_cmd->se_cmd, SAM_STAT_BUSY);
+ tcmu_free_cmd(tcmu_cmd);
+ continue;
+ }
+
+ ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
+ if (ret < 0) {
+ pr_debug("cmd %p on dev %s failed with %u\n",
+ tcmu_cmd, udev->name, scsi_ret);
+ /*
+ * Ignore scsi_ret for now. target_complete_cmd
+ * drops it.
+ */
+ tcmu_cmd->se_cmd->priv = NULL;
+ target_complete_cmd(tcmu_cmd->se_cmd,
+ SAM_STAT_CHECK_CONDITION);
+ tcmu_free_cmd(tcmu_cmd);
+ } else if (ret > 0) {
+ pr_debug("ran out of space during cmdr queue run\n");
+ /*
+ * cmd was requeued, so just put all cmds back in
+ * the queue
+ */
+ list_splice_tail(&cmds, &udev->qfull_queue);
+ break;
+ }
+ }
+
+ tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
+}
+
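+/*
+ * Called when userspace writes to the uio device to kick the kernel side:
+ * reap completions from the ring and, if that freed space, retry commands
+ * waiting on the qfull queue.
+ */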
+static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
+{
+ struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
+
+ mutex_lock(&udev->cmdr_lock);
+ if (tcmu_handle_completions(udev))
+ run_qfull_queue(udev, false);
+ mutex_unlock(&udev->cmdr_lock);
+
+ return 0;
+}
+
+/*
+ * mmap code from uio.c. Copied here because we want to hook mmap()
+ * and this stuff must come along.
+ */
+static int tcmu_find_mem_index(struct vm_area_struct *vma)
+{
+ struct tcmu_dev *udev = vma->vm_private_data;
+ struct uio_info *info = &udev->uio_info;
+
+ if (vma->vm_pgoff < MAX_UIO_MAPS) {
+ if (info->mem[vma->vm_pgoff].size == 0)
+ return -1;
+ return (int)vma->vm_pgoff;
+ }
+ return -1;
+}
+
+static struct page *tcmu_try_get_data_page(struct tcmu_dev *udev, uint32_t dpi)
+{
+ struct page *page;
+
+ mutex_lock(&udev->cmdr_lock);
+ page = xa_load(&udev->data_pages, dpi);
+ if (likely(page)) {
+ get_page(page);
+ lock_page(page);
+ mutex_unlock(&udev->cmdr_lock);
+ return page;
+ }
+
+ /*
+ * Userspace messed up and passed in an address not in the
+ * data iov passed to it.
+ */
+ pr_err("Invalid addr to data page mapping (dpi %u) on device %s\n",
+ dpi, udev->name);
+ mutex_unlock(&udev->cmdr_lock);
+
+ return NULL;
+}
+
+static void tcmu_vma_open(struct vm_area_struct *vma)
+{
+ struct tcmu_dev *udev = vma->vm_private_data;
+
+ pr_debug("vma_open\n");
+
+ kref_get(&udev->kref);
+}
+
+static void tcmu_vma_close(struct vm_area_struct *vma)
+{
+ struct tcmu_dev *udev = vma->vm_private_data;
+
+ pr_debug("vma_close\n");
+
+ /* release ref from tcmu_vma_open */
+ kref_put(&udev->kref, tcmu_dev_kref_release);
+}
+
+static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf)
+{
+ struct tcmu_dev *udev = vmf->vma->vm_private_data;
+ struct uio_info *info = &udev->uio_info;
+ struct page *page;
+ unsigned long offset;
+ void *addr;
+ vm_fault_t ret = 0;
+
+ int mi = tcmu_find_mem_index(vmf->vma);
+ if (mi < 0)
+ return VM_FAULT_SIGBUS;
+
+ /*
+ * We need to subtract mi because userspace uses offset = N*PAGE_SIZE
+ * to use mem[N].
+ */
+ offset = (vmf->pgoff - mi) << PAGE_SHIFT;
+
+ if (offset < udev->data_off) {
+ /* For the vmalloc()ed cmd area pages */
+ addr = (void *)(unsigned long)info->mem[mi].addr + offset;
+ page = vmalloc_to_page(addr);
+ get_page(page);
+ } else {
+ uint32_t dpi;
+
+ /* For the dynamically growing data area pages */
+ dpi = (offset - udev->data_off) / PAGE_SIZE;
+ page = tcmu_try_get_data_page(udev, dpi);
+ if (!page)
+ return VM_FAULT_SIGBUS;
+ ret = VM_FAULT_LOCKED;
+ }
+
+ vmf->page = page;
+ return ret;
+}
+
+static const struct vm_operations_struct tcmu_vm_ops = {
+ .open = tcmu_vma_open,
+ .close = tcmu_vma_close,
+ .fault = tcmu_vma_fault,
+};
+
+static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma)
+{
+ struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
+
+ vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
+ vma->vm_ops = &tcmu_vm_ops;
+
+ vma->vm_private_data = udev;
+
+ /* Ensure the mmap is exactly the right size */
+ if (vma_pages(vma) != udev->mmap_pages)
+ return -EINVAL;
+
+ tcmu_vma_open(vma);
+
+ return 0;
+}
+
+static int tcmu_open(struct uio_info *info, struct inode *inode)
+{
+ struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
+
+ /* O_EXCL not supported for char devs, so fake it? */
+ if (test_and_set_bit(TCMU_DEV_BIT_OPEN, &udev->flags))
+ return -EBUSY;
+
+ udev->inode = inode;
+
+ pr_debug("open\n");
+
+ return 0;
+}
+
+static int tcmu_release(struct uio_info *info, struct inode *inode)
+{
+ struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
+ struct tcmu_cmd *cmd;
+ unsigned long i;
+ bool freed = false;
+
+ mutex_lock(&udev->cmdr_lock);
+
+ xa_for_each(&udev->commands, i, cmd) {
+ /* Cmds with KEEP_BUF set are no longer on the ring, but
+ * userspace still holds the data buffer. If userspace closes
+ * we implicitly free these cmds and buffers, since after a new
+ * open the (possibly new) userspace cannot find the cmd in the
+ * ring and thus will never release the buffer by writing its
+ * cmd_id to the free_kept_buf action attribute.
+ */
+ if (!test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags))
+ continue;
+ pr_debug("removing KEEP_BUF cmd %u on dev %s from ring\n",
+ cmd->cmd_id, udev->name);
+ freed = true;
+
+ xa_erase(&udev->commands, i);
+ tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
+ tcmu_free_cmd(cmd);
+ }
+ /*
+ * We only freed data space, not ring space. Therefore we don't call
+ * run_tmr_queue, but call run_qfull_queue if tmr_queue is empty.
+ */
+ if (freed && list_empty(&udev->tmr_queue))
+ run_qfull_queue(udev, false);
+
+ mutex_unlock(&udev->cmdr_lock);
+
+ clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags);
+
+ pr_debug("close\n");
+
+ return 0;
+}
+
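+/*
+ * Netlink reply handshake: if userspace supports replies, register the
+ * pending command on tcmu_nl_cmd_list before multicasting the event, then
+ * wait in tcmu_wait_genl_cmd_reply() for the matching *_DONE genl command
+ * (handled by tcmu_genl_cmd_done()) to complete it.
+ */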
+static int tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd)
+{
+ struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
+
+ if (!tcmu_kern_cmd_reply_supported)
+ return 0;
+
+ if (udev->nl_reply_supported <= 0)
+ return 0;
+
+ mutex_lock(&tcmu_nl_cmd_mutex);
+
+ if (tcmu_netlink_blocked) {
+ mutex_unlock(&tcmu_nl_cmd_mutex);
+ pr_warn("Failing nl cmd %d on %s. Interface is blocked.\n", cmd,
+ udev->name);
+ return -EAGAIN;
+ }
+
+ if (nl_cmd->cmd != TCMU_CMD_UNSPEC) {
+ mutex_unlock(&tcmu_nl_cmd_mutex);
+ pr_warn("netlink cmd %d already executing on %s\n",
+ nl_cmd->cmd, udev->name);
+ return -EBUSY;
+ }
+
+ memset(nl_cmd, 0, sizeof(*nl_cmd));
+ nl_cmd->cmd = cmd;
+ nl_cmd->udev = udev;
+ init_completion(&nl_cmd->complete);
+ INIT_LIST_HEAD(&nl_cmd->nl_list);
+
+ list_add_tail(&nl_cmd->nl_list, &tcmu_nl_cmd_list);
+
+ mutex_unlock(&tcmu_nl_cmd_mutex);
+ return 0;
+}
+
+static void tcmu_destroy_genl_cmd_reply(struct tcmu_dev *udev)
+{
+ struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
+
+ if (!tcmu_kern_cmd_reply_supported)
+ return;
+
+ if (udev->nl_reply_supported <= 0)
+ return;
+
+ mutex_lock(&tcmu_nl_cmd_mutex);
+
+ list_del(&nl_cmd->nl_list);
+ memset(nl_cmd, 0, sizeof(*nl_cmd));
+
+ mutex_unlock(&tcmu_nl_cmd_mutex);
+}
+
+static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev)
+{
+ struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
+ int ret;
+
+ if (!tcmu_kern_cmd_reply_supported)
+ return 0;
+
+ if (udev->nl_reply_supported <= 0)
+ return 0;
+
+ pr_debug("sleeping for nl reply\n");
+ wait_for_completion(&nl_cmd->complete);
+
+ mutex_lock(&tcmu_nl_cmd_mutex);
+ nl_cmd->cmd = TCMU_CMD_UNSPEC;
+ ret = nl_cmd->status;
+ mutex_unlock(&tcmu_nl_cmd_mutex);
+
+ return ret;
+}
+
+static int tcmu_netlink_event_init(struct tcmu_dev *udev,
+ enum tcmu_genl_cmd cmd,
+ struct sk_buff **buf, void **hdr)
+{
+ struct sk_buff *skb;
+ void *msg_header;
+ int ret = -ENOMEM;
+
+ skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!skb)
+ return ret;
+
+ msg_header = genlmsg_put(skb, 0, 0, &tcmu_genl_family, 0, cmd);
+ if (!msg_header)
+ goto free_skb;
+
+ ret = nla_put_string(skb, TCMU_ATTR_DEVICE, udev->uio_info.name);
+ if (ret < 0)
+ goto free_skb;
+
+ ret = nla_put_u32(skb, TCMU_ATTR_MINOR, udev->uio_info.uio_dev->minor);
+ if (ret < 0)
+ goto free_skb;
+
+ ret = nla_put_u32(skb, TCMU_ATTR_DEVICE_ID, udev->se_dev.dev_index);
+ if (ret < 0)
+ goto free_skb;
+
+ *buf = skb;
+ *hdr = msg_header;
+ return ret;
+
+free_skb:
+ nlmsg_free(skb);
+ return ret;
+}
+
+static int tcmu_netlink_event_send(struct tcmu_dev *udev,
+ enum tcmu_genl_cmd cmd,
+ struct sk_buff *skb, void *msg_header)
+{
+ int ret;
+
+ genlmsg_end(skb, msg_header);
+
+ ret = tcmu_init_genl_cmd_reply(udev, cmd);
+ if (ret) {
+ nlmsg_free(skb);
+ return ret;
+ }
+
+ ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0,
+ TCMU_MCGRP_CONFIG);
+
+ /* Wait during an add as the listener may not be up yet */
+ if (ret == 0 ||
+ (ret == -ESRCH && cmd == TCMU_CMD_ADDED_DEVICE))
+ return tcmu_wait_genl_cmd_reply(udev);
+ else
+ tcmu_destroy_genl_cmd_reply(udev);
+
+ return ret;
+}
+
+static int tcmu_send_dev_add_event(struct tcmu_dev *udev)
+{
+ struct sk_buff *skb = NULL;
+ void *msg_header = NULL;
+ int ret = 0;
+
+ ret = tcmu_netlink_event_init(udev, TCMU_CMD_ADDED_DEVICE, &skb,
+ &msg_header);
+ if (ret < 0)
+ return ret;
+ return tcmu_netlink_event_send(udev, TCMU_CMD_ADDED_DEVICE, skb,
+ msg_header);
+}
+
+static int tcmu_send_dev_remove_event(struct tcmu_dev *udev)
+{
+ struct sk_buff *skb = NULL;
+ void *msg_header = NULL;
+ int ret = 0;
+
+ ret = tcmu_netlink_event_init(udev, TCMU_CMD_REMOVED_DEVICE,
+ &skb, &msg_header);
+ if (ret < 0)
+ return ret;
+ return tcmu_netlink_event_send(udev, TCMU_CMD_REMOVED_DEVICE,
+ skb, msg_header);
+}
+
+static int tcmu_update_uio_info(struct tcmu_dev *udev)
+{
+ struct tcmu_hba *hba = udev->hba->hba_ptr;
+ struct uio_info *info;
+ char *str;
+
+ info = &udev->uio_info;
+
+ if (udev->dev_config[0])
+ str = kasprintf(GFP_KERNEL, "tcm-user/%u/%s/%s", hba->host_id,
+ udev->name, udev->dev_config);
+ else
+ str = kasprintf(GFP_KERNEL, "tcm-user/%u/%s", hba->host_id,
+ udev->name);
+ if (!str)
+ return -ENOMEM;
+
+ /* If the old string exists, free it */
+ kfree(info->name);
+ info->name = str;
+
+ return 0;
+}
+
+static int tcmu_configure_device(struct se_device *dev)
+{
+ struct tcmu_dev *udev = TCMU_DEV(dev);
+ struct uio_info *info;
+ struct tcmu_mailbox *mb;
+ size_t data_size;
+ int ret = 0;
+
+ ret = tcmu_update_uio_info(udev);
+ if (ret)
+ return ret;
+
+ info = &udev->uio_info;
+
+ mutex_lock(&udev->cmdr_lock);
+ udev->data_bitmap = bitmap_zalloc(udev->max_blocks, GFP_KERNEL);
+ mutex_unlock(&udev->cmdr_lock);
+ if (!udev->data_bitmap) {
+ ret = -ENOMEM;
+ goto err_bitmap_alloc;
+ }
+
+ mb = vzalloc(udev->cmdr_size + CMDR_OFF);
+ if (!mb) {
+ ret = -ENOMEM;
+ goto err_vzalloc;
+ }
+
+ /* mailbox fits in first part of CMDR space */
+ udev->mb_addr = mb;
+ udev->cmdr = (void *)mb + CMDR_OFF;
+ udev->data_off = udev->cmdr_size + CMDR_OFF;
+ data_size = TCMU_MBS_TO_PAGES(udev->data_area_mb) << PAGE_SHIFT;
+ udev->mmap_pages = (data_size + udev->cmdr_size + CMDR_OFF) >> PAGE_SHIFT;
+ udev->data_blk_size = udev->data_pages_per_blk * PAGE_SIZE;
+ udev->dbi_thresh = 0; /* Default in Idle state */
+
+ /* Initialise the mailbox of the ring buffer */
+ mb->version = TCMU_MAILBOX_VERSION;
+ mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC |
+ TCMU_MAILBOX_FLAG_CAP_READ_LEN |
+ TCMU_MAILBOX_FLAG_CAP_TMR |
+ TCMU_MAILBOX_FLAG_CAP_KEEP_BUF;
+ mb->cmdr_off = CMDR_OFF;
+ mb->cmdr_size = udev->cmdr_size;
+
+ WARN_ON(!PAGE_ALIGNED(udev->data_off));
+ WARN_ON(data_size % PAGE_SIZE);
+
+ info->version = __stringify(TCMU_MAILBOX_VERSION);
+
+ info->mem[0].name = "tcm-user command & data buffer";
+ info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr;
+ info->mem[0].size = data_size + udev->cmdr_size + CMDR_OFF;
+ info->mem[0].memtype = UIO_MEM_NONE;
+
+ info->irqcontrol = tcmu_irqcontrol;
+ info->irq = UIO_IRQ_CUSTOM;
+
+ info->mmap = tcmu_mmap;
+ info->open = tcmu_open;
+ info->release = tcmu_release;
+
+ ret = uio_register_device(tcmu_root_device, info);
+ if (ret)
+ goto err_register;
+
+ /* User can set hw_block_size before enabling the device */
+ if (dev->dev_attrib.hw_block_size == 0)
+ dev->dev_attrib.hw_block_size = 512;
+ /* Other attributes can be configured in userspace */
+ if (!dev->dev_attrib.hw_max_sectors)
+ dev->dev_attrib.hw_max_sectors = 128;
+ if (!dev->dev_attrib.emulate_write_cache)
+ dev->dev_attrib.emulate_write_cache = 0;
+ dev->dev_attrib.hw_queue_depth = 128;
+
+ /* If user didn't explicitly disable netlink reply support, use
+ * module scope setting.
+ */
+ if (udev->nl_reply_supported >= 0)
+ udev->nl_reply_supported = tcmu_kern_cmd_reply_supported;
+
+ /*
+ * Get a ref in case userspace does a close on the uio device before
+ * LIO has initiated tcmu_free_device.
+ */
+ kref_get(&udev->kref);
+
+ ret = tcmu_send_dev_add_event(udev);
+ if (ret)
+ goto err_netlink;
+
+ mutex_lock(&root_udev_mutex);
+ list_add(&udev->node, &root_udev);
+ mutex_unlock(&root_udev_mutex);
+
+ return 0;
+
+err_netlink:
+ kref_put(&udev->kref, tcmu_dev_kref_release);
+ uio_unregister_device(&udev->uio_info);
+err_register:
+ vfree(udev->mb_addr);
+ udev->mb_addr = NULL;
+err_vzalloc:
+ bitmap_free(udev->data_bitmap);
+ udev->data_bitmap = NULL;
+err_bitmap_alloc:
+ kfree(info->name);
+ info->name = NULL;
+
+ return ret;
+}
+
+static void tcmu_free_device(struct se_device *dev)
+{
+ struct tcmu_dev *udev = TCMU_DEV(dev);
+
+ /* release ref from init */
+ kref_put(&udev->kref, tcmu_dev_kref_release);
+}
+
+static void tcmu_destroy_device(struct se_device *dev)
+{
+ struct tcmu_dev *udev = TCMU_DEV(dev);
+
+ timer_delete_sync(&udev->cmd_timer);
+ timer_delete_sync(&udev->qfull_timer);
+
+ mutex_lock(&root_udev_mutex);
+ list_del(&udev->node);
+ mutex_unlock(&root_udev_mutex);
+
+ tcmu_send_dev_remove_event(udev);
+
+ uio_unregister_device(&udev->uio_info);
+
+ /* release ref from configure */
+ kref_put(&udev->kref, tcmu_dev_kref_release);
+}
+
+static void tcmu_unblock_dev(struct tcmu_dev *udev)
+{
+ mutex_lock(&udev->cmdr_lock);
+ clear_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags);
+ mutex_unlock(&udev->cmdr_lock);
+}
+
+static void tcmu_block_dev(struct tcmu_dev *udev)
+{
+ mutex_lock(&udev->cmdr_lock);
+
+ if (test_and_set_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags))
+ goto unlock;
+
+ /* complete IO that has executed successfully */
+ tcmu_handle_completions(udev);
+ /* fail IO waiting to be queued */
+ run_qfull_queue(udev, true);
+
+unlock:
+ mutex_unlock(&udev->cmdr_lock);
+}
+
+static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
+{
+ struct tcmu_mailbox *mb;
+ struct tcmu_cmd *cmd;
+ unsigned long i;
+
+ mutex_lock(&udev->cmdr_lock);
+
+ xa_for_each(&udev->commands, i, cmd) {
+ pr_debug("removing cmd %u on dev %s from ring %s\n",
+ cmd->cmd_id, udev->name,
+ test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags) ?
+ "(is expired)" :
+ (test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags) ?
+ "(is keep buffer)" : ""));
+
+ xa_erase(&udev->commands, i);
+ if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags) &&
+ !test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags)) {
+ WARN_ON(!cmd->se_cmd);
+ list_del_init(&cmd->queue_entry);
+ cmd->se_cmd->priv = NULL;
+ if (err_level == 1) {
+ /*
+ * Userspace was not able to start the
+ * command or it is retryable.
+ */
+ target_complete_cmd(cmd->se_cmd, SAM_STAT_BUSY);
+ } else {
+ /* hard failure */
+ target_complete_cmd(cmd->se_cmd,
+ SAM_STAT_CHECK_CONDITION);
+ }
+ }
+ tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
+ tcmu_free_cmd(cmd);
+ }
+
+ mb = udev->mb_addr;
+ tcmu_flush_dcache_range(mb, sizeof(*mb));
+ pr_debug("mb last %u head %u tail %u\n", udev->cmdr_last_cleaned,
+ mb->cmd_tail, mb->cmd_head);
+
+ udev->cmdr_last_cleaned = 0;
+ mb->cmd_tail = 0;
+ mb->cmd_head = 0;
+ tcmu_flush_dcache_range(mb, sizeof(*mb));
+ clear_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
+
+ timer_delete(&udev->cmd_timer);
+
+ /*
+ * The ring is empty and the qfull queue never contains aborted
+ * commands, so the TMRs in the tmr queue do not contain relevant
+ * cmd_ids. After a ring reset userspace should do a fresh start,
+ * so even a LUN RESET message is no longer relevant.
+ * Therefore remove all TMRs from the tmr queue.
+ */
+ tcmu_remove_all_queued_tmr(udev);
+
+ run_qfull_queue(udev, false);
+
+ mutex_unlock(&udev->cmdr_lock);
+}
+
+enum {
+ Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors,
+ Opt_nl_reply_supported, Opt_max_data_area_mb, Opt_data_pages_per_blk,
+ Opt_cmd_ring_size_mb, Opt_err,
+};
+
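+/*
+ * These options are parsed from the comma/newline separated string written
+ * to the device's configfs "control" attribute, e.g. (sketch, exact path
+ * may vary):
+ *   echo "dev_config=foo,max_data_area_mb=256" > .../<dev>/control
+ */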
+static const match_table_t tokens = {
+ {Opt_dev_config, "dev_config=%s"},
+ {Opt_dev_size, "dev_size=%s"},
+ {Opt_hw_block_size, "hw_block_size=%d"},
+ {Opt_hw_max_sectors, "hw_max_sectors=%d"},
+ {Opt_nl_reply_supported, "nl_reply_supported=%d"},
+ {Opt_max_data_area_mb, "max_data_area_mb=%d"},
+ {Opt_data_pages_per_blk, "data_pages_per_blk=%d"},
+ {Opt_cmd_ring_size_mb, "cmd_ring_size_mb=%d"},
+ {Opt_err, NULL}
+};
+
+static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib)
+{
+ int val, ret;
+
+ ret = match_int(arg, &val);
+ if (ret < 0) {
+ pr_err("match_int() failed for dev attrib. Error %d.\n",
+ ret);
+ return ret;
+ }
+
+ if (val <= 0) {
+ pr_err("Invalid dev attrib value %d. Must be greater than zero.\n",
+ val);
+ return -EINVAL;
+ }
+ *dev_attrib = val;
+ return 0;
+}
+
+static int tcmu_set_max_blocks_param(struct tcmu_dev *udev, substring_t *arg)
+{
+ int val, ret;
+ uint32_t pages_per_blk = udev->data_pages_per_blk;
+
+ ret = match_int(arg, &val);
+ if (ret < 0) {
+ pr_err("match_int() failed for max_data_area_mb=. Error %d.\n",
+ ret);
+ return ret;
+ }
+ if (val <= 0) {
+ pr_err("Invalid max_data_area %d.\n", val);
+ return -EINVAL;
+ }
+ if (val > TCMU_PAGES_TO_MBS(tcmu_global_max_pages)) {
+ pr_err("%d is too large. Adjusting max_data_area_mb to global limit of %u\n",
+ val, TCMU_PAGES_TO_MBS(tcmu_global_max_pages));
+ val = TCMU_PAGES_TO_MBS(tcmu_global_max_pages);
+ }
+ if (TCMU_MBS_TO_PAGES(val) < pages_per_blk) {
+ pr_err("Invalid max_data_area %d (%zu pages): smaller than data_pages_per_blk (%u pages).\n",
+ val, TCMU_MBS_TO_PAGES(val), pages_per_blk);
+ return -EINVAL;
+ }
+
+ mutex_lock(&udev->cmdr_lock);
+ if (udev->data_bitmap) {
+ pr_err("Cannot set max_data_area_mb after it has been enabled.\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ udev->data_area_mb = val;
+ udev->max_blocks = TCMU_MBS_TO_PAGES(val) / pages_per_blk;
+
+unlock:
+ mutex_unlock(&udev->cmdr_lock);
+ return ret;
+}
+
+static int tcmu_set_data_pages_per_blk(struct tcmu_dev *udev, substring_t *arg)
+{
+ int val, ret;
+
+ ret = match_int(arg, &val);
+ if (ret < 0) {
+ pr_err("match_int() failed for data_pages_per_blk=. Error %d.\n",
+ ret);
+ return ret;
+ }
+
+ if (val > TCMU_MBS_TO_PAGES(udev->data_area_mb)) {
+ pr_err("Invalid data_pages_per_blk %d: greater than max_data_area_mb %d -> %zd pages).\n",
+ val, udev->data_area_mb,
+ TCMU_MBS_TO_PAGES(udev->data_area_mb));
+ return -EINVAL;
+ }
+
+ mutex_lock(&udev->cmdr_lock);
+ if (udev->data_bitmap) {
+ pr_err("Cannot set data_pages_per_blk after it has been enabled.\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ udev->data_pages_per_blk = val;
+ udev->max_blocks = TCMU_MBS_TO_PAGES(udev->data_area_mb) / val;
+
+unlock:
+ mutex_unlock(&udev->cmdr_lock);
+ return ret;
+}
+
+static int tcmu_set_cmd_ring_size(struct tcmu_dev *udev, substring_t *arg)
+{
+ int val, ret;
+
+ ret = match_int(arg, &val);
+ if (ret < 0) {
+ pr_err("match_int() failed for cmd_ring_size_mb=. Error %d.\n",
+ ret);
+ return ret;
+ }
+
+ if (val <= 0) {
+ pr_err("Invalid cmd_ring_size_mb %d.\n", val);
+ return -EINVAL;
+ }
+
+ mutex_lock(&udev->cmdr_lock);
+ if (udev->data_bitmap) {
+ pr_err("Cannot set cmd_ring_size_mb after it has been enabled.\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ udev->cmdr_size = (val << 20) - CMDR_OFF;
+ if (val > (MB_CMDR_SIZE_DEF >> 20)) {
+ pr_err("%d is too large. Adjusting cmd_ring_size_mb to global limit of %u\n",
+ val, (MB_CMDR_SIZE_DEF >> 20));
+ udev->cmdr_size = CMDR_SIZE_DEF;
+ }
+
+unlock:
+ mutex_unlock(&udev->cmdr_lock);
+ return ret;
+}
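+
+/*
+ * Worked example, added for illustration: cmd_ring_size_mb=8 reserves an
+ * 8 MiB shared area whose first CMDR_OFF bytes hold the mailbox, leaving
+ * (8 << 20) - CMDR_OFF bytes of usable command ring.
+ */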
+
+static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
+ const char *page, ssize_t count)
+{
+ struct tcmu_dev *udev = TCMU_DEV(dev);
+ char *orig, *ptr, *opts;
+ substring_t args[MAX_OPT_ARGS];
+ int ret = 0, token;
+
+ opts = kstrdup(page, GFP_KERNEL);
+ if (!opts)
+ return -ENOMEM;
+
+ orig = opts;
+
+ while ((ptr = strsep(&opts, ",\n")) != NULL) {
+ if (!*ptr)
+ continue;
+
+ token = match_token(ptr, tokens, args);
+ switch (token) {
+ case Opt_dev_config:
+ if (match_strlcpy(udev->dev_config, &args[0],
+ TCMU_CONFIG_LEN) == 0) {
+ ret = -EINVAL;
+ break;
+ }
+ pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config);
+ break;
+ case Opt_dev_size:
+ ret = match_u64(&args[0], &udev->dev_size);
+ if (ret < 0)
+ pr_err("match_u64() failed for dev_size=. Error %d.\n",
+ ret);
+ break;
+ case Opt_hw_block_size:
+ ret = tcmu_set_dev_attrib(&args[0],
+ &(dev->dev_attrib.hw_block_size));
+ break;
+ case Opt_hw_max_sectors:
+ ret = tcmu_set_dev_attrib(&args[0],
+ &(dev->dev_attrib.hw_max_sectors));
+ break;
+ case Opt_nl_reply_supported:
+ ret = match_int(&args[0], &udev->nl_reply_supported);
+ if (ret < 0)
+ pr_err("match_int() failed for nl_reply_supported=. Error %d.\n",
+ ret);
+ break;
+ case Opt_max_data_area_mb:
+ ret = tcmu_set_max_blocks_param(udev, &args[0]);
+ break;
+ case Opt_data_pages_per_blk:
+ ret = tcmu_set_data_pages_per_blk(udev, &args[0]);
+ break;
+ case Opt_cmd_ring_size_mb:
+ ret = tcmu_set_cmd_ring_size(udev, &args[0]);
+ break;
+ default:
+ break;
+ }
+
+ if (ret)
+ break;
+ }
+
+ kfree(orig);
+ return (!ret) ? count : ret;
+}
+
+static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b)
+{
+ struct tcmu_dev *udev = TCMU_DEV(dev);
+ ssize_t bl = 0;
+
+ bl = sprintf(b + bl, "Config: %s ",
+ udev->dev_config[0] ? udev->dev_config : "NULL");
+ bl += sprintf(b + bl, "Size: %llu ", udev->dev_size);
+ bl += sprintf(b + bl, "MaxDataAreaMB: %u ", udev->data_area_mb);
+ bl += sprintf(b + bl, "DataPagesPerBlk: %u ", udev->data_pages_per_blk);
+ bl += sprintf(b + bl, "CmdRingSizeMB: %u\n",
+ (udev->cmdr_size + CMDR_OFF) >> 20);
+
+ return bl;
+}
+
+static sector_t tcmu_get_blocks(struct se_device *dev)
+{
+ struct tcmu_dev *udev = TCMU_DEV(dev);
+
+ return div_u64(udev->dev_size - dev->dev_attrib.block_size,
+ dev->dev_attrib.block_size);
+}
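+
+/*
+ * Note, added for illustration: get_blocks() is expected to report the
+ * last addressable LBA rather than the raw block count, hence the
+ * subtraction of one block. E.g. dev_size of 1 GiB with 512-byte blocks
+ * yields (1073741824 - 512) / 512 = 2097151, i.e. LBAs 0..2097151.
+ */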
+
+static sense_reason_t
+tcmu_parse_cdb(struct se_cmd *cmd)
+{
+ return passthrough_parse_cdb(cmd, tcmu_queue_cmd);
+}
+
+static ssize_t tcmu_cmd_time_out_show(struct config_item *item, char *page)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+
+ return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC);
+}
+
+static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *page,
+ size_t count)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = container_of(da->da_dev,
+ struct tcmu_dev, se_dev);
+ u32 val;
+ int ret;
+
+ if (da->da_dev->export_count) {
+ pr_err("Unable to set tcmu cmd_time_out while exports exist\n");
+ return -EINVAL;
+ }
+
+ ret = kstrtou32(page, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ udev->cmd_time_out = val * MSEC_PER_SEC;
+ return count;
+}
+CONFIGFS_ATTR(tcmu_, cmd_time_out);
+
+static ssize_t tcmu_qfull_time_out_show(struct config_item *item, char *page)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+
+ return snprintf(page, PAGE_SIZE, "%ld\n", udev->qfull_time_out <= 0 ?
+ udev->qfull_time_out :
+ udev->qfull_time_out / MSEC_PER_SEC);
+}
+
+static ssize_t tcmu_qfull_time_out_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+ s32 val;
+ int ret;
+
+ ret = kstrtos32(page, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val >= 0) {
+ udev->qfull_time_out = val * MSEC_PER_SEC;
+ } else if (val == -1) {
+ udev->qfull_time_out = val;
+ } else {
+ pr_err("Invalid qfull timeout value %d\n", val);
+ return -EINVAL;
+ }
+ return count;
+}
+CONFIGFS_ATTR(tcmu_, qfull_time_out);
+
+static ssize_t tcmu_max_data_area_mb_show(struct config_item *item, char *page)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+
+ return snprintf(page, PAGE_SIZE, "%u\n", udev->data_area_mb);
+}
+CONFIGFS_ATTR_RO(tcmu_, max_data_area_mb);
+
+static ssize_t tcmu_data_pages_per_blk_show(struct config_item *item,
+ char *page)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+
+ return snprintf(page, PAGE_SIZE, "%u\n", udev->data_pages_per_blk);
+}
+CONFIGFS_ATTR_RO(tcmu_, data_pages_per_blk);
+
+static ssize_t tcmu_cmd_ring_size_mb_show(struct config_item *item, char *page)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+
+ return snprintf(page, PAGE_SIZE, "%u\n",
+ (udev->cmdr_size + CMDR_OFF) >> 20);
+}
+CONFIGFS_ATTR_RO(tcmu_, cmd_ring_size_mb);
+
+static ssize_t tcmu_dev_config_show(struct config_item *item, char *page)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+
+ return snprintf(page, PAGE_SIZE, "%s\n", udev->dev_config);
+}
+
+static int tcmu_send_dev_config_event(struct tcmu_dev *udev,
+ const char *reconfig_data)
+{
+ struct sk_buff *skb = NULL;
+ void *msg_header = NULL;
+ int ret = 0;
+
+ ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
+ &skb, &msg_header);
+ if (ret < 0)
+ return ret;
+ ret = nla_put_string(skb, TCMU_ATTR_DEV_CFG, reconfig_data);
+ if (ret < 0) {
+ nlmsg_free(skb);
+ return ret;
+ }
+ return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
+ skb, msg_header);
+}
+
+static ssize_t tcmu_dev_config_store(struct config_item *item, const char *page,
+ size_t count)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+ int ret, len;
+
+ len = strlen(page);
+ if (!len || len > TCMU_CONFIG_LEN - 1)
+ return -EINVAL;
+
+ /* Check if device has been configured before */
+ if (target_dev_configured(&udev->se_dev)) {
+ ret = tcmu_send_dev_config_event(udev, page);
+ if (ret) {
+ pr_err("Unable to reconfigure device\n");
+ return ret;
+ }
+ strscpy(udev->dev_config, page, TCMU_CONFIG_LEN);
+
+ ret = tcmu_update_uio_info(udev);
+ if (ret)
+ return ret;
+ return count;
+ }
+ strscpy(udev->dev_config, page, TCMU_CONFIG_LEN);
+
+ return count;
+}
+CONFIGFS_ATTR(tcmu_, dev_config);
+
+static ssize_t tcmu_dev_size_show(struct config_item *item, char *page)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+
+ return snprintf(page, PAGE_SIZE, "%llu\n", udev->dev_size);
+}
+
+static int tcmu_send_dev_size_event(struct tcmu_dev *udev, u64 size)
+{
+ struct sk_buff *skb = NULL;
+ void *msg_header = NULL;
+ int ret = 0;
+
+ ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
+ &skb, &msg_header);
+ if (ret < 0)
+ return ret;
+ ret = nla_put_u64_64bit(skb, TCMU_ATTR_DEV_SIZE,
+ size, TCMU_ATTR_PAD);
+ if (ret < 0) {
+ nlmsg_free(skb);
+ return ret;
+ }
+ return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
+ skb, msg_header);
+}
+
+static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page,
+ size_t count)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+ u64 val;
+ int ret;
+
+ ret = kstrtou64(page, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ /* Check if device has been configured before */
+ if (target_dev_configured(&udev->se_dev)) {
+ ret = tcmu_send_dev_size_event(udev, val);
+ if (ret) {
+ pr_err("Unable to reconfigure device\n");
+ return ret;
+ }
+ }
+ udev->dev_size = val;
+ return count;
+}
+CONFIGFS_ATTR(tcmu_, dev_size);
+
+static ssize_t tcmu_nl_reply_supported_show(struct config_item *item,
+ char *page)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+
+ return snprintf(page, PAGE_SIZE, "%d\n", udev->nl_reply_supported);
+}
+
+static ssize_t tcmu_nl_reply_supported_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+ s8 val;
+ int ret;
+
+ ret = kstrtos8(page, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ udev->nl_reply_supported = val;
+ return count;
+}
+CONFIGFS_ATTR(tcmu_, nl_reply_supported);
+
+static ssize_t tcmu_emulate_write_cache_show(struct config_item *item,
+ char *page)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+
+ return snprintf(page, PAGE_SIZE, "%i\n", da->emulate_write_cache);
+}
+
+static int tcmu_send_emulate_write_cache(struct tcmu_dev *udev, u8 val)
+{
+ struct sk_buff *skb = NULL;
+ void *msg_header = NULL;
+ int ret = 0;
+
+ ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
+ &skb, &msg_header);
+ if (ret < 0)
+ return ret;
+ ret = nla_put_u8(skb, TCMU_ATTR_WRITECACHE, val);
+ if (ret < 0) {
+ nlmsg_free(skb);
+ return ret;
+ }
+ return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
+ skb, msg_header);
+}
+
+static ssize_t tcmu_emulate_write_cache_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+ u8 val;
+ int ret;
+
+ ret = kstrtou8(page, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ /* Check if device has been configured before */
+ if (target_dev_configured(&udev->se_dev)) {
+ ret = tcmu_send_emulate_write_cache(udev, val);
+ if (ret) {
+ pr_err("Unable to reconfigure device\n");
+ return ret;
+ }
+ }
+
+ da->emulate_write_cache = val;
+ return count;
+}
+CONFIGFS_ATTR(tcmu_, emulate_write_cache);
+
+static ssize_t tcmu_tmr_notification_show(struct config_item *item, char *page)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+
+ return snprintf(page, PAGE_SIZE, "%i\n",
+ test_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags));
+}
+
+static ssize_t tcmu_tmr_notification_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+ u8 val;
+ int ret;
+
+ ret = kstrtou8(page, 0, &val);
+ if (ret < 0)
+ return ret;
+ if (val > 1)
+ return -EINVAL;
+
+ if (val)
+ set_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags);
+ else
+ clear_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags);
+ return count;
+}
+CONFIGFS_ATTR(tcmu_, tmr_notification);
+
+static ssize_t tcmu_block_dev_show(struct config_item *item, char *page)
+{
+ struct se_device *se_dev = container_of(to_config_group(item),
+ struct se_device,
+ dev_action_group);
+ struct tcmu_dev *udev = TCMU_DEV(se_dev);
+
+ if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags))
+ return snprintf(page, PAGE_SIZE, "%s\n", "blocked");
+ else
+ return snprintf(page, PAGE_SIZE, "%s\n", "unblocked");
+}
+
+static ssize_t tcmu_block_dev_store(struct config_item *item, const char *page,
+ size_t count)
+{
+ struct se_device *se_dev = container_of(to_config_group(item),
+ struct se_device,
+ dev_action_group);
+ struct tcmu_dev *udev = TCMU_DEV(se_dev);
+ u8 val;
+ int ret;
+
+ if (!target_dev_configured(&udev->se_dev)) {
+ pr_err("Device is not configured.\n");
+ return -EINVAL;
+ }
+
+ ret = kstrtou8(page, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val > 1) {
+ pr_err("Invalid block value %d\n", val);
+ return -EINVAL;
+ }
+
+ if (!val)
+ tcmu_unblock_dev(udev);
+ else
+ tcmu_block_dev(udev);
+ return count;
+}
+CONFIGFS_ATTR(tcmu_, block_dev);
+
+static ssize_t tcmu_reset_ring_store(struct config_item *item, const char *page,
+ size_t count)
+{
+ struct se_device *se_dev = container_of(to_config_group(item),
+ struct se_device,
+ dev_action_group);
+ struct tcmu_dev *udev = TCMU_DEV(se_dev);
+ u8 val;
+ int ret;
+
+ if (!target_dev_configured(&udev->se_dev)) {
+ pr_err("Device is not configured.\n");
+ return -EINVAL;
+ }
+
+ ret = kstrtou8(page, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val != 1 && val != 2) {
+ pr_err("Invalid reset ring value %d\n", val);
+ return -EINVAL;
+ }
+
+ tcmu_reset_ring(udev, val);
+ return count;
+}
+CONFIGFS_ATTR_WO(tcmu_, reset_ring);
+
+static ssize_t tcmu_free_kept_buf_store(struct config_item *item, const char *page,
+ size_t count)
+{
+ struct se_device *se_dev = container_of(to_config_group(item),
+ struct se_device,
+ dev_action_group);
+ struct tcmu_dev *udev = TCMU_DEV(se_dev);
+ struct tcmu_cmd *cmd;
+ u16 cmd_id;
+ int ret;
+
+ if (!target_dev_configured(&udev->se_dev)) {
+ pr_err("Device is not configured.\n");
+ return -EINVAL;
+ }
+
+ ret = kstrtou16(page, 0, &cmd_id);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&udev->cmdr_lock);
+
+ {
+ XA_STATE(xas, &udev->commands, cmd_id);
+
+ xas_lock(&xas);
+ cmd = xas_load(&xas);
+ if (!cmd) {
+ pr_err("free_kept_buf: cmd_id %d not found\n", cmd_id);
+ count = -EINVAL;
+ xas_unlock(&xas);
+ goto out_unlock;
+ }
+ if (!test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags)) {
+ pr_err("free_kept_buf: cmd_id %d was not completed with KEEP_BUF\n",
+ cmd_id);
+ count = -EINVAL;
+ xas_unlock(&xas);
+ goto out_unlock;
+ }
+ xas_store(&xas, NULL);
+ xas_unlock(&xas);
+ }
+
+ tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
+ tcmu_free_cmd(cmd);
+ /*
+ * We only freed data space, not ring space. Therefore we don't call
+ * run_tmr_queue, but call run_qfull_queue if tmr_list is empty.
+ */
+ if (list_empty(&udev->tmr_queue))
+ run_qfull_queue(udev, false);
+
+out_unlock:
+ mutex_unlock(&udev->cmdr_lock);
+ return count;
+}
+CONFIGFS_ATTR_WO(tcmu_, free_kept_buf);
+
+static struct configfs_attribute *tcmu_attrib_attrs[] = {
+ &tcmu_attr_cmd_time_out,
+ &tcmu_attr_qfull_time_out,
+ &tcmu_attr_max_data_area_mb,
+ &tcmu_attr_data_pages_per_blk,
+ &tcmu_attr_cmd_ring_size_mb,
+ &tcmu_attr_dev_config,
+ &tcmu_attr_dev_size,
+ &tcmu_attr_emulate_write_cache,
+ &tcmu_attr_tmr_notification,
+ &tcmu_attr_nl_reply_supported,
+ NULL,
+};
+
+static struct configfs_attribute **tcmu_attrs;
+
+static struct configfs_attribute *tcmu_action_attrs[] = {
+ &tcmu_attr_block_dev,
+ &tcmu_attr_reset_ring,
+ &tcmu_attr_free_kept_buf,
+ NULL,
+};
+
+static struct target_backend_ops tcmu_ops = {
+ .name = "user",
+ .owner = THIS_MODULE,
+ .transport_flags_default = TRANSPORT_FLAG_PASSTHROUGH,
+ .transport_flags_changeable = TRANSPORT_FLAG_PASSTHROUGH_PGR |
+ TRANSPORT_FLAG_PASSTHROUGH_ALUA,
+ .attach_hba = tcmu_attach_hba,
+ .detach_hba = tcmu_detach_hba,
+ .alloc_device = tcmu_alloc_device,
+ .configure_device = tcmu_configure_device,
+ .destroy_device = tcmu_destroy_device,
+ .free_device = tcmu_free_device,
+ .unplug_device = tcmu_unplug_device,
+ .plug_device = tcmu_plug_device,
+ .parse_cdb = tcmu_parse_cdb,
+ .tmr_notify = tcmu_tmr_notify,
+ .set_configfs_dev_params = tcmu_set_configfs_dev_params,
+ .show_configfs_dev_params = tcmu_show_configfs_dev_params,
+ .get_device_type = sbc_get_device_type,
+ .get_blocks = tcmu_get_blocks,
+ .tb_dev_action_attrs = tcmu_action_attrs,
+};
+
+static void find_free_blocks(void)
+{
+ struct tcmu_dev *udev;
+ loff_t off;
+ u32 pages_freed, total_pages_freed = 0;
+ u32 start, end, block, total_blocks_freed = 0;
+
+ if (atomic_read(&global_page_count) <= tcmu_global_max_pages)
+ return;
+
+ mutex_lock(&root_udev_mutex);
+ list_for_each_entry(udev, &root_udev, node) {
+ mutex_lock(&udev->cmdr_lock);
+
+ if (!target_dev_configured(&udev->se_dev)) {
+ mutex_unlock(&udev->cmdr_lock);
+ continue;
+ }
+
+ /* Try to complete the finished commands first */
+ if (tcmu_handle_completions(udev))
+ run_qfull_queue(udev, false);
+
+ /* Skip udevs that are idle */
+ if (!udev->dbi_thresh) {
+ mutex_unlock(&udev->cmdr_lock);
+ continue;
+ }
+
+ end = udev->dbi_max + 1;
+ block = find_last_bit(udev->data_bitmap, end);
+ if (block == udev->dbi_max) {
+ /*
+ * The last bit is dbi_max, so it is not possible to
+ * reclaim any blocks.
+ */
+ mutex_unlock(&udev->cmdr_lock);
+ continue;
+ } else if (block == end) {
+ /* The current udev will goto idle state */
+ udev->dbi_thresh = start = 0;
+ udev->dbi_max = 0;
+ } else {
+ udev->dbi_thresh = start = block + 1;
+ udev->dbi_max = block;
+ }
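+
+ /*
+ * Worked example, added for illustration: with dbi_max = 7
+ * (end = 8) and the highest set bit at block 3, start becomes 4
+ * and blocks 4..7 are reclaimed below; if no bit is set at all,
+ * find_last_bit() returns end and the udev goes idle.
+ */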
+
+ /*
+ * Release the block pages.
+ *
+ * Also note that since tcmu_vma_fault() gets an extra page
+ * refcount, tcmu_blocks_release() won't free pages if pages
+ * are mapped. This means it is safe to call
+ * tcmu_blocks_release() before unmap_mapping_range() which
+ * drops the refcount of any pages it unmaps and thus releases
+ * them.
+ */
+ pages_freed = tcmu_blocks_release(udev, start, end - 1);
+
+ /* Truncate (unmap) the data area starting at offset off */
+ off = udev->data_off + (loff_t)start * udev->data_blk_size;
+ unmap_mapping_range(udev->inode->i_mapping, off, 0, 1);
+
+ mutex_unlock(&udev->cmdr_lock);
+
+ total_pages_freed += pages_freed;
+ total_blocks_freed += end - start;
+ pr_debug("Freed %u pages (total %u) from %u blocks (total %u) from %s.\n",
+ pages_freed, total_pages_freed, end - start,
+ total_blocks_freed, udev->name);
+ }
+ mutex_unlock(&root_udev_mutex);
+
+ if (atomic_read(&global_page_count) > tcmu_global_max_pages)
+ schedule_delayed_work(&tcmu_unmap_work, msecs_to_jiffies(5000));
+}
+
+static void check_timedout_devices(void)
+{
+ struct tcmu_dev *udev, *tmp_dev;
+ struct tcmu_cmd *cmd, *tmp_cmd;
+ LIST_HEAD(devs);
+
+ spin_lock_bh(&timed_out_udevs_lock);
+ list_splice_init(&timed_out_udevs, &devs);
+
+ list_for_each_entry_safe(udev, tmp_dev, &devs, timedout_entry) {
+ list_del_init(&udev->timedout_entry);
+ spin_unlock_bh(&timed_out_udevs_lock);
+
+ mutex_lock(&udev->cmdr_lock);
+
+ /*
+ * If cmd_time_out is disabled but qfull is set, the deadline
+ * will only reflect the qfull timeout. Ignore it.
+ */
+ if (udev->cmd_time_out) {
+ list_for_each_entry_safe(cmd, tmp_cmd,
+ &udev->inflight_queue,
+ queue_entry) {
+ tcmu_check_expired_ring_cmd(cmd);
+ }
+ tcmu_set_next_deadline(&udev->inflight_queue,
+ &udev->cmd_timer);
+ }
+ list_for_each_entry_safe(cmd, tmp_cmd, &udev->qfull_queue,
+ queue_entry) {
+ tcmu_check_expired_queue_cmd(cmd);
+ }
+ tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
+
+ mutex_unlock(&udev->cmdr_lock);
+
+ spin_lock_bh(&timed_out_udevs_lock);
+ }
+
+ spin_unlock_bh(&timed_out_udevs_lock);
+}
+
+static void tcmu_unmap_work_fn(struct work_struct *work)
+{
+ check_timedout_devices();
+ find_free_blocks();
+}
+
+static int __init tcmu_module_init(void)
+{
+ int ret, i, k, len = 0;
+
+ BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);
+
+ INIT_DELAYED_WORK(&tcmu_unmap_work, tcmu_unmap_work_fn);
+
+ tcmu_cmd_cache = kmem_cache_create("tcmu_cmd_cache",
+ sizeof(struct tcmu_cmd),
+ __alignof__(struct tcmu_cmd),
+ 0, NULL);
+ if (!tcmu_cmd_cache)
+ return -ENOMEM;
+
+ tcmu_root_device = root_device_register("tcm_user");
+ if (IS_ERR(tcmu_root_device)) {
+ ret = PTR_ERR(tcmu_root_device);
+ goto out_free_cache;
+ }
+
+ ret = genl_register_family(&tcmu_genl_family);
+ if (ret < 0) {
+ goto out_unreg_device;
+ }
+
+ for (i = 0; passthrough_attrib_attrs[i] != NULL; i++)
+ len += sizeof(struct configfs_attribute *);
+ for (i = 0; passthrough_pr_attrib_attrs[i] != NULL; i++)
+ len += sizeof(struct configfs_attribute *);
+ for (i = 0; tcmu_attrib_attrs[i] != NULL; i++)
+ len += sizeof(struct configfs_attribute *);
+ len += sizeof(struct configfs_attribute *);
+
+ tcmu_attrs = kzalloc(len, GFP_KERNEL);
+ if (!tcmu_attrs) {
+ ret = -ENOMEM;
+ goto out_unreg_genl;
+ }
+
+ for (i = 0; passthrough_attrib_attrs[i] != NULL; i++)
+ tcmu_attrs[i] = passthrough_attrib_attrs[i];
+ for (k = 0; passthrough_pr_attrib_attrs[k] != NULL; k++)
+ tcmu_attrs[i++] = passthrough_pr_attrib_attrs[k];
+ for (k = 0; tcmu_attrib_attrs[k] != NULL; k++)
+ tcmu_attrs[i++] = tcmu_attrib_attrs[k];
+ tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs;
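+
+ /*
+ * Illustrative note: the merged array is laid out as
+ * [ passthrough..., passthrough_pr..., tcmu-specific..., NULL ],
+ * the NULL terminator coming from kzalloc() and the extra slot
+ * reserved in the length computation above.
+ */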
+
+ ret = transport_backend_register(&tcmu_ops);
+ if (ret)
+ goto out_attrs;
+
+ return 0;
+
+out_attrs:
+ kfree(tcmu_attrs);
+out_unreg_genl:
+ genl_unregister_family(&tcmu_genl_family);
+out_unreg_device:
+ root_device_unregister(tcmu_root_device);
+out_free_cache:
+ kmem_cache_destroy(tcmu_cmd_cache);
+
+ return ret;
+}
+
+static void __exit tcmu_module_exit(void)
+{
+ cancel_delayed_work_sync(&tcmu_unmap_work);
+ target_backend_unregister(&tcmu_ops);
+ kfree(tcmu_attrs);
+ genl_unregister_family(&tcmu_genl_family);
+ root_device_unregister(tcmu_root_device);
+ kmem_cache_destroy(tcmu_cmd_cache);
+}
+
+MODULE_DESCRIPTION("TCM USER subsystem plugin");
+MODULE_AUTHOR("Shaohua Li <shli@kernel.org>");
+MODULE_AUTHOR("Andy Grover <agrover@redhat.com>");
+MODULE_LICENSE("GPL");
+
+module_init(tcmu_module_init);
+module_exit(tcmu_module_exit);
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
new file mode 100644
index 000000000000..93534a6e14b7
--- /dev/null
+++ b/drivers/target/target_core_xcopy.c
@@ -0,0 +1,1041 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*******************************************************************************
+ * Filename: target_core_xcopy.c
+ *
+ * This file contains support for SPC-4 Extended-Copy offload with generic
+ * TCM backends.
+ *
+ * Copyright (c) 2011-2013 Datera, Inc. All rights reserved.
+ *
+ * Author:
+ * Nicholas A. Bellinger <nab@daterainc.com>
+ *
+ ******************************************************************************/
+
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/rculist.h>
+#include <linux/configfs.h>
+#include <linux/ratelimit.h>
+#include <scsi/scsi_proto.h>
+#include <linux/unaligned.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
+
+#include "target_core_internal.h"
+#include "target_core_pr.h"
+#include "target_core_ua.h"
+#include "target_core_xcopy.h"
+
+static struct workqueue_struct *xcopy_wq = NULL;
+
+static sense_reason_t target_parse_xcopy_cmd(struct xcopy_op *xop);
+
+/**
+ * target_xcopy_locate_se_dev_e4_iter - compare XCOPY NAA device identifiers
+ *
+ * @se_dev: device being considered for match
+ * @dev_wwn: XCOPY requested NAA dev_wwn
+ * @return: 1 on match, 0 on no-match
+ */
+static int target_xcopy_locate_se_dev_e4_iter(struct se_device *se_dev,
+ const unsigned char *dev_wwn)
+{
+ unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
+ int rc;
+
+ if (!se_dev->dev_attrib.emulate_3pc) {
+ pr_debug("XCOPY: emulate_3pc disabled on se_dev %p\n", se_dev);
+ return 0;
+ }
+
+ memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
+ spc_gen_naa_6h_vendor_specific(se_dev, &tmp_dev_wwn[0]);
+
+ rc = memcmp(&tmp_dev_wwn[0], dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN);
+ if (rc != 0) {
+ pr_debug("XCOPY: skip non-matching: %*ph\n",
+ XCOPY_NAA_IEEE_REGEX_LEN, tmp_dev_wwn);
+ return 0;
+ }
+ pr_debug("XCOPY 0xe4: located se_dev: %p\n", se_dev);
+
+ return 1;
+}
+
+static int target_xcopy_locate_se_dev_e4(struct se_session *sess,
+ const unsigned char *dev_wwn,
+ struct se_device **_found_dev,
+ struct percpu_ref **_found_lun_ref)
+{
+ struct se_dev_entry *deve;
+ struct se_node_acl *nacl;
+ struct se_lun *this_lun = NULL;
+ struct se_device *found_dev = NULL;
+
+ /* cmd with NULL sess indicates no associated $FABRIC_MOD */
+ if (!sess)
+ goto err_out;
+
+ pr_debug("XCOPY 0xe4: searching for: %*ph\n",
+ XCOPY_NAA_IEEE_REGEX_LEN, dev_wwn);
+
+ nacl = sess->se_node_acl;
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
+ struct se_device *this_dev;
+ int rc;
+
+ this_lun = deve->se_lun;
+ this_dev = rcu_dereference_raw(this_lun->lun_se_dev);
+
+ rc = target_xcopy_locate_se_dev_e4_iter(this_dev, dev_wwn);
+ if (rc) {
+ if (percpu_ref_tryget_live(&this_lun->lun_ref))
+ found_dev = this_dev;
+ break;
+ }
+ }
+ rcu_read_unlock();
+ if (found_dev == NULL)
+ goto err_out;
+
+ pr_debug("lun_ref held for se_dev: %p se_dev->se_dev_group: %p\n",
+ found_dev, &found_dev->dev_group);
+ *_found_dev = found_dev;
+ *_found_lun_ref = &this_lun->lun_ref;
+ return 0;
+err_out:
+ pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
+ return -EINVAL;
+}
+
+static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
+ unsigned char *p, unsigned short cscd_index)
+{
+ unsigned char *desc = p;
+ unsigned short ript;
+ u8 desig_len;
+ /*
+ * Extract RELATIVE INITIATOR PORT IDENTIFIER
+ */
+ ript = get_unaligned_be16(&desc[2]);
+ pr_debug("XCOPY 0xe4: RELATIVE INITIATOR PORT IDENTIFIER: %hu\n", ript);
+ /*
+ * Check for supported code set, association, and designator type
+ */
+ if ((desc[4] & 0x0f) != 0x1) {
+ pr_err("XCOPY 0xe4: code set of non binary type not supported\n");
+ return -EINVAL;
+ }
+ if ((desc[5] & 0x30) != 0x00) {
+ pr_err("XCOPY 0xe4: association other than LUN not supported\n");
+ return -EINVAL;
+ }
+ if ((desc[5] & 0x0f) != 0x3) {
+ pr_err("XCOPY 0xe4: designator type unsupported: 0x%02x\n",
+ (desc[5] & 0x0f));
+ return -EINVAL;
+ }
+ /*
+ * Check for matching 16 byte length for NAA IEEE Registered Extended
+ * Assigned designator
+ */
+ desig_len = desc[7];
+ if (desig_len != XCOPY_NAA_IEEE_REGEX_LEN) {
+ pr_err("XCOPY 0xe4: invalid desig_len: %d\n", (int)desig_len);
+ return -EINVAL;
+ }
+ pr_debug("XCOPY 0xe4: desig_len: %d\n", (int)desig_len);
+ /*
+ * Check for NAA IEEE Registered Extended Assigned header..
+ */
+ if ((desc[8] & 0xf0) != 0x60) {
+ pr_err("XCOPY 0xe4: Unsupported DESIGNATOR TYPE: 0x%02x\n",
+ (desc[8] & 0xf0));
+ return -EINVAL;
+ }
+
+ if (cscd_index != xop->stdi && cscd_index != xop->dtdi) {
+ pr_debug("XCOPY 0xe4: ignoring CSCD entry %d - neither src nor "
+ "dest\n", cscd_index);
+ return 0;
+ }
+
+ if (cscd_index == xop->stdi) {
+ memcpy(&xop->src_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN);
+ /*
+ * Determine if the source designator matches the local device
+ */
+ if (!memcmp(&xop->local_dev_wwn[0], &xop->src_tid_wwn[0],
+ XCOPY_NAA_IEEE_REGEX_LEN)) {
+ xop->op_origin = XCOL_SOURCE_RECV_OP;
+ xop->src_dev = se_cmd->se_dev;
+ pr_debug("XCOPY 0xe4: Set xop->src_dev %p from source"
+ " received xop\n", xop->src_dev);
+ }
+ }
+
+ if (cscd_index == xop->dtdi) {
+ memcpy(&xop->dst_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN);
+ /*
+ * Determine if the destination designator matches the local
+ * device. If @cscd_index corresponds to both source (stdi) and
+ * destination (dtdi), or dtdi comes after stdi, then
+ * XCOL_DEST_RECV_OP wins.
+ */
+ if (!memcmp(&xop->local_dev_wwn[0], &xop->dst_tid_wwn[0],
+ XCOPY_NAA_IEEE_REGEX_LEN)) {
+ xop->op_origin = XCOL_DEST_RECV_OP;
+ xop->dst_dev = se_cmd->se_dev;
+ pr_debug("XCOPY 0xe4: Set xop->dst_dev: %p from destination"
+ " received xop\n", xop->dst_dev);
+ }
+ }
+
+ return 0;
+}
+
+static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
+ struct xcopy_op *xop, unsigned char *p,
+ unsigned short tdll, sense_reason_t *sense_ret)
+{
+ struct se_device *local_dev = se_cmd->se_dev;
+ unsigned char *desc = p;
+ int offset = tdll % XCOPY_TARGET_DESC_LEN, rc;
+ unsigned short cscd_index = 0;
+ unsigned short start = 0;
+
+ *sense_ret = TCM_INVALID_PARAMETER_LIST;
+
+ if (offset != 0) {
+ pr_err("XCOPY target descriptor list length is not"
+ " multiple of %d\n", XCOPY_TARGET_DESC_LEN);
+ *sense_ret = TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE;
+ return -EINVAL;
+ }
+ if (tdll > RCR_OP_MAX_TARGET_DESC_COUNT * XCOPY_TARGET_DESC_LEN) {
+ pr_err("XCOPY target descriptor supports a maximum"
+ " two src/dest descriptors, tdll: %hu too large..\n", tdll);
+ /* spc4r37 6.4.3.4 CSCD DESCRIPTOR LIST LENGTH field */
+ *sense_ret = TCM_TOO_MANY_TARGET_DESCS;
+ return -EINVAL;
+ }
+ /*
+ * Generate an IEEE Registered Extended designator based upon the
+ * se_device the XCOPY was received upon..
+ */
+ memset(&xop->local_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
+ spc_gen_naa_6h_vendor_specific(local_dev, &xop->local_dev_wwn[0]);
+
+ while (start < tdll) {
+ /*
+ * Check target descriptor identification with 0xE4 type, and
+ * compare the current index with the CSCD descriptor IDs in
+ * the segment descriptor. Use VPD 0x83 WWPN matching ..
+ */
+ switch (desc[0]) {
+ case 0xe4:
+ rc = target_xcopy_parse_tiddesc_e4(se_cmd, xop,
+ &desc[0], cscd_index);
+ if (rc != 0)
+ goto out;
+ start += XCOPY_TARGET_DESC_LEN;
+ desc += XCOPY_TARGET_DESC_LEN;
+ cscd_index++;
+ break;
+ default:
+ pr_err("XCOPY unsupported descriptor type code:"
+ " 0x%02x\n", desc[0]);
+ *sense_ret = TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE;
+ goto out;
+ }
+ }
+
+ switch (xop->op_origin) {
+ case XCOL_SOURCE_RECV_OP:
+ rc = target_xcopy_locate_se_dev_e4(se_cmd->se_sess,
+ xop->dst_tid_wwn,
+ &xop->dst_dev,
+ &xop->remote_lun_ref);
+ break;
+ case XCOL_DEST_RECV_OP:
+ rc = target_xcopy_locate_se_dev_e4(se_cmd->se_sess,
+ xop->src_tid_wwn,
+ &xop->src_dev,
+ &xop->remote_lun_ref);
+ break;
+ default:
+ pr_err("XCOPY CSCD descriptor IDs not found in CSCD list - "
+ "stdi: %hu dtdi: %hu\n", xop->stdi, xop->dtdi);
+ rc = -EINVAL;
+ break;
+ }
+ /*
+ * If a matching IEEE NAA 0x83 descriptor for the requested device
+ * is not located on this node, return COPY_ABORTED with ASC/ASCQ
+ * 0x0d/0x02 - COPY_TARGET_DEVICE_NOT_REACHABLE to request the
+ * initiator to fall back to normal copy method.
+ */
+ if (rc < 0) {
+ *sense_ret = TCM_COPY_TARGET_DEVICE_NOT_REACHABLE;
+ goto out;
+ }
+
+ pr_debug("XCOPY TGT desc: Source dev: %p NAA IEEE WWN: 0x%16phN\n",
+ xop->src_dev, &xop->src_tid_wwn[0]);
+ pr_debug("XCOPY TGT desc: Dest dev: %p NAA IEEE WWN: 0x%16phN\n",
+ xop->dst_dev, &xop->dst_tid_wwn[0]);
+
+ return cscd_index;
+
+out:
+ return -EINVAL;
+}
+
+static int target_xcopy_parse_segdesc_02(struct xcopy_op *xop, unsigned char *p)
+{
+ unsigned char *desc = p;
+ int dc = (desc[1] & 0x02);
+ unsigned short desc_len;
+
+ desc_len = get_unaligned_be16(&desc[2]);
+ if (desc_len != 0x18) {
+ pr_err("XCOPY segment desc 0x02: Illegal desc_len:"
+ " %hu\n", desc_len);
+ return -EINVAL;
+ }
+
+ xop->stdi = get_unaligned_be16(&desc[4]);
+ xop->dtdi = get_unaligned_be16(&desc[6]);
+
+ if (xop->stdi > XCOPY_CSCD_DESC_ID_LIST_OFF_MAX ||
+ xop->dtdi > XCOPY_CSCD_DESC_ID_LIST_OFF_MAX) {
+ pr_err("XCOPY segment desc 0x02: unsupported CSCD ID > 0x%x; stdi: %hu dtdi: %hu\n",
+ XCOPY_CSCD_DESC_ID_LIST_OFF_MAX, xop->stdi, xop->dtdi);
+ return -EINVAL;
+ }
+
+ pr_debug("XCOPY seg desc 0x02: desc_len: %hu stdi: %hu dtdi: %hu, DC: %d\n",
+ desc_len, xop->stdi, xop->dtdi, dc);
+
+ xop->nolb = get_unaligned_be16(&desc[10]);
+ xop->src_lba = get_unaligned_be64(&desc[12]);
+ xop->dst_lba = get_unaligned_be64(&desc[20]);
+ pr_debug("XCOPY seg desc 0x02: nolb: %hu src_lba: %llu dst_lba: %llu\n",
+ xop->nolb, (unsigned long long)xop->src_lba,
+ (unsigned long long)xop->dst_lba);
+
+ return 0;
+}
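+
+/*
+ * For reference (derived from the decoding above; see also SPC-4): a
+ * 28-byte type 0x02 block -> block segment descriptor carries
+ *
+ *   byte  0      descriptor type code (0x02)
+ *   byte  1      bit 1 = DC
+ *   bytes 2-3    descriptor length (0x18)
+ *   bytes 4-5    source CSCD descriptor ID (stdi)
+ *   bytes 6-7    destination CSCD descriptor ID (dtdi)
+ *   bytes 10-11  number of blocks (nolb)
+ *   bytes 12-19  source LBA
+ *   bytes 20-27  destination LBA
+ */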
+
+static int target_xcopy_parse_segment_descriptors(struct xcopy_op *xop,
+ unsigned char *p, unsigned int sdll,
+ sense_reason_t *sense_ret)
+{
+ unsigned char *desc = p;
+ unsigned int start = 0;
+ int offset = sdll % XCOPY_SEGMENT_DESC_LEN, rc, ret = 0;
+
+ *sense_ret = TCM_INVALID_PARAMETER_LIST;
+
+ if (offset != 0) {
+ pr_err("XCOPY segment descriptor list length is not"
+ " multiple of %d\n", XCOPY_SEGMENT_DESC_LEN);
+ *sense_ret = TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE;
+ return -EINVAL;
+ }
+ if (sdll > RCR_OP_MAX_SG_DESC_COUNT * XCOPY_SEGMENT_DESC_LEN) {
+ pr_err("XCOPY supports %u segment descriptor(s), sdll: %u too"
+ " large..\n", RCR_OP_MAX_SG_DESC_COUNT, sdll);
+ /* spc4r37 6.4.3.5 SEGMENT DESCRIPTOR LIST LENGTH field */
+ *sense_ret = TCM_TOO_MANY_SEGMENT_DESCS;
+ return -EINVAL;
+ }
+
+ while (start < sdll) {
+ /*
+ * Check segment descriptor type code for block -> block
+ */
+ switch (desc[0]) {
+ case 0x02:
+ rc = target_xcopy_parse_segdesc_02(xop, desc);
+ if (rc < 0)
+ goto out;
+
+ ret++;
+ start += XCOPY_SEGMENT_DESC_LEN;
+ desc += XCOPY_SEGMENT_DESC_LEN;
+ break;
+ default:
+ pr_err("XCOPY unsupported segment descriptor"
+ "type: 0x%02x\n", desc[0]);
+ *sense_ret = TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE;
+ goto out;
+ }
+ }
+
+ return ret;
+
+out:
+ return -EINVAL;
+}
+
+/*
+ * Start xcopy_pt ops
+ */
+
+struct xcopy_pt_cmd {
+ struct se_cmd se_cmd;
+ struct completion xpt_passthrough_sem;
+ unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
+};
+
+struct se_portal_group xcopy_pt_tpg;
+static struct se_session xcopy_pt_sess;
+static struct se_node_acl xcopy_pt_nacl;
+
+static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd)
+{
+ return 0;
+}
+
+static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop)
+{
+ if (xop->op_origin == XCOL_SOURCE_RECV_OP)
+ pr_debug("putting dst lun_ref for %p\n", xop->dst_dev);
+ else
+ pr_debug("putting src lun_ref for %p\n", xop->src_dev);
+
+ percpu_ref_put(xop->remote_lun_ref);
+}
+
+static void xcopy_pt_release_cmd(struct se_cmd *se_cmd)
+{
+ struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd,
+ struct xcopy_pt_cmd, se_cmd);
+
+ /* xpt_cmd is on the stack, nothing to free here */
+ pr_debug("xpt_cmd done: %p\n", xpt_cmd);
+}
+
+static int xcopy_pt_check_stop_free(struct se_cmd *se_cmd)
+{
+ struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd,
+ struct xcopy_pt_cmd, se_cmd);
+
+ complete(&xpt_cmd->xpt_passthrough_sem);
+ return 0;
+}
+
+static int xcopy_pt_write_pending(struct se_cmd *se_cmd)
+{
+ return 0;
+}
+
+static int xcopy_pt_queue_data_in(struct se_cmd *se_cmd)
+{
+ return 0;
+}
+
+static int xcopy_pt_queue_status(struct se_cmd *se_cmd)
+{
+ return 0;
+}
+
+static const struct target_core_fabric_ops xcopy_pt_tfo = {
+ .fabric_name = "xcopy-pt",
+ .get_cmd_state = xcopy_pt_get_cmd_state,
+ .release_cmd = xcopy_pt_release_cmd,
+ .check_stop_free = xcopy_pt_check_stop_free,
+ .write_pending = xcopy_pt_write_pending,
+ .queue_data_in = xcopy_pt_queue_data_in,
+ .queue_status = xcopy_pt_queue_status,
+};
+
+/*
+ * End xcopy_pt ops
+ */
+
+int target_xcopy_setup_pt(void)
+{
+ xcopy_wq = alloc_workqueue("xcopy_wq", WQ_MEM_RECLAIM | WQ_PERCPU, 0);
+ if (!xcopy_wq) {
+ pr_err("Unable to allocate xcopy_wq\n");
+ return -ENOMEM;
+ }
+
+ memset(&xcopy_pt_tpg, 0, sizeof(struct se_portal_group));
+ INIT_LIST_HEAD(&xcopy_pt_tpg.acl_node_list);
+ INIT_LIST_HEAD(&xcopy_pt_tpg.tpg_sess_list);
+
+ xcopy_pt_tpg.se_tpg_tfo = &xcopy_pt_tfo;
+
+ memset(&xcopy_pt_nacl, 0, sizeof(struct se_node_acl));
+ INIT_LIST_HEAD(&xcopy_pt_nacl.acl_list);
+ INIT_LIST_HEAD(&xcopy_pt_nacl.acl_sess_list);
+ memset(&xcopy_pt_sess, 0, sizeof(struct se_session));
+ transport_init_session(&xcopy_pt_sess);
+
+ xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg;
+ xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess;
+
+ xcopy_pt_sess.se_tpg = &xcopy_pt_tpg;
+ xcopy_pt_sess.se_node_acl = &xcopy_pt_nacl;
+
+ return 0;
+}
+
+void target_xcopy_release_pt(void)
+{
+ if (xcopy_wq)
+ destroy_workqueue(xcopy_wq);
+}
+
+/*
+ * target_xcopy_setup_pt_cmd - set up a pass-through command
+ * @xpt_cmd: Data structure to initialize.
+ * @xop: Describes the XCOPY operation received from an initiator.
+ * @se_dev: Backend device to associate with @xpt_cmd if
+ * @remote_port == true.
+ * @cdb: SCSI CDB to be copied into @xpt_cmd.
+ * @remote_port: If false, use the LUN through which the XCOPY command has
+ * been received. If true, use @se_dev->xcopy_lun.
+ *
+ * Set up a SCSI command (READ or WRITE) that will be used to execute an
+ * XCOPY command.
+ */
+static int target_xcopy_setup_pt_cmd(
+ struct xcopy_pt_cmd *xpt_cmd,
+ struct xcopy_op *xop,
+ struct se_device *se_dev,
+ unsigned char *cdb,
+ bool remote_port)
+{
+ struct se_cmd *cmd = &xpt_cmd->se_cmd;
+
+ /*
+ * Setup LUN+port to honor reservations based upon xop->op_origin for
+ * X-COPY PUSH or X-COPY PULL based upon where the CDB was received.
+ */
+ if (remote_port) {
+ cmd->se_lun = &se_dev->xcopy_lun;
+ cmd->se_dev = se_dev;
+ } else {
+ cmd->se_lun = xop->xop_se_cmd->se_lun;
+ cmd->se_dev = xop->xop_se_cmd->se_dev;
+ }
+ cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
+
+ if (target_cmd_init_cdb(cmd, cdb, GFP_KERNEL))
+ return -EINVAL;
+
+ cmd->tag = 0;
+ if (target_cmd_parse_cdb(cmd))
+ return -EINVAL;
+
+ if (transport_generic_map_mem_to_cmd(cmd, xop->xop_data_sg,
+ xop->xop_data_nents, NULL, 0))
+ return -EINVAL;
+
+ pr_debug("Setup PASSTHROUGH_NOALLOC t_data_sg: %p t_data_nents:"
+ " %u\n", cmd->t_data_sg, cmd->t_data_nents);
+
+ return 0;
+}
+
+static int target_xcopy_issue_pt_cmd(struct xcopy_pt_cmd *xpt_cmd)
+{
+ struct se_cmd *se_cmd = &xpt_cmd->se_cmd;
+ sense_reason_t sense_rc;
+
+ sense_rc = transport_generic_new_cmd(se_cmd);
+ if (sense_rc)
+ return -EINVAL;
+
+ if (se_cmd->data_direction == DMA_TO_DEVICE)
+ target_execute_cmd(se_cmd);
+
+ wait_for_completion_interruptible(&xpt_cmd->xpt_passthrough_sem);
+
+ pr_debug("target_xcopy_issue_pt_cmd(): SCSI status: 0x%02x\n",
+ se_cmd->scsi_status);
+
+ return (se_cmd->scsi_status) ? -EINVAL : 0;
+}
+
+static int target_xcopy_read_source(
+ struct se_cmd *ec_cmd,
+ struct xcopy_op *xop,
+ struct se_device *src_dev,
+ sector_t src_lba,
+ u32 src_bytes)
+{
+ struct xcopy_pt_cmd xpt_cmd;
+ struct se_cmd *se_cmd = &xpt_cmd.se_cmd;
+ u32 transfer_length_block = src_bytes / src_dev->dev_attrib.block_size;
+ int rc;
+ unsigned char cdb[16];
+ bool remote_port = (xop->op_origin == XCOL_DEST_RECV_OP);
+
+ memset(&xpt_cmd, 0, sizeof(xpt_cmd));
+ init_completion(&xpt_cmd.xpt_passthrough_sem);
+
+ memset(&cdb[0], 0, 16);
+ cdb[0] = READ_16;
+ put_unaligned_be64(src_lba, &cdb[2]);
+ put_unaligned_be32(transfer_length_block, &cdb[10]);
+ pr_debug("XCOPY: Built READ_16: LBA: %llu Blocks: %u Length: %u\n",
+ (unsigned long long)src_lba, transfer_length_block, src_bytes);
+
+ __target_init_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, src_bytes,
+ DMA_FROM_DEVICE, 0, &xpt_cmd.sense_buffer[0], 0,
+ NULL);
+ rc = target_xcopy_setup_pt_cmd(&xpt_cmd, xop, src_dev, &cdb[0],
+ remote_port);
+ if (rc < 0) {
+ ec_cmd->scsi_status = se_cmd->scsi_status;
+ goto out;
+ }
+
+ pr_debug("XCOPY-READ: Saved xop->xop_data_sg: %p, num: %u for READ"
+ " memory\n", xop->xop_data_sg, xop->xop_data_nents);
+
+ rc = target_xcopy_issue_pt_cmd(&xpt_cmd);
+ if (rc < 0)
+ ec_cmd->scsi_status = se_cmd->scsi_status;
+out:
+ transport_generic_free_cmd(se_cmd, 0);
+ return rc;
+}
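+
+/*
+ * For reference (derived from the read helper above and the write helper
+ * below): both build a 16-byte CDB of the form
+ *
+ *   byte  0      opcode (READ_16 or WRITE_16)
+ *   bytes 2-9    LBA (big-endian 64-bit)
+ *   bytes 10-13  TRANSFER LENGTH in blocks (big-endian 32-bit)
+ *
+ * which the backend then parses via target_cmd_parse_cdb().
+ */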
+
+static int target_xcopy_write_destination(
+ struct se_cmd *ec_cmd,
+ struct xcopy_op *xop,
+ struct se_device *dst_dev,
+ sector_t dst_lba,
+ u32 dst_bytes)
+{
+ struct xcopy_pt_cmd xpt_cmd;
+ struct se_cmd *se_cmd = &xpt_cmd.se_cmd;
+ u32 transfer_length_block = dst_bytes / dst_dev->dev_attrib.block_size;
+ int rc;
+ unsigned char cdb[16];
+ bool remote_port = (xop->op_origin == XCOL_SOURCE_RECV_OP);
+
+ memset(&xpt_cmd, 0, sizeof(xpt_cmd));
+ init_completion(&xpt_cmd.xpt_passthrough_sem);
+
+ memset(&cdb[0], 0, 16);
+ cdb[0] = WRITE_16;
+ put_unaligned_be64(dst_lba, &cdb[2]);
+ put_unaligned_be32(transfer_length_block, &cdb[10]);
+ pr_debug("XCOPY: Built WRITE_16: LBA: %llu Blocks: %u Length: %u\n",
+ (unsigned long long)dst_lba, transfer_length_block, dst_bytes);
+
+ __target_init_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, dst_bytes,
+ DMA_TO_DEVICE, 0, &xpt_cmd.sense_buffer[0], 0,
+ NULL);
+ rc = target_xcopy_setup_pt_cmd(&xpt_cmd, xop, dst_dev, &cdb[0],
+ remote_port);
+ if (rc < 0) {
+ ec_cmd->scsi_status = se_cmd->scsi_status;
+ goto out;
+ }
+
+ rc = target_xcopy_issue_pt_cmd(&xpt_cmd);
+ if (rc < 0)
+ ec_cmd->scsi_status = se_cmd->scsi_status;
+out:
+ transport_generic_free_cmd(se_cmd, 0);
+ return rc;
+}
+
+static void target_xcopy_do_work(struct work_struct *work)
+{
+ struct xcopy_op *xop = container_of(work, struct xcopy_op, xop_work);
+ struct se_cmd *ec_cmd = xop->xop_se_cmd;
+ struct se_device *src_dev, *dst_dev;
+ sector_t src_lba, dst_lba, end_lba;
+ unsigned long long max_bytes, max_bytes_src, max_bytes_dst, max_blocks;
+ int rc = 0;
+ unsigned short nolb;
+ unsigned int copied_bytes = 0;
+ sense_reason_t sense_rc;
+
+ sense_rc = target_parse_xcopy_cmd(xop);
+ if (sense_rc != TCM_NO_SENSE)
+ goto err_free;
+
+ if (WARN_ON_ONCE(!xop->src_dev) || WARN_ON_ONCE(!xop->dst_dev)) {
+ sense_rc = TCM_INVALID_PARAMETER_LIST;
+ goto err_free;
+ }
+
+ src_dev = xop->src_dev;
+ dst_dev = xop->dst_dev;
+ src_lba = xop->src_lba;
+ dst_lba = xop->dst_lba;
+ nolb = xop->nolb;
+ end_lba = src_lba + nolb;
+ /*
+ * Break up XCOPY I/O into hw_max_sectors * hw_block_size sized
+ * I/O based on the smallest max_bytes between src_dev and dst_dev
+ */
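+
+ /*
+ * Worked example, added for illustration: with hw_max_sectors =
+ * 1024 and 512-byte hw_block_size on both devices, max_bytes is
+ * 512 KiB (well under XCOPY_MAX_BYTES), so a 4 MiB copy takes
+ * eight read/write round trips through the loop below.
+ */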
+ max_bytes_src = (unsigned long long) src_dev->dev_attrib.hw_max_sectors *
+ src_dev->dev_attrib.hw_block_size;
+ max_bytes_dst = (unsigned long long) dst_dev->dev_attrib.hw_max_sectors *
+ dst_dev->dev_attrib.hw_block_size;
+
+ max_bytes = min_t(u64, max_bytes_src, max_bytes_dst);
+ max_bytes = min_t(u64, max_bytes, XCOPY_MAX_BYTES);
+
+ /*
+ * Using a shift instead of division because otherwise GCC
+ * generates a __udivdi3 call that is missing on i386
+ */
+ max_blocks = max_bytes >> ilog2(src_dev->dev_attrib.block_size);
+
+ pr_debug("%s: nolb: %u, max_blocks: %llu end_lba: %llu\n", __func__,
+ nolb, max_blocks, (unsigned long long)end_lba);
+ pr_debug("%s: Starting src_lba: %llu, dst_lba: %llu\n", __func__,
+ (unsigned long long)src_lba, (unsigned long long)dst_lba);
+
+ while (nolb) {
+ u32 cur_bytes = min_t(u64, max_bytes, nolb * src_dev->dev_attrib.block_size);
+ unsigned short cur_nolb = cur_bytes / src_dev->dev_attrib.block_size;
+
+ if (cur_bytes != xop->xop_data_bytes) {
+ /*
+ * (Re)allocate a buffer large enough to hold the XCOPY
+ * I/O size, which can be reused each read / write loop.
+ */
+ target_free_sgl(xop->xop_data_sg, xop->xop_data_nents);
+ rc = target_alloc_sgl(&xop->xop_data_sg,
+ &xop->xop_data_nents,
+ cur_bytes,
+ false, false);
+ if (rc < 0)
+ goto out;
+ xop->xop_data_bytes = cur_bytes;
+ }
+
+ pr_debug("%s: Calling read src_dev: %p src_lba: %llu, cur_nolb: %hu\n",
+ __func__, src_dev, (unsigned long long)src_lba, cur_nolb);
+
+ rc = target_xcopy_read_source(ec_cmd, xop, src_dev, src_lba, cur_bytes);
+ if (rc < 0)
+ goto out;
+
+ src_lba += cur_bytes / src_dev->dev_attrib.block_size;
+ pr_debug("%s: Incremented READ src_lba to %llu\n", __func__,
+ (unsigned long long)src_lba);
+
+ pr_debug("%s: Calling write dst_dev: %p dst_lba: %llu, cur_nolb: %u\n",
+ __func__, dst_dev, (unsigned long long)dst_lba, cur_nolb);
+
+ rc = target_xcopy_write_destination(ec_cmd, xop, dst_dev,
+ dst_lba, cur_bytes);
+ if (rc < 0)
+ goto out;
+
+ dst_lba += cur_bytes / dst_dev->dev_attrib.block_size;
+ pr_debug("%s: Incremented WRITE dst_lba to %llu\n", __func__,
+ (unsigned long long)dst_lba);
+
+ copied_bytes += cur_bytes;
+ nolb -= cur_bytes / src_dev->dev_attrib.block_size;
+ }
+
+ xcopy_pt_undepend_remotedev(xop);
+ target_free_sgl(xop->xop_data_sg, xop->xop_data_nents);
+ kfree(xop);
+
+ pr_debug("%s: Final src_lba: %llu, dst_lba: %llu\n", __func__,
+ (unsigned long long)src_lba, (unsigned long long)dst_lba);
+ pr_debug("%s: Blocks copied: %u, Bytes Copied: %u\n", __func__,
+ copied_bytes / dst_dev->dev_attrib.block_size, copied_bytes);
+
+ pr_debug("%s: Setting X-COPY GOOD status -> sending response\n", __func__);
+ target_complete_cmd(ec_cmd, SAM_STAT_GOOD);
+ return;
+
+out:
+ /*
+ * The XCOPY command was aborted after some data was transferred.
+ * Terminate command with CHECK CONDITION status, with the sense key
+ * set to COPY ABORTED.
+ */
+ sense_rc = TCM_COPY_TARGET_DEVICE_NOT_REACHABLE;
+ xcopy_pt_undepend_remotedev(xop);
+ target_free_sgl(xop->xop_data_sg, xop->xop_data_nents);
+
+err_free:
+ kfree(xop);
+ pr_warn_ratelimited("%s: rc: %d, sense: %u, XCOPY operation failed\n",
+ __func__, rc, sense_rc);
+ target_complete_cmd_with_sense(ec_cmd, SAM_STAT_CHECK_CONDITION, sense_rc);
+}
+
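+/*
+ * For reference (per the decoding below; see SPC-4 for the EXTENDED COPY
+ * (LID1) parameter list header): the 16-byte header holds
+ *
+ *   byte  0      LIST IDENTIFIER
+ *   byte  1      bits 3-4 = LIST ID USAGE
+ *   bytes 2-3    TARGET DESCRIPTOR LIST LENGTH (tdll)
+ *   bytes 8-11   SEGMENT DESCRIPTOR LIST LENGTH (sdll)
+ *   bytes 12-15  INLINE DATA LENGTH (must be zero here)
+ */
+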
+/*
+ * Returns TCM_NO_SENSE upon success or a sense code != TCM_NO_SENSE if parsing
+ * fails.
+ */
+static sense_reason_t target_parse_xcopy_cmd(struct xcopy_op *xop)
+{
+ struct se_cmd *se_cmd = xop->xop_se_cmd;
+ unsigned char *p = NULL, *seg_desc;
+ unsigned int list_id, list_id_usage, sdll, inline_dl;
+ sense_reason_t ret = TCM_INVALID_PARAMETER_LIST;
+ int rc;
+ unsigned short tdll;
+
+ p = transport_kmap_data_sg(se_cmd);
+ if (!p) {
+ pr_err("transport_kmap_data_sg() failed in target_do_xcopy\n");
+ return TCM_OUT_OF_RESOURCES;
+ }
+
+ list_id = p[0];
+ list_id_usage = (p[1] & 0x18) >> 3;
+
+ /*
+ * Determine TARGET DESCRIPTOR LIST LENGTH + SEGMENT DESCRIPTOR LIST LENGTH
+ */
+ tdll = get_unaligned_be16(&p[2]);
+ sdll = get_unaligned_be32(&p[8]);
+ if (tdll + sdll > RCR_OP_MAX_DESC_LIST_LEN) {
+ pr_err("XCOPY descriptor list length %u exceeds maximum %u\n",
+ tdll + sdll, RCR_OP_MAX_DESC_LIST_LEN);
+ ret = TCM_PARAMETER_LIST_LENGTH_ERROR;
+ goto out;
+ }
+
+ inline_dl = get_unaligned_be32(&p[12]);
+ if (inline_dl != 0) {
+ pr_err("XCOPY with non zero inline data length\n");
+ goto out;
+ }
+
+ if (se_cmd->data_length < (XCOPY_HDR_LEN + tdll + sdll + inline_dl)) {
+ pr_err("XCOPY parameter truncation: data length %u too small "
+ "for tdll: %hu sdll: %u inline_dl: %u\n",
+ se_cmd->data_length, tdll, sdll, inline_dl);
+ ret = TCM_PARAMETER_LIST_LENGTH_ERROR;
+ goto out;
+ }
+
+ pr_debug("Processing XCOPY with list_id: 0x%02x list_id_usage: 0x%02x"
+ " tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage,
+ tdll, sdll, inline_dl);
+
+ /*
+ * Skip over the target descriptors until the segment descriptors
+ * have been parsed - CSCD IDs are needed to determine src and dest.
+ */
+ seg_desc = &p[16] + tdll;
+
+ rc = target_xcopy_parse_segment_descriptors(xop, seg_desc, sdll, &ret);
+ if (rc <= 0)
+ goto out;
+
+ pr_debug("XCOPY: Processed %d segment descriptors, length: %u\n", rc,
+ rc * XCOPY_SEGMENT_DESC_LEN);
+
+ rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll, &ret);
+ if (rc <= 0)
+ goto out;
+
+ if (xop->src_dev->dev_attrib.block_size !=
+ xop->dst_dev->dev_attrib.block_size) {
+ pr_err("XCOPY: Non matching src_dev block_size: %u + dst_dev"
+ " block_size: %u currently unsupported\n",
+ xop->src_dev->dev_attrib.block_size,
+ xop->dst_dev->dev_attrib.block_size);
+ xcopy_pt_undepend_remotedev(xop);
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ goto out;
+ }
+
+ pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc,
+ rc * XCOPY_TARGET_DESC_LEN);
+ transport_kunmap_data_sg(se_cmd);
+ return TCM_NO_SENSE;
+
+out:
+ if (p)
+ transport_kunmap_data_sg(se_cmd);
+ return ret;
+}
+
+sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
+{
+ struct se_device *dev = se_cmd->se_dev;
+ struct xcopy_op *xop;
+ unsigned int sa;
+
+ if (!dev->dev_attrib.emulate_3pc) {
+ pr_err("EXTENDED_COPY operation explicitly disabled\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ sa = se_cmd->t_task_cdb[1] & 0x1f;
+ if (sa != 0x00) {
+ pr_err("EXTENDED_COPY(LID4) not supported\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ if (se_cmd->data_length == 0) {
+ target_complete_cmd(se_cmd, SAM_STAT_GOOD);
+ return TCM_NO_SENSE;
+ }
+ if (se_cmd->data_length < XCOPY_HDR_LEN) {
+ pr_err("XCOPY parameter truncation: length %u < hdr_len %u\n",
+ se_cmd->data_length, XCOPY_HDR_LEN);
+ return TCM_PARAMETER_LIST_LENGTH_ERROR;
+ }
+
+ xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL);
+ if (!xop)
+ goto err;
+ xop->xop_se_cmd = se_cmd;
+ INIT_WORK(&xop->xop_work, target_xcopy_do_work);
+ if (WARN_ON_ONCE(!queue_work(xcopy_wq, &xop->xop_work)))
+ goto free;
+ return TCM_NO_SENSE;
+
+free:
+ kfree(xop);
+
+err:
+ return TCM_OUT_OF_RESOURCES;
+}
+
+static sense_reason_t target_rcr_operating_parameters(struct se_cmd *se_cmd)
+{
+ unsigned char *p;
+
+ p = transport_kmap_data_sg(se_cmd);
+ if (!p) {
+ pr_err("transport_kmap_data_sg failed in"
+ " target_rcr_operating_parameters\n");
+ return TCM_OUT_OF_RESOURCES;
+ }
+
+ if (se_cmd->data_length < 54) {
+ pr_err("Receive Copy Results Op Parameters length"
+ " too small: %u\n", se_cmd->data_length);
+ transport_kunmap_data_sg(se_cmd);
+ return TCM_INVALID_CDB_FIELD;
+ }
+ /*
+ * Set SNLID=1 (Supports no List ID)
+ */
+ p[4] = 0x1;
+ /*
+ * MAXIMUM TARGET DESCRIPTOR COUNT
+ */
+ put_unaligned_be16(RCR_OP_MAX_TARGET_DESC_COUNT, &p[8]);
+ /*
+ * MAXIMUM SEGMENT DESCRIPTOR COUNT
+ */
+ put_unaligned_be16(RCR_OP_MAX_SG_DESC_COUNT, &p[10]);
+ /*
+ * MAXIMUM DESCRIPTOR LIST LENGTH
+ */
+ put_unaligned_be32(RCR_OP_MAX_DESC_LIST_LEN, &p[12]);
+ /*
+ * MAXIMUM SEGMENT LENGTH
+ */
+ put_unaligned_be32(RCR_OP_MAX_SEGMENT_LEN, &p[16]);
+ /*
+ * MAXIMUM INLINE DATA LENGTH for SA 0x04 (NOT SUPPORTED)
+ */
+ put_unaligned_be32(0x0, &p[20]);
+ /*
+ * HELD DATA LIMIT
+ */
+ put_unaligned_be32(0x0, &p[24]);
+ /*
+ * MAXIMUM STREAM DEVICE TRANSFER SIZE
+ */
+ put_unaligned_be32(0x0, &p[28]);
+ /*
+ * TOTAL CONCURRENT COPIES
+ */
+ put_unaligned_be16(RCR_OP_TOTAL_CONCURR_COPIES, &p[34]);
+ /*
+ * MAXIMUM CONCURRENT COPIES
+ */
+ p[36] = RCR_OP_MAX_CONCURR_COPIES;
+ /*
+ * DATA SEGMENT GRANULARITY (log 2)
+ */
+ p[37] = RCR_OP_DATA_SEG_GRAN_LOG2;
+ /*
+ * INLINE DATA GRANULARITY (log 2)
+ */
+ p[38] = RCR_OP_INLINE_DATA_GRAN_LOG2;
+ /*
+ * HELD DATA GRANULARITY
+ */
+ p[39] = RCR_OP_HELD_DATA_GRAN_LOG2;
+ /*
+ * IMPLEMENTED DESCRIPTOR LIST LENGTH
+ */
+ p[43] = 0x2;
+ /*
+ * List of implemented descriptor type codes (ordered)
+ */
+ p[44] = 0x02; /* Copy Block to Block device */
+ p[45] = 0xe4; /* Identification descriptor target descriptor */
+
+ /*
+ * AVAILABLE DATA (n-3)
+ */
+ put_unaligned_be32(42, &p[0]);
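+ /*
+ * Note, added for illustration: the payload built above ends at
+ * byte 45 (the second implemented descriptor type code), so
+ * AVAILABLE DATA is n - 3 = 45 - 3 = 42, i.e. the 42 bytes that
+ * follow the 4-byte length field.
+ */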
+
+ transport_kunmap_data_sg(se_cmd);
+ target_complete_cmd(se_cmd, SAM_STAT_GOOD);
+
+ return TCM_NO_SENSE;
+}
+
+sense_reason_t target_do_receive_copy_results(struct se_cmd *se_cmd)
+{
+ unsigned char *cdb = &se_cmd->t_task_cdb[0];
+ int sa = (cdb[1] & 0x1f), list_id = cdb[2];
+ struct se_device *dev = se_cmd->se_dev;
+ sense_reason_t rc = TCM_NO_SENSE;
+
+ if (!dev->dev_attrib.emulate_3pc) {
+ pr_debug("Third-party copy operations explicitly disabled\n");
+ return TCM_UNSUPPORTED_SCSI_OPCODE;
+ }
+
+ pr_debug("Entering target_do_receive_copy_results: SA: 0x%02x, List ID:"
+ " 0x%02x, AL: %u\n", sa, list_id, se_cmd->data_length);
+
+ if (list_id != 0) {
+ pr_err("Receive Copy Results with non zero list identifier"
+ " not supported\n");
+ return TCM_INVALID_CDB_FIELD;
+ }
+
+ switch (sa) {
+ case RCR_SA_OPERATING_PARAMETERS:
+ rc = target_rcr_operating_parameters(se_cmd);
+ break;
+ case RCR_SA_COPY_STATUS:
+ case RCR_SA_RECEIVE_DATA:
+ case RCR_SA_FAILED_SEGMENT_DETAILS:
+ default:
+ pr_err("Unsupported SA for receive copy results: 0x%02x\n", sa);
+ return TCM_INVALID_CDB_FIELD;
+ }
+
+ return rc;
+}
diff --git a/drivers/target/target_core_xcopy.h b/drivers/target/target_core_xcopy.h
new file mode 100644
index 000000000000..0aad7dc65895
--- /dev/null
+++ b/drivers/target/target_core_xcopy.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <target/target_core_base.h>
+
+#define XCOPY_HDR_LEN 16
+#define XCOPY_TARGET_DESC_LEN 32
+#define XCOPY_SEGMENT_DESC_LEN 28
+#define XCOPY_NAA_IEEE_REGEX_LEN 16
+#define XCOPY_MAX_BYTES 16777216 /* 16 MB */
+
+/*
+ * SPC4r37 6.4.6.1
+ * Table 150 — CSCD descriptor ID values
+ */
+#define XCOPY_CSCD_DESC_ID_LIST_OFF_MAX 0x07FF
+
+enum xcopy_origin_list {
+ XCOL_SOURCE_RECV_OP = 0x01,
+ XCOL_DEST_RECV_OP = 0x02,
+};
+
+struct xcopy_op {
+ int op_origin;
+
+ struct se_cmd *xop_se_cmd;
+ struct se_device *src_dev;
+ unsigned char src_tid_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
+ struct se_device *dst_dev;
+ unsigned char dst_tid_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
+ unsigned char local_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
+ struct percpu_ref *remote_lun_ref;
+
+ sector_t src_lba;
+ sector_t dst_lba;
+ unsigned short stdi;
+ unsigned short dtdi;
+ unsigned short nolb;
+
+ u32 xop_data_bytes;
+ u32 xop_data_nents;
+ struct scatterlist *xop_data_sg;
+ struct work_struct xop_work;
+};
+
+/*
+ * Receive Copy Results Service Actions
+ */
+#define RCR_SA_COPY_STATUS 0x00
+#define RCR_SA_RECEIVE_DATA 0x01
+#define RCR_SA_OPERATING_PARAMETERS 0x03
+#define RCR_SA_FAILED_SEGMENT_DETAILS 0x04
+
+/*
+ * Receive Copy Results defs for Operating Parameters
+ */
+#define RCR_OP_MAX_TARGET_DESC_COUNT 0x2
+#define RCR_OP_MAX_SG_DESC_COUNT 0x1
+#define RCR_OP_MAX_DESC_LIST_LEN 1024
+#define RCR_OP_MAX_SEGMENT_LEN 268435456 /* 256 MB */
+#define RCR_OP_TOTAL_CONCURR_COPIES 0x1 /* Must be <= 16384 */
+#define RCR_OP_MAX_CONCURR_COPIES 0x1 /* Must be <= 255 */
+#define RCR_OP_DATA_SEG_GRAN_LOG2 9 /* 512 bytes in log 2 */
+#define RCR_OP_INLINE_DATA_GRAN_LOG2 9 /* 512 bytes in log 2 */
+#define RCR_OP_HELD_DATA_GRAN_LOG2 9 /* 512 bytes in log 2 */
+
+extern int target_xcopy_setup_pt(void);
+extern void target_xcopy_release_pt(void);
+extern sense_reason_t target_do_xcopy(struct se_cmd *);
+extern sense_reason_t target_do_receive_copy_results(struct se_cmd *);
diff --git a/drivers/target/tcm_fc/Kconfig b/drivers/target/tcm_fc/Kconfig
index 40caf458e89e..4f3b926b6a1a 100644
--- a/drivers/target/tcm_fc/Kconfig
+++ b/drivers/target/tcm_fc/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
config TCM_FC
tristate "TCM_FC fabric Plugin"
depends on LIBFC
diff --git a/drivers/target/tcm_fc/Makefile b/drivers/target/tcm_fc/Makefile
index 20b14bb087c9..a7d1593ab5af 100644
--- a/drivers/target/tcm_fc/Makefile
+++ b/drivers/target/tcm_fc/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
tcm_fc-y += tfc_cmd.o \
tfc_conf.o \
tfc_io.o \
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
index 0dd54a44abcf..00e5573c6296 100644
--- a/drivers/target/tcm_fc/tcm_fc.h
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -1,27 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2010 Cisco Systems, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
#ifndef __TCM_FC_H__
#define __TCM_FC_H__
+#include <linux/types.h>
+#include <target/target_core_base.h>
+
#define FT_VERSION "0.4"
#define FT_NAMELEN 32 /* length of ASCII WWPNs including pad */
#define FT_TPG_NAMELEN 32 /* max length of TPG name */
#define FT_LUN_NAMELEN 32 /* max length of LUN name */
+#define TCM_FC_DEFAULT_TAGS 512 /* tags used for per-session preallocation */
struct ft_transport_id {
__u8 format;
@@ -79,8 +71,8 @@ struct ft_node_auth {
* Node ACL for FC remote port session.
*/
struct ft_node_acl {
- struct ft_node_auth node_auth;
struct se_node_acl se_node_acl;
+ struct ft_node_auth node_auth;
};
struct ft_lun {
@@ -93,20 +85,19 @@ struct ft_lun {
*/
struct ft_tpg {
u32 index;
- struct ft_lport_acl *lport_acl;
+ struct ft_lport_wwn *lport_wwn;
struct ft_tport *tport; /* active tport or NULL */
- struct list_head list; /* linkage in ft_lport_acl tpg_list */
struct list_head lun_list; /* head of LUNs */
struct se_portal_group se_tpg;
struct workqueue_struct *workqueue;
};
-struct ft_lport_acl {
+struct ft_lport_wwn {
u64 wwpn;
char name[FT_NAMELEN];
- struct list_head list;
- struct list_head tpg_list;
- struct se_wwn fc_lport_wwn;
+ struct list_head ft_wwn_node;
+ struct ft_tpg *tpg;
+ struct se_wwn se_wwn;
};
/*
@@ -127,10 +118,8 @@ struct ft_cmd {
u32 sg_cnt; /* No. of item in scatterlist */
};
-extern struct list_head ft_lport_list;
extern struct mutex ft_lport_lock;
extern struct fc4_prov ft_prov;
-extern struct target_fabric_configfs *ft_configfs;
extern unsigned int ft_debug_logging;
/*
@@ -141,7 +130,6 @@ extern unsigned int ft_debug_logging;
* Session ops.
*/
void ft_sess_put(struct ft_sess *);
-int ft_sess_shutdown(struct se_session *);
void ft_sess_close(struct se_session *);
u32 ft_sess_get_index(struct se_session *);
u32 ft_sess_get_port_name(struct se_session *, unsigned char *, u32);
@@ -158,17 +146,14 @@ void ft_release_cmd(struct se_cmd *);
int ft_queue_status(struct se_cmd *);
int ft_queue_data_in(struct se_cmd *);
int ft_write_pending(struct se_cmd *);
-int ft_write_pending_status(struct se_cmd *);
-u32 ft_get_task_tag(struct se_cmd *);
-int ft_get_cmd_state(struct se_cmd *);
void ft_queue_tm_resp(struct se_cmd *);
+void ft_aborted_task(struct se_cmd *);
/*
* other internal functions.
*/
void ft_recv_req(struct ft_sess *, struct fc_frame *);
struct ft_tpg *ft_lport_find_tpg(struct fc_lport *);
-struct ft_node_acl *ft_acl_get(struct ft_tpg *, struct fc_rport_priv *);
void ft_recv_write_data(struct ft_cmd *, struct fc_frame *);
void ft_dump_cmd(struct ft_cmd *, const char *caller);
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 0e5a1caed176..34ab628809e8 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -1,18 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2010 Cisco Systems, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
/* XXX TBD some includes may be extraneous */
@@ -28,19 +16,12 @@
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/hash.h>
-#include <asm/unaligned.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_host.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_cmnd.h>
+#include <linux/unaligned.h>
#include <scsi/scsi_tcq.h>
#include <scsi/libfc.h>
-#include <scsi/fc_encode.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
-#include <target/target_core_configfs.h>
-#include <target/configfs_macros.h>
#include "tcm_fc.h"
@@ -88,17 +69,17 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
static void ft_free_cmd(struct ft_cmd *cmd)
{
struct fc_frame *fp;
- struct fc_lport *lport;
+ struct ft_sess *sess;
if (!cmd)
return;
+ sess = cmd->sess;
fp = cmd->req_frame;
- lport = fr_dev(fp);
if (fr_seq(fp))
- lport->tt.seq_release(fr_seq(fp));
+ fc_seq_release(fr_seq(fp));
fc_frame_free(fp);
- ft_sess_put(cmd->sess); /* undo get from lookup at recv */
- kfree(cmd);
+ target_free_tag(sess->se_sess, &cmd->se_cmd);
+ ft_sess_put(sess); /* undo get from lookup at recv */
}
void ft_release_cmd(struct se_cmd *se_cmd)
@@ -110,8 +91,7 @@ void ft_release_cmd(struct se_cmd *se_cmd)
int ft_check_stop_free(struct se_cmd *se_cmd)
{
- transport_generic_free_cmd(se_cmd, 0);
- return 1;
+ return transport_generic_free_cmd(se_cmd, 0);
}
/*
@@ -125,6 +105,7 @@ int ft_queue_status(struct se_cmd *se_cmd)
struct fc_lport *lport;
struct fc_exch *ep;
size_t len;
+ int rc;
if (cmd->aborted)
return 0;
@@ -134,9 +115,10 @@ int ft_queue_status(struct se_cmd *se_cmd)
len = sizeof(*fcp) + se_cmd->scsi_sense_length;
fp = fc_frame_alloc(lport, len);
if (!fp) {
- /* XXX shouldn't just drop it - requeue and retry? */
- return 0;
+ se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
+ return -ENOMEM;
}
+
fcp = fc_frame_payload_get(fp, len);
memset(fcp, 0, len);
fcp->resp.fr_status = se_cmd->scsi_status;
@@ -163,22 +145,32 @@ int ft_queue_status(struct se_cmd *se_cmd)
/*
* Send response.
*/
- cmd->seq = lport->tt.seq_start_next(cmd->seq);
+ cmd->seq = fc_seq_start_next(cmd->seq);
fc_fill_fc_hdr(fp, FC_RCTL_DD_CMD_STATUS, ep->did, ep->sid, FC_TYPE_FCP,
FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ, 0);
- lport->tt.seq_send(lport, cmd->seq, fp);
- lport->tt.exch_done(cmd->seq);
+ rc = fc_seq_send(lport, cmd->seq, fp);
+ if (rc) {
+ pr_info_ratelimited("%s: Failed to send response frame %p, "
+ "xid <0x%x>\n", __func__, fp, ep->xid);
+ /*
+ * Generate a TASK_SET_FULL status to notify the initiator
+ * to reduce its queue_depth after the se_cmd response has
+ * been re-queued by target-core.
+ */
+ se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
+ return -ENOMEM;
+ }
+ fc_exch_done(cmd->seq);
+ /*
+ * Drop the extra ACK_KREF reference taken by target_submit_cmd()
+ * ahead of ft_check_stop_free() -> transport_generic_free_cmd()
+ * final se_cmd->cmd_kref put.
+ */
+ target_put_sess_cmd(&cmd->se_cmd);
return 0;
}
-int ft_write_pending_status(struct se_cmd *se_cmd)
-{
- struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
-
- return cmd->write_data_len != se_cmd->data_length;
-}
-
/*
* Send TX_RDY (transfer ready).
*/
@@ -206,7 +198,7 @@ int ft_write_pending(struct se_cmd *se_cmd)
memset(txrdy, 0, sizeof(*txrdy));
txrdy->ft_burst_len = htonl(se_cmd->data_length);
- cmd->seq = lport->tt.seq_start_next(cmd->seq);
+ cmd->seq = fc_seq_start_next(cmd->seq);
fc_fill_fc_hdr(fp, FC_RCTL_DD_DATA_DESC, ep->did, ep->sid, FC_TYPE_FCP,
FC_FC_EX_CTX | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
@@ -227,21 +219,7 @@ int ft_write_pending(struct se_cmd *se_cmd)
cmd->was_ddp_setup = 1;
}
}
- lport->tt.seq_send(lport, cmd->seq, fp);
- return 0;
-}
-
-u32 ft_get_task_tag(struct se_cmd *se_cmd)
-{
- struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
-
- if (cmd->aborted)
- return ~0;
- return fc_seq_exch(cmd->seq)->rxid;
-}
-
-int ft_get_cmd_state(struct se_cmd *se_cmd)
-{
+ fc_seq_send(lport, cmd->seq, fp);
return 0;
}
@@ -253,7 +231,7 @@ static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg)
struct ft_cmd *cmd = arg;
struct fc_frame_header *fh;
- if (unlikely(IS_ERR(fp))) {
+ if (IS_ERR(fp)) {
/* XXX need to find cmd if queued */
cmd->seq = NULL;
cmd->aborted = true;
@@ -317,8 +295,8 @@ static void ft_send_resp_status(struct fc_lport *lport,
fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_DD_CMD_STATUS, 0);
sp = fr_seq(fp);
if (sp) {
- lport->tt.seq_send(lport, sp, fp);
- lport->tt.exch_done(sp);
+ fc_seq_send(lport, sp, fp);
+ fc_exch_done(sp);
} else {
lport->tt.frame_send(lport, fp);
}
@@ -386,7 +364,7 @@ static void ft_send_tm(struct ft_cmd *cmd)
/* FIXME: Add referenced task tag for ABORT_TASK */
rc = target_submit_tmr(&cmd->se_cmd, cmd->sess->se_sess,
&cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun),
- cmd, tm_func, GFP_KERNEL, 0, 0);
+ cmd, tm_func, GFP_KERNEL, 0, TARGET_SCF_ACK_KREF);
if (rc < 0)
ft_send_resp_code_and_free(cmd, FCP_TMF_FAILED);
}
@@ -421,6 +399,17 @@ void ft_queue_tm_resp(struct se_cmd *se_cmd)
pr_debug("tmr fn %d resp %d fcp code %d\n",
tmr->function, tmr->response, code);
ft_send_resp_code(cmd, code);
+ /*
+ * Drop the extra ACK_KREF reference taken by target_submit_tmr()
+ * ahead of ft_check_stop_free() -> transport_generic_free_cmd()
+ * final se_cmd->cmd_kref put.
+ */
+ target_put_sess_cmd(&cmd->se_cmd);
+}
+
+void ft_aborted_task(struct se_cmd *se_cmd)
+{
}
static void ft_send_work(struct work_struct *work);
@@ -432,14 +421,22 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
{
struct ft_cmd *cmd;
struct fc_lport *lport = sess->tport->lport;
+ struct se_session *se_sess = sess->se_sess;
+ int tag, cpu;
- cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
- if (!cmd)
+ tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
+ if (tag < 0)
goto busy;
+
+ cmd = &((struct ft_cmd *)se_sess->sess_cmd_map)[tag];
+ memset(cmd, 0, sizeof(struct ft_cmd));
+
+ cmd->se_cmd.map_tag = tag;
+ cmd->se_cmd.map_cpu = cpu;
cmd->sess = sess;
- cmd->seq = lport->tt.seq_assign(lport, fp);
+ cmd->seq = fc_seq_assign(lport, fp);
if (!cmd->seq) {
- kfree(cmd);
+ target_free_tag(se_sess, &cmd->se_cmd);
goto busy;
}
cmd->req_frame = fp; /* hold frame during cmd */
@@ -526,30 +523,38 @@ static void ft_send_work(struct work_struct *work)
*/
switch (fcp->fc_pri_ta & FCP_PTA_MASK) {
case FCP_PTA_HEADQ:
- task_attr = MSG_HEAD_TAG;
+ task_attr = TCM_HEAD_TAG;
break;
case FCP_PTA_ORDERED:
- task_attr = MSG_ORDERED_TAG;
+ task_attr = TCM_ORDERED_TAG;
break;
case FCP_PTA_ACA:
- task_attr = MSG_ACA_TAG;
+ task_attr = TCM_ACA_TAG;
break;
- case FCP_PTA_SIMPLE: /* Fallthrough */
+ case FCP_PTA_SIMPLE:
default:
- task_attr = MSG_SIMPLE_TAG;
+ task_attr = TCM_SIMPLE_TAG;
}
- fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);
+ fc_seq_set_resp(cmd->seq, ft_recv_seq, cmd);
+ cmd->se_cmd.tag = fc_seq_exch(cmd->seq)->rxid;
+
/*
* Use a single se_cmd->cmd_kref as we expect to release se_cmd
* directly from ft_check_stop_free callback in response path.
*/
- if (target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, fcp->fc_cdb,
- &cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun),
- ntohl(fcp->fc_dl), task_attr, data_dir, 0))
+ if (target_init_cmd(&cmd->se_cmd, cmd->sess->se_sess,
+ &cmd->ft_sense_buffer[0],
+ scsilun_to_int(&fcp->fc_lun), ntohl(fcp->fc_dl),
+ task_attr, data_dir, TARGET_SCF_ACK_KREF))
goto err;
- pr_debug("r_ctl %x alloc target_submit_cmd\n", fh->fh_r_ctl);
+ if (target_submit_prep(&cmd->se_cmd, fcp->fc_cdb, NULL, 0, NULL, 0,
+ NULL, 0, GFP_KERNEL))
+ return;
+
+ target_submit(&cmd->se_cmd);
+ pr_debug("r_ctl %x target_submit_cmd %p\n", fh->fh_r_ctl, cmd);
return;
err:
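The ft_recv_cmd() hunk above moves command allocation from kzalloc() to the per-session tag pool sized by TCM_FC_DEFAULT_TAGS. A condensed sketch of that pattern, reusing the names from the diff (error paths trimmed, not a drop-in function):

/* Sketch of the tag-pool allocation used in ft_recv_cmd() above. */
static struct ft_cmd *ft_cmd_alloc_sketch(struct se_session *se_sess)
{
	struct ft_cmd *cmd;
	int tag, cpu;

	/* Take a free slot from the preallocated per-session pool. */
	tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
	if (tag < 0)
		return NULL;	/* pool exhausted: report busy upstream */

	/* sess_cmd_map is a flat array of fabric commands, indexed by tag. */
	cmd = &((struct ft_cmd *)se_sess->sess_cmd_map)[tag];
	memset(cmd, 0, sizeof(*cmd));
	cmd->se_cmd.map_tag = tag;
	cmd->se_cmd.map_cpu = cpu;

	/* Paired with target_free_tag(se_sess, &cmd->se_cmd) on release. */
	return cmd;
}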
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index b74feb0d5133..f686d95d3273 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* Filename: tcm_fc.c
*
@@ -10,15 +11,6 @@
*
* Copyright (c) 2009,2010 Nicholas A. Bellinger <nab@linux-iscsi.org>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
****************************************************************************/
#include <linux/module.h>
@@ -33,24 +25,15 @@
#include <linux/configfs.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
-#include <asm/unaligned.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_host.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_cmnd.h>
+#include <linux/unaligned.h>
#include <scsi/libfc.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
-#include <target/target_core_fabric_configfs.h>
-#include <target/target_core_configfs.h>
-#include <target/configfs_macros.h>
#include "tcm_fc.h"
-struct target_fabric_configfs *ft_configfs;
-
-LIST_HEAD(ft_lport_list);
+static LIST_HEAD(ft_wwn_list);
DEFINE_MUTEX(ft_lport_lock);
unsigned int ft_debug_logging;
@@ -138,55 +121,73 @@ static ssize_t ft_wwn_store(void *arg, const char *buf, size_t len)
* ACL auth ops.
*/
-static ssize_t ft_nacl_show_port_name(
- struct se_node_acl *se_nacl,
- char *page)
+static ssize_t ft_nacl_port_name_show(struct config_item *item, char *page)
{
+ struct se_node_acl *se_nacl = acl_to_nacl(item);
struct ft_node_acl *acl = container_of(se_nacl,
struct ft_node_acl, se_node_acl);
return ft_wwn_show(&acl->node_auth.port_name, page);
}
-static ssize_t ft_nacl_store_port_name(
- struct se_node_acl *se_nacl,
- const char *page,
- size_t count)
+static ssize_t ft_nacl_port_name_store(struct config_item *item,
+ const char *page, size_t count)
{
+ struct se_node_acl *se_nacl = acl_to_nacl(item);
struct ft_node_acl *acl = container_of(se_nacl,
struct ft_node_acl, se_node_acl);
return ft_wwn_store(&acl->node_auth.port_name, page, count);
}
-TF_NACL_BASE_ATTR(ft, port_name, S_IRUGO | S_IWUSR);
-
-static ssize_t ft_nacl_show_node_name(
- struct se_node_acl *se_nacl,
- char *page)
+static ssize_t ft_nacl_node_name_show(struct config_item *item,
+ char *page)
{
+ struct se_node_acl *se_nacl = acl_to_nacl(item);
struct ft_node_acl *acl = container_of(se_nacl,
struct ft_node_acl, se_node_acl);
return ft_wwn_show(&acl->node_auth.node_name, page);
}
-static ssize_t ft_nacl_store_node_name(
- struct se_node_acl *se_nacl,
- const char *page,
- size_t count)
+static ssize_t ft_nacl_node_name_store(struct config_item *item,
+ const char *page, size_t count)
{
+ struct se_node_acl *se_nacl = acl_to_nacl(item);
struct ft_node_acl *acl = container_of(se_nacl,
struct ft_node_acl, se_node_acl);
return ft_wwn_store(&acl->node_auth.node_name, page, count);
}
-TF_NACL_BASE_ATTR(ft, node_name, S_IRUGO | S_IWUSR);
+CONFIGFS_ATTR(ft_nacl_, node_name);
+CONFIGFS_ATTR(ft_nacl_, port_name);
+
+static ssize_t ft_nacl_tag_show(struct config_item *item,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%s", acl_to_nacl(item)->acl_tag);
+}
+
+static ssize_t ft_nacl_tag_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_node_acl *se_nacl = acl_to_nacl(item);
+ int ret;
+
+ ret = core_tpg_set_initiator_node_tag(se_nacl->se_tpg, se_nacl, page);
+
+ if (ret < 0)
+ return ret;
+ return count;
+}
+
+CONFIGFS_ATTR(ft_nacl_, tag);
static struct configfs_attribute *ft_nacl_base_attrs[] = {
- &ft_nacl_port_name.attr,
- &ft_nacl_node_name.attr,
+ &ft_nacl_attr_port_name,
+ &ft_nacl_attr_node_name,
+ &ft_nacl_attr_tag,
NULL,
};
@@ -198,107 +199,25 @@ static struct configfs_attribute *ft_nacl_base_attrs[] = {
* Add ACL for an initiator. The ACL is named arbitrarily.
* The port_name and/or node_name are attributes.
*/
-static struct se_node_acl *ft_add_acl(
- struct se_portal_group *se_tpg,
- struct config_group *group,
- const char *name)
+static int ft_init_nodeacl(struct se_node_acl *nacl, const char *name)
{
- struct ft_node_acl *acl;
- struct ft_tpg *tpg;
+ struct ft_node_acl *acl =
+ container_of(nacl, struct ft_node_acl, se_node_acl);
u64 wwpn;
- u32 q_depth;
-
- pr_debug("add acl %s\n", name);
- tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
if (ft_parse_wwn(name, &wwpn, 1) < 0)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
- acl = kzalloc(sizeof(struct ft_node_acl), GFP_KERNEL);
- if (!acl)
- return ERR_PTR(-ENOMEM);
acl->node_auth.port_name = wwpn;
-
- q_depth = 32; /* XXX bogus default - get from tpg? */
- return core_tpg_add_initiator_node_acl(&tpg->se_tpg,
- &acl->se_node_acl, name, q_depth);
-}
-
-static void ft_del_acl(struct se_node_acl *se_acl)
-{
- struct se_portal_group *se_tpg = se_acl->se_tpg;
- struct ft_tpg *tpg;
- struct ft_node_acl *acl = container_of(se_acl,
- struct ft_node_acl, se_node_acl);
-
- pr_debug("del acl %s\n",
- config_item_name(&se_acl->acl_group.cg_item));
-
- tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
- pr_debug("del acl %p se_acl %p tpg %p se_tpg %p\n",
- acl, se_acl, tpg, &tpg->se_tpg);
-
- core_tpg_del_initiator_node_acl(&tpg->se_tpg, se_acl, 1);
- kfree(acl);
-}
-
-struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata)
-{
- struct ft_node_acl *found = NULL;
- struct ft_node_acl *acl;
- struct se_portal_group *se_tpg = &tpg->se_tpg;
- struct se_node_acl *se_acl;
-
- spin_lock_irq(&se_tpg->acl_node_lock);
- list_for_each_entry(se_acl, &se_tpg->acl_node_list, acl_list) {
- acl = container_of(se_acl, struct ft_node_acl, se_node_acl);
- pr_debug("acl %p port_name %llx\n",
- acl, (unsigned long long)acl->node_auth.port_name);
- if (acl->node_auth.port_name == rdata->ids.port_name ||
- acl->node_auth.node_name == rdata->ids.node_name) {
- pr_debug("acl %p port_name %llx matched\n", acl,
- (unsigned long long)rdata->ids.port_name);
- found = acl;
- /* XXX need to hold onto ACL */
- break;
- }
- }
- spin_unlock_irq(&se_tpg->acl_node_lock);
- return found;
-}
-
-struct se_node_acl *ft_tpg_alloc_fabric_acl(struct se_portal_group *se_tpg)
-{
- struct ft_node_acl *acl;
-
- acl = kzalloc(sizeof(*acl), GFP_KERNEL);
- if (!acl) {
- pr_err("Unable to allocate struct ft_node_acl\n");
- return NULL;
- }
- pr_debug("acl %p\n", acl);
- return &acl->se_node_acl;
-}
-
-static void ft_tpg_release_fabric_acl(struct se_portal_group *se_tpg,
- struct se_node_acl *se_acl)
-{
- struct ft_node_acl *acl = container_of(se_acl,
- struct ft_node_acl, se_node_acl);
-
- pr_debug("acl %p\n", acl);
- kfree(acl);
+ return 0;
}
/*
* local_port port_group (tpg) ops.
*/
-static struct se_portal_group *ft_add_tpg(
- struct se_wwn *wwn,
- struct config_group *group,
- const char *name)
+static struct se_portal_group *ft_add_tpg(struct se_wwn *wwn, const char *name)
{
- struct ft_lport_acl *lacl;
+ struct ft_lport_wwn *ft_wwn;
struct ft_tpg *tpg;
struct workqueue_struct *wq;
unsigned long index;
@@ -311,25 +230,33 @@ static struct se_portal_group *ft_add_tpg(
*/
if (strstr(name, "tpgt_") != name)
return NULL;
- if (strict_strtoul(name + 5, 10, &index) || index > UINT_MAX)
+
+ ret = kstrtoul(name + 5, 10, &index);
+ if (ret)
+ return NULL;
+ if (index > UINT_MAX)
return NULL;
- lacl = container_of(wwn, struct ft_lport_acl, fc_lport_wwn);
+ if (index != 1) {
+ pr_err("Error, a single TPG=1 is used for HW port mappings\n");
+ return ERR_PTR(-ENOSYS);
+ }
+
+ ft_wwn = container_of(wwn, struct ft_lport_wwn, se_wwn);
tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
if (!tpg)
return NULL;
tpg->index = index;
- tpg->lport_acl = lacl;
+ tpg->lport_wwn = ft_wwn;
INIT_LIST_HEAD(&tpg->lun_list);
- wq = alloc_workqueue("tcm_fc", 0, 1);
+ wq = alloc_workqueue("tcm_fc", WQ_PERCPU, 1);
if (!wq) {
kfree(tpg);
return NULL;
}
- ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg,
- tpg, TRANSPORT_TPG_TYPE_NORMAL);
+ ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP);
if (ret < 0) {
destroy_workqueue(wq);
kfree(tpg);
@@ -338,7 +265,7 @@ static struct se_portal_group *ft_add_tpg(
tpg->workqueue = wq;
mutex_lock(&ft_lport_lock);
- list_add_tail(&tpg->list, &lacl->tpg_list);
+ ft_wwn->tpg = tpg;
mutex_unlock(&ft_lport_lock);
return &tpg->se_tpg;
@@ -347,6 +274,7 @@ static struct se_portal_group *ft_add_tpg(
static void ft_del_tpg(struct se_portal_group *se_tpg)
{
struct ft_tpg *tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
+ struct ft_lport_wwn *ft_wwn = tpg->lport_wwn;
pr_debug("del tpg %s\n",
config_item_name(&tpg->se_tpg.tpg_group.cg_item));
@@ -357,7 +285,7 @@ static void ft_del_tpg(struct se_portal_group *se_tpg)
synchronize_rcu();
mutex_lock(&ft_lport_lock);
- list_del(&tpg->list);
+ ft_wwn->tpg = NULL;
if (tpg->tport) {
tpg->tport->tpg = NULL;
tpg->tport = NULL;
@@ -376,15 +304,11 @@ static void ft_del_tpg(struct se_portal_group *se_tpg)
*/
struct ft_tpg *ft_lport_find_tpg(struct fc_lport *lport)
{
- struct ft_lport_acl *lacl;
- struct ft_tpg *tpg;
+ struct ft_lport_wwn *ft_wwn;
- list_for_each_entry(lacl, &ft_lport_list, list) {
- if (lacl->wwpn == lport->wwpn) {
- list_for_each_entry(tpg, &lacl->tpg_list, list)
- return tpg; /* XXX for now return first entry */
- return NULL;
- }
+ list_for_each_entry(ft_wwn, &ft_wwn_list, ft_wwn_node) {
+ if (ft_wwn->wwpn == lport->wwpn)
+ return ft_wwn->tpg;
}
return NULL;
}
@@ -397,211 +321,121 @@ struct ft_tpg *ft_lport_find_tpg(struct fc_lport *lport)
* Add lport to allowed config.
* The name is the WWPN in lower-case ASCII, colon-separated bytes.
*/
-static struct se_wwn *ft_add_lport(
+static struct se_wwn *ft_add_wwn(
struct target_fabric_configfs *tf,
struct config_group *group,
const char *name)
{
- struct ft_lport_acl *lacl;
- struct ft_lport_acl *old_lacl;
+ struct ft_lport_wwn *ft_wwn;
+ struct ft_lport_wwn *old_ft_wwn;
u64 wwpn;
- pr_debug("add lport %s\n", name);
+ pr_debug("add wwn %s\n", name);
if (ft_parse_wwn(name, &wwpn, 1) < 0)
return NULL;
- lacl = kzalloc(sizeof(*lacl), GFP_KERNEL);
- if (!lacl)
+ ft_wwn = kzalloc(sizeof(*ft_wwn), GFP_KERNEL);
+ if (!ft_wwn)
return NULL;
- lacl->wwpn = wwpn;
- INIT_LIST_HEAD(&lacl->tpg_list);
+ ft_wwn->wwpn = wwpn;
mutex_lock(&ft_lport_lock);
- list_for_each_entry(old_lacl, &ft_lport_list, list) {
- if (old_lacl->wwpn == wwpn) {
+ list_for_each_entry(old_ft_wwn, &ft_wwn_list, ft_wwn_node) {
+ if (old_ft_wwn->wwpn == wwpn) {
mutex_unlock(&ft_lport_lock);
- kfree(lacl);
+ kfree(ft_wwn);
return NULL;
}
}
- list_add_tail(&lacl->list, &ft_lport_list);
- ft_format_wwn(lacl->name, sizeof(lacl->name), wwpn);
+ list_add_tail(&ft_wwn->ft_wwn_node, &ft_wwn_list);
+ ft_format_wwn(ft_wwn->name, sizeof(ft_wwn->name), wwpn);
mutex_unlock(&ft_lport_lock);
- return &lacl->fc_lport_wwn;
+ return &ft_wwn->se_wwn;
}
-static void ft_del_lport(struct se_wwn *wwn)
+static void ft_del_wwn(struct se_wwn *wwn)
{
- struct ft_lport_acl *lacl = container_of(wwn,
- struct ft_lport_acl, fc_lport_wwn);
+ struct ft_lport_wwn *ft_wwn = container_of(wwn,
+ struct ft_lport_wwn, se_wwn);
- pr_debug("del lport %s\n", lacl->name);
+ pr_debug("del wwn %s\n", ft_wwn->name);
mutex_lock(&ft_lport_lock);
- list_del(&lacl->list);
+ list_del(&ft_wwn->ft_wwn_node);
mutex_unlock(&ft_lport_lock);
- kfree(lacl);
+ kfree(ft_wwn);
}
-static ssize_t ft_wwn_show_attr_version(
- struct target_fabric_configfs *tf,
- char *page)
+static ssize_t ft_wwn_version_show(struct config_item *item, char *page)
{
return sprintf(page, "TCM FC " FT_VERSION " on %s/%s on "
""UTS_RELEASE"\n", utsname()->sysname, utsname()->machine);
}
-TF_WWN_ATTR_RO(ft, version);
+CONFIGFS_ATTR_RO(ft_wwn_, version);
static struct configfs_attribute *ft_wwn_attrs[] = {
- &ft_wwn_version.attr,
+ &ft_wwn_attr_version,
NULL,
};
-static char *ft_get_fabric_name(void)
+static inline struct ft_tpg *ft_tpg(struct se_portal_group *se_tpg)
{
- return "fc";
+ return container_of(se_tpg, struct ft_tpg, se_tpg);
}
static char *ft_get_fabric_wwn(struct se_portal_group *se_tpg)
{
- struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr;
-
- return tpg->lport_acl->name;
+ return ft_tpg(se_tpg)->lport_wwn->name;
}
static u16 ft_get_tag(struct se_portal_group *se_tpg)
{
- struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr;
-
/*
* This tag is used when forming SCSI Name identifier in EVPD=1 0x83
* to represent the SCSI Target Port.
*/
- return tpg->index;
-}
-
-static u32 ft_get_default_depth(struct se_portal_group *se_tpg)
-{
- return 1;
-}
-
-static int ft_check_false(struct se_portal_group *se_tpg)
-{
- return 0;
-}
-
-static void ft_set_default_node_attr(struct se_node_acl *se_nacl)
-{
+ return ft_tpg(se_tpg)->index;
}
static u32 ft_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
- struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr;
-
- return tpg->index;
+ return ft_tpg(se_tpg)->index;
}
-static struct target_core_fabric_ops ft_fabric_ops = {
- .get_fabric_name = ft_get_fabric_name,
- .get_fabric_proto_ident = fc_get_fabric_proto_ident,
+static const struct target_core_fabric_ops ft_fabric_ops = {
+ .module = THIS_MODULE,
+ .fabric_name = "fc",
+ .node_acl_size = sizeof(struct ft_node_acl),
.tpg_get_wwn = ft_get_fabric_wwn,
.tpg_get_tag = ft_get_tag,
- .tpg_get_default_depth = ft_get_default_depth,
- .tpg_get_pr_transport_id = fc_get_pr_transport_id,
- .tpg_get_pr_transport_id_len = fc_get_pr_transport_id_len,
- .tpg_parse_pr_out_transport_id = fc_parse_pr_out_transport_id,
- .tpg_check_demo_mode = ft_check_false,
- .tpg_check_demo_mode_cache = ft_check_false,
- .tpg_check_demo_mode_write_protect = ft_check_false,
- .tpg_check_prod_mode_write_protect = ft_check_false,
- .tpg_alloc_fabric_acl = ft_tpg_alloc_fabric_acl,
- .tpg_release_fabric_acl = ft_tpg_release_fabric_acl,
.tpg_get_inst_index = ft_tpg_get_inst_index,
.check_stop_free = ft_check_stop_free,
.release_cmd = ft_release_cmd,
- .shutdown_session = ft_sess_shutdown,
.close_session = ft_sess_close,
.sess_get_index = ft_sess_get_index,
.sess_get_initiator_sid = NULL,
.write_pending = ft_write_pending,
- .write_pending_status = ft_write_pending_status,
- .set_default_node_attributes = ft_set_default_node_attr,
- .get_task_tag = ft_get_task_tag,
- .get_cmd_state = ft_get_cmd_state,
.queue_data_in = ft_queue_data_in,
.queue_status = ft_queue_status,
.queue_tm_rsp = ft_queue_tm_resp,
+ .aborted_task = ft_aborted_task,
/*
* Setup function pointers for generic logic in
* target_core_fabric_configfs.c
*/
- .fabric_make_wwn = &ft_add_lport,
- .fabric_drop_wwn = &ft_del_lport,
+ .fabric_make_wwn = &ft_add_wwn,
+ .fabric_drop_wwn = &ft_del_wwn,
.fabric_make_tpg = &ft_add_tpg,
.fabric_drop_tpg = &ft_del_tpg,
- .fabric_post_link = NULL,
- .fabric_pre_unlink = NULL,
- .fabric_make_np = NULL,
- .fabric_drop_np = NULL,
- .fabric_make_nodeacl = &ft_add_acl,
- .fabric_drop_nodeacl = &ft_del_acl,
-};
-
-int ft_register_configfs(void)
-{
- struct target_fabric_configfs *fabric;
- int ret;
-
- /*
- * Register the top level struct config_item_type with TCM core
- */
- fabric = target_fabric_configfs_init(THIS_MODULE, "fc");
- if (IS_ERR(fabric)) {
- pr_err("%s: target_fabric_configfs_init() failed!\n",
- __func__);
- return PTR_ERR(fabric);
- }
- fabric->tf_ops = ft_fabric_ops;
+ .fabric_init_nodeacl = &ft_init_nodeacl,
- /*
- * Setup default attribute lists for various fabric->tf_cit_tmpl
- */
- TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = ft_wwn_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs =
- ft_nacl_base_attrs;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
- TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
- /*
- * register the fabric for use within TCM
- */
- ret = target_fabric_configfs_register(fabric);
- if (ret < 0) {
- pr_debug("target_fabric_configfs_register() for"
- " FC Target failed!\n");
- target_fabric_configfs_free(fabric);
- return -1;
- }
+ .tfc_wwn_attrs = ft_wwn_attrs,
+ .tfc_tpg_nacl_base_attrs = ft_nacl_base_attrs,
- /*
- * Setup our local pointer to *fabric.
- */
- ft_configfs = fabric;
- return 0;
-}
-
-void ft_deregister_configfs(void)
-{
- if (!ft_configfs)
- return;
- target_fabric_configfs_deregister(ft_configfs);
- ft_configfs = NULL;
-}
+ .default_submit_type = TARGET_DIRECT_SUBMIT,
+ .direct_submit_supp = 1,
+};
static struct notifier_block ft_notifier = {
.notifier_call = ft_lport_notify
@@ -609,15 +443,24 @@ static struct notifier_block ft_notifier = {
static int __init ft_init(void)
{
- if (ft_register_configfs())
- return -1;
- if (fc_fc4_register_provider(FC_TYPE_FCP, &ft_prov)) {
- ft_deregister_configfs();
- return -1;
- }
+ int ret;
+
+ ret = target_register_template(&ft_fabric_ops);
+ if (ret)
+ goto out;
+
+ ret = fc_fc4_register_provider(FC_TYPE_FCP, &ft_prov);
+ if (ret)
+ goto out_unregister_template;
+
blocking_notifier_chain_register(&fc_lport_notifier_head, &ft_notifier);
fc_lport_iterate(ft_lport_add, NULL);
return 0;
+
+out_unregister_template:
+ target_unregister_template(&ft_fabric_ops);
+out:
+ return ret;
}
static void __exit ft_exit(void)
@@ -626,7 +469,7 @@ static void __exit ft_exit(void)
&ft_notifier);
fc_fc4_deregister_provider(FC_TYPE_FCP, &ft_prov);
fc_lport_iterate(ft_lport_del, NULL);
- ft_deregister_configfs();
+ target_unregister_template(&ft_fabric_ops);
synchronize_rcu();
}
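The tfc_conf.c rework replaces the hand-rolled ft_register_configfs()/ft_deregister_configfs() pair with the template API. A minimal sketch of that registration shape (ops table trimmed to the fields this diff relies on):

static const struct target_core_fabric_ops sketch_ops = {
	.module      = THIS_MODULE,
	.fabric_name = "fc",
	/* wwn/tpg/nacl callbacks and attribute arrays as in ft_fabric_ops */
};

static int __init sketch_init(void)
{
	/* One call builds and registers the whole configfs hierarchy. */
	return target_register_template(&sketch_ops);
}

static void __exit sketch_exit(void)
{
	target_unregister_template(&sketch_ops);
}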
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index e415af32115a..45329284f52f 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2010 Cisco Systems, Inc.
*
@@ -9,19 +10,6 @@
* Copyright (c) 2009 Rising Tide, Inc.
* Copyright (c) 2009 Linux-iSCSI.org
* Copyright (c) 2009 Nicholas A. Bellinger <nab@linux-iscsi.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
/* XXX TBD some includes may be extraneous */
@@ -38,18 +26,11 @@
#include <linux/ctype.h>
#include <linux/hash.h>
#include <linux/ratelimit.h>
-#include <asm/unaligned.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_host.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_cmnd.h>
+#include <linux/unaligned.h>
#include <scsi/libfc.h>
-#include <scsi/fc_encode.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
-#include <target/target_core_configfs.h>
-#include <target/configfs_macros.h>
#include "tcm_fc.h"
@@ -82,9 +63,13 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
if (cmd->aborted)
return 0;
+
+ if (se_cmd->scsi_status == SAM_STAT_TASK_SET_FULL)
+ goto queue_status;
+
ep = fc_seq_exch(cmd->seq);
lport = ep->lp;
- cmd->seq = lport->tt.seq_start_next(cmd->seq);
+ cmd->seq = fc_seq_start_next(cmd->seq);
remaining = se_cmd->data_length;
@@ -150,15 +135,14 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
page, off_in_page, tlen);
fr_len(fp) += tlen;
fp_skb(fp)->data_len += tlen;
- fp_skb(fp)->truesize +=
- PAGE_SIZE << compound_order(page);
+ fp_skb(fp)->truesize += page_size(page);
} else {
BUG_ON(!page);
from = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
page_addr = from;
- from += mem_off & ~PAGE_MASK;
+ from += offset_in_page(mem_off);
tlen = min(tlen, (size_t)(PAGE_SIZE -
- (mem_off & ~PAGE_MASK)));
+ offset_in_page(mem_off)));
memcpy(to, from, tlen);
kunmap_atomic(page_addr);
to += tlen;
@@ -176,16 +160,25 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
f_ctl |= FC_FC_END_SEQ;
fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
FC_TYPE_FCP, f_ctl, fh_off);
- error = lport->tt.seq_send(lport, seq, fp);
+ error = fc_seq_send(lport, seq, fp);
if (error) {
- /* XXX For now, initiator will retry */
- pr_err_ratelimited("%s: Failed to send frame %p, "
+ pr_info_ratelimited("%s: Failed to send frame %p, "
"xid <0x%x>, remaining %zu, "
"lso_max <0x%x>\n",
__func__, fp, ep->xid,
remaining, lport->lso_max);
+ /*
+ * Go ahead and set TASK_SET_FULL status ignoring the
+ * rest of the DataIN, and immediately attempt to
+ * send the response via ft_queue_status() in order
+ * to notify the initiator that it should reduce its
+ * per-LUN queue_depth.
+ */
+ se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
+ break;
}
}
+queue_status:
return ft_queue_status(se_cmd);
}
@@ -227,7 +220,6 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
ep = fc_seq_exch(seq);
lport = ep->lp;
if (cmd->was_ddp_setup) {
- BUG_ON(!ep);
BUG_ON(!lport);
/*
* Since DDP (Large Rx offload) was setup for this request,
@@ -307,9 +299,9 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
to = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
page_addr = to;
- to += mem_off & ~PAGE_MASK;
+ to += offset_in_page(mem_off);
tlen = min(tlen, (size_t)(PAGE_SIZE -
- (mem_off & ~PAGE_MASK)));
+ offset_in_page(mem_off)));
memcpy(to, from, tlen);
kunmap_atomic(page_addr);
@@ -346,7 +338,7 @@ void ft_invl_hw_context(struct ft_cmd *cmd)
ep = fc_seq_exch(seq);
if (ep) {
lport = ep->lp;
- if (lport && (ep->xid <= lport->lro_xid))
+ if (lport && (ep->xid <= lport->lro_xid)) {
/*
* "ddp_done" trigger invalidation of HW
* specific DDP context
@@ -361,6 +353,7 @@ void ft_invl_hw_context(struct ft_cmd *cmd)
* identified using ep->xid)
*/
cmd->was_ddp_setup = 0;
+ }
}
}
}
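The tfc_io.c hunks replace the open-coded `mem_off & ~PAGE_MASK` with offset_in_page(). A hedged sketch of the per-page copy bound both loops compute:

#include <linux/mm.h>
#include <linux/minmax.h>

/* Sketch: cap a copy at the end of the page containing mem_off. */
static size_t bytes_until_page_end(size_t mem_off, size_t tlen)
{
	/* offset_in_page(off) is the canonical form of off & ~PAGE_MASK. */
	return min(tlen, (size_t)(PAGE_SIZE - offset_in_page(mem_off)));
}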
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index 4859505ae2ed..d6afaba52ea5 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -1,18 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2010 Cisco Systems, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
/* XXX TBD some includes may be extraneous */
@@ -31,27 +19,26 @@
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kref.h>
-#include <asm/unaligned.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_host.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_cmnd.h>
+#include <linux/unaligned.h>
#include <scsi/libfc.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
-#include <target/target_core_configfs.h>
-#include <target/configfs_macros.h>
#include "tcm_fc.h"
+#define TFC_SESS_DBG(lport, fmt, args...) \
+ pr_debug("host%u: rport %6.6x: " fmt, \
+ (lport)->host->host_no, \
+ (lport)->port_id, ##args)
+
static void ft_sess_delete_all(struct ft_tport *);
/*
* Lookup or allocate target local port.
* Caller holds ft_lport_lock.
*/
-static struct ft_tport *ft_tport_create(struct fc_lport *lport)
+static struct ft_tport *ft_tport_get(struct fc_lport *lport)
{
struct ft_tpg *tpg;
struct ft_tport *tport;
@@ -68,6 +55,7 @@ static struct ft_tport *ft_tport_create(struct fc_lport *lport)
if (tport) {
tport->tpg = tpg;
+ tpg->tport = tport;
return tport;
}
@@ -96,8 +84,9 @@ static void ft_tport_delete(struct ft_tport *tport)
ft_sess_delete_all(tport);
lport = tport->lport;
+ lport->service_params &= ~FCP_SPPF_TARG_FCN;
BUG_ON(tport != lport->prov[FC_TYPE_FCP]);
- rcu_assign_pointer(lport->prov[FC_TYPE_FCP], NULL);
+ RCU_INIT_POINTER(lport->prov[FC_TYPE_FCP], NULL);
tpg = tport->tpg;
if (tpg) {
@@ -114,7 +103,8 @@ static void ft_tport_delete(struct ft_tport *tport)
void ft_lport_add(struct fc_lport *lport, void *arg)
{
mutex_lock(&ft_lport_lock);
- ft_tport_create(lport);
+ ft_tport_get(lport);
+ lport->service_params |= FCP_SPPF_TARG_FCN;
mutex_unlock(&ft_lport_lock);
}
@@ -170,36 +160,59 @@ static struct ft_sess *ft_sess_get(struct fc_lport *lport, u32 port_id)
struct ft_tport *tport;
struct hlist_head *head;
struct ft_sess *sess;
+ char *reason = "no session created";
rcu_read_lock();
tport = rcu_dereference(lport->prov[FC_TYPE_FCP]);
- if (!tport)
+ if (!tport) {
+ reason = "not an FCP port";
goto out;
+ }
head = &tport->hash[ft_sess_hash(port_id)];
hlist_for_each_entry_rcu(sess, head, hash) {
if (sess->port_id == port_id) {
kref_get(&sess->kref);
rcu_read_unlock();
- pr_debug("port_id %x found %p\n", port_id, sess);
+ TFC_SESS_DBG(lport, "port_id %x found %p\n",
+ port_id, sess);
return sess;
}
}
out:
rcu_read_unlock();
- pr_debug("port_id %x not found\n", port_id);
+ TFC_SESS_DBG(lport, "port_id %x not found, %s\n",
+ port_id, reason);
return NULL;
}
+static int ft_sess_alloc_cb(struct se_portal_group *se_tpg,
+ struct se_session *se_sess, void *p)
+{
+ struct ft_sess *sess = p;
+ struct ft_tport *tport = sess->tport;
+ struct hlist_head *head = &tport->hash[ft_sess_hash(sess->port_id)];
+
+ TFC_SESS_DBG(tport->lport, "port_id %x sess %p\n", sess->port_id, sess);
+ hlist_add_head_rcu(&sess->hash, head);
+ tport->sess_count++;
+
+ return 0;
+}
+
/*
* Allocate session and enter it in the hash for the local port.
* Caller holds ft_lport_lock.
*/
static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
- struct ft_node_acl *acl)
+ struct fc_rport_priv *rdata)
{
+ struct se_portal_group *se_tpg = &tport->tpg->se_tpg;
struct ft_sess *sess;
struct hlist_head *head;
+ unsigned char initiatorname[TRANSPORT_IQN_LEN];
+
+ ft_format_wwn(&initiatorname[0], TRANSPORT_IQN_LEN, rdata->ids.port_name);
head = &tport->hash[ft_sess_hash(port_id)];
hlist_for_each_entry_rcu(sess, head, hash)
@@ -208,24 +221,21 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
sess = kzalloc(sizeof(*sess), GFP_KERNEL);
if (!sess)
- return NULL;
+ return ERR_PTR(-ENOMEM);
- sess->se_sess = transport_init_session();
- if (IS_ERR(sess->se_sess)) {
- kfree(sess);
- return NULL;
- }
- sess->se_sess->se_node_acl = &acl->se_node_acl;
+ kref_init(&sess->kref); /* ref for table entry */
sess->tport = tport;
sess->port_id = port_id;
- kref_init(&sess->kref); /* ref for table entry */
- hlist_add_head_rcu(&sess->hash, head);
- tport->sess_count++;
- pr_debug("port_id %x sess %p\n", port_id, sess);
-
- transport_register_session(&tport->tpg->se_tpg, &acl->se_node_acl,
- sess->se_sess, sess);
+ sess->se_sess = target_setup_session(se_tpg, TCM_FC_DEFAULT_TAGS,
+ sizeof(struct ft_cmd),
+ TARGET_PROT_NORMAL, &initiatorname[0],
+ sess, ft_sess_alloc_cb);
+ if (IS_ERR(sess->se_sess)) {
+ int rc = PTR_ERR(sess->se_sess);
+ kfree(sess);
+ sess = ERR_PTR(rc);
+ }
return sess;
}
@@ -263,6 +273,13 @@ static struct ft_sess *ft_sess_delete(struct ft_tport *tport, u32 port_id)
return NULL;
}
+static void ft_close_sess(struct ft_sess *sess)
+{
+ target_stop_session(sess->se_sess);
+ target_wait_for_sess_cmds(sess->se_sess);
+ ft_sess_put(sess);
+}
+
/*
* Delete all sessions from tport.
* Caller holds ft_lport_lock.
@@ -276,8 +293,7 @@ static void ft_sess_delete_all(struct ft_tport *tport)
head < &tport->hash[FT_SESS_HASH_SIZE]; head++) {
hlist_for_each_entry_rcu(sess, head, hash) {
ft_sess_unhash(sess);
- transport_deregister_session_configfs(sess->se_sess);
- ft_sess_put(sess); /* release from table */
+ ft_close_sess(sess); /* release from table */
}
}
}
@@ -287,18 +303,6 @@ static void ft_sess_delete_all(struct ft_tport *tport)
*/
/*
- * Determine whether session is allowed to be shutdown in the current context.
- * Returns non-zero if the session should be shutdown.
- */
-int ft_sess_shutdown(struct se_session *se_sess)
-{
- struct ft_sess *sess = se_sess->fabric_sess_ptr;
-
- pr_debug("port_id %x\n", sess->port_id);
- return 1;
-}
-
-/*
* Remove session and send PRLO.
* This is called when the ACL is being deleted or queue depth is changing.
*/
@@ -313,11 +317,10 @@ void ft_sess_close(struct se_session *se_sess)
mutex_unlock(&ft_lport_lock);
return;
}
- pr_debug("port_id %x\n", port_id);
+ TFC_SESS_DBG(sess->tport->lport, "port_id %x close session\n", port_id);
ft_sess_unhash(sess);
mutex_unlock(&ft_lport_lock);
- transport_deregister_session_configfs(se_sess);
- ft_sess_put(sess);
+ ft_close_sess(sess);
/* XXX Send LOGO or PRLO */
synchronize_rcu(); /* let transport deregister happen */
}
@@ -346,17 +349,12 @@ static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len,
{
struct ft_tport *tport;
struct ft_sess *sess;
- struct ft_node_acl *acl;
u32 fcp_parm;
- tport = ft_tport_create(rdata->local_port);
+ tport = ft_tport_get(rdata->local_port);
if (!tport)
goto not_target; /* not a target for this local port */
- acl = ft_acl_get(tport->tpg, rdata);
- if (!acl)
- goto not_target; /* no target for this remote */
-
if (!rspp)
goto fill;
@@ -378,9 +376,14 @@ static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len,
spp->spp_flags |= FC_SPP_EST_IMG_PAIR;
if (!(fcp_parm & FCP_SPPF_INIT_FCN))
return FC_SPP_RESP_CONF;
- sess = ft_sess_create(tport, rdata->ids.port_id, acl);
- if (!sess)
- return FC_SPP_RESP_RES;
+ sess = ft_sess_create(tport, rdata->ids.port_id, rdata);
+ if (IS_ERR(sess)) {
+ if (PTR_ERR(sess) == -EACCES) {
+ spp->spp_flags &= ~FC_SPP_EST_IMG_PAIR;
+ return FC_SPP_RESP_CONF;
+ } else
+ return FC_SPP_RESP_RES;
+ }
if (!sess->params)
rdata->prli_count++;
sess->params = fcp_parm;
@@ -407,7 +410,7 @@ not_target:
}
/**
- * tcm_fcp_prli() - Handle incoming or outgoing PRLI for the FCP target
+ * ft_prli() - Handle incoming or outgoing PRLI for the FCP target
* @rdata: remote port private
* @spp_len: service parameter page length
* @rspp: received service parameter page (NULL for outgoing PRLI)
@@ -423,8 +426,8 @@ static int ft_prli(struct fc_rport_priv *rdata, u32 spp_len,
mutex_lock(&ft_lport_lock);
ret = ft_prli_locked(rdata, spp_len, rspp, spp);
mutex_unlock(&ft_lport_lock);
- pr_debug("port_id %x flags %x ret %x\n",
- rdata->ids.port_id, rspp ? rspp->spp_flags : 0, ret);
+ TFC_SESS_DBG(rdata->local_port, "port_id %x flags %x ret %x\n",
+ rdata->ids.port_id, rspp ? rspp->spp_flags : 0, ret);
return ret;
}
@@ -432,13 +435,13 @@ static void ft_sess_free(struct kref *kref)
{
struct ft_sess *sess = container_of(kref, struct ft_sess, kref);
- transport_deregister_session(sess->se_sess);
+ target_remove_session(sess->se_sess);
kfree_rcu(sess, rcu);
}
void ft_sess_put(struct ft_sess *sess)
{
- int sess_held = atomic_read(&sess->kref.refcount);
+ int sess_held = kref_read(&sess->kref);
BUG_ON(!sess_held);
kref_put(&sess->kref, ft_sess_free);
@@ -463,8 +466,7 @@ static void ft_prlo(struct fc_rport_priv *rdata)
return;
}
mutex_unlock(&ft_lport_lock);
- transport_deregister_session_configfs(sess->se_sess);
- ft_sess_put(sess); /* release from table */
+ ft_close_sess(sess); /* release from table */
rdata->prli_count--;
/* XXX TBD - clearing actions. unit attn, see 4.10 */
}
@@ -478,11 +480,11 @@ static void ft_recv(struct fc_lport *lport, struct fc_frame *fp)
struct ft_sess *sess;
u32 sid = fc_frame_sid(fp);
- pr_debug("sid %x\n", sid);
+ TFC_SESS_DBG(lport, "recv sid %x\n", sid);
sess = ft_sess_get(lport, sid);
if (!sess) {
- pr_debug("sid %x sess lookup failed\n", sid);
+ TFC_SESS_DBG(lport, "sid %x sess lookup failed\n", sid);
/* TBD XXX - if FCP_CMND, send PRLO */
fc_frame_free(fp);
return;
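ft_sess_put() above asserts a live reference with kref_read() before dropping it. A generic sketch of that kref lifecycle (struct and function names hypothetical):

#include <linux/kref.h>
#include <linux/slab.h>

struct sketch_sess {
	struct kref kref;
};

static void sketch_sess_free(struct kref *kref)
{
	kfree(container_of(kref, struct sketch_sess, kref));
}

static void sketch_sess_put(struct sketch_sess *sess)
{
	/* A zero count here would mean a put without a matching get. */
	BUG_ON(!kref_read(&sess->kref));
	kref_put(&sess->kref, sketch_sess_free);
}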
diff --git a/drivers/target/tcm_remote/Kconfig b/drivers/target/tcm_remote/Kconfig
new file mode 100644
index 000000000000..e6bebb5fe6f1
--- /dev/null
+++ b/drivers/target/tcm_remote/Kconfig
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config REMOTE_TARGET
+ tristate "TCM Virtual Remote target"
+ depends on SCSI
+ help
+ Say Y here to enable the TCM Virtual Remote fabric.
+ This is a dummy fabric that tells TCM about the configuration
+ of TPG/ACL/LUN on peer nodes in a cluster.
diff --git a/drivers/target/tcm_remote/Makefile b/drivers/target/tcm_remote/Makefile
new file mode 100644
index 000000000000..5818ffd0b0fa
--- /dev/null
+++ b/drivers/target/tcm_remote/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_REMOTE_TARGET) += tcm_remote.o
diff --git a/drivers/target/tcm_remote/tcm_remote.c b/drivers/target/tcm_remote/tcm_remote.c
new file mode 100644
index 000000000000..cb8db2558056
--- /dev/null
+++ b/drivers/target/tcm_remote/tcm_remote.c
@@ -0,0 +1,268 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/configfs.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+
+#include "tcm_remote.h"
+
+static inline struct tcm_remote_tpg *remote_tpg(struct se_portal_group *se_tpg)
+{
+ return container_of(se_tpg, struct tcm_remote_tpg, remote_se_tpg);
+}
+
+static char *tcm_remote_get_endpoint_wwn(struct se_portal_group *se_tpg)
+{
+ /*
+ * Return the passed NAA identifier for the Target Port
+ */
+ return &remote_tpg(se_tpg)->remote_hba->remote_wwn_address[0];
+}
+
+static u16 tcm_remote_get_tag(struct se_portal_group *se_tpg)
+{
+ /*
+ * This Tag is used when forming SCSI Name identifier in EVPD=1 0x83
+ * to represent the SCSI Target Port.
+ */
+ return remote_tpg(se_tpg)->remote_tpgt;
+}
+
+static int tcm_remote_dummy_cmd_fn(struct se_cmd *se_cmd)
+{
+ return 0;
+}
+
+static void tcm_remote_dummy_cmd_void_fn(struct se_cmd *se_cmd)
+{
+
+}
+
+static char *tcm_remote_dump_proto_id(struct tcm_remote_hba *remote_hba)
+{
+ switch (remote_hba->remote_proto_id) {
+ case SCSI_PROTOCOL_SAS:
+ return "SAS";
+ case SCSI_PROTOCOL_SRP:
+ return "SRP";
+ case SCSI_PROTOCOL_FCP:
+ return "FCP";
+ case SCSI_PROTOCOL_ISCSI:
+ return "iSCSI";
+ default:
+ break;
+ }
+
+ return "Unknown";
+}
+
+static int tcm_remote_port_link(
+ struct se_portal_group *se_tpg,
+ struct se_lun *lun)
+{
+ pr_debug("TCM_Remote_ConfigFS: Port Link LUN %lld Successful\n",
+ lun->unpacked_lun);
+ return 0;
+}
+
+static void tcm_remote_port_unlink(
+ struct se_portal_group *se_tpg,
+ struct se_lun *lun)
+{
+ pr_debug("TCM_Remote_ConfigFS: Port Unlink LUN %lld Successful\n",
+ lun->unpacked_lun);
+}
+
+static struct se_portal_group *tcm_remote_make_tpg(
+ struct se_wwn *wwn,
+ const char *name)
+{
+ struct tcm_remote_hba *remote_hba = container_of(wwn,
+ struct tcm_remote_hba, remote_hba_wwn);
+ struct tcm_remote_tpg *remote_tpg;
+ unsigned long tpgt;
+ int ret;
+
+ if (strstr(name, "tpgt_") != name) {
+ pr_err("Unable to locate \"tpgt_#\" directory group\n");
+ return ERR_PTR(-EINVAL);
+ }
+ if (kstrtoul(name + 5, 10, &tpgt))
+ return ERR_PTR(-EINVAL);
+
+ if (tpgt >= TL_TPGS_PER_HBA) {
+ pr_err("Passed tpgt: %lu exceeds TL_TPGS_PER_HBA: %u\n",
+ tpgt, TL_TPGS_PER_HBA);
+ return ERR_PTR(-EINVAL);
+ }
+ remote_tpg = &remote_hba->remote_hba_tpgs[tpgt];
+ remote_tpg->remote_hba = remote_hba;
+ remote_tpg->remote_tpgt = tpgt;
+ /*
+ * Register the remote_tpg as an emulated TCM Target Endpoint
+ */
+ ret = core_tpg_register(wwn, &remote_tpg->remote_se_tpg,
+ remote_hba->remote_proto_id);
+ if (ret < 0)
+ return ERR_PTR(-ENOMEM);
+
+ pr_debug("TCM_Remote_ConfigFS: Allocated Emulated %s Target Port %s,t,0x%04lx\n",
+ tcm_remote_dump_proto_id(remote_hba),
+ config_item_name(&wwn->wwn_group.cg_item), tpgt);
+ return &remote_tpg->remote_se_tpg;
+}
+
+static void tcm_remote_drop_tpg(struct se_portal_group *se_tpg)
+{
+ struct se_wwn *wwn = se_tpg->se_tpg_wwn;
+ struct tcm_remote_tpg *remote_tpg = container_of(se_tpg,
+ struct tcm_remote_tpg, remote_se_tpg);
+ struct tcm_remote_hba *remote_hba;
+ unsigned short tpgt;
+
+ remote_hba = remote_tpg->remote_hba;
+ tpgt = remote_tpg->remote_tpgt;
+
+ /*
+ * Deregister the remote_tpg as an emulated TCM Target Endpoint
+ */
+ core_tpg_deregister(se_tpg);
+
+ remote_tpg->remote_hba = NULL;
+ remote_tpg->remote_tpgt = 0;
+
+ pr_debug("TCM_Remote_ConfigFS: Deallocated Emulated %s Target Port %s,t,0x%04x\n",
+ tcm_remote_dump_proto_id(remote_hba),
+ config_item_name(&wwn->wwn_group.cg_item), tpgt);
+}
+
+static struct se_wwn *tcm_remote_make_wwn(
+ struct target_fabric_configfs *tf,
+ struct config_group *group,
+ const char *name)
+{
+ struct tcm_remote_hba *remote_hba;
+ char *ptr;
+ int ret, off = 0;
+
+ remote_hba = kzalloc(sizeof(*remote_hba), GFP_KERNEL);
+ if (!remote_hba)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+ * Determine the emulated Protocol Identifier and Target Port Name
+ * based on the incoming configfs directory name.
+ */
+ ptr = strstr(name, "naa.");
+ if (ptr) {
+ remote_hba->remote_proto_id = SCSI_PROTOCOL_SAS;
+ goto check_len;
+ }
+ ptr = strstr(name, "fc.");
+ if (ptr) {
+ remote_hba->remote_proto_id = SCSI_PROTOCOL_FCP;
+ off = 3; /* Skip over "fc." */
+ goto check_len;
+ }
+ ptr = strstr(name, "0x");
+ if (ptr) {
+ remote_hba->remote_proto_id = SCSI_PROTOCOL_SRP;
+ off = 2; /* Skip over "0x" */
+ goto check_len;
+ }
+ ptr = strstr(name, "iqn.");
+ if (!ptr) {
+ pr_err("Unable to locate prefix for emulated Target Port: %s\n",
+ name);
+ ret = -EINVAL;
+ goto out;
+ }
+ remote_hba->remote_proto_id = SCSI_PROTOCOL_ISCSI;
+
+check_len:
+ if (strlen(name) >= TL_WWN_ADDR_LEN) {
+ pr_err("Emulated NAA %s Address: %s, exceeds max: %d\n",
+ name, tcm_remote_dump_proto_id(remote_hba), TL_WWN_ADDR_LEN);
+ ret = -EINVAL;
+ goto out;
+ }
+ snprintf(&remote_hba->remote_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]);
+
+ pr_debug("TCM_Remote_ConfigFS: Allocated emulated Target %s Address: %s\n",
+ tcm_remote_dump_proto_id(remote_hba), name);
+ return &remote_hba->remote_hba_wwn;
+out:
+ kfree(remote_hba);
+ return ERR_PTR(ret);
+}
+
+static void tcm_remote_drop_wwn(struct se_wwn *wwn)
+{
+ struct tcm_remote_hba *remote_hba = container_of(wwn,
+ struct tcm_remote_hba, remote_hba_wwn);
+
+ pr_debug("TCM_Remote_ConfigFS: Deallocating emulated Target %s Address: %s\n",
+ tcm_remote_dump_proto_id(remote_hba),
+ remote_hba->remote_wwn_address);
+ kfree(remote_hba);
+}
+
+static ssize_t tcm_remote_wwn_version_show(struct config_item *item, char *page)
+{
+ return sprintf(page, "TCM Remote Fabric module %s\n", TCM_REMOTE_VERSION);
+}
+
+CONFIGFS_ATTR_RO(tcm_remote_wwn_, version);
+
+static struct configfs_attribute *tcm_remote_wwn_attrs[] = {
+ &tcm_remote_wwn_attr_version,
+ NULL,
+};
+
+static const struct target_core_fabric_ops remote_ops = {
+ .module = THIS_MODULE,
+ .fabric_name = "remote",
+ .tpg_get_wwn = tcm_remote_get_endpoint_wwn,
+ .tpg_get_tag = tcm_remote_get_tag,
+ .check_stop_free = tcm_remote_dummy_cmd_fn,
+ .release_cmd = tcm_remote_dummy_cmd_void_fn,
+ .write_pending = tcm_remote_dummy_cmd_fn,
+ .queue_data_in = tcm_remote_dummy_cmd_fn,
+ .queue_status = tcm_remote_dummy_cmd_fn,
+ .queue_tm_rsp = tcm_remote_dummy_cmd_void_fn,
+ .aborted_task = tcm_remote_dummy_cmd_void_fn,
+ .fabric_make_wwn = tcm_remote_make_wwn,
+ .fabric_drop_wwn = tcm_remote_drop_wwn,
+ .fabric_make_tpg = tcm_remote_make_tpg,
+ .fabric_drop_tpg = tcm_remote_drop_tpg,
+ .fabric_post_link = tcm_remote_port_link,
+ .fabric_pre_unlink = tcm_remote_port_unlink,
+ .tfc_wwn_attrs = tcm_remote_wwn_attrs,
+};
+
+static int __init tcm_remote_fabric_init(void)
+{
+ return target_register_template(&remote_ops);
+}
+
+static void __exit tcm_remote_fabric_exit(void)
+{
+ target_unregister_template(&remote_ops);
+}
+
+MODULE_DESCRIPTION("TCM virtual remote target");
+MODULE_AUTHOR("Dmitry Bogdanov <d.bogdanov@yadro.com>");
+MODULE_LICENSE("GPL");
+module_init(tcm_remote_fabric_init);
+module_exit(tcm_remote_fabric_exit);
diff --git a/drivers/target/tcm_remote/tcm_remote.h b/drivers/target/tcm_remote/tcm_remote.h
new file mode 100644
index 000000000000..913d1a6eb3a2
--- /dev/null
+++ b/drivers/target/tcm_remote/tcm_remote.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/types.h>
+#include <linux/device.h>
+
+#define TCM_REMOTE_VERSION "v0.1"
+#define TL_WWN_ADDR_LEN 256
+#define TL_TPGS_PER_HBA 32
+
+struct tcm_remote_tpg {
+ unsigned short remote_tpgt;
+ struct se_portal_group remote_se_tpg;
+ struct tcm_remote_hba *remote_hba;
+};
+
+struct tcm_remote_hba {
+ u8 remote_proto_id;
+ unsigned char remote_wwn_address[TL_WWN_ADDR_LEN];
+ struct tcm_remote_tpg remote_hba_tpgs[TL_TPGS_PER_HBA];
+ struct se_wwn remote_hba_wwn;
+};