author		Don Brace <don.brace@microchip.com>	2021-03-11 14:15:56 -0600
committer	Martin K. Petersen <martin.petersen@oracle.com>	2021-04-05 23:02:30 -0400
commit		c7ffedb3a774a835450a518566639254534e72c4 (patch)
tree		0da64b6f5d7347725eca9058787159ae01fb286c /drivers/scsi/smartpqi
parent		583891c9e509256a2b2902607c2e7a7c36beb0d3 (diff)
scsi: smartpqi: Add stream detection
Enhance performance by adding sequential stream detection for RAID5/RAID6 write requests and reducing stripe lock contention through full-stripe write operations.

There is one common stripe lock for each RAID volume that can be set by either the RAID engine or the AIO engine. The AIO path issues I/O requests well below the stripe size, resulting in many Read-Modify-Write operations. Sending these requests to the RAID engine instead allows them to be coalesced into full-stripe operations, reducing the number of Read-Modify-Write operations.

Link: https://lore.kernel.org/r/161549375693.25025.2962141451773219796.stgit@brunhilda
Reviewed-by: Scott Benesh <scott.benesh@microchip.com>
Reviewed-by: Mike McGowen <mike.mcgowen@microchip.com>
Reviewed-by: Scott Teel <scott.teel@microchip.com>
Reviewed-by: Kevin Barnett <kevin.barnett@microchip.com>
Signed-off-by: Don Brace <don.brace@microchip.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
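As a rough illustration of the Read-Modify-Write penalty described above (illustration only, not part of the patch; the 4+1 volume geometry and per-strip request sizes are made up), a sub-stripe RAID-5 write costs two reads plus two writes per strip, whereas a coalesced full-stripe write computes parity from the new data alone and needs no reads:

/*
 * Illustration only (not from the patch): rough disk I/O cost of RAID-5
 * read-modify-write versus a coalesced full-stripe write for one stripe's
 * worth of data. The 4+1 geometry is hypothetical.
 */
#include <stdio.h>

int main(void)
{
	unsigned int data_drives = 4;	/* hypothetical 4+1 RAID-5 volume */

	/*
	 * Sub-stripe write: read old data + read old parity +
	 * write new data + write new parity = 4 disk I/Os per strip.
	 */
	unsigned int rmw_ios = 4 * data_drives;		/* one small write per strip */

	/* Full-stripe write: data_drives data writes + one parity write, no reads. */
	unsigned int full_stripe_ios = data_drives + 1;

	printf("read-modify-write: %u disk I/Os for one stripe of data\n", rmw_ios);
	printf("full-stripe write: %u disk I/Os for the same data\n", full_stripe_ios);

	return 0;
}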
Diffstat (limited to 'drivers/scsi/smartpqi')
-rw-r--r--	drivers/scsi/smartpqi/smartpqi.h	9
-rw-r--r--	drivers/scsi/smartpqi/smartpqi_init.c	87
2 files changed, 90 insertions, 6 deletions
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index 6639432f3dab..976bfd8c5192 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -1043,6 +1043,13 @@ struct pqi_scsi_dev_raid_map_data {
#define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0"
+#define NUM_STREAMS_PER_LUN 8
+
+struct pqi_stream_data {
+	u64 next_lba;
+	u32 last_accessed;
+};
+
struct pqi_scsi_dev {
	int devtype;		/* as reported by INQUIRY commmand */
	u8 device_type;		/* as reported by */
@@ -1097,6 +1104,7 @@ struct pqi_scsi_dev {
	struct list_head add_list_entry;
	struct list_head delete_list_entry;
+	struct pqi_stream_data stream_data[NUM_STREAMS_PER_LUN];
	atomic_t scsi_cmds_outstanding;
	atomic_t raid_bypass_cnt;
};
@@ -1296,6 +1304,7 @@ struct pqi_ctrl_info {
	u8 enable_r5_writes : 1;
	u8 enable_r6_writes : 1;
	u8 lv_drive_type_mix_valid : 1;
+	u8 enable_stream_detection : 1;
	u8 ciss_report_log_flags;
	u32 max_transfer_encrypted_sas_sata;
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 143bb7b64095..27bd3d9a3810 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -5688,9 +5688,83 @@ void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
	atomic_dec(&device->scsi_cmds_outstanding);
}
-static int pqi_scsi_queue_command(struct Scsi_Host *shost,
+static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd)
{
+	u32 oldest_jiffies;
+	u8 lru_index;
+	int i;
+	int rc;
+	struct pqi_scsi_dev *device;
+	struct pqi_stream_data *pqi_stream_data;
+	struct pqi_scsi_dev_raid_map_data rmd;
+
+	if (!ctrl_info->enable_stream_detection)
+		return false;
+
+	rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
+	if (rc)
+		return false;
+
+	/* Check writes only. */
+	if (!rmd.is_write)
+		return false;
+
+	device = scmd->device->hostdata;
+
+	/* Check for RAID 5/6 streams. */
+	if (device->raid_level != SA_RAID_5 && device->raid_level != SA_RAID_6)
+		return false;
+
+	/*
+	 * If controller does not support AIO RAID{5,6} writes, need to send
+	 * requests down non-AIO path.
+	 */
+	if ((device->raid_level == SA_RAID_5 && !ctrl_info->enable_r5_writes) ||
+		(device->raid_level == SA_RAID_6 && !ctrl_info->enable_r6_writes))
+		return true;
+
+	lru_index = 0;
+	oldest_jiffies = INT_MAX;
+	for (i = 0; i < NUM_STREAMS_PER_LUN; i++) {
+		pqi_stream_data = &device->stream_data[i];
+		/*
+		 * Check for adjacent request or request is within
+		 * the previous request.
+		 */
+		if ((pqi_stream_data->next_lba &&
+			rmd.first_block >= pqi_stream_data->next_lba) &&
+			rmd.first_block <= pqi_stream_data->next_lba +
+				rmd.block_cnt) {
+			pqi_stream_data->next_lba = rmd.first_block +
+				rmd.block_cnt;
+			pqi_stream_data->last_accessed = jiffies;
+			return true;
+		}
+
+		/* unused entry */
+		if (pqi_stream_data->last_accessed == 0) {
+			lru_index = i;
+			break;
+		}
+
+		/* Find entry with oldest last accessed time. */
+		if (pqi_stream_data->last_accessed <= oldest_jiffies) {
+			oldest_jiffies = pqi_stream_data->last_accessed;
+			lru_index = i;
+		}
+	}
+
+	/* Set LRU entry. */
+	pqi_stream_data = &device->stream_data[lru_index];
+	pqi_stream_data->last_accessed = jiffies;
+	pqi_stream_data->next_lba = rmd.first_block + rmd.block_cnt;
+
+	return false;
+}
+
+static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
+{
	int rc;
	struct pqi_ctrl_info *ctrl_info;
	struct pqi_scsi_dev *device;
@@ -5736,11 +5810,12 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost,
		raid_bypassed = false;
		if (device->raid_bypass_enabled &&
			!blk_rq_is_passthrough(scmd->request)) {
-			rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
-				scmd, queue_group);
-			if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) {
-				raid_bypassed = true;
-				atomic_inc(&device->raid_bypass_cnt);
+			if (!pqi_is_parity_write_stream(ctrl_info, scmd)) {
+				rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
+				if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) {
+					raid_bypassed = true;
+					atomic_inc(&device->raid_bypass_cnt);
+				}
			}
		}
		if (!raid_bypassed)
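
For experimenting with the heuristic outside the kernel, below is a minimal user-space model of the LRU stream table that pqi_is_parity_write_stream() walks above. The names (stream_slot, is_sequential_write), the 256-block request size, and the plain counter standing in for jiffies are simplified assumptions, not driver code:

#include <limits.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_STREAMS	8

struct stream_slot {
	uint64_t next_lba;	/* LBA expected to follow the previous write */
	uint32_t last_accessed;	/* 0 means the slot is unused */
};

/* Returns true if this write continues one of the tracked streams. */
static bool is_sequential_write(struct stream_slot *streams,
	uint64_t first_lba, uint32_t block_cnt, uint32_t now)
{
	uint32_t oldest = UINT32_MAX;
	unsigned int lru = 0;
	unsigned int i;

	for (i = 0; i < NUM_STREAMS; i++) {
		struct stream_slot *s = &streams[i];

		/* Adjacent to, or contained within, the previous request? */
		if (s->next_lba && first_lba >= s->next_lba &&
			first_lba <= s->next_lba + block_cnt) {
			s->next_lba = first_lba + block_cnt;
			s->last_accessed = now;
			return true;
		}

		/* Unused slot: reuse it right away. */
		if (s->last_accessed == 0) {
			lru = i;
			break;
		}

		/* Otherwise remember the least recently used slot. */
		if (s->last_accessed <= oldest) {
			oldest = s->last_accessed;
			lru = i;
		}
	}

	/* Unknown stream: start tracking it in the LRU slot. */
	streams[lru].next_lba = first_lba + block_cnt;
	streams[lru].last_accessed = now;
	return false;
}

int main(void)
{
	struct stream_slot streams[NUM_STREAMS] = { 0 };
	uint32_t now = 1;	/* timestamps must be nonzero in this model */
	uint64_t lba;

	/* The first 256-block write seeds a stream; the rest match it. */
	for (lba = 0; lba < 4 * 256; lba += 256)
		printf("write at LBA %llu sequential: %d\n",
			(unsigned long long)lba,
			is_sequential_write(streams, lba, 256, now++));

	return 0;
}

As in the driver, a write that neither extends nor falls inside a tracked stream evicts the least recently used slot, so up to eight concurrent sequential writers per LUN can be followed before older streams are forgotten.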