author     Adrian Hunter <adrian.hunter@intel.com>          2025-07-23 19:58:54 +0300
committer  Martin K. Petersen <martin.petersen@oracle.com>  2025-07-24 22:37:30 -0400
commit     b4c0cab4eb8d79cf426ac7bca20864881c8b9b8b (patch)
tree       768b30bf4ee80ac45cd6a30ca3ff7ba1b7d82c91
parent     c5977c4c0731b60c8c0b3f7cc4b0082a688a07f8 (diff)
scsi: ufs: core: Set and clear UIC Completion interrupt as needed
Currently the UIC Completion interrupt is left enabled except when issuing link hibernate commands, in which case the interrupt is disabled and then re-enabled. Instead, set and clear the interrupt enable bit as needed. That is slightly simpler and less error prone, but it also avoids side effects of accessing the interrupt enable register after entering link hibernation. Specifically, for some host controllers, like Intel MTL, doing so disrupts the link state transition.

Note also that the interrupt enable register is no longer read back after it is updated. No other code does that, so it is assumed to be unnecessary, if it ever was.

Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Link: https://lore.kernel.org/r/20250723165856.145750-7-adrian.hunter@intel.com
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
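In short, after this change the blocking UIC command path enables the UIC Completion interrupt right before issuing the command, while the asynchronous power-control path (hibernate enter/exit and power mode changes) only clears it and relies on hba->uic_async_done for completion. Below is a simplified sketch of the resulting ufshcd_send_uic_cmd() flow, condensed from the hunks that follow; it is not the verbatim kernel code (the quirk check, ufshcd_hold()/ufshcd_release() and some error handling are omitted).

/*
 * Simplified sketch condensed from the patch below; not verbatim kernel code.
 */
int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	unsigned long flags;
	int ret;

	mutex_lock(&hba->uic_cmd_mutex);
	ufshcd_add_delay_before_dme_cmd(hba);

	/* Blocking path: completion is signalled by the interrupt, so enable it. */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
	if (!ret)
		ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);

	mutex_unlock(&hba->uic_cmd_mutex);
	return ret;
}

In ufshcd_uic_pwr_ctrl(), by contrast, the interrupt is now simply cleared with ufshcd_disable_intr(hba, UIC_COMMAND_COMPL) before the command is issued and is not re-enabled afterwards, so REG_INTERRUPT_ENABLE is never touched once the link has entered hibernation.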
-rw-r--r--	drivers/ufs/core/ufshcd.c	18
1 file changed, 6 insertions(+), 12 deletions(-)
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 6beb169016fd..54082af7f65e 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -2622,6 +2622,7 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
  */
 int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
 {
+	unsigned long flags;
 	int ret;
 
 	if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
@@ -2631,6 +2632,10 @@ int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
 	mutex_lock(&hba->uic_cmd_mutex);
 	ufshcd_add_delay_before_dme_cmd(hba);
 
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
 	ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
 	if (!ret)
 		ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
@@ -4275,7 +4280,6 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
 	unsigned long flags;
 	u8 status;
 	int ret;
-	bool reenable_intr = false;
 
 	mutex_lock(&hba->uic_cmd_mutex);
 	ufshcd_add_delay_before_dme_cmd(hba);
@@ -4286,15 +4290,7 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
 		goto out_unlock;
 	}
 	hba->uic_async_done = &uic_async_done;
-	if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
-		ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
-		/*
-		 * Make sure UIC command completion interrupt is disabled before
-		 * issuing UIC command.
-		 */
-		ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
-		reenable_intr = true;
-	}
+	ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 	ret = __ufshcd_send_uic_cmd(hba, cmd);
 	if (ret) {
@@ -4338,8 +4334,6 @@ out:
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	hba->active_uic_cmd = NULL;
 	hba->uic_async_done = NULL;
-	if (reenable_intr)
-		ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
 	if (ret) {
 		ufshcd_set_link_broken(hba);
 		ufshcd_schedule_eh_work(hba);
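For reference, ufshcd_enable_intr() and ufshcd_disable_intr() are the existing ufshcd.c helpers that set or clear bits in REG_INTERRUPT_ENABLE; the callers above hold the host lock around them. A minimal sketch of what such helpers look like follows, assuming the usual read-modify-write via the ufshcd_readl()/ufshcd_writel() accessors (the exact in-tree implementation may differ). Note the plain write with no posting read, which matches the commit message's point that the register is no longer read back after being updated.

/*
 * Minimal sketch, assuming a read-modify-write of REG_INTERRUPT_ENABLE;
 * not necessarily the exact in-tree implementation.
 */
static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

	set |= intrs;
	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}

static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

	set &= ~intrs;
	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}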