Diffstat (limited to 'drivers/ufs/host/ufs-qcom.c')
-rw-r--r--  drivers/ufs/host/ufs-qcom.c | 945
1 file changed, 675 insertions(+), 270 deletions(-)
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c index 3b592492e152..8d119b3223cb 100644 --- a/drivers/ufs/host/ufs-qcom.c +++ b/drivers/ufs/host/ufs-qcom.c @@ -5,6 +5,7 @@ #include <linux/acpi.h> #include <linux/clk.h> +#include <linux/cleanup.h> #include <linux/delay.h> #include <linux/devfreq.h> #include <linux/gpio/consumer.h> @@ -15,6 +16,8 @@ #include <linux/platform_device.h> #include <linux/reset-controller.h> #include <linux/time.h> +#include <linux/unaligned.h> +#include <linux/units.h> #include <soc/qcom/ice.h> @@ -31,6 +34,13 @@ ((((c) >> 16) & MCQ_QCFGPTR_MASK) * MCQ_QCFGPTR_UNIT) #define MCQ_QCFG_SIZE 0x40 +/* De-emphasis for gear-5 */ +#define DEEMPHASIS_3_5_dB 0x04 +#define NO_DEEMPHASIS 0x0 + +#define UFS_ICE_SYNC_RST_SEL BIT(3) +#define UFS_ICE_SYNC_RST_SW BIT(4) + enum { TSTBUS_UAWM, TSTBUS_UARM, @@ -96,8 +106,28 @@ static const struct __ufs_qcom_bw_table { [MODE_MAX][0][0] = { 7643136, 819200 }, }; +static const struct { + int nminor; + char *prefix; +} testbus_info[TSTBUS_MAX] = { + [TSTBUS_UAWM] = {32, "TSTBUS_UAWM"}, + [TSTBUS_UARM] = {32, "TSTBUS_UARM"}, + [TSTBUS_TXUC] = {32, "TSTBUS_TXUC"}, + [TSTBUS_RXUC] = {32, "TSTBUS_RXUC"}, + [TSTBUS_DFC] = {32, "TSTBUS_DFC"}, + [TSTBUS_TRLUT] = {32, "TSTBUS_TRLUT"}, + [TSTBUS_TMRLUT] = {32, "TSTBUS_TMRLUT"}, + [TSTBUS_OCSC] = {32, "TSTBUS_OCSC"}, + [TSTBUS_UTP_HCI] = {32, "TSTBUS_UTP_HCI"}, + [TSTBUS_COMBINED] = {32, "TSTBUS_COMBINED"}, + [TSTBUS_WRAPPER] = {32, "TSTBUS_WRAPPER"}, + [TSTBUS_UNIPRO] = {256, "TSTBUS_UNIPRO"}, +}; + static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host); -static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up); +static unsigned long ufs_qcom_opp_freq_to_clk_freq(struct ufs_hba *hba, + unsigned long freq, char *name); +static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up, unsigned long freq); static struct ufs_qcom_host *rcdev_to_ufs_host(struct reset_controller_dev *rcd) { @@ -105,6 +135,26 @@ static struct ufs_qcom_host *rcdev_to_ufs_host(struct reset_controller_dev *rcd) } #ifdef CONFIG_SCSI_UFS_CRYPTO +/** + * ufs_qcom_config_ice_allocator() - ICE core allocator configuration + * + * @host: pointer to qcom specific variant structure. 
+ */ +static void ufs_qcom_config_ice_allocator(struct ufs_qcom_host *host) +{ + struct ufs_hba *hba = host->hba; + static const uint8_t val[4] = { NUM_RX_R1W0, NUM_TX_R0W1, NUM_RX_R1W1, NUM_TX_R1W1 }; + u32 config; + + if (!(host->caps & UFS_QCOM_CAP_ICE_CONFIG) || + !(host->hba->caps & UFSHCD_CAP_CRYPTO)) + return; + + config = get_unaligned_le32(val); + + ufshcd_writel(hba, ICE_ALLOCATOR_TYPE, REG_UFS_MEM_ICE_CONFIG); + ufshcd_writel(hba, config, REG_UFS_MEM_ICE_NUM_CORE); +} static inline void ufs_qcom_ice_enable(struct ufs_qcom_host *host) { @@ -112,13 +162,20 @@ static inline void ufs_qcom_ice_enable(struct ufs_qcom_host *host) qcom_ice_enable(host->ice); } +static const struct blk_crypto_ll_ops ufs_qcom_crypto_ops; /* forward decl */ + static int ufs_qcom_ice_init(struct ufs_qcom_host *host) { struct ufs_hba *hba = host->hba; + struct blk_crypto_profile *profile = &hba->crypto_profile; struct device *dev = hba->dev; struct qcom_ice *ice; + union ufs_crypto_capabilities caps; + union ufs_crypto_cap_entry cap; + int err; + int i; - ice = of_qcom_ice_get(dev); + ice = devm_of_qcom_ice_get(dev); if (ice == ERR_PTR(-EOPNOTSUPP)) { dev_warn(dev, "Disabling inline encryption support\n"); ice = NULL; @@ -128,8 +185,39 @@ static int ufs_qcom_ice_init(struct ufs_qcom_host *host) return PTR_ERR_OR_ZERO(ice); host->ice = ice; - hba->caps |= UFSHCD_CAP_CRYPTO; + /* Initialize the blk_crypto_profile */ + + caps.reg_val = cpu_to_le32(ufshcd_readl(hba, REG_UFS_CCAP)); + + /* The number of keyslots supported is (CFGC+1) */ + err = devm_blk_crypto_profile_init(dev, profile, caps.config_count + 1); + if (err) + return err; + + profile->ll_ops = ufs_qcom_crypto_ops; + profile->max_dun_bytes_supported = 8; + profile->key_types_supported = qcom_ice_get_supported_key_type(ice); + profile->dev = dev; + + /* + * Currently this driver only supports AES-256-XTS. All known versions + * of ICE support it, but to be safe make sure it is really declared in + * the crypto capability registers. The crypto capability registers + * also give the supported data unit size(s). + */ + for (i = 0; i < caps.num_crypto_cap; i++) { + cap.reg_val = cpu_to_le32(ufshcd_readl(hba, + REG_UFS_CRYPTOCAP + + i * sizeof(__le32))); + if (cap.algorithm_id == UFS_CRYPTO_ALG_AES_XTS && + cap.key_size == UFS_CRYPTO_KEY_SIZE_256) + profile->modes_supported[BLK_ENCRYPTION_MODE_AES_256_XTS] |= + cap.sdus_mask * 512; + } + + hba->caps |= UFSHCD_CAP_CRYPTO; + hba->quirks |= UFSHCD_QUIRK_CUSTOM_CRYPTO_PROFILE; return 0; } @@ -149,34 +237,84 @@ static inline int ufs_qcom_ice_suspend(struct ufs_qcom_host *host) return 0; } -static int ufs_qcom_ice_program_key(struct ufs_hba *hba, - const union ufs_crypto_cfg_entry *cfg, - int slot) +static int ufs_qcom_ice_keyslot_program(struct blk_crypto_profile *profile, + const struct blk_crypto_key *key, + unsigned int slot) { + struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile); struct ufs_qcom_host *host = ufshcd_get_variant(hba); - union ufs_crypto_cap_entry cap; - bool config_enable = - cfg->config_enable & UFS_CRYPTO_CONFIGURATION_ENABLE; + int err; - /* Only AES-256-XTS has been tested so far. 
*/ - cap = hba->crypto_cap_array[cfg->crypto_cap_idx]; - if (cap.algorithm_id != UFS_CRYPTO_ALG_AES_XTS || - cap.key_size != UFS_CRYPTO_KEY_SIZE_256) - return -EOPNOTSUPP; + ufshcd_hold(hba); + err = qcom_ice_program_key(host->ice, slot, key); + ufshcd_release(hba); + return err; +} - if (config_enable) - return qcom_ice_program_key(host->ice, - QCOM_ICE_CRYPTO_ALG_AES_XTS, - QCOM_ICE_CRYPTO_KEY_SIZE_256, - cfg->crypto_key, - cfg->data_unit_size, slot); - else - return qcom_ice_evict_key(host->ice, slot); +static int ufs_qcom_ice_keyslot_evict(struct blk_crypto_profile *profile, + const struct blk_crypto_key *key, + unsigned int slot) +{ + struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile); + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + int err; + + ufshcd_hold(hba); + err = qcom_ice_evict_key(host->ice, slot); + ufshcd_release(hba); + return err; } -#else +static int ufs_qcom_ice_derive_sw_secret(struct blk_crypto_profile *profile, + const u8 *eph_key, size_t eph_key_size, + u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE]) +{ + struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile); + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + + return qcom_ice_derive_sw_secret(host->ice, eph_key, eph_key_size, + sw_secret); +} + +static int ufs_qcom_ice_import_key(struct blk_crypto_profile *profile, + const u8 *raw_key, size_t raw_key_size, + u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]) +{ + struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile); + struct ufs_qcom_host *host = ufshcd_get_variant(hba); -#define ufs_qcom_ice_program_key NULL + return qcom_ice_import_key(host->ice, raw_key, raw_key_size, lt_key); +} + +static int ufs_qcom_ice_generate_key(struct blk_crypto_profile *profile, + u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]) +{ + struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile); + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + + return qcom_ice_generate_key(host->ice, lt_key); +} + +static int ufs_qcom_ice_prepare_key(struct blk_crypto_profile *profile, + const u8 *lt_key, size_t lt_key_size, + u8 eph_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]) +{ + struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile); + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + + return qcom_ice_prepare_key(host->ice, lt_key, lt_key_size, eph_key); +} + +static const struct blk_crypto_ll_ops ufs_qcom_crypto_ops = { + .keyslot_program = ufs_qcom_ice_keyslot_program, + .keyslot_evict = ufs_qcom_ice_keyslot_evict, + .derive_sw_secret = ufs_qcom_ice_derive_sw_secret, + .import_key = ufs_qcom_ice_import_key, + .generate_key = ufs_qcom_ice_generate_key, + .prepare_key = ufs_qcom_ice_prepare_key, +}; + +#else static inline void ufs_qcom_ice_enable(struct ufs_qcom_host *host) { @@ -196,6 +334,11 @@ static inline int ufs_qcom_ice_suspend(struct ufs_qcom_host *host) { return 0; } + +static void ufs_qcom_config_ice_allocator(struct ufs_qcom_host *host) +{ +} + #endif static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host) @@ -354,12 +497,8 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba) * If the HS-G5 PHY gear is used, update host_params->hs_rate to Rate-A, * so that the subsequent power mode change shall stick to Rate-A. */ - if (host->hw_ver.major == 0x5) { - if (host->phy_gear == UFS_HS_G5) - host_params->hs_rate = PA_HS_MODE_A; - else - host_params->hs_rate = PA_HS_MODE_B; - } + if (host->hw_ver.major == 0x5 && host->phy_gear == UFS_HS_G5) + host_params->hs_rate = PA_HS_MODE_A; mode = host_params->hs_rate == PA_HS_MODE_B ? 
PHY_MODE_UFS_HS_B : PHY_MODE_UFS_HS_A; @@ -368,6 +507,10 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba) if (ret) return ret; + if (phy->power_count) + phy_power_off(phy); + + /* phy initialization - calibrate the phy */ ret = phy_init(phy); if (ret) { @@ -388,6 +531,12 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba) goto out_disable_phy; } + ret = phy_calibrate(phy); + if (ret) { + dev_err(hba->dev, "Failed to calibrate PHY: %d\n", ret); + goto out_disable_phy; + } + ufs_qcom_select_unipro_mode(host); return 0; @@ -408,11 +557,32 @@ out_disable_phy: */ static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba) { + int err; + + /* Enable UTP internal clock gating */ ufshcd_rmwl(hba, REG_UFS_CFG2_CGC_EN_ALL, REG_UFS_CFG2_CGC_EN_ALL, REG_UFS_CFG2); /* Ensure that HW clock gating is enabled before next operations */ ufshcd_readl(hba, REG_UFS_CFG2); + + /* Enable Unipro internal clock gating */ + err = ufshcd_dme_rmw(hba, DL_VS_CLK_CFG_MASK, + DL_VS_CLK_CFG_MASK, DL_VS_CLK_CFG); + if (err) + goto out; + + err = ufshcd_dme_rmw(hba, PA_VS_CLK_CFG_REG_MASK, + PA_VS_CLK_CFG_REG_MASK, PA_VS_CLK_CFG_REG); + if (err) + goto out; + + err = ufshcd_dme_rmw(hba, DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN, + DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN, + DME_VS_CORE_CLK_CTRL); +out: + if (err) + dev_err(hba->dev, "hw clk gating enabled failed\n"); } static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba, @@ -439,6 +609,7 @@ static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba, err = ufs_qcom_check_hibern8(hba); ufs_qcom_enable_hw_clk_gating(hba); ufs_qcom_ice_enable(host); + ufs_qcom_config_ice_allocator(host); break; default: dev_err(hba->dev, "%s: invalid status %d\n", __func__, status); @@ -452,20 +623,15 @@ static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba, * ufs_qcom_cfg_timers - Configure ufs qcom cfg timers * * @hba: host controller instance - * @gear: Current operating gear - * @hs: current power mode - * @rate: current operating rate (A or B) - * @update_link_startup_timer: indicate if link_start ongoing * @is_pre_scale_up: flag to check if pre scale up condition. + * @freq: target opp freq * Return: zero for success and non-zero in case of a failure. 
*/ -static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear, - u32 hs, u32 rate, bool update_link_startup_timer, - bool is_pre_scale_up) +static int ufs_qcom_cfg_timers(struct ufs_hba *hba, bool is_pre_scale_up, unsigned long freq) { struct ufs_qcom_host *host = ufshcd_get_variant(hba); struct ufs_clk_info *clki; - unsigned long core_clk_rate = 0; + unsigned long clk_freq = 0; u32 core_clk_cycles_per_us; /* @@ -477,27 +643,34 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear, if (host->hw_ver.major < 4 && !ufshcd_is_intr_aggr_allowed(hba)) return 0; - if (gear == 0) { - dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear); - return -EINVAL; + if (hba->use_pm_opp && freq != ULONG_MAX) { + clk_freq = ufs_qcom_opp_freq_to_clk_freq(hba, freq, "core_clk"); + if (clk_freq) + goto cfg_timers; } list_for_each_entry(clki, &hba->clk_list_head, list) { if (!strcmp(clki->name, "core_clk")) { + if (freq == ULONG_MAX) { + clk_freq = clki->max_freq; + break; + } + if (is_pre_scale_up) - core_clk_rate = clki->max_freq; + clk_freq = clki->max_freq; else - core_clk_rate = clk_get_rate(clki->clk); + clk_freq = clk_get_rate(clki->clk); break; } } +cfg_timers: /* If frequency is smaller than 1MHz, set to 1MHz */ - if (core_clk_rate < DEFAULT_CLK_RATE_HZ) - core_clk_rate = DEFAULT_CLK_RATE_HZ; + if (clk_freq < DEFAULT_CLK_RATE_HZ) + clk_freq = DEFAULT_CLK_RATE_HZ; - core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC; + core_clk_cycles_per_us = clk_freq / USEC_PER_SEC; if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) { ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US); /* @@ -517,14 +690,13 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba, switch (status) { case PRE_CHANGE: - if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE, - 0, true, false)) { + if (ufs_qcom_cfg_timers(hba, false, ULONG_MAX)) { dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n", __func__); return -EINVAL; } - err = ufs_qcom_set_core_clk_ctrl(hba, true); + err = ufs_qcom_set_core_clk_ctrl(hba, true, ULONG_MAX); if (err) dev_err(hba->dev, "cfg core clk ctrl failed\n"); /* @@ -559,25 +731,29 @@ static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op, enum ufs_notify_change_status status) { struct ufs_qcom_host *host = ufshcd_get_variant(hba); - struct phy *phy = host->generic_phy; if (status == PRE_CHANGE) return 0; - if (ufs_qcom_is_link_off(hba)) { - /* - * Disable the tx/rx lane symbol clocks before PHY is - * powered down as the PLL source should be disabled - * after downstream clocks are disabled. - */ + if (!ufs_qcom_is_link_active(hba)) ufs_qcom_disable_lane_clks(host); - phy_power_off(phy); - /* reset the connected UFS device during power down */ - ufs_qcom_device_reset_ctrl(hba, true); - } else if (!ufs_qcom_is_link_active(hba)) { - ufs_qcom_disable_lane_clks(host); + /* reset the connected UFS device during power down */ + if (ufs_qcom_is_link_off(hba) && host->device_reset) { + ufs_qcom_device_reset_ctrl(hba, true); + /* + * After sending the SSU command, asserting the rst_n + * line causes the device firmware to wake up and + * execute its reset routine. + * + * During this process, the device may draw current + * beyond the permissible limit for low-power mode (LPM). + * A 10ms delay, based on experimental observations, + * allows the UFS device to complete its hardware reset + * before transitioning the power rail to LPM. 
+ */ + usleep_range(10000, 11000); } return ufs_qcom_ice_suspend(host); @@ -586,25 +762,28 @@ static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op, static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) { struct ufs_qcom_host *host = ufshcd_get_variant(hba); - struct phy *phy = host->generic_phy; int err; + u32 reg_val; - if (ufs_qcom_is_link_off(hba)) { - err = phy_power_on(phy); - if (err) { - dev_err(hba->dev, "%s: failed PHY power on: %d\n", - __func__, err); - return err; - } - - err = ufs_qcom_enable_lane_clks(host); - if (err) - return err; + err = ufs_qcom_enable_lane_clks(host); + if (err) + return err; - } else if (!ufs_qcom_is_link_active(hba)) { - err = ufs_qcom_enable_lane_clks(host); - if (err) - return err; + if ((!ufs_qcom_is_link_active(hba)) && + host->hw_ver.major == 5 && + host->hw_ver.minor == 0 && + host->hw_ver.step == 0) { + ufshcd_writel(hba, UFS_ICE_SYNC_RST_SEL | UFS_ICE_SYNC_RST_SW, UFS_MEM_ICE_CFG); + reg_val = ufshcd_readl(hba, UFS_MEM_ICE_CFG); + reg_val &= ~(UFS_ICE_SYNC_RST_SEL | UFS_ICE_SYNC_RST_SW); + /* + * HW documentation doesn't recommend any delay between the + * reset set and clear. But we are enforcing an arbitrary delay + * to give flops enough time to settle in. + */ + usleep_range(50, 100); + ufshcd_writel(hba, reg_val, UFS_MEM_ICE_CFG); + ufshcd_readl(hba, UFS_MEM_ICE_CFG); } return ufs_qcom_ice_resume(host); @@ -721,9 +900,26 @@ static int ufs_qcom_icc_update_bw(struct ufs_qcom_host *host) return ufs_qcom_icc_set_bw(host, bw_table.mem_bw, bw_table.cfg_bw); } +static void ufs_qcom_set_tx_hs_equalizer(struct ufs_hba *hba, u32 gear, u32 tx_lanes) +{ + u32 equalizer_val; + int ret, i; + + /* Determine the equalizer value based on the gear */ + equalizer_val = (gear == 5) ? DEEMPHASIS_3_5_dB : NO_DEEMPHASIS; + + for (i = 0; i < tx_lanes; i++) { + ret = ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_HS_EQUALIZER, i), + equalizer_val); + if (ret) + dev_err(hba->dev, "%s: failed equalizer lane %d\n", + __func__, i); + } +} + static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba, enum ufs_notify_change_status status, - struct ufs_pa_layer_attr *dev_max_params, + const struct ufs_pa_layer_attr *dev_max_params, struct ufs_pa_layer_attr *dev_req_params) { struct ufs_qcom_host *host = ufshcd_get_variant(hba); @@ -772,21 +968,13 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba, dev_req_params->gear_tx, PA_INITIAL_ADAPT); } + + if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TX_DEEMPHASIS_TUNING) + ufs_qcom_set_tx_hs_equalizer(hba, + dev_req_params->gear_tx, dev_req_params->lane_tx); + break; case POST_CHANGE: - if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx, - dev_req_params->pwr_rx, - dev_req_params->hs_rate, false, false)) { - dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n", - __func__); - /* - * we return error code at the end of the routine, - * but continue to configure UFS_PHY_TX_LANE_ENABLE - * and bus voting as usual - */ - ret = -EINVAL; - } - /* cache the power mode parameters to use internally */ memcpy(&host->dev_req_params, dev_req_params, sizeof(*dev_req_params)); @@ -821,6 +1009,16 @@ static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba) (pa_vs_config_reg1 | (1 << 12))); } +static void ufs_qcom_override_pa_tx_hsg1_sync_len(struct ufs_hba *hba) +{ + int err; + + err = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TX_HSG1_SYNC_LENGTH), + PA_TX_HSG1_SYNC_LENGTH_VAL); + if (err) + dev_err(hba->dev, "Failed (%d) set PA_TX_HSG1_SYNC_LENGTH\n", err); +} + static int 
ufs_qcom_apply_dev_quirks(struct ufs_hba *hba) { int err = 0; @@ -828,6 +1026,9 @@ static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba) if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME) err = ufs_qcom_quirk_host_pa_saveconfigtime(hba); + if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TX_HSG1_SYNC_LENGTH) + ufs_qcom_override_pa_tx_hsg1_sync_len(hba); + return err; } @@ -836,12 +1037,13 @@ static struct ufs_dev_quirk ufs_qcom_dev_fixups[] = { { .wmanufacturerid = UFS_VENDOR_SKHYNIX, .model = UFS_ANY_MODEL, .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM }, - { .wmanufacturerid = UFS_VENDOR_TOSHIBA, - .model = UFS_ANY_MODEL, - .quirk = UFS_DEVICE_QUIRK_DELAY_AFTER_LPM }, { .wmanufacturerid = UFS_VENDOR_WDC, .model = UFS_ANY_MODEL, .quirk = UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE }, + { .wmanufacturerid = UFS_VENDOR_SAMSUNG, + .model = UFS_ANY_MODEL, + .quirk = UFS_DEVICE_QUIRK_PA_TX_HSG1_SYNC_LENGTH | + UFS_DEVICE_QUIRK_PA_TX_DEEMPHASIS_TUNING }, {} }; @@ -866,6 +1068,7 @@ static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba) */ static void ufs_qcom_advertise_quirks(struct ufs_hba *hba) { + const struct ufs_qcom_drvdata *drvdata = of_device_get_match_data(hba->dev); struct ufs_qcom_host *host = ufshcd_get_variant(hba); if (host->hw_ver.major == 0x2) @@ -874,9 +1077,8 @@ static void ufs_qcom_advertise_quirks(struct ufs_hba *hba) if (host->hw_ver.major > 0x3) hba->quirks |= UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH; - if (of_device_is_compatible(hba->dev->of_node, "qcom,sm8550-ufshc") || - of_device_is_compatible(hba->dev->of_node, "qcom,sm8650-ufshc")) - hba->quirks |= UFSHCD_QUIRK_BROKEN_LSDBS_CAP; + if (drvdata && drvdata->quirks) + hba->quirks |= drvdata->quirks; } static void ufs_qcom_set_phy_gear(struct ufs_qcom_host *host) @@ -921,6 +1123,18 @@ static void ufs_qcom_set_phy_gear(struct ufs_qcom_host *host) } } +static void ufs_qcom_parse_gear_limits(struct ufs_hba *hba) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + struct ufs_host_params *host_params = &host->host_params; + u32 hs_gear_old = host_params->hs_tx_gear; + + ufshcd_parse_gear_limits(hba, host_params); + if (host_params->hs_tx_gear != hs_gear_old) { + host->phy_gear = host_params->hs_tx_gear; + } +} + static void ufs_qcom_set_host_params(struct ufs_hba *hba) { struct ufs_qcom_host *host = ufshcd_get_variant(hba); @@ -932,6 +1146,14 @@ static void ufs_qcom_set_host_params(struct ufs_hba *hba) host_params->hs_tx_gear = host_params->hs_rx_gear = ufs_qcom_get_hs_gear(hba); } +static void ufs_qcom_set_host_caps(struct ufs_hba *hba) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + + if (host->hw_ver.major >= 0x5) + host->caps |= UFS_QCOM_CAP_ICE_CONFIG; +} + static void ufs_qcom_set_caps(struct ufs_hba *hba) { hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING; @@ -940,6 +1162,8 @@ static void ufs_qcom_set_caps(struct ufs_hba *hba) hba->caps |= UFSHCD_CAP_WB_EN; hba->caps |= UFSHCD_CAP_AGGR_POWER_COLLAPSE; hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND; + + ufs_qcom_set_host_caps(hba); } /** @@ -948,12 +1172,20 @@ static void ufs_qcom_set_caps(struct ufs_hba *hba) * @on: If true, enable clocks else disable them. * @status: PRE_CHANGE or POST_CHANGE notify * + * There are certain clocks which comes from the PHY so it needs + * to be managed together along with controller clocks which also + * provides a better power saving. Hence keep phy_power_off/on calls + * in ufs_qcom_setup_clocks, so that PHY's regulators & clks can be + * turned on/off along with UFS's clocks. 
+ * * Return: 0 on success, non-zero on failure. */ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on, enum ufs_notify_change_status status) { struct ufs_qcom_host *host = ufshcd_get_variant(hba); + struct phy *phy; + int err; /* * In case ufs_qcom_init() is not yet done, simply ignore. @@ -963,23 +1195,47 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on, if (!host) return 0; + phy = host->generic_phy; + switch (status) { case PRE_CHANGE: if (on) { ufs_qcom_icc_update_bw(host); + if (ufs_qcom_is_link_hibern8(hba)) { + err = ufs_qcom_enable_lane_clks(host); + if (err) { + dev_err(hba->dev, "enable lane clks failed, ret=%d\n", err); + return err; + } + } } else { if (!ufs_qcom_is_link_active(hba)) { /* disable device ref_clk */ ufs_qcom_dev_ref_clk_ctrl(host, false); } + + err = phy_power_off(phy); + if (err) { + dev_err(hba->dev, "phy power off failed, ret=%d\n", err); + return err; + } } break; case POST_CHANGE: if (on) { + err = phy_power_on(phy); + if (err) { + dev_err(hba->dev, "phy power on failed, ret = %d\n", err); + return err; + } + /* enable the device ref clock for HS mode*/ if (ufshcd_is_hs_mode(&hba->pwr_info)) ufs_qcom_dev_ref_clk_ctrl(host, true); } else { + if (ufs_qcom_is_link_hibern8(hba)) + ufs_qcom_disable_lane_clks(host); + ufs_qcom_icc_set_bw(host, ufs_qcom_bw_table[MODE_MIN][0][0].mem_bw, ufs_qcom_bw_table[MODE_MIN][0][0].cfg_bw); } @@ -1064,6 +1320,7 @@ static int ufs_qcom_init(struct ufs_hba *hba) struct device *dev = hba->dev; struct ufs_qcom_host *host; struct ufs_clk_info *clki; + const struct ufs_qcom_drvdata *drvdata = of_device_get_match_data(hba->dev); host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); if (!host) @@ -1129,6 +1386,7 @@ static int ufs_qcom_init(struct ufs_hba *hba) ufs_qcom_advertise_quirks(hba); ufs_qcom_set_host_params(hba); ufs_qcom_set_phy_gear(host); + ufs_qcom_parse_gear_limits(hba); err = ufs_qcom_ice_init(host); if (err) @@ -1143,6 +1401,9 @@ static int ufs_qcom_init(struct ufs_hba *hba) dev_warn(dev, "%s: failed to configure the testbus %d\n", __func__, err); + if (drvdata && drvdata->no_phy_retention) + hba->spm_lvl = UFS_PM_LVL_5; + return 0; out_variant_clear: @@ -1231,28 +1492,46 @@ static int ufs_qcom_set_clk_40ns_cycles(struct ufs_hba *hba, return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CORE_CLK_40NS_CYCLES), reg); } -static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up) +static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up, unsigned long freq) { struct ufs_qcom_host *host = ufshcd_get_variant(hba); struct list_head *head = &hba->clk_list_head; struct ufs_clk_info *clki; u32 cycles_in_1us = 0; u32 core_clk_ctrl_reg; + unsigned long clk_freq; int err; + if (hba->use_pm_opp && freq != ULONG_MAX) { + clk_freq = ufs_qcom_opp_freq_to_clk_freq(hba, freq, "core_clk_unipro"); + if (clk_freq) { + cycles_in_1us = ceil(clk_freq, HZ_PER_MHZ); + goto set_core_clk_ctrl; + } + } + list_for_each_entry(clki, head, list) { if (!IS_ERR_OR_NULL(clki->clk) && !strcmp(clki->name, "core_clk_unipro")) { - if (!clki->max_freq) + if (!clki->max_freq) { cycles_in_1us = 150; /* default for backwards compatibility */ - else if (is_scale_up) - cycles_in_1us = ceil(clki->max_freq, (1000 * 1000)); + break; + } + + if (freq == ULONG_MAX) { + cycles_in_1us = ceil(clki->max_freq, HZ_PER_MHZ); + break; + } + + if (is_scale_up) + cycles_in_1us = ceil(clki->max_freq, HZ_PER_MHZ); else - cycles_in_1us = ceil(clk_get_rate(clki->clk), (1000 * 1000)); + cycles_in_1us = ceil(clk_get_rate(clki->clk), 
HZ_PER_MHZ); break; } } +set_core_clk_ctrl: err = ufshcd_dme_get(hba, UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL), &core_clk_ctrl_reg); @@ -1285,20 +1564,17 @@ static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up) return ufs_qcom_set_clk_40ns_cycles(hba, cycles_in_1us); } -static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba) +static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba, unsigned long freq) { - struct ufs_qcom_host *host = ufshcd_get_variant(hba); - struct ufs_pa_layer_attr *attr = &host->dev_req_params; int ret; - ret = ufs_qcom_cfg_timers(hba, attr->gear_rx, attr->pwr_rx, - attr->hs_rate, false, true); + ret = ufs_qcom_cfg_timers(hba, true, freq); if (ret) { dev_err(hba->dev, "%s ufs cfg timer failed\n", __func__); return ret; } /* set unipro core clock attributes and clear clock divider */ - return ufs_qcom_set_core_clk_ctrl(hba, true); + return ufs_qcom_set_core_clk_ctrl(hba, true, freq); } static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba) @@ -1327,14 +1603,22 @@ static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba) return err; } -static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba) +static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba, unsigned long freq) { + int ret; + + ret = ufs_qcom_cfg_timers(hba, false, freq); + if (ret) { + dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n", __func__); + return ret; + } /* set unipro core clock attributes and clear clock divider */ - return ufs_qcom_set_core_clk_ctrl(hba, false); + return ufs_qcom_set_core_clk_ctrl(hba, false, freq); } -static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba, - bool scale_up, enum ufs_notify_change_status status) +static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba, bool scale_up, + unsigned long target_freq, + enum ufs_notify_change_status status) { struct ufs_qcom_host *host = ufshcd_get_variant(hba); int err; @@ -1348,7 +1632,7 @@ static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba, if (err) return err; if (scale_up) - err = ufs_qcom_clk_scale_up_pre_change(hba); + err = ufs_qcom_clk_scale_up_pre_change(hba, target_freq); else err = ufs_qcom_clk_scale_down_pre_change(hba); @@ -1360,7 +1644,7 @@ static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba, if (scale_up) err = ufs_qcom_clk_scale_up_post_change(hba); else - err = ufs_qcom_clk_scale_down_post_change(hba); + err = ufs_qcom_clk_scale_down_post_change(hba, target_freq); if (err) { @@ -1481,6 +1765,89 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host) return 0; } +static void ufs_qcom_dump_testbus(struct ufs_hba *hba) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + int i, j, nminor = 0, testbus_len = 0; + u32 *testbus __free(kfree) = NULL; + char *prefix; + + testbus = kmalloc_array(256, sizeof(u32), GFP_KERNEL); + if (!testbus) + return; + + for (j = 0; j < TSTBUS_MAX; j++) { + nminor = testbus_info[j].nminor; + prefix = testbus_info[j].prefix; + host->testbus.select_major = j; + testbus_len = nminor * sizeof(u32); + for (i = 0; i < nminor; i++) { + host->testbus.select_minor = i; + ufs_qcom_testbus_config(host); + testbus[i] = ufshcd_readl(hba, UFS_TEST_BUS); + } + print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, + 16, 4, testbus, testbus_len, false); + } +} + +static int ufs_qcom_dump_regs(struct ufs_hba *hba, size_t offset, size_t len, + const char *prefix, void __iomem *base) +{ + u32 *regs __free(kfree) = NULL; + size_t pos; + + if (offset % 4 != 0 || len % 4 != 0) + return -EINVAL; + + regs = kzalloc(len, 
GFP_ATOMIC); + if (!regs) + return -ENOMEM; + + for (pos = 0; pos < len; pos += 4) + regs[pos / 4] = readl(base + offset + pos); + + print_hex_dump(KERN_ERR, prefix, + len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE, + 16, 4, regs, len, false); + + return 0; +} + +static void ufs_qcom_dump_mcq_hci_regs(struct ufs_hba *hba) +{ + struct ufshcd_mcq_opr_info_t *opr = &hba->mcq_opr[0]; + void __iomem *mcq_vs_base = hba->mcq_base + UFS_MEM_VS_BASE; + + struct dump_info { + void __iomem *base; + size_t offset; + size_t len; + const char *prefix; + }; + + struct dump_info mcq_dumps[] = { + {hba->mcq_base, 0x0, 256 * 4, "MCQ HCI-0 "}, + {hba->mcq_base, 0x400, 256 * 4, "MCQ HCI-1 "}, + {mcq_vs_base, 0x0, 5 * 4, "MCQ VS-0 "}, + {opr->base, 0x0, 256 * 4, "MCQ SQD-0 "}, + {opr->base, 0x400, 256 * 4, "MCQ SQD-1 "}, + {opr->base, 0x800, 256 * 4, "MCQ SQD-2 "}, + {opr->base, 0xc00, 256 * 4, "MCQ SQD-3 "}, + {opr->base, 0x1000, 256 * 4, "MCQ SQD-4 "}, + {opr->base, 0x1400, 256 * 4, "MCQ SQD-5 "}, + {opr->base, 0x1800, 256 * 4, "MCQ SQD-6 "}, + {opr->base, 0x1c00, 256 * 4, "MCQ SQD-7 "}, + + }; + + for (int i = 0; i < ARRAY_SIZE(mcq_dumps); i++) { + ufs_qcom_dump_regs(hba, mcq_dumps[i].offset, mcq_dumps[i].len, + mcq_dumps[i].prefix, mcq_dumps[i].base); + cond_resched(); + } +} + static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba) { u32 reg; @@ -1488,6 +1855,15 @@ static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba) host = ufshcd_get_variant(hba); + dev_err(hba->dev, "HW_H8_ENTER_CNT=%d\n", ufshcd_readl(hba, REG_UFS_HW_H8_ENTER_CNT)); + dev_err(hba->dev, "HW_H8_EXIT_CNT=%d\n", ufshcd_readl(hba, REG_UFS_HW_H8_EXIT_CNT)); + + dev_err(hba->dev, "SW_H8_ENTER_CNT=%d\n", ufshcd_readl(hba, REG_UFS_SW_H8_ENTER_CNT)); + dev_err(hba->dev, "SW_H8_EXIT_CNT=%d\n", ufshcd_readl(hba, REG_UFS_SW_H8_EXIT_CNT)); + + dev_err(hba->dev, "SW_AFTER_HW_H8_ENTER_CNT=%d\n", + ufshcd_readl(hba, REG_UFS_SW_AFTER_HW_H8_ENTER_CNT)); + ufshcd_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16 * 4, "HCI Vendor Specific Registers "); @@ -1530,6 +1906,23 @@ static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba) reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT); ufshcd_dump_regs(hba, reg, 9 * 4, "UFS_DBG_RD_REG_TMRLUT "); + + if (hba->mcq_enabled) { + reg = ufs_qcom_get_debug_reg_offset(host, UFS_RD_REG_MCQ); + ufshcd_dump_regs(hba, reg, 64 * 4, "HCI MCQ Debug Registers "); + } + + /* ensure below dumps occur only in task context due to blocking calls. 
*/ + if (in_task()) { + /* Dump MCQ Host Vendor Specific Registers */ + if (hba->mcq_enabled) + ufs_qcom_dump_mcq_hci_regs(hba); + + /* voluntarily yield the CPU as we are dumping too much data */ + ufshcd_dump_regs(hba, UFS_TEST_BUS, 4, "UFS_TEST_BUS "); + cond_resched(); + ufs_qcom_dump_testbus(hba); + } } /** @@ -1559,7 +1952,6 @@ static int ufs_qcom_device_reset(struct ufs_hba *hba) return 0; } -#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND) static void ufs_qcom_config_scaling_param(struct ufs_hba *hba, struct devfreq_dev_profile *p, struct devfreq_simple_ondemand_data *d) @@ -1571,131 +1963,69 @@ static void ufs_qcom_config_scaling_param(struct ufs_hba *hba, hba->clk_scaling.suspend_on_no_request = true; } -#else -static void ufs_qcom_config_scaling_param(struct ufs_hba *hba, - struct devfreq_dev_profile *p, - struct devfreq_simple_ondemand_data *data) -{ -} -#endif - -static void ufs_qcom_reinit_notify(struct ufs_hba *hba) -{ - struct ufs_qcom_host *host = ufshcd_get_variant(hba); - - phy_power_off(host->generic_phy); -} - -/* Resources */ -static const struct ufshcd_res_info ufs_res_info[RES_MAX] = { - {.name = "ufs_mem",}, - {.name = "mcq",}, - /* Submission Queue DAO */ - {.name = "mcq_sqd",}, - /* Submission Queue Interrupt Status */ - {.name = "mcq_sqis",}, - /* Completion Queue DAO */ - {.name = "mcq_cqd",}, - /* Completion Queue Interrupt Status */ - {.name = "mcq_cqis",}, - /* MCQ vendor specific */ - {.name = "mcq_vs",}, -}; static int ufs_qcom_mcq_config_resource(struct ufs_hba *hba) { struct platform_device *pdev = to_platform_device(hba->dev); - struct ufshcd_res_info *res; - struct resource *res_mem, *res_mcq; - int i, ret; - - memcpy(hba->res, ufs_res_info, sizeof(ufs_res_info)); - - for (i = 0; i < RES_MAX; i++) { - res = &hba->res[i]; - res->resource = platform_get_resource_byname(pdev, - IORESOURCE_MEM, - res->name); - if (!res->resource) { - dev_info(hba->dev, "Resource %s not provided\n", res->name); - if (i == RES_UFS) - return -ENODEV; - continue; - } else if (i == RES_UFS) { - res_mem = res->resource; - res->base = hba->mmio_base; - continue; - } - - res->base = devm_ioremap_resource(hba->dev, res->resource); - if (IS_ERR(res->base)) { - dev_err(hba->dev, "Failed to map res %s, err=%d\n", - res->name, (int)PTR_ERR(res->base)); - ret = PTR_ERR(res->base); - res->base = NULL; - return ret; - } - } - - /* MCQ resource provided in DT */ - res = &hba->res[RES_MCQ]; - /* Bail if MCQ resource is provided */ - if (res->base) - goto out; - - /* Explicitly allocate MCQ resource from ufs_mem */ - res_mcq = devm_kzalloc(hba->dev, sizeof(*res_mcq), GFP_KERNEL); - if (!res_mcq) - return -ENOMEM; + struct resource *res; - res_mcq->start = res_mem->start + - MCQ_SQATTR_OFFSET(hba->mcq_capabilities); - res_mcq->end = res_mcq->start + hba->nr_hw_queues * MCQ_QCFG_SIZE - 1; - res_mcq->flags = res_mem->flags; - res_mcq->name = "mcq"; - - ret = insert_resource(&iomem_resource, res_mcq); - if (ret) { - dev_err(hba->dev, "Failed to insert MCQ resource, err=%d\n", - ret); - return ret; + /* Map the MCQ configuration region */ + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mcq"); + if (!res) { + dev_err(hba->dev, "MCQ resource not found in device tree\n"); + return -ENODEV; } - res->base = devm_ioremap_resource(hba->dev, res_mcq); - if (IS_ERR(res->base)) { - dev_err(hba->dev, "MCQ registers mapping failed, err=%d\n", - (int)PTR_ERR(res->base)); - ret = PTR_ERR(res->base); - goto ioremap_err; + hba->mcq_base = devm_ioremap_resource(hba->dev, res); + if 
(IS_ERR(hba->mcq_base)) { + dev_err(hba->dev, "Failed to map MCQ region: %ld\n", + PTR_ERR(hba->mcq_base)); + return PTR_ERR(hba->mcq_base); } -out: - hba->mcq_base = res->base; return 0; -ioremap_err: - res->base = NULL; - remove_resource(res_mcq); - return ret; } static int ufs_qcom_op_runtime_config(struct ufs_hba *hba) { - struct ufshcd_res_info *mem_res, *sqdao_res; struct ufshcd_mcq_opr_info_t *opr; int i; + u32 doorbell_offsets[OPR_MAX]; - mem_res = &hba->res[RES_UFS]; - sqdao_res = &hba->res[RES_MCQ_SQD]; + /* + * Configure doorbell address offsets in MCQ configuration registers. + * These values are offsets relative to mmio_base (UFS_HCI_BASE). + * + * Memory Layout: + * - mmio_base = UFS_HCI_BASE + * - mcq_base = MCQ_CONFIG_BASE = mmio_base + (UFS_QCOM_MCQCAP_QCFGPTR * 0x200) + * - Doorbell registers are at: mmio_base + (UFS_QCOM_MCQCAP_QCFGPTR * 0x200) + + * - UFS_QCOM_MCQ_SQD_OFFSET + * - Which is also: mcq_base + UFS_QCOM_MCQ_SQD_OFFSET + */ - if (!mem_res->base || !sqdao_res->base) - return -EINVAL; + doorbell_offsets[OPR_SQD] = UFS_QCOM_SQD_ADDR_OFFSET; + doorbell_offsets[OPR_SQIS] = UFS_QCOM_SQIS_ADDR_OFFSET; + doorbell_offsets[OPR_CQD] = UFS_QCOM_CQD_ADDR_OFFSET; + doorbell_offsets[OPR_CQIS] = UFS_QCOM_CQIS_ADDR_OFFSET; + /* + * Configure MCQ operation registers. + * + * The doorbell registers are physically located within the MCQ region: + * - doorbell_physical_addr = mmio_base + doorbell_offset + * - doorbell_physical_addr = mcq_base + (doorbell_offset - MCQ_CONFIG_OFFSET) + */ for (i = 0; i < OPR_MAX; i++) { opr = &hba->mcq_opr[i]; - opr->offset = sqdao_res->resource->start - - mem_res->resource->start + 0x40 * i; - opr->stride = 0x100; - opr->base = sqdao_res->base + 0x40 * i; + opr->offset = doorbell_offsets[i]; /* Offset relative to mmio_base */ + opr->stride = UFS_QCOM_MCQ_STRIDE; /* 256 bytes between queues */ + + /* + * Calculate the actual doorbell base address within MCQ region: + * base = mcq_base + (doorbell_offset - MCQ_CONFIG_OFFSET) + */ + opr->base = hba->mcq_base + (opr->offset - UFS_QCOM_MCQ_CONFIG_OFFSET); } return 0; @@ -1710,12 +2040,8 @@ static int ufs_qcom_get_hba_mac(struct ufs_hba *hba) static int ufs_qcom_get_outstanding_cqs(struct ufs_hba *hba, unsigned long *ocqs) { - struct ufshcd_res_info *mcq_vs_res = &hba->res[RES_MCQ_VS]; - - if (!mcq_vs_res->base) - return -EINVAL; - - *ocqs = readl(mcq_vs_res->base + UFS_MEM_CQIS_VS); + /* Read from MCQ vendor-specific register in MCQ region */ + *ocqs = readl(hba->mcq_base + UFS_MEM_CQIS_VS); return 0; } @@ -1728,15 +2054,19 @@ static void ufs_qcom_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg) ufshcd_mcq_config_esi(hba, msg); } +struct ufs_qcom_irq { + unsigned int irq; + unsigned int idx; + struct ufs_hba *hba; +}; + static irqreturn_t ufs_qcom_mcq_esi_handler(int irq, void *data) { - struct msi_desc *desc = data; - struct device *dev = msi_desc_to_dev(desc); - struct ufs_hba *hba = dev_get_drvdata(dev); - u32 id = desc->msi_index; - struct ufs_hw_queue *hwq = &hba->uhq[id]; + struct ufs_qcom_irq *qi = data; + struct ufs_hba *hba = qi->hba; + struct ufs_hw_queue *hwq = &hba->uhq[qi->idx]; - ufshcd_mcq_write_cqis(hba, 0x1, id); + ufshcd_mcq_write_cqis(hba, 0x1, qi->idx); ufshcd_mcq_poll_cqe_lock(hba, hwq); return IRQ_HANDLED; @@ -1745,8 +2075,6 @@ static irqreturn_t ufs_qcom_mcq_esi_handler(int irq, void *data) static int ufs_qcom_config_esi(struct ufs_hba *hba) { struct ufs_qcom_host *host = ufshcd_get_variant(hba); - struct msi_desc *desc; - struct msi_desc *failed_desc = NULL; int 
nr_irqs, ret; if (host->esi_enabled) @@ -1757,48 +2085,119 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba) * 2. Poll queues do not need ESI. */ nr_irqs = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL]; + ret = platform_device_msi_init_and_alloc_irqs(hba->dev, nr_irqs, ufs_qcom_write_msi_msg); if (ret) { - dev_err(hba->dev, "Failed to request Platform MSI %d\n", ret); - return ret; + dev_warn(hba->dev, "Platform MSI not supported or failed, continuing without ESI\n"); + return ret; /* Continue without ESI */ } - msi_lock_descs(hba->dev); - msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) { - ret = devm_request_irq(hba->dev, desc->irq, - ufs_qcom_mcq_esi_handler, - IRQF_SHARED, "qcom-mcq-esi", desc); + struct ufs_qcom_irq *qi = devm_kcalloc(hba->dev, nr_irqs, sizeof(*qi), GFP_KERNEL); + + if (!qi) { + platform_device_msi_free_irqs_all(hba->dev); + return -ENOMEM; + } + + for (int idx = 0; idx < nr_irqs; idx++) { + qi[idx].irq = msi_get_virq(hba->dev, idx); + qi[idx].idx = idx; + qi[idx].hba = hba; + + ret = devm_request_irq(hba->dev, qi[idx].irq, ufs_qcom_mcq_esi_handler, + IRQF_SHARED, "qcom-mcq-esi", qi + idx); if (ret) { - dev_err(hba->dev, "%s: Fail to request IRQ for %d, err = %d\n", - __func__, desc->irq, ret); - failed_desc = desc; - break; + dev_err(hba->dev, "%s: Failed to request IRQ for %d, err = %d\n", + __func__, qi[idx].irq, ret); + /* Free previously allocated IRQs */ + for (int j = 0; j < idx; j++) + devm_free_irq(hba->dev, qi[j].irq, qi + j); + platform_device_msi_free_irqs_all(hba->dev); + devm_kfree(hba->dev, qi); + return ret; } } - msi_unlock_descs(hba->dev); - if (ret) { - /* Rewind */ - msi_lock_descs(hba->dev); - msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) { - if (desc == failed_desc) - break; - devm_free_irq(hba->dev, desc->irq, hba); + if (host->hw_ver.major >= 6) { + ufshcd_rmwl(hba, ESI_VEC_MASK, FIELD_PREP(ESI_VEC_MASK, MAX_ESI_VEC - 1), + REG_UFS_CFG3); + } + ufshcd_mcq_enable_esi(hba); + host->esi_enabled = true; + return 0; +} + +static unsigned long ufs_qcom_opp_freq_to_clk_freq(struct ufs_hba *hba, + unsigned long freq, char *name) +{ + struct ufs_clk_info *clki; + struct dev_pm_opp *opp; + unsigned long clk_freq; + int idx = 0; + bool found = false; + + opp = dev_pm_opp_find_freq_exact_indexed(hba->dev, freq, 0, true); + if (IS_ERR(opp)) { + dev_err(hba->dev, "Failed to find OPP for exact frequency %lu\n", freq); + return 0; + } + + list_for_each_entry(clki, &hba->clk_list_head, list) { + if (!strcmp(clki->name, name)) { + found = true; + break; } - msi_unlock_descs(hba->dev); - platform_device_msi_free_irqs_all(hba->dev); - } else { - if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 && - host->hw_ver.step == 0) - ufshcd_rmwl(hba, ESI_VEC_MASK, - FIELD_PREP(ESI_VEC_MASK, MAX_ESI_VEC - 1), - REG_UFS_CFG3); - ufshcd_mcq_enable_esi(hba); - host->esi_enabled = true; + + idx++; } - return ret; + if (!found) { + dev_err(hba->dev, "Failed to find clock '%s' in clk list\n", name); + dev_pm_opp_put(opp); + return 0; + } + + clk_freq = dev_pm_opp_get_freq_indexed(opp, idx); + + dev_pm_opp_put(opp); + + return clk_freq; +} + +static u32 ufs_qcom_freq_to_gear_speed(struct ufs_hba *hba, unsigned long freq) +{ + u32 gear = UFS_HS_DONT_CHANGE; + unsigned long unipro_freq; + + if (!hba->use_pm_opp) + return gear; + + unipro_freq = ufs_qcom_opp_freq_to_clk_freq(hba, freq, "core_clk_unipro"); + switch (unipro_freq) { + case 403000000: + gear = UFS_HS_G5; + break; + case 300000000: + gear = UFS_HS_G4; + break; + case 201500000: + gear = UFS_HS_G3; + break; 
+ case 150000000: + case 100000000: + gear = UFS_HS_G2; + break; + case 75000000: + case 37500000: + gear = UFS_HS_G1; + break; + default: + dev_err(hba->dev, "%s: Unsupported clock freq : %lu\n", __func__, freq); + return UFS_HS_DONT_CHANGE; + } + + return min_t(u32, gear, hba->max_pwr_info.info.gear_rx); } /* @@ -1824,13 +2223,12 @@ static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = { .dbg_register_dump = ufs_qcom_dump_dbg_regs, .device_reset = ufs_qcom_device_reset, .config_scaling_param = ufs_qcom_config_scaling_param, - .program_key = ufs_qcom_ice_program_key, - .reinit_notify = ufs_qcom_reinit_notify, .mcq_config_resource = ufs_qcom_mcq_config_resource, .get_hba_mac = ufs_qcom_get_hba_mac, .op_runtime_config = ufs_qcom_op_runtime_config, .get_outstanding_cqs = ufs_qcom_get_outstanding_cqs, .config_esi = ufs_qcom_config_esi, + .freq_to_gear_speed = ufs_qcom_freq_to_gear_speed, }; /** @@ -1861,15 +2259,22 @@ static int ufs_qcom_probe(struct platform_device *pdev) static void ufs_qcom_remove(struct platform_device *pdev) { struct ufs_hba *hba = platform_get_drvdata(pdev); + struct ufs_qcom_host *host = ufshcd_get_variant(hba); - pm_runtime_get_sync(&(pdev)->dev); - ufshcd_remove(hba); - platform_device_msi_free_irqs_all(hba->dev); + ufshcd_pltfrm_remove(pdev); + if (host->esi_enabled) + platform_device_msi_free_irqs_all(hba->dev); } +static const struct ufs_qcom_drvdata ufs_qcom_sm8550_drvdata = { + .quirks = UFSHCD_QUIRK_BROKEN_LSDBS_CAP, + .no_phy_retention = true, +}; + static const struct of_device_id ufs_qcom_of_match[] __maybe_unused = { { .compatible = "qcom,ufshc" }, - { .compatible = "qcom,sm8550-ufshc" }, + { .compatible = "qcom,sm8550-ufshc", .data = &ufs_qcom_sm8550_drvdata }, + { .compatible = "qcom,sm8650-ufshc", .data = &ufs_qcom_sm8550_drvdata }, {}, }; MODULE_DEVICE_TABLE(of, ufs_qcom_of_match); @@ -1897,7 +2302,7 @@ static const struct dev_pm_ops ufs_qcom_pm_ops = { static struct platform_driver ufs_qcom_pltform = { .probe = ufs_qcom_probe, - .remove_new = ufs_qcom_remove, + .remove = ufs_qcom_remove, .driver = { .name = "ufshcd-qcom", .pm = &ufs_qcom_pm_ops, |
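
Note on the frequency-based paths added above: both reduce to a cycles-per-microsecond calculation. ufs_qcom_cfg_timers() programs REG_UFS_SYS1CLK_1US with the core_clk rate divided by USEC_PER_SEC, while ufs_qcom_set_core_clk_ctrl() programs DME_VS_CORE_CLK_CTRL with the core_clk_unipro rate rounded up to whole MHz. The standalone sketch below only illustrates that arithmetic for a few of the core_clk_unipro rates listed in ufs_qcom_freq_to_gear_speed(); the macros and main() here are illustrative stand-ins rather than driver code, and the driver's ceil() helper is assumed to round up like DIV_ROUND_UP().

/* Illustrative sketch of the cycles-per-us math; not part of the driver. */
#include <stdio.h>

#define USEC_PER_SEC	1000000UL
#define HZ_PER_MHZ	1000000UL
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* core_clk_unipro rates taken from ufs_qcom_freq_to_gear_speed() above */
	unsigned long freqs[] = { 403000000UL, 300000000UL, 201500000UL, 75000000UL };

	for (unsigned int i = 0; i < sizeof(freqs) / sizeof(freqs[0]); i++) {
		unsigned long f = freqs[i];

		/* ufs_qcom_cfg_timers(): value written to REG_UFS_SYS1CLK_1US (truncating) */
		unsigned long sys1clk_1us = f / USEC_PER_SEC;

		/* ufs_qcom_set_core_clk_ctrl(): cycles_in_1us, rounded up to whole MHz */
		unsigned long cycles_in_1us = DIV_ROUND_UP(f, HZ_PER_MHZ);

		printf("%9lu Hz -> SYS1CLK_1US = %lu, cycles_in_1us = %lu\n",
		       f, sys1clk_1us, cycles_in_1us);
	}

	return 0;
}

For 201500000 Hz the two values differ (201 vs. 202), which is why the UniPro attribute uses the rounded-up form.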
