Diffstat (limited to 'drivers/ufs/host/ufs-qcom.c')
-rw-r--r--	drivers/ufs/host/ufs-qcom.c | 403
1 file changed, 321 insertions, 82 deletions
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index c0761ccc1381..18a978452001 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -5,6 +5,7 @@
 
 #include <linux/acpi.h>
 #include <linux/clk.h>
+#include <linux/cleanup.h>
 #include <linux/delay.h>
 #include <linux/devfreq.h>
 #include <linux/gpio/consumer.h>
@@ -102,8 +103,28 @@ static const struct __ufs_qcom_bw_table {
 	[MODE_MAX][0][0] = { 7643136, 819200 },
 };
 
+static const struct {
+	int nminor;
+	char *prefix;
+} testbus_info[TSTBUS_MAX] = {
+	[TSTBUS_UAWM] = {32, "TSTBUS_UAWM"},
+	[TSTBUS_UARM] = {32, "TSTBUS_UARM"},
+	[TSTBUS_TXUC] = {32, "TSTBUS_TXUC"},
+	[TSTBUS_RXUC] = {32, "TSTBUS_RXUC"},
+	[TSTBUS_DFC] = {32, "TSTBUS_DFC"},
+	[TSTBUS_TRLUT] = {32, "TSTBUS_TRLUT"},
+	[TSTBUS_TMRLUT] = {32, "TSTBUS_TMRLUT"},
+	[TSTBUS_OCSC] = {32, "TSTBUS_OCSC"},
+	[TSTBUS_UTP_HCI] = {32, "TSTBUS_UTP_HCI"},
+	[TSTBUS_COMBINED] = {32, "TSTBUS_COMBINED"},
+	[TSTBUS_WRAPPER] = {32, "TSTBUS_WRAPPER"},
+	[TSTBUS_UNIPRO] = {256, "TSTBUS_UNIPRO"},
+};
+
 static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
-static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, unsigned long freq);
+static unsigned long ufs_qcom_opp_freq_to_clk_freq(struct ufs_hba *hba,
+						   unsigned long freq, char *name);
+static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up, unsigned long freq);
 
 static struct ufs_qcom_host *rcdev_to_ufs_host(struct reset_controller_dev *rcd)
 {
@@ -173,7 +194,7 @@ static int ufs_qcom_ice_init(struct ufs_qcom_host *host)
 
 	profile->ll_ops = ufs_qcom_crypto_ops;
 	profile->max_dun_bytes_supported = 8;
-	profile->key_types_supported = BLK_CRYPTO_KEY_TYPE_RAW;
+	profile->key_types_supported = qcom_ice_get_supported_key_type(ice);
 	profile->dev = dev;
 
 	/*
@@ -221,17 +242,8 @@ static int ufs_qcom_ice_keyslot_program(struct blk_crypto_profile *profile,
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 	int err;
 
-	/* Only AES-256-XTS has been tested so far. */
-	if (key->crypto_cfg.crypto_mode != BLK_ENCRYPTION_MODE_AES_256_XTS)
-		return -EOPNOTSUPP;
-
 	ufshcd_hold(hba);
-	err = qcom_ice_program_key(host->ice,
-				   QCOM_ICE_CRYPTO_ALG_AES_XTS,
-				   QCOM_ICE_CRYPTO_KEY_SIZE_256,
-				   key->bytes,
-				   key->crypto_cfg.data_unit_size / 512,
-				   slot);
+	err = qcom_ice_program_key(host->ice, slot, key);
 	ufshcd_release(hba);
 	return err;
 }
@@ -250,9 +262,53 @@ static int ufs_qcom_ice_keyslot_evict(struct blk_crypto_profile *profile,
 	return err;
 }
 
+static int ufs_qcom_ice_derive_sw_secret(struct blk_crypto_profile *profile,
+					 const u8 *eph_key, size_t eph_key_size,
+					 u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE])
+{
+	struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile);
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+	return qcom_ice_derive_sw_secret(host->ice, eph_key, eph_key_size,
+					 sw_secret);
+}
+
+static int ufs_qcom_ice_import_key(struct blk_crypto_profile *profile,
+				   const u8 *raw_key, size_t raw_key_size,
+				   u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE])
+{
+	struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile);
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+	return qcom_ice_import_key(host->ice, raw_key, raw_key_size, lt_key);
+}
+
+static int ufs_qcom_ice_generate_key(struct blk_crypto_profile *profile,
+				     u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE])
+{
+	struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile);
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+	return qcom_ice_generate_key(host->ice, lt_key);
+}
+
+static int ufs_qcom_ice_prepare_key(struct blk_crypto_profile *profile,
+				    const u8 *lt_key, size_t lt_key_size,
+				    u8 eph_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE])
+{
+	struct ufs_hba *hba = ufs_hba_from_crypto_profile(profile);
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+	return qcom_ice_prepare_key(host->ice, lt_key, lt_key_size, eph_key);
+}
+
 static const struct blk_crypto_ll_ops ufs_qcom_crypto_ops = {
 	.keyslot_program = ufs_qcom_ice_keyslot_program,
 	.keyslot_evict = ufs_qcom_ice_keyslot_evict,
+	.derive_sw_secret = ufs_qcom_ice_derive_sw_secret,
+	.import_key = ufs_qcom_ice_import_key,
+	.generate_key = ufs_qcom_ice_generate_key,
+	.prepare_key = ufs_qcom_ice_prepare_key,
 };
 
 #else
@@ -452,10 +508,9 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
 	if (ret)
 		return ret;
 
-	if (phy->power_count) {
+	if (phy->power_count)
 		phy_power_off(phy);
-		phy_exit(phy);
-	}
+
 
 	/* phy initialization - calibrate the phy */
 	ret = phy_init(phy);
@@ -543,13 +598,14 @@ static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
  *
  * @hba: host controller instance
  * @is_pre_scale_up: flag to check if pre scale up condition.
+ * @freq: target opp freq
  * Return: zero for success and non-zero in case of a failure.
  */
-static int ufs_qcom_cfg_timers(struct ufs_hba *hba, bool is_pre_scale_up)
+static int ufs_qcom_cfg_timers(struct ufs_hba *hba, bool is_pre_scale_up, unsigned long freq)
 {
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 	struct ufs_clk_info *clki;
-	unsigned long core_clk_rate = 0;
+	unsigned long clk_freq = 0;
 	u32 core_clk_cycles_per_us;
 
 	/*
@@ -561,22 +617,34 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, bool is_pre_scale_up)
 	if (host->hw_ver.major < 4 && !ufshcd_is_intr_aggr_allowed(hba))
 		return 0;
 
+	if (hba->use_pm_opp && freq != ULONG_MAX) {
+		clk_freq = ufs_qcom_opp_freq_to_clk_freq(hba, freq, "core_clk");
+		if (clk_freq)
+			goto cfg_timers;
+	}
+
 	list_for_each_entry(clki, &hba->clk_list_head, list) {
 		if (!strcmp(clki->name, "core_clk")) {
+			if (freq == ULONG_MAX) {
+				clk_freq = clki->max_freq;
+				break;
+			}
+
 			if (is_pre_scale_up)
-				core_clk_rate = clki->max_freq;
+				clk_freq = clki->max_freq;
 			else
-				core_clk_rate = clk_get_rate(clki->clk);
+				clk_freq = clk_get_rate(clki->clk);
 			break;
 		}
 	}
 
+cfg_timers:
 	/* If frequency is smaller than 1MHz, set to 1MHz */
-	if (core_clk_rate < DEFAULT_CLK_RATE_HZ)
-		core_clk_rate = DEFAULT_CLK_RATE_HZ;
+	if (clk_freq < DEFAULT_CLK_RATE_HZ)
+		clk_freq = DEFAULT_CLK_RATE_HZ;
 
-	core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC;
+	core_clk_cycles_per_us = clk_freq / USEC_PER_SEC;
 	if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) {
 		ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US);
 		/*
@@ -596,13 +664,13 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
 
 	switch (status) {
 	case PRE_CHANGE:
-		if (ufs_qcom_cfg_timers(hba, false)) {
+		if (ufs_qcom_cfg_timers(hba, false, ULONG_MAX)) {
 			dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
 				__func__);
 			return -EINVAL;
 		}
 
-		err = ufs_qcom_set_core_clk_ctrl(hba, ULONG_MAX);
+		err = ufs_qcom_set_core_clk_ctrl(hba, true, ULONG_MAX);
 		if (err)
 			dev_err(hba->dev, "cfg core clk ctrl failed\n");
 		/*
@@ -874,17 +942,6 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
 		break;
 	case POST_CHANGE:
-		if (ufs_qcom_cfg_timers(hba, false)) {
-			dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
-				__func__);
-			/*
-			 * we return error code at the end of the routine,
-			 * but continue to configure UFS_PHY_TX_LANE_ENABLE
-			 * and bus voting as usual
-			 */
-			ret = -EINVAL;
-		}
-
 		/* cache the power mode parameters to use internally */
 		memcpy(&host->dev_req_params,
 		       dev_req_params, sizeof(*dev_req_params));
 
@@ -1360,29 +1417,46 @@ static int ufs_qcom_set_clk_40ns_cycles(struct ufs_hba *hba,
 
 	return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CORE_CLK_40NS_CYCLES), reg);
 }
 
-static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, unsigned long freq)
+static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up, unsigned long freq)
 {
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 	struct list_head *head = &hba->clk_list_head;
 	struct ufs_clk_info *clki;
 	u32 cycles_in_1us = 0;
 	u32 core_clk_ctrl_reg;
+	unsigned long clk_freq;
 	int err;
 
+	if (hba->use_pm_opp && freq != ULONG_MAX) {
+		clk_freq = ufs_qcom_opp_freq_to_clk_freq(hba, freq, "core_clk_unipro");
+		if (clk_freq) {
+			cycles_in_1us = ceil(clk_freq, HZ_PER_MHZ);
+			goto set_core_clk_ctrl;
+		}
+	}
+
 	list_for_each_entry(clki, head, list) {
 		if (!IS_ERR_OR_NULL(clki->clk) &&
 		    !strcmp(clki->name, "core_clk_unipro")) {
-			if (!clki->max_freq)
+			if (!clki->max_freq) {
 				cycles_in_1us = 150; /* default for backwards compatibility */
-			else if (freq == ULONG_MAX)
+				break;
+			}
+
+			if (freq == ULONG_MAX) {
 				cycles_in_1us = ceil(clki->max_freq, HZ_PER_MHZ);
-			else
-				cycles_in_1us = ceil(freq, HZ_PER_MHZ);
+				break;
+			}
+			if (is_scale_up)
+				cycles_in_1us = ceil(clki->max_freq, HZ_PER_MHZ);
+			else
+				cycles_in_1us = ceil(clk_get_rate(clki->clk), HZ_PER_MHZ);
 			break;
 		}
 	}
 
+set_core_clk_ctrl:
 	err = ufshcd_dme_get(hba,
 			     UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
 			     &core_clk_ctrl_reg);
@@ -1419,13 +1493,13 @@ static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba, unsigned long f
 {
 	int ret;
 
-	ret = ufs_qcom_cfg_timers(hba, true);
+	ret = ufs_qcom_cfg_timers(hba, true, freq);
 	if (ret) {
 		dev_err(hba->dev, "%s ufs cfg timer failed\n", __func__);
 		return ret;
 	}
 
 	/* set unipro core clock attributes and clear clock divider */
-	return ufs_qcom_set_core_clk_ctrl(hba, freq);
+	return ufs_qcom_set_core_clk_ctrl(hba, true, freq);
 }
 
 static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
@@ -1456,8 +1530,15 @@ static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
 
 static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba, unsigned long freq)
 {
+	int ret;
+
+	ret = ufs_qcom_cfg_timers(hba, false, freq);
+	if (ret) {
+		dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n", __func__);
+		return ret;
+	}
+
 	/* set unipro core clock attributes and clear clock divider */
-	return ufs_qcom_set_core_clk_ctrl(hba, freq);
+	return ufs_qcom_set_core_clk_ctrl(hba, false, freq);
 }
 
 static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba, bool scale_up,
@@ -1609,6 +1690,85 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
 	return 0;
 }
 
+static void ufs_qcom_dump_testbus(struct ufs_hba *hba)
+{
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+	int i, j, nminor = 0, testbus_len = 0;
+	u32 *testbus __free(kfree) = NULL;
+	char *prefix;
+
+	testbus = kmalloc_array(256, sizeof(u32), GFP_KERNEL);
+	if (!testbus)
+		return;
+
+	for (j = 0; j < TSTBUS_MAX; j++) {
+		nminor = testbus_info[j].nminor;
+		prefix = testbus_info[j].prefix;
+		host->testbus.select_major = j;
+		testbus_len = nminor * sizeof(u32);
+		for (i = 0; i < nminor; i++) {
+			host->testbus.select_minor = i;
+			ufs_qcom_testbus_config(host);
+			testbus[i] = ufshcd_readl(hba, UFS_TEST_BUS);
+		}
+		print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
+			       16, 4, testbus, testbus_len, false);
+	}
+}
+
+static int ufs_qcom_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
+			      const char *prefix, enum ufshcd_res id)
+{
+	u32 *regs __free(kfree) = NULL;
+	size_t pos;
+
+	if (offset % 4 != 0 || len % 4 != 0)
+		return -EINVAL;
+
+	regs = kzalloc(len, GFP_ATOMIC);
+	if (!regs)
+		return -ENOMEM;
+
+	for (pos = 0; pos < len; pos += 4)
+		regs[pos / 4] = readl(hba->res[id].base + offset + pos);
+
+	print_hex_dump(KERN_ERR, prefix,
+		       len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,
+		       16, 4, regs, len, false);
+
+	return 0;
+}
+
+static void ufs_qcom_dump_mcq_hci_regs(struct ufs_hba *hba)
+{
+	struct dump_info {
+		size_t offset;
+		size_t len;
+		const char *prefix;
+		enum ufshcd_res id;
+	};
+
+	struct dump_info mcq_dumps[] = {
+		{0x0, 256 * 4, "MCQ HCI-0 ", RES_MCQ},
+		{0x400, 256 * 4, "MCQ HCI-1 ", RES_MCQ},
+		{0x0, 5 * 4, "MCQ VS-0 ", RES_MCQ_VS},
+		{0x0, 256 * 4, "MCQ SQD-0 ", RES_MCQ_SQD},
+		{0x400, 256 * 4, "MCQ SQD-1 ", RES_MCQ_SQD},
+		{0x800, 256 * 4, "MCQ SQD-2 ", RES_MCQ_SQD},
+		{0xc00, 256 * 4, "MCQ SQD-3 ", RES_MCQ_SQD},
+		{0x1000, 256 * 4, "MCQ SQD-4 ", RES_MCQ_SQD},
+		{0x1400, 256 * 4, "MCQ SQD-5 ", RES_MCQ_SQD},
+		{0x1800, 256 * 4, "MCQ SQD-6 ", RES_MCQ_SQD},
+		{0x1c00, 256 * 4, "MCQ SQD-7 ", RES_MCQ_SQD},
+	};
+
+	for (int i = 0; i < ARRAY_SIZE(mcq_dumps); i++) {
+		ufs_qcom_dump_regs(hba, mcq_dumps[i].offset, mcq_dumps[i].len,
+				   mcq_dumps[i].prefix, mcq_dumps[i].id);
+		cond_resched();
+	}
+}
+
 static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
 {
 	u32 reg;
@@ -1616,6 +1776,15 @@ static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
 
 	host = ufshcd_get_variant(hba);
 
+	dev_err(hba->dev, "HW_H8_ENTER_CNT=%d\n", ufshcd_readl(hba, REG_UFS_HW_H8_ENTER_CNT));
+	dev_err(hba->dev, "HW_H8_EXIT_CNT=%d\n", ufshcd_readl(hba, REG_UFS_HW_H8_EXIT_CNT));
+
+	dev_err(hba->dev, "SW_H8_ENTER_CNT=%d\n", ufshcd_readl(hba, REG_UFS_SW_H8_ENTER_CNT));
+	dev_err(hba->dev, "SW_H8_EXIT_CNT=%d\n", ufshcd_readl(hba, REG_UFS_SW_H8_EXIT_CNT));
+
+	dev_err(hba->dev, "SW_AFTER_HW_H8_ENTER_CNT=%d\n",
+		ufshcd_readl(hba, REG_UFS_SW_AFTER_HW_H8_ENTER_CNT));
+
 	ufshcd_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16 * 4,
 			 "HCI Vendor Specific Registers ");
@@ -1658,6 +1827,23 @@ static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
 
 	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT);
 	ufshcd_dump_regs(hba, reg, 9 * 4, "UFS_DBG_RD_REG_TMRLUT ");
+
+	if (hba->mcq_enabled) {
+		reg = ufs_qcom_get_debug_reg_offset(host, UFS_RD_REG_MCQ);
+		ufshcd_dump_regs(hba, reg, 64 * 4, "HCI MCQ Debug Registers ");
+	}
+
+	/* ensure below dumps occur only in task context due to blocking calls. */
+	if (in_task()) {
+		/* Dump MCQ Host Vendor Specific Registers */
+		if (hba->mcq_enabled)
+			ufs_qcom_dump_mcq_hci_regs(hba);
+
+		/* voluntarily yield the CPU as we are dumping too much data */
+		ufshcd_dump_regs(hba, UFS_TEST_BUS, 4, "UFS_TEST_BUS ");
+		cond_resched();
+		ufs_qcom_dump_testbus(hba);
+	}
 }
 
 /**
@@ -1849,25 +2035,38 @@ static void ufs_qcom_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
 	ufshcd_mcq_config_esi(hba, msg);
 }
 
+struct ufs_qcom_irq {
+	unsigned int irq;
+	unsigned int idx;
+	struct ufs_hba *hba;
+};
+
 static irqreturn_t ufs_qcom_mcq_esi_handler(int irq, void *data)
 {
-	struct msi_desc *desc = data;
-	struct device *dev = msi_desc_to_dev(desc);
-	struct ufs_hba *hba = dev_get_drvdata(dev);
-	u32 id = desc->msi_index;
-	struct ufs_hw_queue *hwq = &hba->uhq[id];
+	struct ufs_qcom_irq *qi = data;
+	struct ufs_hba *hba = qi->hba;
+	struct ufs_hw_queue *hwq = &hba->uhq[qi->idx];
 
-	ufshcd_mcq_write_cqis(hba, 0x1, id);
+	ufshcd_mcq_write_cqis(hba, 0x1, qi->idx);
 	ufshcd_mcq_poll_cqe_lock(hba, hwq);
 
 	return IRQ_HANDLED;
 }
 
+static void ufs_qcom_irq_free(struct ufs_qcom_irq *uqi)
+{
+	for (struct ufs_qcom_irq *q = uqi; q->irq; q++)
+		devm_free_irq(q->hba->dev, q->irq, q->hba);
+
+	platform_device_msi_free_irqs_all(uqi->hba->dev);
+	devm_kfree(uqi->hba->dev, uqi);
+}
+
+DEFINE_FREE(ufs_qcom_irq, struct ufs_qcom_irq *, if (_T) ufs_qcom_irq_free(_T))
+
 static int ufs_qcom_config_esi(struct ufs_hba *hba)
 {
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-	struct msi_desc *desc;
-	struct msi_desc *failed_desc = NULL;
 	int nr_irqs, ret;
 
 	if (host->esi_enabled)
@@ -1878,6 +2077,14 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
 	 * 2. Poll queues do not need ESI.
 	 */
 	nr_irqs = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
+
+	struct ufs_qcom_irq *qi __free(ufs_qcom_irq) =
+		devm_kcalloc(hba->dev, nr_irqs, sizeof(*qi), GFP_KERNEL);
+	if (!qi)
+		return -ENOMEM;
+	/* Preset so __free() has a pointer to hba in all error paths */
+	qi[0].hba = hba;
+
 	ret = platform_device_msi_init_and_alloc_irqs(hba->dev, nr_irqs,
 						      ufs_qcom_write_msi_msg);
 	if (ret) {
@@ -1885,48 +2092,80 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
 		return ret;
 	}
 
-	msi_lock_descs(hba->dev);
-	msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) {
-		ret = devm_request_irq(hba->dev, desc->irq,
-				       ufs_qcom_mcq_esi_handler,
-				       IRQF_SHARED, "qcom-mcq-esi", desc);
+	for (int idx = 0; idx < nr_irqs; idx++) {
+		qi[idx].irq = msi_get_virq(hba->dev, idx);
+		qi[idx].idx = idx;
+		qi[idx].hba = hba;
+
+		ret = devm_request_irq(hba->dev, qi[idx].irq, ufs_qcom_mcq_esi_handler,
+				       IRQF_SHARED, "qcom-mcq-esi", qi + idx);
 		if (ret) {
 			dev_err(hba->dev, "%s: Fail to request IRQ for %d, err = %d\n",
-				__func__, desc->irq, ret);
-			failed_desc = desc;
-			break;
+				__func__, qi[idx].irq, ret);
+			qi[idx].irq = 0;
+			return ret;
 		}
 	}
-	msi_unlock_descs(hba->dev);
 
-	if (ret) {
-		/* Rewind */
-		msi_lock_descs(hba->dev);
-		msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) {
-			if (desc == failed_desc)
-				break;
-			devm_free_irq(hba->dev, desc->irq, hba);
+	retain_and_null_ptr(qi);
+
+	if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 &&
+	    host->hw_ver.step == 0) {
+		ufshcd_rmwl(hba, ESI_VEC_MASK, FIELD_PREP(ESI_VEC_MASK, MAX_ESI_VEC - 1),
+			    REG_UFS_CFG3);
+	}
+	ufshcd_mcq_enable_esi(hba);
+	host->esi_enabled = true;
+	return 0;
+}
+
+static unsigned long ufs_qcom_opp_freq_to_clk_freq(struct ufs_hba *hba,
+						   unsigned long freq, char *name)
+{
+	struct ufs_clk_info *clki;
+	struct dev_pm_opp *opp;
+	unsigned long clk_freq;
+	int idx = 0;
+	bool found = false;
+
+	opp = dev_pm_opp_find_freq_exact_indexed(hba->dev, freq, 0, true);
+	if (IS_ERR(opp)) {
+		dev_err(hba->dev, "Failed to find OPP for exact frequency %lu\n", freq);
+		return 0;
+	}
+
+	list_for_each_entry(clki, &hba->clk_list_head, list) {
+		if (!strcmp(clki->name, name)) {
+			found = true;
 			break;
 		}
-		msi_unlock_descs(hba->dev);
-		platform_device_msi_free_irqs_all(hba->dev);
-	} else {
-		if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 &&
-		    host->hw_ver.step == 0)
-			ufshcd_rmwl(hba, ESI_VEC_MASK,
-				    FIELD_PREP(ESI_VEC_MASK, MAX_ESI_VEC - 1),
-				    REG_UFS_CFG3);
-		ufshcd_mcq_enable_esi(hba);
-		host->esi_enabled = true;
+
+		idx++;
 	}
 
-	return ret;
+	if (!found) {
+		dev_err(hba->dev, "Failed to find clock '%s' in clk list\n", name);
+		dev_pm_opp_put(opp);
+		return 0;
+	}
+
+	clk_freq = dev_pm_opp_get_freq_indexed(opp, idx);
+
+	dev_pm_opp_put(opp);
+
+	return clk_freq;
 }
 
 static u32 ufs_qcom_freq_to_gear_speed(struct ufs_hba *hba, unsigned long freq)
 {
-	u32 gear = 0;
+	u32 gear = UFS_HS_DONT_CHANGE;
+	unsigned long unipro_freq;
 
-	switch (freq) {
+	if (!hba->use_pm_opp)
+		return gear;
+
+	unipro_freq = ufs_qcom_opp_freq_to_clk_freq(hba, freq, "core_clk_unipro");
+	switch (unipro_freq) {
 	case 403000000:
 		gear = UFS_HS_G5;
 		break;
@@ -1946,10 +2185,10 @@ static u32 ufs_qcom_freq_to_gear_speed(struct ufs_hba *hba, unsigned long freq)
 		break;
 	default:
 		dev_err(hba->dev, "%s: Unsupported clock freq : %lu\n", __func__, freq);
-		break;
+		return UFS_HS_DONT_CHANGE;
 	}
 
-	return gear;
+	return min_t(u32, gear, hba->max_pwr_info.info.gear_rx);
 }
 
 /*
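Note on the <linux/cleanup.h> usage above: `u32 *testbus __free(kfree) = NULL;` ties kfree() to the variable's scope, and the ESI path builds the same idea by hand with `DEFINE_FREE(ufs_qcom_irq, ...)` plus `retain_and_null_ptr(qi)`, which keeps the allocation alive on the success path by nulling the pointer the cleanup handler would otherwise free. A minimal userspace sketch of the underlying mechanism, using the compiler `cleanup` attribute that the kernel macros wrap; `autofree_int`, `free_intp` and `demo` are illustrative names, not kernel API:

#include <stdio.h>
#include <stdlib.h>

/* Rough userspace stand-in for __free(kfree): runs on scope exit. */
static void free_intp(int **pp)
{
	free(*pp);              /* free(NULL) is a no-op, like kfree(NULL) */
	if (*pp)
		puts("cleanup: buffer released on scope exit");
}
#define autofree_int __attribute__((cleanup(free_intp)))

static int demo(int fail)
{
	autofree_int int *buf = malloc(16 * sizeof(*buf));

	if (!buf)
		return -1;
	if (fail)
		return -1;      /* error path: cleanup frees buf for us */

	/* Success path: transfer ownership, mirroring retain_and_null_ptr(). */
	int *keep = buf;
	buf = NULL;             /* cleanup now sees NULL and does nothing */
	printf("kept buffer at %p\n", (void *)keep);
	free(keep);
	return 0;
}

int main(void)
{
	demo(1);        /* exercises the automatic-free path */
	demo(0);        /* exercises the ownership-transfer path */
	return 0;
}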
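The reworked ufs_qcom_cfg_timers() funnels every source of the core clock rate (OPP table, clki->max_freq, or a live clk_get_rate()) into one computation: clamp to at least 1 MHz, then divide by USEC_PER_SEC to get the cycles-per-microsecond value written to REG_UFS_SYS1CLK_1US. A self-contained sketch of just that arithmetic; the function name cycles_per_us is illustrative:

#include <stdio.h>

#define DEFAULT_CLK_RATE_HZ 1000000UL   /* 1 MHz floor, as in the driver */
#define USEC_PER_SEC        1000000UL

static unsigned int cycles_per_us(unsigned long clk_freq)
{
	/* If frequency is smaller than 1 MHz, set it to 1 MHz. */
	if (clk_freq < DEFAULT_CLK_RATE_HZ)
		clk_freq = DEFAULT_CLK_RATE_HZ;

	return clk_freq / USEC_PER_SEC;
}

int main(void)
{
	/* A 403 MHz core clock yields 403 cycles per us; 0 Hz clamps to 1. */
	printf("%u %u\n", cycles_per_us(403000000UL), cycles_per_us(0));
	return 0;
}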
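ufs_qcom_dump_regs() rejects unaligned windows with -EINVAL, snapshots the MMIO region word by word into a heap buffer, and only then calls print_hex_dump(), so formatting never re-reads device registers. A compilable userspace sketch of that snapshot-then-print shape, with a plain array standing in for the hba->res[id].base register window; fake_regs and dump_window are made-up names:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for an MMIO register window (hba->res[id].base). */
static const uint32_t fake_regs[8] = {
	0xdeadbeef, 0x00000001, 0x12345678, 0x00000000,
	0xcafef00d, 0xa5a5a5a5, 0x0000ffff, 0x00000007,
};

static int dump_window(const uint32_t *base, size_t offset, size_t len,
		       const char *prefix)
{
	/* Mirror the driver's alignment check. */
	if (offset % 4 != 0 || len % 4 != 0)
		return -1;

	uint32_t *regs = malloc(len);
	if (!regs)
		return -1;

	/* Snapshot first, print second - one read per 32-bit word. */
	for (size_t pos = 0; pos < len; pos += 4)
		regs[pos / 4] = base[(offset + pos) / 4];

	for (size_t i = 0; i < len / 4; i++)
		printf("%s%08zx: %08x\n", prefix, offset + i * 4, regs[i]);

	free(regs);
	return 0;
}

int main(void)
{
	return dump_window(fake_regs, 0, sizeof(fake_regs), "MCQ HCI-0 ");
}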
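ufs_qcom_opp_freq_to_clk_freq() relies on a positional contract: the position of the named clock in hba->clk_list_head doubles as the column index into the matched OPP entry, via dev_pm_opp_get_freq_indexed(). A toy sketch of that find-index-then-indexed-lookup pattern; the clock names and the single OPP row below are invented for illustration and do not come from the driver:

#include <stdio.h>
#include <string.h>

/* Clock names in list order; each OPP row carries one frequency per clock. */
static const char *const clk_names[] = {
	"core_clk", "bus_aggr_clk", "iface_clk", "core_clk_unipro",
};
static const unsigned long opp_row[] = {
	300000000, 75000000, 150000000, 403000000,
};

static unsigned long opp_freq_for_clk(const char *name)
{
	for (size_t idx = 0; idx < sizeof(clk_names) / sizeof(clk_names[0]); idx++)
		if (!strcmp(clk_names[idx], name))
			return opp_row[idx];    /* column idx of the matched OPP entry */

	return 0;       /* not found: caller falls back, as the driver does */
}

int main(void)
{
	printf("core_clk_unipro -> %lu Hz\n", opp_freq_for_clk("core_clk_unipro"));
	return 0;
}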