Diffstat (limited to 'drivers/ufs/host/ufs-exynos.c')
-rw-r--r--  drivers/ufs/host/ufs-exynos.c  490
1 file changed, 390 insertions(+), 100 deletions(-)
diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c
index 88d125d1ee3c..70d195179eba 100644
--- a/drivers/ufs/host/ufs-exynos.c
+++ b/drivers/ufs/host/ufs-exynos.c
@@ -8,6 +8,9 @@
*
*/
+#include <linux/unaligned.h>
+#include <crypto/aes.h>
+#include <linux/arm-smccc.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/module.h>
@@ -25,12 +28,13 @@
#include "ufs-exynos.h"
+#define DATA_UNIT_SIZE 4096
+
/*
* Exynos's Vendor specific registers for UFSHCI
*/
#define HCI_TXPRDT_ENTRY_SIZE 0x00
-#define PRDT_PREFECT_EN BIT(31)
-#define PRDT_SET_SIZE(x) ((x) & 0x1F)
+#define PRDT_PREFETCH_EN BIT(31)
#define HCI_RXPRDT_ENTRY_SIZE 0x04
#define HCI_1US_TO_CNT_VAL 0x0C
#define CNT_VAL_1US_MASK 0x3FF
@@ -44,6 +48,8 @@
#define HCI_UNIPRO_APB_CLK_CTRL 0x68
#define UNIPRO_APB_CLK(v, x) (((v) & ~0xF) | ((x) & 0xF))
#define HCI_AXIDMA_RWDATA_BURST_LEN 0x6C
+#define WLU_EN BIT(31)
+#define WLU_BURST_LEN(x) ((x) << 27 | ((x) & 0xF))
#define HCI_GPIO_OUT 0x70
#define HCI_ERR_EN_PA_LAYER 0x78
#define HCI_ERR_EN_DL_LAYER 0x7C
@@ -70,6 +76,10 @@
#define CLK_CTRL_EN_MASK (REFCLK_CTRL_EN |\
UNIPRO_PCLK_CTRL_EN |\
UNIPRO_MCLK_CTRL_EN)
+
+#define HCI_IOP_ACG_DISABLE 0x100
+#define HCI_IOP_ACG_DISABLE_EN BIT(0)
+
/* Device fatal error */
#define DFES_ERR_EN BIT(31)
#define DFES_DEF_L2_ERRS (UIC_DATA_LINK_LAYER_ERROR_RX_BUF_OF |\
@@ -82,11 +92,16 @@
UIC_TRANSPORT_NO_CONNECTION_RX |\
UIC_TRANSPORT_BAD_TC)
-/* FSYS UFS Shareability */
-#define UFS_WR_SHARABLE BIT(2)
-#define UFS_RD_SHARABLE BIT(1)
-#define UFS_SHARABLE (UFS_WR_SHARABLE | UFS_RD_SHARABLE)
-#define UFS_SHAREABILITY_OFFSET 0x710
+/* UFS Shareability */
+#define UFS_EXYNOSAUTO_WR_SHARABLE BIT(2)
+#define UFS_EXYNOSAUTO_RD_SHARABLE BIT(1)
+#define UFS_EXYNOSAUTO_SHARABLE (UFS_EXYNOSAUTO_WR_SHARABLE | \
+ UFS_EXYNOSAUTO_RD_SHARABLE)
+#define UFS_GS101_WR_SHARABLE BIT(1)
+#define UFS_GS101_RD_SHARABLE BIT(0)
+#define UFS_GS101_SHARABLE (UFS_GS101_WR_SHARABLE | \
+ UFS_GS101_RD_SHARABLE)
+#define UFS_SHAREABILITY_OFFSET 0x710
/* Multi-host registers */
#define MHCTRL 0xC4
@@ -194,25 +209,39 @@ static inline void exynos_ufs_ungate_clks(struct exynos_ufs *ufs)
exynos_ufs_ctrl_clkstop(ufs, false);
}
-static int exynos7_ufs_drv_init(struct device *dev, struct exynos_ufs *ufs)
+static int exynos_ufs_shareability(struct exynos_ufs *ufs)
{
+ /* IO Coherency setting */
+ if (ufs->sysreg) {
+ return regmap_update_bits(ufs->sysreg,
+ ufs->iocc_offset,
+ ufs->iocc_mask, ufs->iocc_val);
+ }
+
return 0;
}
-static int exynosauto_ufs_drv_init(struct device *dev, struct exynos_ufs *ufs)
+static int gs101_ufs_drv_init(struct exynos_ufs *ufs)
{
- struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
+ struct ufs_hba *hba = ufs->hba;
+ u32 reg;
- /* IO Coherency setting */
- if (ufs->sysreg) {
- return regmap_update_bits(ufs->sysreg,
- ufs->shareability_reg_offset,
- UFS_SHARABLE, UFS_SHARABLE);
- }
+ /* Enable WriteBooster */
+ hba->caps |= UFSHCD_CAP_WB_EN;
- attr->tx_dif_p_nsec = 3200000;
+ /* Enable clock gating and hibern8 */
+ hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
- return 0;
+ /* set ACG to be controlled by UFS_ACG_DISABLE */
+ reg = hci_readl(ufs, HCI_IOP_ACG_DISABLE);
+ hci_writel(ufs, reg & (~HCI_IOP_ACG_DISABLE_EN), HCI_IOP_ACG_DISABLE);
+
+ return exynos_ufs_shareability(ufs);
+}
+
+static int exynosauto_ufs_drv_init(struct exynos_ufs *ufs)
+{
+ return exynos_ufs_shareability(ufs);
}
static int exynosauto_ufs_post_hce_enable(struct exynos_ufs *ufs)
@@ -297,7 +326,7 @@ static int exynosauto_ufs_pre_pwr_change(struct exynos_ufs *ufs,
}
static int exynosauto_ufs_post_pwr_change(struct exynos_ufs *ufs,
- struct ufs_pa_layer_attr *pwr)
+ const struct ufs_pa_layer_attr *pwr)
{
struct ufs_hba *hba = ufs->hba;
u32 enabled_vh;
@@ -372,7 +401,7 @@ static int exynos7_ufs_pre_pwr_change(struct exynos_ufs *ufs,
}
static int exynos7_ufs_post_pwr_change(struct exynos_ufs *ufs,
- struct ufs_pa_layer_attr *pwr)
+ const struct ufs_pa_layer_attr *pwr)
{
struct ufs_hba *hba = ufs->hba;
int lanes = max_t(u32, pwr->lane_rx, pwr->lane_tx);
@@ -542,6 +571,9 @@ static void exynos_ufs_specify_phy_time_attr(struct exynos_ufs *ufs)
struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
struct ufs_phy_time_cfg *t_cfg = &ufs->t_cfg;
+ if (ufs->opts & EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR)
+ return;
+
t_cfg->tx_linereset_p =
exynos_ufs_calc_time_cntr(ufs, attr->tx_dif_p_nsec);
t_cfg->tx_linereset_n =
@@ -720,6 +752,9 @@ static void exynos_ufs_config_smu(struct exynos_ufs *ufs)
{
u32 reg, val;
+ if (ufs->opts & EXYNOS_UFS_OPT_UFSPR_SECURE)
+ return;
+
exynos_ufs_disable_auto_ctrl_hcc_save(ufs, &val);
/* make encryption disabled by default */
@@ -741,7 +776,7 @@ static void exynos_ufs_config_sync_pattern_mask(struct exynos_ufs *ufs,
u32 mask, sync_len;
enum {
SYNC_LEN_G1 = 80 * 1000, /* 80us */
- SYNC_LEN_G2 = 40 * 1000, /* 44us */
+ SYNC_LEN_G2 = 40 * 1000, /* 40us */
SYNC_LEN_G3 = 20 * 1000, /* 20us */
};
int i;
@@ -767,8 +802,23 @@ static void exynos_ufs_config_sync_pattern_mask(struct exynos_ufs *ufs,
exynos_ufs_disable_ov_tm(hba);
}
+#define UFS_HW_VER_MAJOR_MASK GENMASK(15, 8)
+
+static u32 exynos_ufs_get_hs_gear(struct ufs_hba *hba)
+{
+ u8 major;
+
+ major = FIELD_GET(UFS_HW_VER_MAJOR_MASK, hba->ufs_version);
+
+ if (major >= 3)
+ return UFS_HS_G4;
+
+ /* Default is HS-G3 */
+ return UFS_HS_G3;
+}
+
static int exynos_ufs_pre_pwr_mode(struct ufs_hba *hba,
- struct ufs_pa_layer_attr *dev_max_params,
+ const struct ufs_pa_layer_attr *dev_max_params,
struct ufs_pa_layer_attr *dev_req_params)
{
struct exynos_ufs *ufs = ufshcd_get_variant(hba);
@@ -784,6 +834,10 @@ static int exynos_ufs_pre_pwr_mode(struct ufs_hba *hba,
ufshcd_init_host_params(&host_params);
+ /* This driver only supports symmetric gear settings, i.e. hs_tx_gear == hs_rx_gear */
+ host_params.hs_tx_gear = exynos_ufs_get_hs_gear(hba);
+ host_params.hs_rx_gear = exynos_ufs_get_hs_gear(hba);
+
ret = ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params);
if (ret) {
pr_err("%s: failed to determine capabilities\n", __func__);
@@ -816,7 +870,7 @@ out:
#define PWR_MODE_STR_LEN 64
static int exynos_ufs_post_pwr_mode(struct ufs_hba *hba,
- struct ufs_pa_layer_attr *pwr_req)
+ const struct ufs_pa_layer_attr *pwr_req)
{
struct exynos_ufs *ufs = ufshcd_get_variant(hba);
struct phy *generic_phy = ufs->phy;
@@ -908,6 +962,12 @@ static int exynos_ufs_phy_init(struct exynos_ufs *ufs)
}
phy_set_bus_width(generic_phy, ufs->avail_ln_rx);
+
+ if (generic_phy->power_count) {
+ phy_power_off(generic_phy);
+ phy_exit(generic_phy);
+ }
+
ret = phy_init(generic_phy);
if (ret) {
dev_err(hba->dev, "%s: phy init failed, ret = %d\n",
@@ -1000,9 +1060,14 @@ static int exynos_ufs_pre_link(struct ufs_hba *hba)
exynos_ufs_config_intr(ufs, DFES_DEF_L4_ERRS, UNIPRO_L4);
exynos_ufs_set_unipro_pclk_div(ufs);
+ exynos_ufs_setup_clocks(hba, true, PRE_CHANGE);
+
/* unipro */
exynos_ufs_config_unipro(ufs);
+ if (ufs->drv_data->pre_link)
+ ufs->drv_data->pre_link(ufs);
+
/* m-phy */
exynos_ufs_phy_init(ufs);
if (!(ufs->opts & EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR)) {
@@ -1010,11 +1075,6 @@ static int exynos_ufs_pre_link(struct ufs_hba *hba)
exynos_ufs_config_phy_cap_attr(ufs);
}
- exynos_ufs_setup_clocks(hba, true, PRE_CHANGE);
-
- if (ufs->drv_data->pre_link)
- ufs->drv_data->pre_link(ufs);
-
return 0;
}
@@ -1038,15 +1098,20 @@ static int exynos_ufs_post_link(struct ufs_hba *hba)
struct exynos_ufs *ufs = ufshcd_get_variant(hba);
struct phy *generic_phy = ufs->phy;
struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
+ u32 val = ilog2(DATA_UNIT_SIZE);
exynos_ufs_establish_connt(ufs);
exynos_ufs_fit_aggr_timeout(ufs);
hci_writel(ufs, 0xa, HCI_DATA_REORDER);
- hci_writel(ufs, PRDT_SET_SIZE(12), HCI_TXPRDT_ENTRY_SIZE);
- hci_writel(ufs, PRDT_SET_SIZE(12), HCI_RXPRDT_ENTRY_SIZE);
- hci_writel(ufs, (1 << hba->nutrs) - 1, HCI_UTRL_NEXUS_TYPE);
- hci_writel(ufs, (1 << hba->nutmrs) - 1, HCI_UTMRL_NEXUS_TYPE);
+
+ if (hba->caps & UFSHCD_CAP_CRYPTO)
+ val |= PRDT_PREFETCH_EN;
+ hci_writel(ufs, val, HCI_TXPRDT_ENTRY_SIZE);
+
+ hci_writel(ufs, ilog2(DATA_UNIT_SIZE), HCI_RXPRDT_ENTRY_SIZE);
+ hci_writel(ufs, BIT(hba->nutrs) - 1, HCI_UTRL_NEXUS_TYPE);
+ hci_writel(ufs, BIT(hba->nutmrs) - 1, HCI_UTMRL_NEXUS_TYPE);
hci_writel(ufs, 0xf, HCI_AXIDMA_RWDATA_BURST_LEN);
if (ufs->opts & EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB)
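
Aside (not part of the patch): the TX/RX PRDT entry-size registers take a log2 value, so writing ilog2(DATA_UNIT_SIZE) with DATA_UNIT_SIZE = 4096 is exactly the 12 that the removed PRDT_SET_SIZE(12) used to write; when UFSHCD_CAP_CRYPTO is set, PRDT_PREFETCH_EN (bit 31) is OR'd into the TX value. A quick plain-C check of that arithmetic:

/* Standalone check of the values programmed above. */
#include <assert.h>
#include <stdint.h>

#define DATA_UNIT_SIZE   4096u
#define PRDT_PREFETCH_EN (1u << 31)

/* Minimal ilog2() for powers of two, standing in for the kernel helper. */
static unsigned int ilog2_u32(uint32_t x)
{
	unsigned int r = 0;

	while (x >>= 1)
		r++;
	return r;
}

int main(void)
{
	uint32_t val = ilog2_u32(DATA_UNIT_SIZE);

	assert(val == 12);                               /* matches old PRDT_SET_SIZE(12) */
	assert((val | PRDT_PREFETCH_EN) == 0x8000000cu); /* TX value with crypto enabled */
	return 0;
}
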
@@ -1119,12 +1184,22 @@ static int exynos_ufs_parse_dt(struct device *dev, struct exynos_ufs *ufs)
ufs->sysreg = NULL;
else {
if (of_property_read_u32_index(np, "samsung,sysreg", 1,
- &ufs->shareability_reg_offset)) {
+ &ufs->iocc_offset)) {
dev_warn(dev, "can't get an offset from sysreg. Set to default value\n");
- ufs->shareability_reg_offset = UFS_SHAREABILITY_OFFSET;
+ ufs->iocc_offset = UFS_SHAREABILITY_OFFSET;
}
}
+ ufs->iocc_mask = ufs->drv_data->iocc_mask;
+ /*
+ * no 'dma-coherent' property means the descriptors are
+ * non-cacheable so iocc shareability should be disabled.
+ */
+ if (of_dma_is_coherent(dev->of_node))
+ ufs->iocc_val = ufs->iocc_mask;
+ else
+ ufs->iocc_val = 0;
+
ufs->pclk_avail_min = PCLK_AVAIL_MIN;
ufs->pclk_avail_max = PCLK_AVAIL_MAX;
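
Aside (not part of the patch): the new iocc_offset/iocc_mask/iocc_val trio boils down to one masked sysreg update in exynos_ufs_shareability(): the shareability bits are set when the DT node is dma-coherent and cleared otherwise. A plain-C sketch with the gs101 masks defined above:

/* Standalone sketch of the shareability programming (gs101 bit layout). */
#include <stdint.h>
#include <stdio.h>

#define UFS_GS101_WR_SHARABLE (1u << 1)
#define UFS_GS101_RD_SHARABLE (1u << 0)
#define UFS_GS101_SHARABLE    (UFS_GS101_WR_SHARABLE | UFS_GS101_RD_SHARABLE)

/* Mimics regmap_update_bits(): clear the masked field, then set new bits. */
static uint32_t update_bits(uint32_t reg, uint32_t mask, uint32_t val)
{
	return (reg & ~mask) | (val & mask);
}

int main(void)
{
	uint32_t mask = UFS_GS101_SHARABLE;

	/* 'dma-coherent' present: iocc_val == iocc_mask -> field reads 0x3 */
	printf("coherent:     0x%x\n", update_bits(0x0, mask, mask));
	/* no 'dma-coherent': iocc_val == 0 -> field is cleared */
	printf("non-coherent: 0x%x\n", update_bits(0x3, mask, 0));
	return 0;
}
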
@@ -1151,6 +1226,231 @@ static inline void exynos_ufs_priv_init(struct ufs_hba *hba,
hba->quirks = ufs->drv_data->quirks;
}
+#ifdef CONFIG_SCSI_UFS_CRYPTO
+
+/*
+ * Support for Flash Memory Protector (FMP), which is the inline encryption
+ * hardware on Exynos and Exynos-based SoCs. The interface to this hardware is
+ * not compatible with the standard UFS crypto. It requires that encryption be
+ * configured in the PRDT using a nonstandard extension.
+ */
+
+enum fmp_crypto_algo_mode {
+ FMP_BYPASS_MODE = 0,
+ FMP_ALGO_MODE_AES_CBC = 1,
+ FMP_ALGO_MODE_AES_XTS = 2,
+};
+enum fmp_crypto_key_length {
+ FMP_KEYLEN_256BIT = 1,
+};
+
+/**
+ * struct fmp_sg_entry - nonstandard format of PRDT entries when FMP is enabled
+ *
+ * @base: The standard PRDT entry, but with nonstandard bitfields in the high
+ * bits of the 'size' field, i.e. the last 32-bit word. When these
+ * nonstandard bitfields are zero, the data segment won't be encrypted or
+ * decrypted. Otherwise they specify the algorithm and key length with
+ * which the data segment will be encrypted or decrypted.
+ * @file_iv: The initialization vector (IV) with all bytes reversed
+ * @file_enckey: The first half of the AES-XTS key with all bytes reversed
+ * @file_twkey: The second half of the AES-XTS key with all bytes reversed
+ * @disk_iv: Unused
+ * @reserved: Unused
+ */
+struct fmp_sg_entry {
+ struct ufshcd_sg_entry base;
+ __be64 file_iv[2];
+ __be64 file_enckey[4];
+ __be64 file_twkey[4];
+ __be64 disk_iv[2];
+ __be64 reserved[2];
+};
+
+#define SMC_CMD_FMP_SECURITY \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_SIP, 0x1810)
+#define SMC_CMD_SMU \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_SIP, 0x1850)
+#define SMC_CMD_FMP_SMU_RESUME \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_SIP, 0x1860)
+#define SMU_EMBEDDED 0
+#define SMU_INIT 0
+#define CFG_DESCTYPE_3 3
+
+static void exynos_ufs_fmp_init(struct ufs_hba *hba, struct exynos_ufs *ufs)
+{
+ struct blk_crypto_profile *profile = &hba->crypto_profile;
+ struct arm_smccc_res res;
+ int err;
+
+ /*
+ * Check for the standard crypto support bit, since it's available even
+ * though the rest of the interface to FMP is nonstandard.
+ *
+ * This check should have the effect of preventing the driver from
+ * trying to use FMP on old Exynos SoCs that don't have FMP.
+ */
+ if (!(ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES) &
+ MASK_CRYPTO_SUPPORT))
+ return;
+
+ /*
+ * The below sequence of SMC calls to enable FMP can be found in the
+ * downstream driver source for gs101 and other Exynos-based SoCs. It
+ * is the only way to enable FMP that works on SoCs such as gs101 that
+ * don't make the FMP registers accessible to Linux. It probably works
+ * on other Exynos-based SoCs too, and might even still be the only way
+ * that works. But this hasn't been properly tested, and this code is
+ * mutually exclusive with exynos_ufs_config_smu(). So for now only
+ * enable FMP support on SoCs with EXYNOS_UFS_OPT_UFSPR_SECURE.
+ */
+ if (!(ufs->opts & EXYNOS_UFS_OPT_UFSPR_SECURE))
+ return;
+
+ /*
+ * This call (which sets DESCTYPE to 0x3 in the FMPSECURITY0 register)
+ * is needed to make the hardware use the larger PRDT entry size.
+ */
+ BUILD_BUG_ON(sizeof(struct fmp_sg_entry) != 128);
+ arm_smccc_smc(SMC_CMD_FMP_SECURITY, 0, SMU_EMBEDDED, CFG_DESCTYPE_3,
+ 0, 0, 0, 0, &res);
+ if (res.a0) {
+ dev_warn(hba->dev,
+ "SMC_CMD_FMP_SECURITY failed on init: %ld. Disabling FMP support.\n",
+ res.a0);
+ return;
+ }
+ ufshcd_set_sg_entry_size(hba, sizeof(struct fmp_sg_entry));
+
+ /*
+ * This is needed to initialize FMP. Without it, errors occur when
+ * inline encryption is used.
+ */
+ arm_smccc_smc(SMC_CMD_SMU, SMU_INIT, SMU_EMBEDDED, 0, 0, 0, 0, 0, &res);
+ if (res.a0) {
+ dev_err(hba->dev,
+ "SMC_CMD_SMU(SMU_INIT) failed: %ld. Disabling FMP support.\n",
+ res.a0);
+ return;
+ }
+
+ /* Advertise crypto capabilities to the block layer. */
+ err = devm_blk_crypto_profile_init(hba->dev, profile, 0);
+ if (err) {
+ /* Only ENOMEM should be possible here. */
+ dev_err(hba->dev, "Failed to initialize crypto profile: %d\n",
+ err);
+ return;
+ }
+ profile->max_dun_bytes_supported = AES_BLOCK_SIZE;
+ profile->key_types_supported = BLK_CRYPTO_KEY_TYPE_RAW;
+ profile->dev = hba->dev;
+ profile->modes_supported[BLK_ENCRYPTION_MODE_AES_256_XTS] =
+ DATA_UNIT_SIZE;
+
+ /* Advertise crypto support to ufshcd-core. */
+ hba->caps |= UFSHCD_CAP_CRYPTO;
+
+ /* Advertise crypto quirks to ufshcd-core. */
+ hba->quirks |= UFSHCD_QUIRK_CUSTOM_CRYPTO_PROFILE |
+ UFSHCD_QUIRK_BROKEN_CRYPTO_ENABLE |
+ UFSHCD_QUIRK_KEYS_IN_PRDT;
+
+}
+
+static void exynos_ufs_fmp_resume(struct ufs_hba *hba)
+{
+ struct arm_smccc_res res;
+
+ if (!(hba->caps & UFSHCD_CAP_CRYPTO))
+ return;
+
+ arm_smccc_smc(SMC_CMD_FMP_SECURITY, 0, SMU_EMBEDDED, CFG_DESCTYPE_3,
+ 0, 0, 0, 0, &res);
+ if (res.a0)
+ dev_err(hba->dev,
+ "SMC_CMD_FMP_SECURITY failed on resume: %ld\n", res.a0);
+
+ arm_smccc_smc(SMC_CMD_FMP_SMU_RESUME, 0, SMU_EMBEDDED, 0, 0, 0, 0, 0,
+ &res);
+ if (res.a0)
+ dev_err(hba->dev,
+ "SMC_CMD_FMP_SMU_RESUME failed: %ld\n", res.a0);
+}
+
+static inline __be64 fmp_key_word(const u8 *key, int j)
+{
+ return cpu_to_be64(get_unaligned_le64(
+ key + AES_KEYSIZE_256 - (j + 1) * sizeof(u64)));
+}
+
+/* Fill the PRDT for a request according to the given encryption context. */
+static int exynos_ufs_fmp_fill_prdt(struct ufs_hba *hba,
+ const struct bio_crypt_ctx *crypt_ctx,
+ void *prdt, unsigned int num_segments)
+{
+ struct fmp_sg_entry *fmp_prdt = prdt;
+ const u8 *enckey = crypt_ctx->bc_key->bytes;
+ const u8 *twkey = enckey + AES_KEYSIZE_256;
+ u64 dun_lo = crypt_ctx->bc_dun[0];
+ u64 dun_hi = crypt_ctx->bc_dun[1];
+ unsigned int i;
+
+ /* If FMP wasn't enabled, we shouldn't get any encrypted requests. */
+ if (WARN_ON_ONCE(!(hba->caps & UFSHCD_CAP_CRYPTO)))
+ return -EIO;
+
+ /* Configure FMP on each segment of the request. */
+ for (i = 0; i < num_segments; i++) {
+ struct fmp_sg_entry *prd = &fmp_prdt[i];
+ int j;
+
+ /* Each segment must be exactly one data unit. */
+ if (prd->base.size != cpu_to_le32(DATA_UNIT_SIZE - 1)) {
+ dev_err(hba->dev,
+ "data segment is misaligned for FMP\n");
+ return -EIO;
+ }
+
+ /* Set the algorithm and key length. */
+ prd->base.size |= cpu_to_le32((FMP_ALGO_MODE_AES_XTS << 28) |
+ (FMP_KEYLEN_256BIT << 26));
+
+ /* Set the IV. */
+ prd->file_iv[0] = cpu_to_be64(dun_hi);
+ prd->file_iv[1] = cpu_to_be64(dun_lo);
+
+ /* Set the key. */
+ for (j = 0; j < AES_KEYSIZE_256 / sizeof(u64); j++) {
+ prd->file_enckey[j] = fmp_key_word(enckey, j);
+ prd->file_twkey[j] = fmp_key_word(twkey, j);
+ }
+
+ /* Increment the data unit number. */
+ dun_lo++;
+ if (dun_lo == 0)
+ dun_hi++;
+ }
+ return 0;
+}
+
+#else /* CONFIG_SCSI_UFS_CRYPTO */
+
+static void exynos_ufs_fmp_init(struct ufs_hba *hba, struct exynos_ufs *ufs)
+{
+}
+
+static void exynos_ufs_fmp_resume(struct ufs_hba *hba)
+{
+}
+
+#define exynos_ufs_fmp_fill_prdt NULL
+
+#endif /* !CONFIG_SCSI_UFS_CRYPTO */
+
static int exynos_ufs_init(struct ufs_hba *hba)
{
struct device *dev = hba->dev;
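
Aside (not part of the patch): sizeof(struct fmp_sg_entry) works out to 128 bytes (16-byte standard descriptor + 16 IV + 32 + 32 key halves + 16 + 16 unused), which is what the BUILD_BUG_ON and the ufshcd_set_sg_entry_size() call above rely on. The key halves land in the PRDT with their byte order fully reversed; a plain-C model of what the fmp_key_word() loop produces for one 32-byte half:

/* Standalone model of fmp_key_word(): word j is the little-endian load of
 * bytes key[32 - 8*(j+1) .. 32 - 8*j - 1] stored big-endian, so the whole
 * 32-byte half comes out byte-reversed (key[31] first, key[0] last).
 */
#include <assert.h>
#include <stdint.h>

#define AES_KEYSIZE_256 32

static void fmp_reverse_key_half(const uint8_t *key, uint8_t out[AES_KEYSIZE_256])
{
	for (int i = 0; i < AES_KEYSIZE_256; i++)
		out[i] = key[AES_KEYSIZE_256 - 1 - i];
}

int main(void)
{
	uint8_t key[AES_KEYSIZE_256], out[AES_KEYSIZE_256];

	for (int i = 0; i < AES_KEYSIZE_256; i++)
		key[i] = (uint8_t)i;
	fmp_reverse_key_half(key, out);
	assert(out[0] == 31 && out[31] == 0);
	return 0;
}

The 16-byte IV is handled analogously: file_iv[] holds the 128-bit data unit number big-endian with the high word first, and the DUN is incremented once per 4096-byte segment as the PRDT is filled.
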
@@ -1198,8 +1498,10 @@ static int exynos_ufs_init(struct ufs_hba *hba)
exynos_ufs_priv_init(hba, ufs);
+ exynos_ufs_fmp_init(hba, ufs);
+
if (ufs->drv_data->drv_init) {
- ret = ufs->drv_data->drv_init(dev, ufs);
+ ret = ufs->drv_data->drv_init(ufs);
if (ret) {
dev_err(dev, "failed to init drv-data\n");
goto out;
@@ -1210,10 +1512,10 @@ static int exynos_ufs_init(struct ufs_hba *hba)
if (ret)
goto out;
exynos_ufs_specify_phy_time_attr(ufs);
- if (!(ufs->opts & EXYNOS_UFS_OPT_UFSPR_SECURE))
- exynos_ufs_config_smu(ufs);
- hba->host->dma_alignment = SZ_4K - 1;
+ exynos_ufs_config_smu(ufs);
+
+ hba->host->dma_alignment = DATA_UNIT_SIZE - 1;
return 0;
out:
@@ -1221,6 +1523,14 @@ out:
return ret;
}
+static void exynos_ufs_exit(struct ufs_hba *hba)
+{
+ struct exynos_ufs *ufs = ufshcd_get_variant(hba);
+
+ phy_power_off(ufs->phy);
+ phy_exit(ufs->phy);
+}
+
static int exynos_ufs_host_reset(struct ufs_hba *hba)
{
struct exynos_ufs *ufs = ufshcd_get_variant(hba);
@@ -1254,12 +1564,12 @@ static void exynos_ufs_dev_hw_reset(struct ufs_hba *hba)
hci_writel(ufs, 1 << 0, HCI_GPIO_OUT);
}
-static void exynos_ufs_pre_hibern8(struct ufs_hba *hba, u8 enter)
+static void exynos_ufs_pre_hibern8(struct ufs_hba *hba, enum uic_cmd_dme cmd)
{
struct exynos_ufs *ufs = ufshcd_get_variant(hba);
struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
- if (!enter) {
+ if (cmd == UIC_CMD_DME_HIBER_EXIT) {
if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL)
exynos_ufs_disable_auto_ctrl_hcc(ufs);
exynos_ufs_ungate_clks(ufs);
@@ -1287,30 +1597,11 @@ static void exynos_ufs_pre_hibern8(struct ufs_hba *hba, u8 enter)
}
}
-static void exynos_ufs_post_hibern8(struct ufs_hba *hba, u8 enter)
+static void exynos_ufs_post_hibern8(struct ufs_hba *hba, enum uic_cmd_dme cmd)
{
struct exynos_ufs *ufs = ufshcd_get_variant(hba);
- if (!enter) {
- u32 cur_mode = 0;
- u32 pwrmode;
-
- if (ufshcd_is_hs_mode(&ufs->dev_req_params))
- pwrmode = FAST_MODE;
- else
- pwrmode = SLOW_MODE;
-
- ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &cur_mode);
- if (cur_mode != (pwrmode << 4 | pwrmode)) {
- dev_warn(hba->dev, "%s: power mode change\n", __func__);
- hba->pwr_info.pwr_rx = (cur_mode >> 4) & 0xf;
- hba->pwr_info.pwr_tx = cur_mode & 0xf;
- ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
- }
-
- if (!(ufs->opts & EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB))
- exynos_ufs_establish_connt(ufs);
- } else {
+ if (cmd == UIC_CMD_DME_HIBER_ENTER) {
ufs->entry_hibern8_t = ktime_get();
exynos_ufs_gate_clks(ufs);
if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL)
@@ -1332,7 +1623,7 @@ static int exynos_ufs_hce_enable_notify(struct ufs_hba *hba,
* (ufshcd_async_scan()). Note: this callback may also be called
* from other functions than ufshcd_init().
*/
- hba->host->max_segment_size = SZ_4K;
+ hba->host->max_segment_size = DATA_UNIT_SIZE;
if (ufs->drv_data->pre_hce_enable) {
ret = ufs->drv_data->pre_hce_enable(ufs);
@@ -1378,7 +1669,7 @@ static int exynos_ufs_link_startup_notify(struct ufs_hba *hba,
static int exynos_ufs_pwr_change_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status,
- struct ufs_pa_layer_attr *dev_max_params,
+ const struct ufs_pa_layer_attr *dev_max_params,
struct ufs_pa_layer_attr *dev_req_params)
{
int ret = 0;
@@ -1397,19 +1688,25 @@ static int exynos_ufs_pwr_change_notify(struct ufs_hba *hba,
}
static void exynos_ufs_hibern8_notify(struct ufs_hba *hba,
- enum uic_cmd_dme enter,
+ enum uic_cmd_dme cmd,
enum ufs_notify_change_status notify)
{
switch ((u8)notify) {
case PRE_CHANGE:
- exynos_ufs_pre_hibern8(hba, enter);
+ exynos_ufs_pre_hibern8(hba, cmd);
break;
case POST_CHANGE:
- exynos_ufs_post_hibern8(hba, enter);
+ exynos_ufs_post_hibern8(hba, cmd);
break;
}
}
+static int gs101_ufs_suspend(struct exynos_ufs *ufs)
+{
+ hci_writel(ufs, 0 << 0, HCI_GPIO_OUT);
+ return 0;
+}
+
static int exynos_ufs_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
enum ufs_notify_change_status status)
{
@@ -1418,6 +1715,9 @@ static int exynos_ufs_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
if (status == PRE_CHANGE)
return 0;
+ if (ufs->drv_data->suspend)
+ ufs->drv_data->suspend(ufs);
+
if (!ufshcd_is_link_active(hba))
phy_power_off(ufs->phy);
@@ -1432,7 +1732,7 @@ static int exynos_ufs_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
phy_power_on(ufs->phy);
exynos_ufs_config_smu(ufs);
-
+ exynos_ufs_fmp_resume(hba);
return 0;
}
@@ -1596,6 +1896,13 @@ static int fsd_ufs_pre_pwr_change(struct exynos_ufs *ufs,
return 0;
}
+static int fsd_ufs_suspend(struct exynos_ufs *ufs)
+{
+ exynos_ufs_gate_clks(ufs);
+ hci_writel(ufs, 0, HCI_GPIO_OUT);
+ return 0;
+}
+
static inline u32 get_mclk_period_unipro_18(struct exynos_ufs *ufs)
{
return (16 * 1000 * 1000000UL / ufs->mclk_rate);
@@ -1661,6 +1968,12 @@ static int gs101_ufs_post_link(struct exynos_ufs *ufs)
{
struct ufs_hba *hba = ufs->hba;
+ /*
+ * Enable Write Line Unique. This field has to be 0x3
+ * to support Write Line Unique transaction on gs101.
+ */
+ hci_writel(ufs, WLU_EN | WLU_BURST_LEN(3), HCI_AXIDMA_RWDATA_BURST_LEN);
+
exynos_ufs_enable_dbg_mode(hba);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_SAVECONFIGTIME), 0x3e8);
exynos_ufs_disable_dbg_mode(hba);
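
Aside (not part of the patch): with the macros added at the top of the file, WLU_EN | WLU_BURST_LEN(3) programs HCI_AXIDMA_RWDATA_BURST_LEN to 0x98000003 (enable in bit 31, burst length 0x3 starting at bit 27, and 0x3 in the low nibble). A quick plain-C check:

/* Standalone check of the Write Line Unique register value. */
#include <assert.h>
#include <stdint.h>

#define WLU_EN           (1u << 31)
#define WLU_BURST_LEN(x) ((uint32_t)(x) << 27 | ((x) & 0xFu))

int main(void)
{
	assert(WLU_BURST_LEN(3) == 0x18000003u);
	assert((WLU_EN | WLU_BURST_LEN(3)) == 0x98000003u);
	return 0;
}
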
@@ -1689,6 +2002,7 @@ static int gs101_ufs_pre_pwr_change(struct exynos_ufs *ufs,
static const struct ufs_hba_variant_ops ufs_hba_exynos_ops = {
.name = "exynos_ufs",
.init = exynos_ufs_init,
+ .exit = exynos_ufs_exit,
.hce_enable_notify = exynos_ufs_hce_enable_notify,
.link_startup_notify = exynos_ufs_link_startup_notify,
.pwr_change_notify = exynos_ufs_pwr_change_notify,
@@ -1698,6 +2012,7 @@ static const struct ufs_hba_variant_ops ufs_hba_exynos_ops = {
.hibern8_notify = exynos_ufs_hibern8_notify,
.suspend = exynos_ufs_suspend,
.resume = exynos_ufs_resume,
+ .fill_crypto_prdt = exynos_ufs_fmp_fill_prdt,
};
static struct ufs_hba_variant_ops ufs_hba_exynosauto_vh_ops = {
@@ -1726,14 +2041,7 @@ static int exynos_ufs_probe(struct platform_device *pdev)
static void exynos_ufs_remove(struct platform_device *pdev)
{
- struct ufs_hba *hba = platform_get_drvdata(pdev);
- struct exynos_ufs *ufs = ufshcd_get_variant(hba);
-
- pm_runtime_get_sync(&(pdev)->dev);
- ufshcd_remove(hba);
-
- phy_power_off(ufs->phy);
- phy_exit(ufs->phy);
+ ufshcd_pltfrm_remove(pdev);
}
static struct exynos_ufs_uic_attr exynos7_uic_attr = {
@@ -1772,6 +2080,7 @@ static const struct exynos_ufs_drv_data exynosauto_ufs_drvs = {
.opts = EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL |
EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR |
EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX,
+ .iocc_mask = UFS_EXYNOSAUTO_SHARABLE,
.drv_init = exynosauto_ufs_drv_init,
.post_hce_enable = exynosauto_ufs_post_hce_enable,
.pre_link = exynosauto_ufs_pre_link,
@@ -1805,7 +2114,6 @@ static const struct exynos_ufs_drv_data exynos_ufs_drvs = {
EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX |
EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB |
EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER,
- .drv_init = exynos7_ufs_drv_init,
.pre_link = exynos7_ufs_pre_link,
.post_link = exynos7_ufs_post_link,
.pre_pwr_change = exynos7_ufs_pre_pwr_change,
@@ -1814,26 +2122,6 @@ static const struct exynos_ufs_drv_data exynos_ufs_drvs = {
static struct exynos_ufs_uic_attr gs101_uic_attr = {
.tx_trailingclks = 0xff,
- .tx_dif_p_nsec = 3000000, /* unit: ns */
- .tx_dif_n_nsec = 1000000, /* unit: ns */
- .tx_high_z_cnt_nsec = 20000, /* unit: ns */
- .tx_base_unit_nsec = 100000, /* unit: ns */
- .tx_gran_unit_nsec = 4000, /* unit: ns */
- .tx_sleep_cnt = 1000, /* unit: ns */
- .tx_min_activatetime = 0xa,
- .rx_filler_enable = 0x2,
- .rx_dif_p_nsec = 1000000, /* unit: ns */
- .rx_hibern8_wait_nsec = 4000000, /* unit: ns */
- .rx_base_unit_nsec = 100000, /* unit: ns */
- .rx_gran_unit_nsec = 4000, /* unit: ns */
- .rx_sleep_cnt = 1280, /* unit: ns */
- .rx_stall_cnt = 320, /* unit: ns */
- .rx_hs_g1_sync_len_cap = SYNC_LEN_COARSE(0xf),
- .rx_hs_g2_sync_len_cap = SYNC_LEN_COARSE(0xf),
- .rx_hs_g3_sync_len_cap = SYNC_LEN_COARSE(0xf),
- .rx_hs_g1_prep_sync_len_cap = PREP_LEN(0xf),
- .rx_hs_g2_prep_sync_len_cap = PREP_LEN(0xf),
- .rx_hs_g3_prep_sync_len_cap = PREP_LEN(0xf),
.pa_dbg_opt_suite1_val = 0x90913C1C,
.pa_dbg_opt_suite1_off = PA_GS101_DBG_OPTION_SUITE1,
.pa_dbg_opt_suite2_val = 0xE01C115F,
@@ -1881,6 +2169,7 @@ static const struct exynos_ufs_drv_data fsd_ufs_drvs = {
.pre_link = fsd_ufs_pre_link,
.post_link = fsd_ufs_post_link,
.pre_pwr_change = fsd_ufs_pre_pwr_change,
+ .suspend = fsd_ufs_suspend,
};
static const struct exynos_ufs_drv_data gs101_ufs_drvs = {
@@ -1891,14 +2180,15 @@ static const struct exynos_ufs_drv_data gs101_ufs_drvs = {
UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR |
UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL |
UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING,
- .opts = EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL |
- EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR |
+ .opts = EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR |
EXYNOS_UFS_OPT_UFSPR_SECURE |
EXYNOS_UFS_OPT_TIMER_TICK_SELECT,
- .drv_init = exynosauto_ufs_drv_init,
+ .iocc_mask = UFS_GS101_SHARABLE,
+ .drv_init = gs101_ufs_drv_init,
.pre_link = gs101_ufs_pre_link,
.post_link = gs101_ufs_post_link,
.pre_pwr_change = gs101_ufs_pre_pwr_change,
+ .suspend = gs101_ufs_suspend,
};
static const struct of_device_id exynos_ufs_of_match[] = {
@@ -1925,7 +2215,7 @@ static const struct dev_pm_ops exynos_ufs_pm_ops = {
static struct platform_driver exynos_ufs_pltform = {
.probe = exynos_ufs_probe,
- .remove_new = exynos_ufs_remove,
+ .remove = exynos_ufs_remove,
.driver = {
.name = "exynos-ufshc",
.pm = &exynos_ufs_pm_ops,