path: root/drivers
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/sleep.c | 28
-rw-r--r--  drivers/base/core.c | 2
-rw-r--r--  drivers/base/power/runtime.c | 4
-rw-r--r--  drivers/base/regmap/Kconfig | 4
-rw-r--r--  drivers/base/regmap/internal.h | 2
-rw-r--r--  drivers/base/regmap/regmap-spi.c | 2
-rw-r--r--  drivers/base/regmap/regmap-spmi.c | 4
-rw-r--r--  drivers/base/regmap/regmap.c | 111
-rw-r--r--  drivers/block/rbd.c | 4
-rw-r--r--  drivers/char/random.c | 4
-rw-r--r--  drivers/char/tpm/tpm-dev-common.c | 6
-rw-r--r--  drivers/char/tpm/tpm-sysfs.c | 87
-rw-r--r--  drivers/char/tpm/tpm.h | 15
-rw-r--r--  drivers/char/tpm/tpm2-cmd.c | 73
-rw-r--r--  drivers/char/tpm/tpm2-space.c | 4
-rw-r--r--  drivers/char/tpm/tpm_crb.c | 59
-rw-r--r--  drivers/char/tpm/tpm_tis.c | 5
-rw-r--r--  drivers/char/tpm/tpm_tis_core.c | 6
-rw-r--r--  drivers/char/tpm/tpm_tis_core.h | 4
-rw-r--r--  drivers/char/tpm/tpm_tis_spi.c | 73
-rw-r--r--  drivers/clocksource/bcm2835_timer.c | 2
-rw-r--r--  drivers/crypto/caam/jr.c | 4
-rw-r--r--  drivers/crypto/nx/nx-842-powernv.c | 2
-rw-r--r--  drivers/edac/altera_edac.c | 10
-rw-r--r--  drivers/edac/amd64_edac.c | 5
-rw-r--r--  drivers/edac/edac_mc.c | 7
-rw-r--r--  drivers/edac/edac_mc.h | 8
-rw-r--r--  drivers/edac/ghes_edac.c | 137
-rw-r--r--  drivers/edac/i7core_edac.c | 11
-rw-r--r--  drivers/edac/pnd2_edac.c | 9
-rw-r--r--  drivers/edac/sb_edac.c | 43
-rw-r--r--  drivers/edac/skx_edac.c | 30
-rw-r--r--  drivers/edac/thunderx_edac.c | 25
-rw-r--r--  drivers/firewire/ohci.c | 10
-rw-r--r--  drivers/firmware/tegra/ivc.c | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 4
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 25
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 2
-rw-r--r--  drivers/hwmon/Kconfig | 15
-rw-r--r--  drivers/hwmon/Makefile | 1
-rw-r--r--  drivers/hwmon/asc7621.c | 1
-rw-r--r--  drivers/hwmon/aspeed-pwm-tacho.c | 8
-rw-r--r--  drivers/hwmon/gpio-fan.c | 224
-rw-r--r--  drivers/hwmon/k10temp.c | 108
-rw-r--r--  drivers/hwmon/max1619.c | 10
-rw-r--r--  drivers/hwmon/max6621.c | 593
-rw-r--r--  drivers/hwmon/pmbus/Kconfig | 10
-rw-r--r--  drivers/hwmon/pmbus/Makefile | 1
-rw-r--r--  drivers/hwmon/pmbus/max31785.c | 116
-rw-r--r--  drivers/hwmon/pmbus/pmbus.h | 6
-rw-r--r--  drivers/hwmon/pmbus/pmbus_core.c | 25
-rw-r--r--  drivers/hwmon/sht15.c | 175
-rw-r--r--  drivers/hwmon/stts751.c | 18
-rw-r--r--  drivers/hwmon/w83793.c | 4
-rw-r--r--  drivers/hwmon/xgene-hwmon.c | 39
-rw-r--r--  drivers/infiniband/hw/hfi1/file_ops.c | 2
-rw-r--r--  drivers/infiniband/hw/hfi1/pio.c | 6
-rw-r--r--  drivers/infiniband/hw/hfi1/ruc.c | 2
-rw-r--r--  drivers/infiniband/hw/hfi1/sdma.c | 8
-rw-r--r--  drivers/infiniband/hw/hfi1/sdma.h | 2
-rw-r--r--  drivers/infiniband/hw/hfi1/uc.c | 4
-rw-r--r--  drivers/infiniband/hw/hfi1/ud.c | 4
-rw-r--r--  drivers/infiniband/hw/hfi1/user_sdma.c | 8
-rw-r--r--  drivers/infiniband/hw/qib/qib_ruc.c | 2
-rw-r--r--  drivers/infiniband/hw/qib/qib_uc.c | 4
-rw-r--r--  drivers/infiniband/hw/qib/qib_ud.c | 4
-rw-r--r--  drivers/infiniband/sw/rdmavt/qp.c | 6
-rw-r--r--  drivers/input/misc/regulator-haptic.c | 2
-rw-r--r--  drivers/input/mouse/elan_i2c_core.c | 1
-rw-r--r--  drivers/input/rmi4/rmi_smbus.c | 4
-rw-r--r--  drivers/input/touchscreen/tsc200x-core.c | 1
-rw-r--r--  drivers/irqchip/Kconfig | 3
-rw-r--r--  drivers/irqchip/Makefile | 1
-rw-r--r--  drivers/irqchip/irq-ompic.c | 202
-rw-r--r--  drivers/macintosh/adb-iop.c | 4
-rw-r--r--  drivers/md/dm-bufio.c | 10
-rw-r--r--  drivers/md/dm-integrity.c | 15
-rw-r--r--  drivers/md/dm-kcopyd.c | 4
-rw-r--r--  drivers/md/dm-mpath.c | 20
-rw-r--r--  drivers/md/dm-stats.c | 36
-rw-r--r--  drivers/md/dm-switch.c | 2
-rw-r--r--  drivers/md/dm-thin.c | 2
-rw-r--r--  drivers/md/dm-verity-target.c | 2
-rw-r--r--  drivers/md/dm.c | 4
-rw-r--r--  drivers/md/md.c | 2
-rw-r--r--  drivers/md/raid5.c | 2
-rw-r--r--  drivers/media/dvb-core/dvb_ringbuffer.c | 8
-rw-r--r--  drivers/misc/lkdtm_core.c | 154
-rw-r--r--  drivers/misc/mic/scif/scif_rb.c | 8
-rw-r--r--  drivers/misc/mic/scif/scif_rma_list.c | 2
-rw-r--r--  drivers/mmc/core/block.c | 346
-rw-r--r--  drivers/mmc/core/bus.c | 7
-rw-r--r--  drivers/mmc/core/core.c | 262
-rw-r--r--  drivers/mmc/core/core.h | 16
-rw-r--r--  drivers/mmc/core/host.c | 20
-rw-r--r--  drivers/mmc/core/host.h | 7
-rw-r--r--  drivers/mmc/core/mmc.c | 46
-rw-r--r--  drivers/mmc/core/mmc_ops.c | 6
-rw-r--r--  drivers/mmc/core/queue.c | 41
-rw-r--r--  drivers/mmc/core/queue.h | 4
-rw-r--r--  drivers/mmc/core/sd.c | 51
-rw-r--r--  drivers/mmc/core/sdio_irq.c | 3
-rw-r--r--  drivers/mmc/host/Kconfig | 28
-rw-r--r--  drivers/mmc/host/Makefile | 2
-rw-r--r--  drivers/mmc/host/atmel-mci.c | 13
-rw-r--r--  drivers/mmc/host/cavium.c | 2
-rw-r--r--  drivers/mmc/host/dw_mmc-k3.c | 2
-rw-r--r--  drivers/mmc/host/dw_mmc.c | 84
-rw-r--r--  drivers/mmc/host/dw_mmc.h | 3
-rw-r--r--  drivers/mmc/host/jz4740_mmc.c | 7
-rw-r--r--  drivers/mmc/host/meson-gx-mmc.c | 2
-rw-r--r--  drivers/mmc/host/meson-mx-sdio.c | 768
-rw-r--r--  drivers/mmc/host/mmci.c | 2
-rw-r--r--  drivers/mmc/host/mtk-sd.c | 285
-rw-r--r--  drivers/mmc/host/mvsdio.c | 6
-rw-r--r--  drivers/mmc/host/mxcmmc.c | 11
-rw-r--r--  drivers/mmc/host/omap.c | 20
-rw-r--r--  drivers/mmc/host/omap_hsmmc.c | 35
-rw-r--r--  drivers/mmc/host/renesas_sdhi_internal_dmac.c | 1
-rw-r--r--  drivers/mmc/host/renesas_sdhi_sys_dmac.c | 5
-rw-r--r--  drivers/mmc/host/rtsx_pci_sdmmc.c | 38
-rw-r--r--  drivers/mmc/host/sdhci-acpi.c | 174
-rw-r--r--  drivers/mmc/host/sdhci-cadence.c | 28
-rw-r--r--  drivers/mmc/host/sdhci-msm.c | 326
-rw-r--r--  drivers/mmc/host/sdhci-of-at91.c | 3
-rw-r--r--  drivers/mmc/host/sdhci-of-esdhc.c | 58
-rw-r--r--  drivers/mmc/host/sdhci-omap.c | 607
-rw-r--r--  drivers/mmc/host/sdhci-pci-core.c | 11
-rw-r--r--  drivers/mmc/host/sdhci-pci-o2micro.c | 35
-rw-r--r--  drivers/mmc/host/sdhci-pci-o2micro.h | 73
-rw-r--r--  drivers/mmc/host/sdhci-pci.h | 13
-rw-r--r--  drivers/mmc/host/sdhci-s3c.c | 18
-rw-r--r--  drivers/mmc/host/sdhci-tegra.c | 10
-rw-r--r--  drivers/mmc/host/sdhci.c | 15
-rw-r--r--  drivers/mmc/host/sdhci_f_sdh30.c | 14
-rw-r--r--  drivers/mmc/host/sunxi-mmc.c | 5
-rw-r--r--  drivers/mmc/host/tifm_sd.c | 6
-rw-r--r--  drivers/mmc/host/tmio_mmc_core.c | 36
-rw-r--r--  drivers/mmc/host/usdhi6rol0.c | 2
-rw-r--r--  drivers/mmc/host/via-sdmmc.c | 8
-rw-r--r--  drivers/mmc/host/vub300.c | 41
-rw-r--r--  drivers/mmc/host/wbsd.c | 8
-rw-r--r--  drivers/net/bonding/bond_alb.c | 2
-rw-r--r--  drivers/net/bonding/bond_main.c | 9
-rw-r--r--  drivers/net/can/c_can/c_can_pci.c | 1
-rw-r--r--  drivers/net/can/c_can/c_can_platform.c | 1
-rw-r--r--  drivers/net/can/ifi_canfd/ifi_canfd.c | 6
-rw-r--r--  drivers/net/can/peak_canfd/peak_pciefd_main.c | 14
-rw-r--r--  drivers/net/can/sun4i_can.c | 12
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c | 4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h | 6
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hip04_eth.c | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ptp.c | 4
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_regs.h | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.h | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 8
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/vf.h | 2
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/dev.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 7
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-main.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c | 10
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 4
-rw-r--r--  drivers/net/ethernet/sfc/falcon/efx.c | 4
-rw-r--r--  drivers/net/ethernet/sfc/falcon/falcon.c | 4
-rw-r--r--  drivers/net/ethernet/sfc/falcon/farch.c | 8
-rw-r--r--  drivers/net/ethernet/sfc/falcon/nic.h | 6
-rw-r--r--  drivers/net/ethernet/sfc/falcon/tx.c | 6
-rw-r--r--  drivers/net/ethernet/sfc/farch.c | 8
-rw-r--r--  drivers/net/ethernet/sfc/nic.h | 6
-rw-r--r--  drivers/net/ethernet/sfc/ptp.c | 10
-rw-r--r--  drivers/net/ethernet/sfc/tx.c | 6
-rw-r--r--  drivers/net/ethernet/sun/niu.c | 4
-rw-r--r--  drivers/net/tap.c | 2
-rw-r--r--  drivers/net/tun.c | 4
-rw-r--r--  drivers/net/usb/asix_devices.c | 4
-rw-r--r--  drivers/net/usb/cdc_ether.c | 2
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath5k/desc.c | 8
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/rx.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 10
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.c | 4
-rw-r--r--  drivers/nubus/nubus.c | 13
-rw-r--r--  drivers/regulator/Kconfig | 2
-rw-r--r--  drivers/regulator/axp20x-regulator.c | 92
-rw-r--r--  drivers/regulator/da9211-regulator.c | 14
-rw-r--r--  drivers/regulator/da9211-regulator.h | 2
-rw-r--r--  drivers/regulator/pbias-regulator.c | 21
-rw-r--r--  drivers/regulator/qcom_spmi-regulator.c | 48
-rw-r--r--  drivers/regulator/tps65218-regulator.c | 2
-rw-r--r--  drivers/s390/block/dasd_eer.c | 16
-rw-r--r--  drivers/s390/block/dasd_int.h | 16
-rw-r--r--  drivers/s390/block/scm_blk.h | 8
-rw-r--r--  drivers/s390/char/sclp_con.c | 7
-rw-r--r--  drivers/s390/char/sclp_tty.c | 7
-rw-r--r--  drivers/s390/char/tape_class.c | 3
-rw-r--r--  drivers/s390/char/vmlogrdr.c | 3
-rw-r--r--  drivers/s390/char/vmur.c | 11
-rw-r--r--  drivers/s390/char/vmur.h | 4
-rw-r--r--  drivers/s390/cio/ccwgroup.c | 6
-rw-r--r--  drivers/s390/cio/chsc_sch.c | 6
-rw-r--r--  drivers/s390/cio/cio_debug.h | 8
-rw-r--r--  drivers/s390/cio/cmf.c | 278
-rw-r--r--  drivers/s390/cio/eadm_sch.c | 8
-rw-r--r--  drivers/s390/cio/qdio_debug.h | 18
-rw-r--r--  drivers/s390/cio/qdio_thinint.c | 10
-rw-r--r--  drivers/s390/cio/vfio_ccw_cp.c | 24
-rw-r--r--  drivers/s390/crypto/ap_asm.h | 43
-rw-r--r--  drivers/s390/crypto/ap_bus.c | 74
-rw-r--r--  drivers/s390/crypto/ap_bus.h | 4
-rw-r--r--  drivers/s390/crypto/ap_card.c | 12
-rw-r--r--  drivers/s390/crypto/ap_queue.c | 4
-rw-r--r--  drivers/s390/crypto/pkey_api.c | 3
-rw-r--r--  drivers/s390/crypto/zcrypt_api.h | 1
-rw-r--r--  drivers/s390/crypto/zcrypt_cex4.c | 48
-rw-r--r--  drivers/s390/crypto/zcrypt_msgtype50.c | 6
-rw-r--r--  drivers/s390/crypto/zcrypt_msgtype6.c | 3
-rw-r--r--  drivers/s390/net/ctcm_main.c | 1
-rw-r--r--  drivers/s390/net/lcs.c | 1
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 1
-rw-r--r--  drivers/s390/virtio/Makefile | 6
-rw-r--r--  drivers/s390/virtio/kvm_virtio.c | 515
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.c | 2
-rw-r--r--  drivers/spi/Kconfig | 13
-rw-r--r--  drivers/spi/Makefile | 1
-rw-r--r--  drivers/spi/spi-armada-3700.c | 17
-rw-r--r--  drivers/spi/spi-axi-spi-engine.c | 4
-rw-r--r--  drivers/spi/spi-fsl-dspi.c | 66
-rw-r--r--  drivers/spi/spi-imx.c | 256
-rw-r--r--  drivers/spi/spi-mxs.c | 120
-rw-r--r--  drivers/spi/spi-orion.c | 1
-rw-r--r--  drivers/spi/spi-rspi.c | 6
-rw-r--r--  drivers/spi/spi-s3c64xx.c | 3
-rw-r--r--  drivers/spi/spi-sh-msiof.c | 12
-rw-r--r--  drivers/spi/spi-sprd-adi.c | 418
-rw-r--r--  drivers/spi/spi-tegra114.c | 6
-rw-r--r--  drivers/spi/spi.c | 9
-rw-r--r--  drivers/target/target_core_user.c | 2
-rw-r--r--  drivers/usb/class/cdc-wdm.c | 2
-rw-r--r--  drivers/usb/core/devio.c | 2
-rw-r--r--  drivers/usb/core/sysfs.c | 4
-rw-r--r--  drivers/usb/gadget/udc/gr_udc.c | 4
-rw-r--r--  drivers/usb/host/ohci-hcd.c | 2
-rw-r--r--  drivers/usb/host/uhci-hcd.h | 4
-rw-r--r--  drivers/vfio/vfio.c | 2
-rw-r--r--  drivers/vhost/scsi.c | 2
267 files changed, 6410 insertions, 2569 deletions
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 6804ddab3052..8082871b409a 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -160,6 +160,14 @@ static int __init init_nvs_nosave(const struct dmi_system_id *d)
return 0;
}
+static bool acpi_sleep_no_lps0;
+
+static int __init init_no_lps0(const struct dmi_system_id *d)
+{
+ acpi_sleep_no_lps0 = true;
+ return 0;
+}
+
static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
{
.callback = init_old_suspend_ordering,
@@ -343,6 +351,19 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
DMI_MATCH(DMI_PRODUCT_NAME, "80E3"),
},
},
+ /*
+ * https://bugzilla.kernel.org/show_bug.cgi?id=196907
+ * Some Dell XPS13 9360 cannot do suspend-to-idle using the Low Power
+ * S0 Idle firmware interface.
+ */
+ {
+ .callback = init_no_lps0,
+ .ident = "Dell XPS13 9360",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9360"),
+ },
+ },
{},
};
@@ -485,6 +506,7 @@ static void acpi_pm_end(void)
}
#else /* !CONFIG_ACPI_SLEEP */
#define acpi_target_sleep_state ACPI_STATE_S0
+#define acpi_sleep_no_lps0 (false)
static inline void acpi_sleep_dmi_check(void) {}
#endif /* CONFIG_ACPI_SLEEP */
@@ -863,6 +885,12 @@ static int lps0_device_attach(struct acpi_device *adev,
if (lps0_device_handle)
return 0;
+ if (acpi_sleep_no_lps0) {
+ acpi_handle_info(adev->handle,
+ "Low Power S0 Idle interface disabled\n");
+ return 0;
+ }
+
if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0))
return 0;
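
The DMI quirk added above only takes effect because the ACPI sleep code runs acpisleep_dmi_table through dmi_check_system() at init time, which invokes the .callback of every matching entry. A minimal, stand-alone sketch of that mechanism follows; the table, flag and vendor strings are illustrative placeholders, not taken from sleep.c.

    /* Illustrative sketch only; names and strings are placeholders. */
    #include <linux/dmi.h>
    #include <linux/init.h>

    static bool example_quirk_enabled;

    static int __init example_quirk_cb(const struct dmi_system_id *d)
    {
            example_quirk_enabled = true;   /* remember that this machine matched */
            return 0;
    }

    static const struct dmi_system_id example_quirk_table[] __initconst = {
            {
                    .callback = example_quirk_cb,
                    .ident = "Some affected machine",
                    .matches = {
                            DMI_MATCH(DMI_SYS_VENDOR, "Vendor Inc."),
                            DMI_MATCH(DMI_PRODUCT_NAME, "Model 1234"),
                    },
            },
            {}
    };

    static void __init example_dmi_check(void)
    {
            /* Calls .callback for each entry whose .matches all match this system. */
            dmi_check_system(example_quirk_table);
    }
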
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 12ebd055724c..4b8ba2a75a4d 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -668,7 +668,7 @@ const char *dev_driver_string(const struct device *dev)
* so be careful about accessing it. dev->bus and dev->class should
* never change once they are set, so they don't need special care.
*/
- drv = ACCESS_ONCE(dev->driver);
+ drv = READ_ONCE(dev->driver);
return drv ? drv->name :
(dev->bus ? dev->bus->name :
(dev->class ? dev->class->name : ""));
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 7bcf80fa9ada..41d7c2b99f69 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -134,11 +134,11 @@ unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
if (!dev->power.use_autosuspend)
goto out;
- autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
+ autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
if (autosuspend_delay < 0)
goto out;
- last_busy = ACCESS_ONCE(dev->power.last_busy);
+ last_busy = READ_ONCE(dev->power.last_busy);
elapsed = jiffies - last_busy;
if (elapsed < 0)
goto out; /* jiffies has wrapped around. */
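
The two hunks above are part of a tree-wide ACCESS_ONCE() -> READ_ONCE() conversion (random.c, bcm2835_timer.c, caam/jr.c, nx-842-powernv.c and altera_edac.c below get the same treatment). READ_ONCE() asks the compiler for exactly one, untorn load of a value that another context may update concurrently. A minimal sketch of the pattern, with illustrative names rather than the driver's own:

    /* Illustrative sketch only, not part of the patch. */
    #include <linux/compiler.h>

    struct example_dev {
            int autosuspend_delay;  /* updated from sysfs, read locklessly */
    };

    static int example_read_delay(struct example_dev *d)
    {
            /*
             * READ_ONCE() forces a single, untorn load and stops the compiler
             * from re-reading the field, which is what the lockless readers
             * converted in this series rely on.
             */
            int delay = READ_ONCE(d->autosuspend_delay);

            return delay < 0 ? 0 : delay;
    }
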
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index 0368fd7b3a41..3a1535d812d8 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -6,6 +6,7 @@
config REGMAP
default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ)
select IRQ_DOMAIN if REGMAP_IRQ
+ select REGMAP_HWSPINLOCK if HWSPINLOCK=y
bool
config REGCACHE_COMPRESSED
@@ -37,3 +38,6 @@ config REGMAP_MMIO
config REGMAP_IRQ
bool
+
+config REGMAP_HWSPINLOCK
+ bool
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 2a4435d76028..8641183cac2f 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -157,6 +157,8 @@ struct regmap {
struct rb_root range_tree;
void *selector_work_buf; /* Scratch buffer used for selector */
+
+ struct hwspinlock *hwlock;
};
struct regcache_ops {
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
index edd9a839d004..c7150dd264d5 100644
--- a/drivers/base/regmap/regmap-spi.c
+++ b/drivers/base/regmap/regmap-spi.c
@@ -102,7 +102,7 @@ static int regmap_spi_read(void *context,
return spi_write_then_read(spi, reg, reg_size, val, val_size);
}
-static struct regmap_bus regmap_spi = {
+static const struct regmap_bus regmap_spi = {
.write = regmap_spi_write,
.gather_write = regmap_spi_gather_write,
.async_write = regmap_spi_async_write,
diff --git a/drivers/base/regmap/regmap-spmi.c b/drivers/base/regmap/regmap-spmi.c
index 4a36e415e938..0bfb8ed244d5 100644
--- a/drivers/base/regmap/regmap-spmi.c
+++ b/drivers/base/regmap/regmap-spmi.c
@@ -83,7 +83,7 @@ static int regmap_spmi_base_write(void *context, const void *data,
count - 1);
}
-static struct regmap_bus regmap_spmi_base = {
+static const struct regmap_bus regmap_spmi_base = {
.read = regmap_spmi_base_read,
.write = regmap_spmi_base_write,
.gather_write = regmap_spmi_base_gather_write,
@@ -203,7 +203,7 @@ static int regmap_spmi_ext_write(void *context, const void *data,
count - 2);
}
-static struct regmap_bus regmap_spmi_ext = {
+static const struct regmap_bus regmap_spmi_ext = {
.read = regmap_spmi_ext_read,
.write = regmap_spmi_ext_write,
.gather_write = regmap_spmi_ext_gather_write,
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index b9a779a4a739..8d516a9bfc01 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -20,6 +20,7 @@
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/log2.h>
+#include <linux/hwspinlock.h>
#define CREATE_TRACE_POINTS
#include "trace.h"
@@ -413,6 +414,51 @@ static unsigned int regmap_parse_64_native(const void *buf)
}
#endif
+#ifdef REGMAP_HWSPINLOCK
+static void regmap_lock_hwlock(void *__map)
+{
+ struct regmap *map = __map;
+
+ hwspin_lock_timeout(map->hwlock, UINT_MAX);
+}
+
+static void regmap_lock_hwlock_irq(void *__map)
+{
+ struct regmap *map = __map;
+
+ hwspin_lock_timeout_irq(map->hwlock, UINT_MAX);
+}
+
+static void regmap_lock_hwlock_irqsave(void *__map)
+{
+ struct regmap *map = __map;
+
+ hwspin_lock_timeout_irqsave(map->hwlock, UINT_MAX,
+ &map->spinlock_flags);
+}
+
+static void regmap_unlock_hwlock(void *__map)
+{
+ struct regmap *map = __map;
+
+ hwspin_unlock(map->hwlock);
+}
+
+static void regmap_unlock_hwlock_irq(void *__map)
+{
+ struct regmap *map = __map;
+
+ hwspin_unlock_irq(map->hwlock);
+}
+
+static void regmap_unlock_hwlock_irqrestore(void *__map)
+{
+ struct regmap *map = __map;
+
+ hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags);
+}
+#endif
+
static void regmap_lock_mutex(void *__map)
{
struct regmap *map = __map;
@@ -627,6 +673,34 @@ struct regmap *__regmap_init(struct device *dev,
map->lock = config->lock;
map->unlock = config->unlock;
map->lock_arg = config->lock_arg;
+ } else if (config->hwlock_id) {
+#ifdef REGMAP_HWSPINLOCK
+ map->hwlock = hwspin_lock_request_specific(config->hwlock_id);
+ if (!map->hwlock) {
+ ret = -ENXIO;
+ goto err_map;
+ }
+
+ switch (config->hwlock_mode) {
+ case HWLOCK_IRQSTATE:
+ map->lock = regmap_lock_hwlock_irqsave;
+ map->unlock = regmap_unlock_hwlock_irqrestore;
+ break;
+ case HWLOCK_IRQ:
+ map->lock = regmap_lock_hwlock_irq;
+ map->unlock = regmap_unlock_hwlock_irq;
+ break;
+ default:
+ map->lock = regmap_lock_hwlock;
+ map->unlock = regmap_unlock_hwlock;
+ break;
+ }
+
+ map->lock_arg = map;
+#else
+ ret = -EINVAL;
+ goto err_map;
+#endif
} else {
if ((bus && bus->fast_io) ||
config->fast_io) {
@@ -729,7 +803,7 @@ struct regmap *__regmap_init(struct device *dev,
map->format.format_write = regmap_format_2_6_write;
break;
default:
- goto err_map;
+ goto err_hwlock;
}
break;
@@ -739,7 +813,7 @@ struct regmap *__regmap_init(struct device *dev,
map->format.format_write = regmap_format_4_12_write;
break;
default:
- goto err_map;
+ goto err_hwlock;
}
break;
@@ -749,7 +823,7 @@ struct regmap *__regmap_init(struct device *dev,
map->format.format_write = regmap_format_7_9_write;
break;
default:
- goto err_map;
+ goto err_hwlock;
}
break;
@@ -759,7 +833,7 @@ struct regmap *__regmap_init(struct device *dev,
map->format.format_write = regmap_format_10_14_write;
break;
default:
- goto err_map;
+ goto err_hwlock;
}
break;
@@ -779,13 +853,13 @@ struct regmap *__regmap_init(struct device *dev,
map->format.format_reg = regmap_format_16_native;
break;
default:
- goto err_map;
+ goto err_hwlock;
}
break;
case 24:
if (reg_endian != REGMAP_ENDIAN_BIG)
- goto err_map;
+ goto err_hwlock;
map->format.format_reg = regmap_format_24;
break;
@@ -801,7 +875,7 @@ struct regmap *__regmap_init(struct device *dev,
map->format.format_reg = regmap_format_32_native;
break;
default:
- goto err_map;
+ goto err_hwlock;
}
break;
@@ -818,13 +892,13 @@ struct regmap *__regmap_init(struct device *dev,
map->format.format_reg = regmap_format_64_native;
break;
default:
- goto err_map;
+ goto err_hwlock;
}
break;
#endif
default:
- goto err_map;
+ goto err_hwlock;
}
if (val_endian == REGMAP_ENDIAN_NATIVE)
@@ -853,12 +927,12 @@ struct regmap *__regmap_init(struct device *dev,
map->format.parse_val = regmap_parse_16_native;
break;
default:
- goto err_map;
+ goto err_hwlock;
}
break;
case 24:
if (val_endian != REGMAP_ENDIAN_BIG)
- goto err_map;
+ goto err_hwlock;
map->format.format_val = regmap_format_24;
map->format.parse_val = regmap_parse_24;
break;
@@ -879,7 +953,7 @@ struct regmap *__regmap_init(struct device *dev,
map->format.parse_val = regmap_parse_32_native;
break;
default:
- goto err_map;
+ goto err_hwlock;
}
break;
#ifdef CONFIG_64BIT
@@ -900,7 +974,7 @@ struct regmap *__regmap_init(struct device *dev,
map->format.parse_val = regmap_parse_64_native;
break;
default:
- goto err_map;
+ goto err_hwlock;
}
break;
#endif
@@ -909,18 +983,18 @@ struct regmap *__regmap_init(struct device *dev,
if (map->format.format_write) {
if ((reg_endian != REGMAP_ENDIAN_BIG) ||
(val_endian != REGMAP_ENDIAN_BIG))
- goto err_map;
+ goto err_hwlock;
map->use_single_write = true;
}
if (!map->format.format_write &&
!(map->format.format_reg && map->format.format_val))
- goto err_map;
+ goto err_hwlock;
map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
if (map->work_buf == NULL) {
ret = -ENOMEM;
- goto err_map;
+ goto err_hwlock;
}
if (map->format.format_write) {
@@ -1041,6 +1115,9 @@ err_regcache:
err_range:
regmap_range_exit(map);
kfree(map->work_buf);
+err_hwlock:
+ if (IS_ENABLED(REGMAP_HWSPINLOCK) && map->hwlock)
+ hwspin_lock_free(map->hwlock);
err_map:
kfree(map);
err:
@@ -1228,6 +1305,8 @@ void regmap_exit(struct regmap *map)
kfree(async->work_buf);
kfree(async);
}
+ if (IS_ENABLED(REGMAP_HWSPINLOCK) && map->hwlock)
+ hwspin_lock_free(map->hwlock);
kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);
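
The regmap changes above add a hardware-spinlock-backed locking mode selected through the regmap_config. A hedged sketch of how a client driver might opt into it, assuming the hwlock_id/hwlock_mode fields this series adds and a platform that already registers a hwspinlock; the config values and names are illustrative only:

    /* Illustrative sketch only; names and numbers are not from the patch. */
    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/hwspinlock.h>
    #include <linux/regmap.h>

    static const struct regmap_config example_syscon_cfg = {
            .reg_bits    = 32,
            .val_bits    = 32,
            .hwlock_id   = 1,               /* hwspinlock shared with a co-processor */
            .hwlock_mode = HWLOCK_IRQSTATE, /* maps to regmap_lock_hwlock_irqsave() */
    };

    static int example_probe(struct device *dev, void __iomem *base)
    {
            struct regmap *map;

            map = devm_regmap_init_mmio(dev, base, &example_syscon_cfg);
            if (IS_ERR(map))
                    return PTR_ERR(map);

            /* Every regmap_read()/regmap_write() now takes the hardware lock. */
            return 0;
    }
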
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index b640ad8a6d20..adc877dfef5c 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -2692,7 +2692,7 @@ static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
* from the parent.
*/
page_count = (u32)calc_pages_for(0, length);
- pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
+ pages = ceph_alloc_page_vector(page_count, GFP_NOIO);
if (IS_ERR(pages)) {
result = PTR_ERR(pages);
pages = NULL;
@@ -2827,7 +2827,7 @@ static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
*/
size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
page_count = (u32)calc_pages_for(0, size);
- pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
+ pages = ceph_alloc_page_vector(page_count, GFP_NOIO);
if (IS_ERR(pages)) {
ret = PTR_ERR(pages);
goto fail_stat_request;
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 8ad92707e45f..6c7ccac2679e 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -641,7 +641,7 @@ static void credit_entropy_bits(struct entropy_store *r, int nbits)
return;
retry:
- entropy_count = orig = ACCESS_ONCE(r->entropy_count);
+ entropy_count = orig = READ_ONCE(r->entropy_count);
if (nfrac < 0) {
/* Debit */
entropy_count += nfrac;
@@ -1265,7 +1265,7 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
/* Can we pull enough? */
retry:
- entropy_count = orig = ACCESS_ONCE(r->entropy_count);
+ entropy_count = orig = READ_ONCE(r->entropy_count);
ibytes = nbytes;
/* never pull more than available */
have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c
index 610638a80383..461bf0b8a094 100644
--- a/drivers/char/tpm/tpm-dev-common.c
+++ b/drivers/char/tpm/tpm-dev-common.c
@@ -110,6 +110,12 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf,
return -EFAULT;
}
+ if (in_size < 6 ||
+ in_size < be32_to_cpu(*((__be32 *) (priv->data_buffer + 2)))) {
+ mutex_unlock(&priv->buffer_mutex);
+ return -EINVAL;
+ }
+
/* atomic tpm command send and result receive. We only hold the ops
* lock during this period so that the tpm can be unregistered even if
* the char dev is held open.
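
The new check in tpm_common_write() leans on the fixed TPM command header layout: a 2-byte tag followed by a 4-byte big-endian total length at offset 2, so anything shorter than 6 bytes, or shorter than its own declared length, is rejected. Restated as a stand-alone helper (the helper name and TPM_HEADER_MIN are illustrative, not kernel symbols):

    /* Illustrative restatement of the check; TPM_HEADER_MIN is not a kernel symbol. */
    #include <linux/types.h>
    #include <asm/unaligned.h>

    #define TPM_HEADER_MIN 6        /* 2-byte tag + 4-byte big-endian length */

    static bool tpm_cmd_len_ok(const u8 *cmd, size_t in_size)
    {
            if (in_size < TPM_HEADER_MIN)
                    return false;           /* not even a complete header */

            /* The length the command claims must fit in what userspace sent. */
            return in_size >= get_unaligned_be32(cmd + 2);
    }
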
diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c
index 86f38d239476..83a77a445538 100644
--- a/drivers/char/tpm/tpm-sysfs.c
+++ b/drivers/char/tpm/tpm-sysfs.c
@@ -20,44 +20,48 @@
#include <linux/device.h>
#include "tpm.h"
-#define READ_PUBEK_RESULT_SIZE 314
+struct tpm_readpubek_out {
+ u8 algorithm[4];
+ u8 encscheme[2];
+ u8 sigscheme[2];
+ __be32 paramsize;
+ u8 parameters[12];
+ __be32 keysize;
+ u8 modulus[256];
+ u8 checksum[20];
+} __packed;
+
#define READ_PUBEK_RESULT_MIN_BODY_SIZE (28 + 256)
#define TPM_ORD_READPUBEK 124
-static const struct tpm_input_header tpm_readpubek_header = {
- .tag = cpu_to_be16(TPM_TAG_RQU_COMMAND),
- .length = cpu_to_be32(30),
- .ordinal = cpu_to_be32(TPM_ORD_READPUBEK)
-};
+
static ssize_t pubek_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- u8 *data;
- struct tpm_cmd_t tpm_cmd;
- ssize_t err;
- int i, rc;
+ struct tpm_buf tpm_buf;
+ struct tpm_readpubek_out *out;
+ ssize_t rc;
+ int i;
char *str = buf;
struct tpm_chip *chip = to_tpm_chip(dev);
+ char anti_replay[20];
- memset(&tpm_cmd, 0, sizeof(tpm_cmd));
-
- tpm_cmd.header.in = tpm_readpubek_header;
- err = tpm_transmit_cmd(chip, NULL, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
- READ_PUBEK_RESULT_MIN_BODY_SIZE, 0,
- "attempting to read the PUBEK");
- if (err)
- goto out;
-
- /*
- ignore header 10 bytes
- algorithm 32 bits (1 == RSA )
- encscheme 16 bits
- sigscheme 16 bits
- parameters (RSA 12->bytes: keybit, #primes, expbit)
- keylenbytes 32 bits
- 256 byte modulus
- ignore checksum 20 bytes
- */
- data = tpm_cmd.params.readpubek_out_buffer;
+ memset(&anti_replay, 0, sizeof(anti_replay));
+
+ rc = tpm_buf_init(&tpm_buf, TPM_TAG_RQU_COMMAND, TPM_ORD_READPUBEK);
+ if (rc)
+ return rc;
+
+ tpm_buf_append(&tpm_buf, anti_replay, sizeof(anti_replay));
+
+ rc = tpm_transmit_cmd(chip, NULL, tpm_buf.data, PAGE_SIZE,
+ READ_PUBEK_RESULT_MIN_BODY_SIZE, 0,
+ "attempting to read the PUBEK");
+ if (rc) {
+ tpm_buf_destroy(&tpm_buf);
+ return 0;
+ }
+
+ out = (struct tpm_readpubek_out *)&tpm_buf.data[10];
str +=
sprintf(str,
"Algorithm: %02X %02X %02X %02X\n"
@@ -68,21 +72,26 @@ static ssize_t pubek_show(struct device *dev, struct device_attribute *attr,
"%02X %02X %02X %02X\n"
"Modulus length: %d\n"
"Modulus:\n",
- data[0], data[1], data[2], data[3],
- data[4], data[5],
- data[6], data[7],
- data[12], data[13], data[14], data[15],
- data[16], data[17], data[18], data[19],
- data[20], data[21], data[22], data[23],
- be32_to_cpu(*((__be32 *) (data + 24))));
+ out->algorithm[0], out->algorithm[1], out->algorithm[2],
+ out->algorithm[3],
+ out->encscheme[0], out->encscheme[1],
+ out->sigscheme[0], out->sigscheme[1],
+ out->parameters[0], out->parameters[1],
+ out->parameters[2], out->parameters[3],
+ out->parameters[4], out->parameters[5],
+ out->parameters[6], out->parameters[7],
+ out->parameters[8], out->parameters[9],
+ out->parameters[10], out->parameters[11],
+ be32_to_cpu(out->keysize));
for (i = 0; i < 256; i++) {
- str += sprintf(str, "%02X ", data[i + 28]);
+ str += sprintf(str, "%02X ", out->modulus[i]);
if ((i + 1) % 16 == 0)
str += sprintf(str, "\n");
}
-out:
+
rc = str - buf;
+ tpm_buf_destroy(&tpm_buf);
return rc;
}
static DEVICE_ATTR_RO(pubek);
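
The pubek_show() rework above is an instance of the tpm_buf pattern: build the command with tpm_buf_init()/tpm_buf_append(), hand buf.data to tpm_transmit_cmd(), then tpm_buf_destroy(). Reduced to its skeleton it looks roughly like this (a sketch mirroring the patch, not additional kernel code):

    /* Skeleton of the pattern used by pubek_show() above. */
    static ssize_t example_read_pubek(struct tpm_chip *chip)
    {
            u8 anti_replay[20] = { 0 };     /* TPM_ORD_READPUBEK takes a 20-byte nonce */
            struct tpm_buf buf;
            ssize_t rc;

            rc = tpm_buf_init(&buf, TPM_TAG_RQU_COMMAND, TPM_ORD_READPUBEK);
            if (rc)
                    return rc;

            tpm_buf_append(&buf, anti_replay, sizeof(anti_replay));

            rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE,
                                  READ_PUBEK_RESULT_MIN_BODY_SIZE, 0,
                                  "attempting to read the PUBEK");

            /* On success the payload starts right after the 10-byte response header. */
            tpm_buf_destroy(&buf);
            return rc;
    }
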
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 2d5466a72e40..528cffbd49d3 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -345,17 +345,6 @@ enum tpm_sub_capabilities {
TPM_CAP_PROP_TIS_DURATION = 0x120,
};
-struct tpm_readpubek_params_out {
- u8 algorithm[4];
- u8 encscheme[2];
- u8 sigscheme[2];
- __be32 paramsize;
- u8 parameters[12]; /*assuming RSA*/
- __be32 keysize;
- u8 modulus[256];
- u8 checksum[20];
-} __packed;
-
typedef union {
struct tpm_input_header in;
struct tpm_output_header out;
@@ -385,8 +374,6 @@ struct tpm_getrandom_in {
} __packed;
typedef union {
- struct tpm_readpubek_params_out readpubek_out;
- u8 readpubek_out_buffer[sizeof(struct tpm_readpubek_params_out)];
struct tpm_pcrread_in pcrread_in;
struct tpm_pcrread_out pcrread_out;
struct tpm_getrandom_in getrandom_in;
@@ -557,7 +544,7 @@ static inline void tpm_add_ppi(struct tpm_chip *chip)
}
#endif
-static inline inline u32 tpm2_rc_value(u32 rc)
+static inline u32 tpm2_rc_value(u32 rc)
{
return (rc & BIT(7)) ? rc & 0xff : rc;
}
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index e1a41b788f08..f40d20671a78 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -834,72 +834,43 @@ static const struct tpm_input_header tpm2_selftest_header = {
};
/**
- * tpm2_continue_selftest() - start a self test
- *
- * @chip: TPM chip to use
- * @full: test all commands instead of testing only those that were not
- * previously tested.
- *
- * Return: Same as with tpm_transmit_cmd with exception of RC_TESTING.
- */
-static int tpm2_start_selftest(struct tpm_chip *chip, bool full)
-{
- int rc;
- struct tpm2_cmd cmd;
-
- cmd.header.in = tpm2_selftest_header;
- cmd.params.selftest_in.full_test = full;
-
- rc = tpm_transmit_cmd(chip, NULL, &cmd, TPM2_SELF_TEST_IN_SIZE, 0, 0,
- "continue selftest");
-
- /* At least some prototype chips seem to give RC_TESTING error
- * immediately. This is a workaround for that.
- */
- if (rc == TPM2_RC_TESTING) {
- dev_warn(&chip->dev, "Got RC_TESTING, ignoring\n");
- rc = 0;
- }
-
- return rc;
-}
-
-/**
- * tpm2_do_selftest() - run a full self test
+ * tpm2_do_selftest() - ensure that all self tests have passed
*
* @chip: TPM chip to use
*
* Return: Same as with tpm_transmit_cmd.
*
- * During the self test TPM2 commands return with the error code RC_TESTING.
- * Waiting is done by issuing PCR read until it executes successfully.
+ * The TPM can either run all self tests synchronously and then return
+ * RC_SUCCESS once all tests were successful. Or it can choose to run the tests
+ * asynchronously and return RC_TESTING immediately while the self tests still
+ * execute in the background. This function handles both cases and waits until
+ * all tests have completed.
*/
static int tpm2_do_selftest(struct tpm_chip *chip)
{
int rc;
- unsigned int loops;
- unsigned int delay_msec = 100;
- unsigned long duration;
- int i;
-
- duration = tpm2_calc_ordinal_duration(chip, TPM2_CC_SELF_TEST);
+ unsigned int delay_msec = 20;
+ long duration;
+ struct tpm2_cmd cmd;
- loops = jiffies_to_msecs(duration) / delay_msec;
+ duration = jiffies_to_msecs(
+ tpm2_calc_ordinal_duration(chip, TPM2_CC_SELF_TEST));
- rc = tpm2_start_selftest(chip, true);
- if (rc)
- return rc;
+ while (duration > 0) {
+ cmd.header.in = tpm2_selftest_header;
+ cmd.params.selftest_in.full_test = 0;
- for (i = 0; i < loops; i++) {
- /* Attempt to read a PCR value */
- rc = tpm2_pcr_read(chip, 0, NULL);
- if (rc < 0)
- break;
+ rc = tpm_transmit_cmd(chip, NULL, &cmd, TPM2_SELF_TEST_IN_SIZE,
+ 0, 0, "continue selftest");
if (rc != TPM2_RC_TESTING)
break;
tpm_msleep(delay_msec);
+ duration -= delay_msec;
+
+ /* wait longer the next round */
+ delay_msec *= 2;
}
return rc;
@@ -1009,7 +980,7 @@ static int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip)
{
struct tpm_buf buf;
u32 nr_commands;
- u32 *attrs;
+ __be32 *attrs;
u32 cc;
int i;
int rc;
@@ -1049,7 +1020,7 @@ static int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip)
chip->nr_commands = nr_commands;
- attrs = (u32 *)&buf.data[TPM_HEADER_SIZE + 9];
+ attrs = (__be32 *)&buf.data[TPM_HEADER_SIZE + 9];
for (i = 0; i < nr_commands; i++, attrs++) {
chip->cc_attrs_tbl[i] = be32_to_cpup(attrs);
cc = chip->cc_attrs_tbl[i] & 0xFFFF;
diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c
index e2e059d8ffec..4e4014eabdb9 100644
--- a/drivers/char/tpm/tpm2-space.c
+++ b/drivers/char/tpm/tpm2-space.c
@@ -242,7 +242,7 @@ static int tpm2_map_command(struct tpm_chip *chip, u32 cc, u8 *cmd)
struct tpm_space *space = &chip->work_space;
unsigned int nr_handles;
u32 attrs;
- u32 *handle;
+ __be32 *handle;
int i;
i = tpm2_find_cc(chip, cc);
@@ -252,7 +252,7 @@ static int tpm2_map_command(struct tpm_chip *chip, u32 cc, u8 *cmd)
attrs = chip->cc_attrs_tbl[i];
nr_handles = (attrs >> TPM2_CC_ATTR_CHANDLES) & GENMASK(2, 0);
- handle = (u32 *)&cmd[TPM_HEADER_SIZE];
+ handle = (__be32 *)&cmd[TPM_HEADER_SIZE];
for (i = 0; i < nr_handles; i++, handle++) {
if ((be32_to_cpu(*handle) & 0xFF000000) == TPM2_HT_TRANSIENT) {
if (!tpm2_map_to_phandle(space, handle))
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index 8f0a98dea327..7b3c2a8aa9de 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -92,14 +92,9 @@ enum crb_status {
CRB_DRV_STS_COMPLETE = BIT(0),
};
-enum crb_flags {
- CRB_FL_ACPI_START = BIT(0),
- CRB_FL_CRB_START = BIT(1),
- CRB_FL_CRB_SMC_START = BIT(2),
-};
-
struct crb_priv {
- unsigned int flags;
+ u32 sm;
+ const char *hid;
void __iomem *iobase;
struct crb_regs_head __iomem *regs_h;
struct crb_regs_tail __iomem *regs_t;
@@ -128,14 +123,16 @@ struct tpm2_crb_smc {
* Anyhow, we do not wait here as a consequent CMD_READY request
* will be handled correctly even if idle was not completed.
*
- * The function does nothing for devices with ACPI-start method.
+ * The function does nothing for devices with ACPI-start method
+ * or SMC-start method.
*
* Return: 0 always
*/
static int __maybe_unused crb_go_idle(struct device *dev, struct crb_priv *priv)
{
- if ((priv->flags & CRB_FL_ACPI_START) ||
- (priv->flags & CRB_FL_CRB_SMC_START))
+ if ((priv->sm == ACPI_TPM2_START_METHOD) ||
+ (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) ||
+ (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC))
return 0;
iowrite32(CRB_CTRL_REQ_GO_IDLE, &priv->regs_t->ctrl_req);
@@ -174,14 +171,16 @@ static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value,
* The device should respond within TIMEOUT_C.
*
* The function does nothing for devices with ACPI-start method
+ * or SMC-start method.
*
* Return: 0 on success -ETIME on timeout;
*/
static int __maybe_unused crb_cmd_ready(struct device *dev,
struct crb_priv *priv)
{
- if ((priv->flags & CRB_FL_ACPI_START) ||
- (priv->flags & CRB_FL_CRB_SMC_START))
+ if ((priv->sm == ACPI_TPM2_START_METHOD) ||
+ (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) ||
+ (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC))
return 0;
iowrite32(CRB_CTRL_REQ_CMD_READY, &priv->regs_t->ctrl_req);
@@ -325,13 +324,20 @@ static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len)
/* Make sure that cmd is populated before issuing start. */
wmb();
- if (priv->flags & CRB_FL_CRB_START)
+ /* The reason for the extra quirk is that the PTT in 4th Gen Core CPUs
+ * report only ACPI start but in practice seems to require both
+ * CRB start, hence invoking CRB start method if hid == MSFT0101.
+ */
+ if ((priv->sm == ACPI_TPM2_COMMAND_BUFFER) ||
+ (priv->sm == ACPI_TPM2_MEMORY_MAPPED) ||
+ (!strcmp(priv->hid, "MSFT0101")))
iowrite32(CRB_START_INVOKE, &priv->regs_t->ctrl_start);
- if (priv->flags & CRB_FL_ACPI_START)
+ if ((priv->sm == ACPI_TPM2_START_METHOD) ||
+ (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD))
rc = crb_do_acpi_start(chip);
- if (priv->flags & CRB_FL_CRB_SMC_START) {
+ if (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC) {
iowrite32(CRB_START_INVOKE, &priv->regs_t->ctrl_start);
rc = tpm_crb_smc_start(&chip->dev, priv->smc_func_id);
}
@@ -345,7 +351,9 @@ static void crb_cancel(struct tpm_chip *chip)
iowrite32(CRB_CANCEL_INVOKE, &priv->regs_t->ctrl_cancel);
- if ((priv->flags & CRB_FL_ACPI_START) && crb_do_acpi_start(chip))
+ if (((priv->sm == ACPI_TPM2_START_METHOD) ||
+ (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD)) &&
+ crb_do_acpi_start(chip))
dev_err(&chip->dev, "ACPI Start failed\n");
}
@@ -458,7 +466,8 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
* the control area, as one nice sane region except for some older
* stuff that puts the control area outside the ACPI IO region.
*/
- if (!(priv->flags & CRB_FL_ACPI_START)) {
+ if ((priv->sm == ACPI_TPM2_COMMAND_BUFFER) ||
+ (priv->sm == ACPI_TPM2_MEMORY_MAPPED)) {
if (buf->control_address == io_res.start +
sizeof(*priv->regs_h))
priv->regs_h = priv->iobase;
@@ -552,18 +561,6 @@ static int crb_acpi_add(struct acpi_device *device)
if (!priv)
return -ENOMEM;
- /* The reason for the extra quirk is that the PTT in 4th Gen Core CPUs
- * report only ACPI start but in practice seems to require both
- * ACPI start and CRB start.
- */
- if (sm == ACPI_TPM2_COMMAND_BUFFER || sm == ACPI_TPM2_MEMORY_MAPPED ||
- !strcmp(acpi_device_hid(device), "MSFT0101"))
- priv->flags |= CRB_FL_CRB_START;
-
- if (sm == ACPI_TPM2_START_METHOD ||
- sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD)
- priv->flags |= CRB_FL_ACPI_START;
-
if (sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC) {
if (buf->header.length < (sizeof(*buf) + sizeof(*crb_smc))) {
dev_err(dev,
@@ -574,9 +571,11 @@ static int crb_acpi_add(struct acpi_device *device)
}
crb_smc = ACPI_ADD_PTR(struct tpm2_crb_smc, buf, sizeof(*buf));
priv->smc_func_id = crb_smc->smc_func_id;
- priv->flags |= CRB_FL_CRB_SMC_START;
}
+ priv->sm = sm;
+ priv->hid = acpi_device_hid(device);
+
rc = crb_map_io(device, priv, buf);
if (rc)
return rc;
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 7e55aa9ce680..e2d1055fb814 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -30,6 +30,7 @@
#include <linux/freezer.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/kernel.h>
#include "tpm.h"
#include "tpm_tis_core.h"
@@ -223,7 +224,7 @@ static int tpm_tcg_read_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
}
static int tpm_tcg_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
- u8 *value)
+ const u8 *value)
{
struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
@@ -365,7 +366,7 @@ static struct pnp_driver tis_pnp_driver = {
},
};
-#define TIS_HID_USR_IDX sizeof(tpm_pnp_tbl)/sizeof(struct pnp_device_id) -2
+#define TIS_HID_USR_IDX (ARRAY_SIZE(tpm_pnp_tbl) - 2)
module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id,
sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444);
MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index 63bc6c3b949e..fdde971bc810 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -252,7 +252,7 @@ out:
* tpm.c can skip polling for the data to be available as the interrupt is
* waited for here
*/
-static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
+static int tpm_tis_send_data(struct tpm_chip *chip, const u8 *buf, size_t len)
{
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
int rc, status, burstcnt;
@@ -343,7 +343,7 @@ static void disable_interrupts(struct tpm_chip *chip)
* tpm.c can skip polling for the data to be available as the interrupt is
* waited for here
*/
-static int tpm_tis_send_main(struct tpm_chip *chip, u8 *buf, size_t len)
+static int tpm_tis_send_main(struct tpm_chip *chip, const u8 *buf, size_t len)
{
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
int rc;
@@ -445,7 +445,7 @@ static int probe_itpm(struct tpm_chip *chip)
{
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
int rc = 0;
- u8 cmd_getticks[] = {
+ static const u8 cmd_getticks[] = {
0x00, 0xc1, 0x00, 0x00, 0x00, 0x0a,
0x00, 0x00, 0x00, 0xf1
};
diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
index e2212f021a02..6bbac319ff3b 100644
--- a/drivers/char/tpm/tpm_tis_core.h
+++ b/drivers/char/tpm/tpm_tis_core.h
@@ -98,7 +98,7 @@ struct tpm_tis_phy_ops {
int (*read_bytes)(struct tpm_tis_data *data, u32 addr, u16 len,
u8 *result);
int (*write_bytes)(struct tpm_tis_data *data, u32 addr, u16 len,
- u8 *value);
+ const u8 *value);
int (*read16)(struct tpm_tis_data *data, u32 addr, u16 *result);
int (*read32)(struct tpm_tis_data *data, u32 addr, u32 *result);
int (*write32)(struct tpm_tis_data *data, u32 addr, u32 src);
@@ -128,7 +128,7 @@ static inline int tpm_tis_read32(struct tpm_tis_data *data, u32 addr,
}
static inline int tpm_tis_write_bytes(struct tpm_tis_data *data, u32 addr,
- u16 len, u8 *value)
+ u16 len, const u8 *value)
{
return data->phy_ops->write_bytes(data, addr, len, value);
}
diff --git a/drivers/char/tpm/tpm_tis_spi.c b/drivers/char/tpm/tpm_tis_spi.c
index 88fe72ae967f..424ff2fde1f2 100644
--- a/drivers/char/tpm/tpm_tis_spi.c
+++ b/drivers/char/tpm/tpm_tis_spi.c
@@ -46,9 +46,7 @@
struct tpm_tis_spi_phy {
struct tpm_tis_data priv;
struct spi_device *spi_device;
-
- u8 tx_buf[4];
- u8 rx_buf[4];
+ u8 *iobuf;
};
static inline struct tpm_tis_spi_phy *to_tpm_tis_spi_phy(struct tpm_tis_data *data)
@@ -57,7 +55,7 @@ static inline struct tpm_tis_spi_phy *to_tpm_tis_spi_phy(struct tpm_tis_data *da
}
static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
- u8 *buffer, u8 direction)
+ u8 *in, const u8 *out)
{
struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
int ret = 0;
@@ -71,14 +69,14 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
while (len) {
transfer_len = min_t(u16, len, MAX_SPI_FRAMESIZE);
- phy->tx_buf[0] = direction | (transfer_len - 1);
- phy->tx_buf[1] = 0xd4;
- phy->tx_buf[2] = addr >> 8;
- phy->tx_buf[3] = addr;
+ phy->iobuf[0] = (in ? 0x80 : 0) | (transfer_len - 1);
+ phy->iobuf[1] = 0xd4;
+ phy->iobuf[2] = addr >> 8;
+ phy->iobuf[3] = addr;
memset(&spi_xfer, 0, sizeof(spi_xfer));
- spi_xfer.tx_buf = phy->tx_buf;
- spi_xfer.rx_buf = phy->rx_buf;
+ spi_xfer.tx_buf = phy->iobuf;
+ spi_xfer.rx_buf = phy->iobuf;
spi_xfer.len = 4;
spi_xfer.cs_change = 1;
@@ -88,9 +86,9 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
if (ret < 0)
goto exit;
- if ((phy->rx_buf[3] & 0x01) == 0) {
+ if ((phy->iobuf[3] & 0x01) == 0) {
// handle SPI wait states
- phy->tx_buf[0] = 0;
+ phy->iobuf[0] = 0;
for (i = 0; i < TPM_RETRY; i++) {
spi_xfer.len = 1;
@@ -99,7 +97,7 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
ret = spi_sync_locked(phy->spi_device, &m);
if (ret < 0)
goto exit;
- if (phy->rx_buf[0] & 0x01)
+ if (phy->iobuf[0] & 0x01)
break;
}
@@ -113,12 +111,12 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
spi_xfer.len = transfer_len;
spi_xfer.delay_usecs = 5;
- if (direction) {
+ if (in) {
spi_xfer.tx_buf = NULL;
- spi_xfer.rx_buf = buffer;
- } else {
- spi_xfer.tx_buf = buffer;
+ } else if (out) {
spi_xfer.rx_buf = NULL;
+ memcpy(phy->iobuf, out, transfer_len);
+ out += transfer_len;
}
spi_message_init(&m);
@@ -127,8 +125,12 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
if (ret < 0)
goto exit;
+ if (in) {
+ memcpy(in, phy->iobuf, transfer_len);
+ in += transfer_len;
+ }
+
len -= transfer_len;
- buffer += transfer_len;
}
exit:
@@ -139,40 +141,51 @@ exit:
static int tpm_tis_spi_read_bytes(struct tpm_tis_data *data, u32 addr,
u16 len, u8 *result)
{
- return tpm_tis_spi_transfer(data, addr, len, result, 0x80);
+ return tpm_tis_spi_transfer(data, addr, len, result, NULL);
}
static int tpm_tis_spi_write_bytes(struct tpm_tis_data *data, u32 addr,
- u16 len, u8 *value)
+ u16 len, const u8 *value)
{
- return tpm_tis_spi_transfer(data, addr, len, value, 0);
+ return tpm_tis_spi_transfer(data, addr, len, NULL, value);
}
static int tpm_tis_spi_read16(struct tpm_tis_data *data, u32 addr, u16 *result)
{
+ __le16 result_le;
int rc;
- rc = data->phy_ops->read_bytes(data, addr, sizeof(u16), (u8 *)result);
+ rc = data->phy_ops->read_bytes(data, addr, sizeof(u16),
+ (u8 *)&result_le);
if (!rc)
- *result = le16_to_cpu(*result);
+ *result = le16_to_cpu(result_le);
+
return rc;
}
static int tpm_tis_spi_read32(struct tpm_tis_data *data, u32 addr, u32 *result)
{
+ __le32 result_le;
int rc;
- rc = data->phy_ops->read_bytes(data, addr, sizeof(u32), (u8 *)result);
+ rc = data->phy_ops->read_bytes(data, addr, sizeof(u32),
+ (u8 *)&result_le);
if (!rc)
- *result = le32_to_cpu(*result);
+ *result = le32_to_cpu(result_le);
+
return rc;
}
static int tpm_tis_spi_write32(struct tpm_tis_data *data, u32 addr, u32 value)
{
- value = cpu_to_le32(value);
- return data->phy_ops->write_bytes(data, addr, sizeof(u32),
- (u8 *)&value);
+ __le32 value_le;
+ int rc;
+
+ value_le = cpu_to_le32(value);
+ rc = data->phy_ops->write_bytes(data, addr, sizeof(u32),
+ (u8 *)&value_le);
+
+ return rc;
}
static const struct tpm_tis_phy_ops tpm_spi_phy_ops = {
@@ -194,6 +207,10 @@ static int tpm_tis_spi_probe(struct spi_device *dev)
phy->spi_device = dev;
+ phy->iobuf = devm_kmalloc(&dev->dev, MAX_SPI_FRAMESIZE, GFP_KERNEL);
+ if (!phy->iobuf)
+ return -ENOMEM;
+
return tpm_tis_core_init(&dev->dev, &phy->priv, -1, &tpm_spi_phy_ops,
NULL);
}
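
The tpm_tis_spi changes above replace the 4-byte tx_buf/rx_buf members embedded in the phy structure with one heap-allocated iobuf: buffers handed to the SPI core may be DMA-mapped, so they should come from the allocator rather than live inside another structure, and the read16/read32 fixes stop punning u16/u32 result pointers as little-endian storage. A small sketch of the buffer rule in isolation, with made-up names:

    /* Illustrative sketch; names are made up, only the buffer rule matters. */
    #include <linux/device.h>
    #include <linux/spi/spi.h>

    static int example_spi_xfer(struct spi_device *spi, u8 *iobuf, size_t len)
    {
            /*
             * iobuf must come from kmalloc()/devm_kmalloc(): the SPI core may
             * DMA-map it, so stack buffers or fields embedded in an unrelated
             * structure (like the old tx_buf[4]/rx_buf[4]) are not safe here.
             */
            struct spi_transfer xfer = {
                    .tx_buf = iobuf,
                    .rx_buf = iobuf,        /* full duplex over the same buffer */
                    .len    = len,
            };

            return spi_sync_transfer(spi, &xfer, 1);
    }
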
diff --git a/drivers/clocksource/bcm2835_timer.c b/drivers/clocksource/bcm2835_timer.c
index 39e489a96ad7..60da2537bef9 100644
--- a/drivers/clocksource/bcm2835_timer.c
+++ b/drivers/clocksource/bcm2835_timer.c
@@ -71,7 +71,7 @@ static irqreturn_t bcm2835_time_interrupt(int irq, void *dev_id)
if (readl_relaxed(timer->control) & timer->match_mask) {
writel_relaxed(timer->match_mask, timer->control);
- event_handler = ACCESS_ONCE(timer->evt.event_handler);
+ event_handler = READ_ONCE(timer->evt.event_handler);
if (event_handler)
event_handler(&timer->evt);
return IRQ_HANDLED;
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index d258953ff488..f4f258075b89 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -172,7 +172,7 @@ static void caam_jr_dequeue(unsigned long devarg)
while (rd_reg32(&jrp->rregs->outring_used)) {
- head = ACCESS_ONCE(jrp->head);
+ head = READ_ONCE(jrp->head);
spin_lock(&jrp->outlock);
@@ -341,7 +341,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
spin_lock_bh(&jrp->inplock);
head = jrp->head;
- tail = ACCESS_ONCE(jrp->tail);
+ tail = READ_ONCE(jrp->tail);
if (!rd_reg32(&jrp->rregs->inpring_avail) ||
CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c
index 874ddf5e9087..0f20f5ec9617 100644
--- a/drivers/crypto/nx/nx-842-powernv.c
+++ b/drivers/crypto/nx/nx-842-powernv.c
@@ -193,7 +193,7 @@ static int wait_for_csb(struct nx842_workmem *wmem,
ktime_t start = wmem->start, now = ktime_get();
ktime_t timeout = ktime_add_ms(start, CSB_WAIT_MAX);
- while (!(ACCESS_ONCE(csb->flags) & CSB_V)) {
+ while (!(READ_ONCE(csb->flags) & CSB_V)) {
cpu_relax();
now = ktime_get();
if (ktime_after(now, timeout))
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index 346c4987b284..11d6419788c2 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -175,11 +175,11 @@ static ssize_t altr_sdr_mc_err_inject_write(struct file *file,
/*
* To trigger the error, we need to read the data back
* (the data was written with errors above).
- * The ACCESS_ONCE macros and printk are used to prevent the
+ * The READ_ONCE macros and printk are used to prevent the
* the compiler optimizing these reads out.
*/
- reg = ACCESS_ONCE(ptemp[0]);
- read_reg = ACCESS_ONCE(ptemp[1]);
+ reg = READ_ONCE(ptemp[0]);
+ read_reg = READ_ONCE(ptemp[1]);
/* Force Read */
rmb();
@@ -618,7 +618,7 @@ static ssize_t altr_edac_device_trig(struct file *file,
for (i = 0; i < (priv->trig_alloc_sz / sizeof(*ptemp)); i++) {
/* Read data so we're in the correct state */
rmb();
- if (ACCESS_ONCE(ptemp[i]))
+ if (READ_ONCE(ptemp[i]))
result = -1;
/* Toggle Error bit (it is latched), leave ECC enabled */
writel(error_mask, (drvdata->base + priv->set_err_ofst));
@@ -635,7 +635,7 @@ static ssize_t altr_edac_device_trig(struct file *file,
/* Read out written data. ECC error caused here */
for (i = 0; i < ALTR_TRIGGER_READ_WRD_CNT; i++)
- if (ACCESS_ONCE(ptemp[i]) != i)
+ if (READ_ONCE(ptemp[i]) != i)
edac_printk(KERN_ERR, EDAC_DEVICE,
"Read doesn't match written data\n");
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index ac2f30295efe..8b16ec595fa7 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -3434,9 +3434,14 @@ MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids);
static int __init amd64_edac_init(void)
{
+ const char *owner;
int err = -ENODEV;
int i;
+ owner = edac_get_owner();
+ if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
+ return -EBUSY;
+
if (!x86_match_cpu(amd64_cpuids))
return -ENODEV;
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 480072139b7a..48193f5f3b56 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -53,7 +53,7 @@ static LIST_HEAD(mc_devices);
* Used to lock EDAC MC to just one module, avoiding two drivers e. g.
* apei/ghes and i7core_edac to be used at the same time.
*/
-static void const *edac_mc_owner;
+static const char *edac_mc_owner;
static struct bus_type mc_bus[EDAC_MAX_MCS];
@@ -701,6 +701,11 @@ unlock:
}
EXPORT_SYMBOL(edac_mc_find);
+const char *edac_get_owner(void)
+{
+ return edac_mc_owner;
+}
+EXPORT_SYMBOL_GPL(edac_get_owner);
/* FIXME - should a warning be printed if no error detection? correction? */
int edac_mc_add_mc_with_groups(struct mem_ctl_info *mci,
diff --git a/drivers/edac/edac_mc.h b/drivers/edac/edac_mc.h
index 5357800e418d..4165e15995ad 100644
--- a/drivers/edac/edac_mc.h
+++ b/drivers/edac/edac_mc.h
@@ -128,6 +128,14 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
unsigned sz_pvt);
/**
+ * edac_get_owner - Return the owner's mod_name of EDAC MC
+ *
+ * Returns:
+ * Pointer to mod_name string when EDAC MC is owned. NULL otherwise.
+ */
+extern const char *edac_get_owner(void);
+
+/*
* edac_mc_add_mc_with_groups() - Insert the @mci structure into the mci
* global list and create sysfs entries associated with @mci structure.
*
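
edac_get_owner(), declared above, lets a chip-specific EDAC driver bail out of module init when another module already owns the memory-controller layer; the amd64_edac and pnd2_edac hunks in this series use it exactly this way. As a generic sketch (EXAMPLE_EDAC_MOD_STR and the function name are placeholders):

    /* Generic sketch; EXAMPLE_EDAC_MOD_STR and the function are placeholders. */
    #include <linux/errno.h>
    #include <linux/init.h>
    #include <linux/string.h>
    #include "edac_mc.h"

    #define EXAMPLE_EDAC_MOD_STR "example_edac"

    static int __init example_edac_init(void)
    {
            const char *owner;

            /*
             * If the MC layer is already claimed by a different module (for
             * example ghes_edac on firmware-first platforms), stay out of the way.
             */
            owner = edac_get_owner();
            if (owner && strncmp(owner, EXAMPLE_EDAC_MOD_STR,
                                 sizeof(EXAMPLE_EDAC_MOD_STR)))
                    return -EBUSY;

            /* ...normal CPU/PCI matching and edac_mc_add_mc() would follow... */
            return 0;
    }
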
diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c
index 6f80eb65c26c..68b6ee18bea6 100644
--- a/drivers/edac/ghes_edac.c
+++ b/drivers/edac/ghes_edac.c
@@ -28,10 +28,19 @@ struct ghes_edac_pvt {
char msg[80];
};
-static LIST_HEAD(ghes_reglist);
-static DEFINE_MUTEX(ghes_edac_lock);
-static int ghes_edac_mc_num;
+static atomic_t ghes_init = ATOMIC_INIT(0);
+static struct ghes_edac_pvt *ghes_pvt;
+/*
+ * Sync with other, potentially concurrent callers of
+ * ghes_edac_report_mem_error(). We don't know what the
+ * "inventive" firmware would do.
+ */
+static DEFINE_SPINLOCK(ghes_lock);
+
+/* "ghes_edac.force_load=1" skips the platform check */
+static bool __read_mostly force_load;
+module_param(force_load, bool, 0);
/* Memory Device - Type 17 of SMBIOS spec */
struct memdev_dmi_entry {
@@ -169,18 +178,26 @@ void ghes_edac_report_mem_error(struct ghes *ghes, int sev,
enum hw_event_mc_err_type type;
struct edac_raw_error_desc *e;
struct mem_ctl_info *mci;
- struct ghes_edac_pvt *pvt = NULL;
+ struct ghes_edac_pvt *pvt = ghes_pvt;
+ unsigned long flags;
char *p;
u8 grain_bits;
- list_for_each_entry(pvt, &ghes_reglist, list) {
- if (ghes == pvt->ghes)
- break;
- }
if (!pvt) {
pr_err("Internal error: Can't find EDAC structure\n");
return;
}
+
+ /*
+ * We can do the locking below because GHES defers error processing
+ * from NMI to IRQ context. Whenever that changes, we'd at least
+ * know.
+ */
+ if (WARN_ON_ONCE(in_nmi()))
+ return;
+
+ spin_lock_irqsave(&ghes_lock, flags);
+
mci = pvt->mci;
e = &mci->error_desc;
@@ -398,10 +415,17 @@ void ghes_edac_report_mem_error(struct ghes *ghes, int sev,
(e->page_frame_number << PAGE_SHIFT) | e->offset_in_page,
grain_bits, e->syndrome, pvt->detail_location);
- /* Report the error via EDAC API */
edac_raw_mc_handle_error(type, mci, e);
+ spin_unlock_irqrestore(&ghes_lock, flags);
}
-EXPORT_SYMBOL_GPL(ghes_edac_report_mem_error);
+
+/*
+ * Known systems that are safe to enable this module.
+ */
+static struct acpi_platform_list plat_list[] = {
+ {"HPE ", "Server ", 0, ACPI_SIG_FADT, all_versions},
+ { } /* End */
+};
int ghes_edac_register(struct ghes *ghes, struct device *dev)
{
@@ -409,8 +433,19 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev)
int rc, num_dimm = 0;
struct mem_ctl_info *mci;
struct edac_mc_layer layers[1];
- struct ghes_edac_pvt *pvt;
struct ghes_edac_dimm_fill dimm_fill;
+ int idx;
+
+ /* Check if safe to enable on this system */
+ idx = acpi_match_platform_list(plat_list);
+ if (!force_load && idx < 0)
+ return 0;
+
+ /*
+ * We have only one logical memory controller to which all DIMMs belong.
+ */
+ if (atomic_inc_return(&ghes_init) > 1)
+ return 0;
/* Get the number of DIMMs */
dmi_walk(ghes_edac_count_dimms, &num_dimm);
@@ -425,26 +460,17 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev)
layers[0].size = num_dimm;
layers[0].is_virt_csrow = true;
- /*
- * We need to serialize edac_mc_alloc() and edac_mc_add_mc(),
- * to avoid duplicated memory controller numbers
- */
- mutex_lock(&ghes_edac_lock);
- mci = edac_mc_alloc(ghes_edac_mc_num, ARRAY_SIZE(layers), layers,
- sizeof(*pvt));
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(struct ghes_edac_pvt));
if (!mci) {
pr_info("Can't allocate memory for EDAC data\n");
- mutex_unlock(&ghes_edac_lock);
return -ENOMEM;
}
- pvt = mci->pvt_info;
- memset(pvt, 0, sizeof(*pvt));
- list_add_tail(&pvt->list, &ghes_reglist);
- pvt->ghes = ghes;
- pvt->mci = mci;
- mci->pdev = dev;
+ ghes_pvt = mci->pvt_info;
+ ghes_pvt->ghes = ghes;
+ ghes_pvt->mci = mci;
+ mci->pdev = dev;
mci->mtype_cap = MEM_FLAG_EMPTY;
mci->edac_ctl_cap = EDAC_FLAG_NONE;
mci->edac_cap = EDAC_FLAG_NONE;
@@ -452,36 +478,23 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev)
mci->ctl_name = "ghes_edac";
mci->dev_name = "ghes";
- if (!ghes_edac_mc_num) {
- if (!fake) {
- pr_info("This EDAC driver relies on BIOS to enumerate memory and get error reports.\n");
- pr_info("Unfortunately, not all BIOSes reflect the memory layout correctly.\n");
- pr_info("So, the end result of using this driver varies from vendor to vendor.\n");
- pr_info("If you find incorrect reports, please contact your hardware vendor\n");
- pr_info("to correct its BIOS.\n");
- pr_info("This system has %d DIMM sockets.\n",
- num_dimm);
- } else {
- pr_info("This system has a very crappy BIOS: It doesn't even list the DIMMS.\n");
- pr_info("Its SMBIOS info is wrong. It is doubtful that the error report would\n");
- pr_info("work on such system. Use this driver with caution\n");
- }
+ if (fake) {
+ pr_info("This system has a very crappy BIOS: It doesn't even list the DIMMS.\n");
+ pr_info("Its SMBIOS info is wrong. It is doubtful that the error report would\n");
+ pr_info("work on such system. Use this driver with caution\n");
+ } else if (idx < 0) {
+ pr_info("This EDAC driver relies on BIOS to enumerate memory and get error reports.\n");
+ pr_info("Unfortunately, not all BIOSes reflect the memory layout correctly.\n");
+ pr_info("So, the end result of using this driver varies from vendor to vendor.\n");
+ pr_info("If you find incorrect reports, please contact your hardware vendor\n");
+ pr_info("to correct its BIOS.\n");
+ pr_info("This system has %d DIMM sockets.\n", num_dimm);
}
if (!fake) {
- /*
- * Fill DIMM info from DMI for the memory controller #0
- *
- * Keep it in blank for the other memory controllers, as
- * there's no reliable way to properly credit each DIMM to
- * the memory controller, as different BIOSes fill the
- * DMI bank location fields on different ways
- */
- if (!ghes_edac_mc_num) {
- dimm_fill.count = 0;
- dimm_fill.mci = mci;
- dmi_walk(ghes_edac_dmidecode, &dimm_fill);
- }
+ dimm_fill.count = 0;
+ dimm_fill.mci = mci;
+ dmi_walk(ghes_edac_dmidecode, &dimm_fill);
} else {
struct dimm_info *dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
mci->n_layers, 0, 0, 0);
@@ -497,28 +510,16 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev)
if (rc < 0) {
pr_info("Can't register at EDAC core\n");
edac_mc_free(mci);
- mutex_unlock(&ghes_edac_lock);
return -ENODEV;
}
-
- ghes_edac_mc_num++;
- mutex_unlock(&ghes_edac_lock);
return 0;
}
-EXPORT_SYMBOL_GPL(ghes_edac_register);
void ghes_edac_unregister(struct ghes *ghes)
{
struct mem_ctl_info *mci;
- struct ghes_edac_pvt *pvt, *tmp;
-
- list_for_each_entry_safe(pvt, tmp, &ghes_reglist, list) {
- if (ghes == pvt->ghes) {
- mci = pvt->mci;
- edac_mc_del_mc(mci->pdev);
- edac_mc_free(mci);
- list_del(&pvt->list);
- }
- }
+
+ mci = ghes_pvt->mci;
+ edac_mc_del_mc(mci->pdev);
+ edac_mc_free(mci);
}
-EXPORT_SYMBOL_GPL(ghes_edac_unregister);
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index c16c3b931b3d..8c5540160a23 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -2159,8 +2159,13 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev)
mci->edac_ctl_cap = EDAC_FLAG_NONE;
mci->edac_cap = EDAC_FLAG_NONE;
mci->mod_name = "i7core_edac.c";
- mci->ctl_name = kasprintf(GFP_KERNEL, "i7 core #%d",
- i7core_dev->socket);
+
+ mci->ctl_name = kasprintf(GFP_KERNEL, "i7 core #%d", i7core_dev->socket);
+ if (!mci->ctl_name) {
+ rc = -ENOMEM;
+ goto fail1;
+ }
+
mci->dev_name = pci_name(i7core_dev->pdev[0]);
mci->ctl_page_to_phys = NULL;
@@ -2214,6 +2219,8 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev)
fail0:
kfree(mci->ctl_name);
+
+fail1:
edac_mc_free(mci);
i7core_dev->mci = NULL;
return rc;
diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c
index 4395c84cdcbf..df28b65358d2 100644
--- a/drivers/edac/pnd2_edac.c
+++ b/drivers/edac/pnd2_edac.c
@@ -45,6 +45,8 @@
#include "edac_module.h"
#include "pnd2_edac.h"
+#define EDAC_MOD_STR "pnd2_edac"
+
#define APL_NUM_CHANNELS 4
#define DNV_NUM_CHANNELS 2
#define DNV_MAX_DIMMS 2 /* Max DIMMs per channel */
@@ -1355,7 +1357,7 @@ static int pnd2_register_mci(struct mem_ctl_info **ppmci)
pvt = mci->pvt_info;
memset(pvt, 0, sizeof(*pvt));
- mci->mod_name = "pnd2_edac.c";
+ mci->mod_name = EDAC_MOD_STR;
mci->dev_name = ops->name;
mci->ctl_name = "Pondicherry2";
@@ -1547,10 +1549,15 @@ MODULE_DEVICE_TABLE(x86cpu, pnd2_cpuids);
static int __init pnd2_init(void)
{
const struct x86_cpu_id *id;
+ const char *owner;
int rc;
edac_dbg(2, "\n");
+ owner = edac_get_owner();
+ if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
+ return -EBUSY;
+
id = x86_match_cpu(pnd2_cpuids);
if (!id)
return -ENODEV;
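
A minimal sketch of the single-owner guard this series adds to pnd2_init() here and to sbridge_init() and skx_init() further down; everything except edac_get_owner() and EDAC_MOD_STR is an illustrative name. Comparing with sizeof(EDAC_MOD_STR) includes the terminating NUL, so the strncmp() is effectively an exact-match test.

static int __init example_edac_init(void)
{
	/* Ask the EDAC core which module, if any, already owns it. */
	const char *owner = edac_get_owner();

	/* A different EDAC driver registered first: back off quietly. */
	if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
		return -EBUSY;

	/* ... CPU-ID matching and memory-controller registration ... */
	return 0;
}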
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index dc0591654011..f34430f99fd8 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -36,7 +36,7 @@ static LIST_HEAD(sbridge_edac_list);
* Alter this version for the module when modifications are made
*/
#define SBRIDGE_REVISION " Ver: 1.1.2 "
-#define EDAC_MOD_STR "sbridge_edac"
+#define EDAC_MOD_STR "sb_edac"
/*
* Debug macros
@@ -462,6 +462,7 @@ static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
static const struct pci_id_descr pci_dev_descr_ibridge[] = {
/* Processor Home Agent */
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0, 0, IMC0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1, 1, IMC1) },
/* Memory controller */
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA, 0, IMC0) },
@@ -472,7 +473,6 @@ static const struct pci_id_descr pci_dev_descr_ibridge[] = {
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3, 0, IMC0) },
/* Optional, mode 2HA */
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1, 1, IMC1) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA, 1, IMC1) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS, 1, IMC1) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0, 1, IMC1) },
@@ -1318,9 +1318,7 @@ static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes)
int cur_reg_start;
int mc;
int channel;
- int way;
int participants[KNL_MAX_CHANNELS];
- int participant_count = 0;
for (i = 0; i < KNL_MAX_CHANNELS; i++)
mc_sizes[i] = 0;
@@ -1495,21 +1493,14 @@ static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes)
* this channel mapped to the given target?
*/
for (channel = 0; channel < KNL_MAX_CHANNELS; channel++) {
- for (way = 0; way < intrlv_ways; way++) {
- int target;
- int cha;
-
- if (KNL_MOD3(dram_rule))
- target = way;
- else
- target = 0x7 & sad_pkg(
- pvt->info.interleave_pkg, interleave_reg, way);
+ int target;
+ int cha;
+ for (target = 0; target < KNL_MAX_CHANNELS; target++) {
for (cha = 0; cha < KNL_MAX_CHAS; cha++) {
if (knl_get_mc_route(target,
mc_route_reg[cha]) == channel
&& !participants[channel]) {
- participant_count++;
participants[channel] = 1;
break;
}
@@ -1517,10 +1508,6 @@ static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes)
}
}
- if (participant_count != intrlv_ways)
- edac_dbg(0, "participant_count (%d) != interleave_ways (%d): DIMM size may be incorrect\n",
- participant_count, intrlv_ways);
-
for (channel = 0; channel < KNL_MAX_CHANNELS; channel++) {
mc = knl_channel_mc(channel);
if (participants[channel]) {
@@ -2291,6 +2278,13 @@ static int sbridge_get_onedevice(struct pci_dev **prev,
next_imc:
sbridge_dev = get_sbridge_dev(bus, dev_descr->dom, multi_bus, sbridge_dev);
if (!sbridge_dev) {
+ /* If the HA1 wasn't found, don't create a second EDAC memory controller */

+ if (dev_descr->dom == IMC1 && devno != 1) {
+ edac_dbg(0, "Skip IMC1: %04x:%04x (since HA1 was absent)\n",
+ PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
+ pci_dev_put(pdev);
+ return 0;
+ }
if (dev_descr->dom == SOCK)
goto out_imc;
@@ -2491,6 +2485,7 @@ static int ibridge_mci_bind_devs(struct mem_ctl_info *mci,
case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA:
case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA:
pvt->pci_ta = pdev;
+ break;
case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS:
case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS:
pvt->pci_ras = pdev;
@@ -3155,7 +3150,7 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
MEM_FLAG_DDR4 : MEM_FLAG_DDR3;
mci->edac_ctl_cap = EDAC_FLAG_NONE;
mci->edac_cap = EDAC_FLAG_NONE;
- mci->mod_name = "sb_edac.c";
+ mci->mod_name = EDAC_MOD_STR;
mci->dev_name = pci_name(pdev);
mci->ctl_page_to_phys = NULL;
@@ -3287,6 +3282,11 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
break;
}
+ if (!mci->ctl_name) {
+ rc = -ENOMEM;
+ goto fail0;
+ }
+
/* Get dimm basic config and the memory layout */
rc = get_dimm_config(mci);
if (rc < 0) {
@@ -3402,10 +3402,15 @@ static void sbridge_remove(void)
static int __init sbridge_init(void)
{
const struct x86_cpu_id *id;
+ const char *owner;
int rc;
edac_dbg(2, "\n");
+ owner = edac_get_owner();
+ if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
+ return -EBUSY;
+
id = x86_match_cpu(sbridge_cpuids);
if (!id)
return -ENODEV;
diff --git a/drivers/edac/skx_edac.c b/drivers/edac/skx_edac.c
index 16dea97568a1..912c4930c9ef 100644
--- a/drivers/edac/skx_edac.c
+++ b/drivers/edac/skx_edac.c
@@ -31,6 +31,8 @@
#include "edac_module.h"
+#define EDAC_MOD_STR "skx_edac"
+
/*
* Debug macros
*/
@@ -65,6 +67,7 @@ static u64 skx_tolm, skx_tohm;
struct skx_dev {
struct list_head list;
u8 bus[4];
+ int seg;
struct pci_dev *sad_all;
struct pci_dev *util_all;
u32 mcroute;
@@ -110,12 +113,12 @@ struct decoded_addr {
int bank_group;
};
-static struct skx_dev *get_skx_dev(u8 bus, u8 idx)
+static struct skx_dev *get_skx_dev(struct pci_bus *bus, u8 idx)
{
struct skx_dev *d;
list_for_each_entry(d, &skx_edac_list, list) {
- if (d->bus[idx] == bus)
+ if (d->seg == pci_domain_nr(bus) && d->bus[idx] == bus->number)
return d;
}
@@ -172,6 +175,7 @@ static int get_all_bus_mappings(void)
pci_dev_put(pdev);
return -ENOMEM;
}
+ d->seg = pci_domain_nr(pdev->bus);
pci_read_config_dword(pdev, 0xCC, &reg);
d->bus[0] = GET_BITFIELD(reg, 0, 7);
d->bus[1] = GET_BITFIELD(reg, 8, 15);
@@ -207,7 +211,7 @@ static int get_all_munits(const struct munit *m)
if (i == NUM_IMC)
goto fail;
}
- d = get_skx_dev(pdev->bus->number, m->busidx);
+ d = get_skx_dev(pdev->bus, m->busidx);
if (!d)
goto fail;
@@ -299,7 +303,7 @@ static int get_dimm_attr(u32 reg, int lobit, int hibit, int add, int minval,
#define IS_DIMM_PRESENT(mtr) GET_BITFIELD((mtr), 15, 15)
-#define numrank(reg) get_dimm_attr((reg), 12, 13, 0, 1, 2, "ranks")
+#define numrank(reg) get_dimm_attr((reg), 12, 13, 0, 0, 2, "ranks")
#define numrow(reg) get_dimm_attr((reg), 2, 4, 12, 1, 6, "rows")
#define numcol(reg) get_dimm_attr((reg), 0, 1, 10, 0, 2, "cols")
@@ -360,7 +364,7 @@ static int get_dimm_info(u32 mtr, u32 amap, struct dimm_info *dimm,
edac_dbg(0, "mc#%d: channel %d, dimm %d, %lld Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
imc->mc, chan, dimmno, size, npages,
- banks, ranks, rows, cols);
+ banks, 1 << ranks, rows, cols);
imc->chan[chan].dimms[dimmno].close_pg = GET_BITFIELD(mtr, 0, 0);
imc->chan[chan].dimms[dimmno].bank_xor_enable = GET_BITFIELD(mtr, 9, 9);
@@ -464,12 +468,16 @@ static int skx_register_mci(struct skx_imc *imc)
pvt = mci->pvt_info;
pvt->imc = imc;
- mci->ctl_name = kasprintf(GFP_KERNEL, "Skylake Socket#%d IMC#%d",
- imc->node_id, imc->lmc);
+ mci->ctl_name = kasprintf(GFP_KERNEL, "Skylake Socket#%d IMC#%d", imc->node_id, imc->lmc);
+ if (!mci->ctl_name) {
+ rc = -ENOMEM;
+ goto fail0;
+ }
+
mci->mtype_cap = MEM_FLAG_DDR4;
mci->edac_ctl_cap = EDAC_FLAG_NONE;
mci->edac_cap = EDAC_FLAG_NONE;
- mci->mod_name = "skx_edac.c";
+ mci->mod_name = EDAC_MOD_STR;
mci->dev_name = pci_name(imc->chan[0].cdev);
mci->ctl_page_to_phys = NULL;
@@ -491,6 +499,7 @@ static int skx_register_mci(struct skx_imc *imc)
fail:
kfree(mci->ctl_name);
+fail0:
edac_mc_free(mci);
imc->mci = NULL;
return rc;
@@ -1039,12 +1048,17 @@ static int __init skx_init(void)
{
const struct x86_cpu_id *id;
const struct munit *m;
+ const char *owner;
int rc = 0, i;
u8 mc = 0, src_id, node_id;
struct skx_dev *d;
edac_dbg(2, "\n");
+ owner = edac_get_owner();
+ if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
+ return -EBUSY;
+
id = x86_match_cpu(skx_cpuids);
if (!id)
return -ENODEV;
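
A small user-space sketch (not from the patch) of what the numrank() change above implies: the MTR rank field in bits 13:12 appears to hold log2(ranks), which is why the minimum value drops to 0 and the debug output now prints "1 << ranks".

#include <stdio.h>

/* Same shape as the kernel's GET_BITFIELD() helper. */
#define GET_BITFIELD(v, lo, hi) \
	(((v) & ((~0ULL << (lo)) & (~0ULL >> (63 - (hi))))) >> (lo))

int main(void)
{
	unsigned long long mtr = 1ULL << 12;	/* hypothetical MTR value */
	unsigned int log2_ranks = GET_BITFIELD(mtr, 12, 13);

	/* Field values 0, 1, 2 map to 1, 2, 4 ranks respectively. */
	printf("ranks = %u\n", 1U << log2_ranks);	/* prints "ranks = 2" */
	return 0;
}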
diff --git a/drivers/edac/thunderx_edac.c b/drivers/edac/thunderx_edac.c
index f35d87519a3e..4803c6468bab 100644
--- a/drivers/edac/thunderx_edac.c
+++ b/drivers/edac/thunderx_edac.c
@@ -639,27 +639,6 @@ err_free:
return ret;
}
-#ifdef CONFIG_PM
-static int thunderx_lmc_suspend(struct pci_dev *pdev, pm_message_t state)
-{
- pci_save_state(pdev);
- pci_disable_device(pdev);
-
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
-
- return 0;
-}
-
-static int thunderx_lmc_resume(struct pci_dev *pdev)
-{
- pci_set_power_state(pdev, PCI_D0);
- pci_enable_wake(pdev, PCI_D0, 0);
- pci_restore_state(pdev);
-
- return 0;
-}
-#endif
-
static const struct pci_device_id thunderx_lmc_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_LMC) },
{ 0, },
@@ -834,10 +813,6 @@ static struct pci_driver thunderx_lmc_driver = {
.name = "thunderx_lmc_edac",
.probe = thunderx_lmc_probe,
.remove = thunderx_lmc_remove,
-#ifdef CONFIG_PM
- .suspend = thunderx_lmc_suspend,
- .resume = thunderx_lmc_resume,
-#endif
.id_table = thunderx_lmc_pci_tbl,
};
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 8bf89267dc25..ccf52368a073 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -734,7 +734,7 @@ static unsigned int ar_search_last_active_buffer(struct ar_context *ctx,
__le16 res_count, next_res_count;
i = ar_first_buffer_index(ctx);
- res_count = ACCESS_ONCE(ctx->descriptors[i].res_count);
+ res_count = READ_ONCE(ctx->descriptors[i].res_count);
/* A buffer that is not yet completely filled must be the last one. */
while (i != last && res_count == 0) {
@@ -742,8 +742,7 @@ static unsigned int ar_search_last_active_buffer(struct ar_context *ctx,
/* Peek at the next descriptor. */
next_i = ar_next_buffer_index(i);
rmb(); /* read descriptors in order */
- next_res_count = ACCESS_ONCE(
- ctx->descriptors[next_i].res_count);
+ next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count);
/*
* If the next descriptor is still empty, we must stop at this
* descriptor.
@@ -759,8 +758,7 @@ static unsigned int ar_search_last_active_buffer(struct ar_context *ctx,
if (MAX_AR_PACKET_SIZE > PAGE_SIZE && i != last) {
next_i = ar_next_buffer_index(next_i);
rmb();
- next_res_count = ACCESS_ONCE(
- ctx->descriptors[next_i].res_count);
+ next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count);
if (next_res_count != cpu_to_le16(PAGE_SIZE))
goto next_buffer_is_active;
}
@@ -2812,7 +2810,7 @@ static int handle_ir_buffer_fill(struct context *context,
u32 buffer_dma;
req_count = le16_to_cpu(last->req_count);
- res_count = le16_to_cpu(ACCESS_ONCE(last->res_count));
+ res_count = le16_to_cpu(READ_ONCE(last->res_count));
completed = req_count - res_count;
buffer_dma = le32_to_cpu(last->data_address);
diff --git a/drivers/firmware/tegra/ivc.c b/drivers/firmware/tegra/ivc.c
index a01461d63f68..00de793e6423 100644
--- a/drivers/firmware/tegra/ivc.c
+++ b/drivers/firmware/tegra/ivc.c
@@ -99,11 +99,11 @@ static inline bool tegra_ivc_empty(struct tegra_ivc *ivc,
{
/*
* This function performs multiple checks on the same values with
- * security implications, so create snapshots with ACCESS_ONCE() to
+ * security implications, so create snapshots with READ_ONCE() to
* ensure that these checks use the same values.
*/
- u32 tx = ACCESS_ONCE(header->tx.count);
- u32 rx = ACCESS_ONCE(header->rx.count);
+ u32 tx = READ_ONCE(header->tx.count);
+ u32 rx = READ_ONCE(header->rx.count);
/*
* Perform an over-full check to prevent denial of service attacks
@@ -124,8 +124,8 @@ static inline bool tegra_ivc_empty(struct tegra_ivc *ivc,
static inline bool tegra_ivc_full(struct tegra_ivc *ivc,
struct tegra_ivc_header *header)
{
- u32 tx = ACCESS_ONCE(header->tx.count);
- u32 rx = ACCESS_ONCE(header->rx.count);
+ u32 tx = READ_ONCE(header->tx.count);
+ u32 rx = READ_ONCE(header->rx.count);
/*
* Invalid cases where the counters indicate that the queue is over
@@ -137,8 +137,8 @@ static inline bool tegra_ivc_full(struct tegra_ivc *ivc,
static inline u32 tegra_ivc_available(struct tegra_ivc *ivc,
struct tegra_ivc_header *header)
{
- u32 tx = ACCESS_ONCE(header->tx.count);
- u32 rx = ACCESS_ONCE(header->rx.count);
+ u32 tx = READ_ONCE(header->tx.count);
+ u32 rx = READ_ONCE(header->rx.count);
/*
* This function isn't expected to be used in scenarios where an
@@ -151,8 +151,8 @@ static inline u32 tegra_ivc_available(struct tegra_ivc *ivc,
static inline void tegra_ivc_advance_tx(struct tegra_ivc *ivc)
{
- ACCESS_ONCE(ivc->tx.channel->tx.count) =
- ACCESS_ONCE(ivc->tx.channel->tx.count) + 1;
+ WRITE_ONCE(ivc->tx.channel->tx.count,
+ READ_ONCE(ivc->tx.channel->tx.count) + 1);
if (ivc->tx.position == ivc->num_frames - 1)
ivc->tx.position = 0;
@@ -162,8 +162,8 @@ static inline void tegra_ivc_advance_tx(struct tegra_ivc *ivc)
static inline void tegra_ivc_advance_rx(struct tegra_ivc *ivc)
{
- ACCESS_ONCE(ivc->rx.channel->rx.count) =
- ACCESS_ONCE(ivc->rx.channel->rx.count) + 1;
+ WRITE_ONCE(ivc->rx.channel->rx.count,
+ READ_ONCE(ivc->rx.channel->rx.count) + 1);
if (ivc->rx.position == ivc->num_frames - 1)
ivc->rx.position = 0;
@@ -428,7 +428,7 @@ int tegra_ivc_notified(struct tegra_ivc *ivc)
/* Copy the receiver's state out of shared memory. */
tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);
- state = ACCESS_ONCE(ivc->rx.channel->tx.state);
+ state = READ_ONCE(ivc->rx.channel->tx.state);
if (state == TEGRA_IVC_STATE_SYNC) {
offset = offsetof(struct tegra_ivc_header, tx.count);
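
The ACCESS_ONCE() conversions in ohci.c above and ivc.c here follow the standard mapping to READ_ONCE()/WRITE_ONCE() from <linux/compiler.h>. A hedged kernel-style sketch of the pattern, with a made-up structure name, mirroring tegra_ivc_advance_tx():

struct example_counters {
	u32 count;	/* written by one side, read concurrently by the other */
};

static void example_advance(struct example_counters *c)
{
	/* old style:  ACCESS_ONCE(c->count) = ACCESS_ONCE(c->count) + 1;  */
	/* new style:  read once, then publish with a single untorn store. */
	WRITE_ONCE(c->count, READ_ONCE(c->count) + 1);
}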
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 333bad749067..303b5e099a98 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -260,7 +260,7 @@ static void amdgpu_fence_fallback(unsigned long arg)
*/
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
- uint64_t seq = ACCESS_ONCE(ring->fence_drv.sync_seq);
+ uint64_t seq = READ_ONCE(ring->fence_drv.sync_seq);
struct dma_fence *fence, **ptr;
int r;
@@ -300,7 +300,7 @@ unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
amdgpu_fence_process(ring);
emitted = 0x100000000ull;
emitted -= atomic_read(&ring->fence_drv.last_seq);
- emitted += ACCESS_ONCE(ring->fence_drv.sync_seq);
+ emitted += READ_ONCE(ring->fence_drv.sync_seq);
return lower_32_bits(emitted);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 7171968f261e..6149a47fe63d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -788,11 +788,11 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
seq_printf(m, "\t0x%08x: %12ld byte %s",
id, amdgpu_bo_size(bo), placement);
- offset = ACCESS_ONCE(bo->tbo.mem.start);
+ offset = READ_ONCE(bo->tbo.mem.start);
if (offset != AMDGPU_BO_INVALID_OFFSET)
seq_printf(m, " @ 0x%010Lx", offset);
- pin_count = ACCESS_ONCE(bo->pin_count);
+ pin_count = READ_ONCE(bo->pin_count);
if (pin_count)
seq_printf(m, " pin count %d", pin_count);
seq_printf(m, "\n");
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 38cea6fb25a8..a25f6c72f219 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -187,7 +187,7 @@ static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
if (kfifo_is_empty(&entity->job_queue))
return false;
- if (ACCESS_ONCE(entity->dependency))
+ if (READ_ONCE(entity->dependency))
return false;
return true;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 4ac454ae54d7..83876a1c8d98 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -2094,6 +2094,11 @@ get_fence_array(struct drm_i915_gem_execbuffer2 *args,
goto err;
}
+ if (fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS) {
+ err = -EINVAL;
+ goto err;
+ }
+
syncobj = drm_syncobj_find(file, fence.handle);
if (!syncobj) {
DRM_DEBUG("Invalid syncobj handle provided\n");
@@ -2101,6 +2106,9 @@ get_fence_array(struct drm_i915_gem_execbuffer2 *args,
goto err;
}
+ BUILD_BUG_ON(~(ARCH_KMALLOC_MINALIGN - 1) &
+ ~__I915_EXEC_FENCE_UNKNOWN_FLAGS);
+
fences[n] = ptr_pack_bits(syncobj, fence.flags, 2);
}
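
The BUILD_BUG_ON() added above guards the trick behind ptr_pack_bits(): kmalloc'ed pointers are aligned to at least ARCH_KMALLOC_MINALIGN, so their low alignment bits are always zero and can carry the fence flags. A stand-alone user-space sketch of the same idea; all names here are illustrative, not i915 code.

#include <assert.h>
#include <stdint.h>

#define PTR_ALIGN_BITS	2			/* assume at least 4-byte alignment */
#define PTR_FLAG_MASK	((1u << PTR_ALIGN_BITS) - 1)

static void *ptr_pack(void *ptr, unsigned int flags)
{
	assert(((uintptr_t)ptr & PTR_FLAG_MASK) == 0);	/* low bits must be free */
	assert((flags & ~PTR_FLAG_MASK) == 0);		/* flags must fit in them */
	return (void *)((uintptr_t)ptr | flags);
}

static void *ptr_unpack(void *packed, unsigned int *flags)
{
	*flags = (uintptr_t)packed & PTR_FLAG_MASK;
	return (void *)((uintptr_t)packed & ~(uintptr_t)PTR_FLAG_MASK);
}

int main(void)
{
	static long long obj;			/* at least 4-byte aligned */
	unsigned int flags;
	void *ptr = ptr_unpack(ptr_pack(&obj, 0x3), &flags);

	assert(ptr == &obj && flags == 0x3);
	return 0;
}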
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index e2410eb5d96e..ad524cb0f6fc 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -832,10 +832,14 @@ static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm,
}
}
-struct sgt_dma {
+static inline struct sgt_dma {
struct scatterlist *sg;
dma_addr_t dma, max;
-};
+} sgt_dma(struct i915_vma *vma) {
+ struct scatterlist *sg = vma->pages->sgl;
+ dma_addr_t addr = sg_dma_address(sg);
+ return (struct sgt_dma) { sg, addr, addr + sg->length };
+}
struct gen8_insert_pte {
u16 pml4e;
@@ -916,11 +920,7 @@ static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
u32 unused)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- struct sgt_dma iter = {
- .sg = vma->pages->sgl,
- .dma = sg_dma_address(iter.sg),
- .max = iter.dma + iter.sg->length,
- };
+ struct sgt_dma iter = sgt_dma(vma);
struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx,
@@ -933,11 +933,7 @@ static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
u32 unused)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- struct sgt_dma iter = {
- .sg = vma->pages->sgl,
- .dma = sg_dma_address(iter.sg),
- .max = iter.dma + iter.sg->length,
- };
+ struct sgt_dma iter = sgt_dma(vma);
struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps;
struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
@@ -1632,13 +1628,10 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
unsigned act_pt = first_entry / GEN6_PTES;
unsigned act_pte = first_entry % GEN6_PTES;
const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
- struct sgt_dma iter;
+ struct sgt_dma iter = sgt_dma(vma);
gen6_pte_t *vaddr;
vaddr = kmap_atomic_px(ppgtt->pd.page_table[act_pt]);
- iter.sg = vma->pages->sgl;
- iter.dma = sg_dma_address(iter.sg);
- iter.max = iter.dma + iter.sg->length;
do {
vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
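
The refactor above exists because the old initializers read the struct's own fields while it was still being initialized ("iter.dma = sg_dma_address(iter.sg)"), which relies on the initializer expressions being evaluated in order. A hedged kernel-style sketch of the helper shape that replaces it; names are illustrative.

struct sgt_dma_example {
	struct scatterlist *sg;
	dma_addr_t dma, max;
};

static inline struct sgt_dma_example sgt_dma_example(struct scatterlist *sgl)
{
	dma_addr_t addr = sg_dma_address(sgl);	/* evaluated once, explicitly first */

	return (struct sgt_dma_example) { sgl, addr, addr + sgl->length };
}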
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 3386452bd2f0..cf3deb283da5 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -451,7 +451,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
else
r = 0;
- cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
+ cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
args->domain = radeon_mem_type_to_domain(cur_placement);
drm_gem_object_put_unlocked(gobj);
return r;
@@ -481,7 +481,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
r = ret;
/* Flush HDP cache via MMIO if necessary */
- cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
+ cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
if (rdev->asic->mmio_hdp_flush &&
radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
robj->rdev->asic->mmio_hdp_flush(rdev);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index e84fee3ec4f3..184340d486c3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -721,7 +721,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
* allocation taken by fbdev
*/
if (!(dev_priv->capabilities & SVGA_CAP_3D))
- mem_size *= 2;
+ mem_size *= 3;
dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
dev_priv->prim_bb_mem =
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 3bbad22b3748..d6b1c509ae01 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -224,7 +224,7 @@ out:
return ret;
}
-static struct dma_fence_ops vmw_fence_ops = {
+static const struct dma_fence_ops vmw_fence_ops = {
.get_driver_name = vmw_fence_get_driver_name,
.get_timeline_name = vmw_fence_get_timeline_name,
.enable_signaling = vmw_fence_enable_signaling,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index a552e4ea5440..6ac094ee8983 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -904,7 +904,7 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
if (unlikely(drm_is_render_client(file_priv)))
require_exist = true;
- if (ACCESS_ONCE(vmw_fpriv(file_priv)->locked_master)) {
+ if (READ_ONCE(vmw_fpriv(file_priv)->locked_master)) {
DRM_ERROR("Locked master refused legacy "
"surface reference.\n");
return -EACCES;
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index d65431417b17..7ad017690e3a 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -552,6 +552,7 @@ config SENSORS_G762
config SENSORS_GPIO_FAN
tristate "GPIO fan"
+ depends on OF_GPIO
depends on GPIOLIB || COMPILE_TEST
depends on THERMAL || THERMAL=n
help
@@ -862,6 +863,20 @@ tristate "MAX31722 temperature sensor"
This driver can also be built as a module. If so, the module
will be called max31722.
+config SENSORS_MAX6621
+ tristate "Maxim MAX6621 sensor chip"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ If you say yes here you get support for the MAX6621 sensor chip.
+ The MAX6621 is a PECI-to-I2C translator that provides an efficient,
+ low-cost solution for PECI-to-SMBus/I2C protocol conversion.
+ It allows the PECI-compliant host to read temperatures directly
+ from up to four PECI-enabled CPUs.
+
+ This driver can also be built as a module. If so, the module
+ will be called max6621.
+
config SENSORS_MAX6639
tristate "Maxim MAX6639 sensor chip"
depends on I2C
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 23e195a5a2f3..0fe489fab663 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -118,6 +118,7 @@ obj-$(CONFIG_SENSORS_MAX1619) += max1619.o
obj-$(CONFIG_SENSORS_MAX1668) += max1668.o
obj-$(CONFIG_SENSORS_MAX197) += max197.o
obj-$(CONFIG_SENSORS_MAX31722) += max31722.o
+obj-$(CONFIG_SENSORS_MAX6621) += max6621.o
obj-$(CONFIG_SENSORS_MAX6639) += max6639.o
obj-$(CONFIG_SENSORS_MAX6642) += max6642.o
obj-$(CONFIG_SENSORS_MAX6650) += max6650.o
diff --git a/drivers/hwmon/asc7621.c b/drivers/hwmon/asc7621.c
index 4875e99b59c9..6d34c05a4f83 100644
--- a/drivers/hwmon/asc7621.c
+++ b/drivers/hwmon/asc7621.c
@@ -579,7 +579,6 @@ static ssize_t show_pwm_enable(struct device *dev,
mutex_unlock(&data->update_lock);
val = config | (altbit << 3);
- newval = 0;
if (val == 3 || val >= 10)
newval = 255;
diff --git a/drivers/hwmon/aspeed-pwm-tacho.c b/drivers/hwmon/aspeed-pwm-tacho.c
index 69b97d45e3cb..63a95e23ca81 100644
--- a/drivers/hwmon/aspeed-pwm-tacho.c
+++ b/drivers/hwmon/aspeed-pwm-tacho.c
@@ -7,19 +7,19 @@
*/
#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/gpio/consumer.h>
-#include <linux/delay.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
#include <linux/of_device.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
-#include <linux/sysfs.h>
#include <linux/regmap.h>
+#include <linux/sysfs.h>
#include <linux/thermal.h>
/* ASPEED PWM & FAN Tach Register Definition */
@@ -161,7 +161,7 @@
* 11: reserved.
*/
#define M_TACH_MODE 0x02 /* 10b */
-#define M_TACH_UNIT 0x00c0
+#define M_TACH_UNIT 0x0210
#define INIT_FAN_CTRL 0xFF
/* How long we sleep in us while waiting for an RPM result. */
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index 9c355b9d31c5..5c9a52599cf6 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -29,21 +29,24 @@
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/hwmon.h>
-#include <linux/gpio.h>
-#include <linux/gpio-fan.h>
+#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/of_platform.h>
-#include <linux/of_gpio.h>
#include <linux/thermal.h>
+struct gpio_fan_speed {
+ int rpm;
+ int ctrl_val;
+};
+
struct gpio_fan_data {
- struct platform_device *pdev;
+ struct device *dev;
struct device *hwmon_dev;
/* Cooling device if any */
struct thermal_cooling_device *cdev;
struct mutex lock; /* lock GPIOs operations. */
- int num_ctrl;
- unsigned *ctrl;
+ int num_gpios;
+ struct gpio_desc **gpios;
int num_speed;
struct gpio_fan_speed *speed;
int speed_index;
@@ -51,7 +54,7 @@ struct gpio_fan_data {
int resume_speed;
#endif
bool pwm_enable;
- struct gpio_fan_alarm *alarm;
+ struct gpio_desc *alarm_gpio;
struct work_struct alarm_work;
};
@@ -64,8 +67,8 @@ static void fan_alarm_notify(struct work_struct *ws)
struct gpio_fan_data *fan_data =
container_of(ws, struct gpio_fan_data, alarm_work);
- sysfs_notify(&fan_data->pdev->dev.kobj, NULL, "fan1_alarm");
- kobject_uevent(&fan_data->pdev->dev.kobj, KOBJ_CHANGE);
+ sysfs_notify(&fan_data->dev->kobj, NULL, "fan1_alarm");
+ kobject_uevent(&fan_data->dev->kobj, KOBJ_CHANGE);
}
static irqreturn_t fan_alarm_irq_handler(int irq, void *dev_id)
@@ -81,47 +84,30 @@ static ssize_t fan1_alarm_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
- struct gpio_fan_alarm *alarm = fan_data->alarm;
- int value = gpio_get_value_cansleep(alarm->gpio);
- if (alarm->active_low)
- value = !value;
-
- return sprintf(buf, "%d\n", value);
+ return sprintf(buf, "%d\n",
+ gpiod_get_value_cansleep(fan_data->alarm_gpio));
}
static DEVICE_ATTR_RO(fan1_alarm);
-static int fan_alarm_init(struct gpio_fan_data *fan_data,
- struct gpio_fan_alarm *alarm)
+static int fan_alarm_init(struct gpio_fan_data *fan_data)
{
- int err;
int alarm_irq;
- struct platform_device *pdev = fan_data->pdev;
-
- fan_data->alarm = alarm;
-
- err = devm_gpio_request(&pdev->dev, alarm->gpio, "GPIO fan alarm");
- if (err)
- return err;
-
- err = gpio_direction_input(alarm->gpio);
- if (err)
- return err;
+ struct device *dev = fan_data->dev;
/*
* If the alarm GPIO doesn't support interrupts, just leave
* without initializing the fail notification support.
*/
- alarm_irq = gpio_to_irq(alarm->gpio);
- if (alarm_irq < 0)
+ alarm_irq = gpiod_to_irq(fan_data->alarm_gpio);
+ if (alarm_irq <= 0)
return 0;
INIT_WORK(&fan_data->alarm_work, fan_alarm_notify);
irq_set_irq_type(alarm_irq, IRQ_TYPE_EDGE_BOTH);
- err = devm_request_irq(&pdev->dev, alarm_irq, fan_alarm_irq_handler,
- IRQF_SHARED, "GPIO fan alarm", fan_data);
- return err;
+ return devm_request_irq(dev, alarm_irq, fan_alarm_irq_handler,
+ IRQF_SHARED, "GPIO fan alarm", fan_data);
}
/*
@@ -133,8 +119,9 @@ static void __set_fan_ctrl(struct gpio_fan_data *fan_data, int ctrl_val)
{
int i;
- for (i = 0; i < fan_data->num_ctrl; i++)
- gpio_set_value_cansleep(fan_data->ctrl[i], (ctrl_val >> i) & 1);
+ for (i = 0; i < fan_data->num_gpios; i++)
+ gpiod_set_value_cansleep(fan_data->gpios[i],
+ (ctrl_val >> i) & 1);
}
static int __get_fan_ctrl(struct gpio_fan_data *fan_data)
@@ -142,10 +129,10 @@ static int __get_fan_ctrl(struct gpio_fan_data *fan_data)
int i;
int ctrl_val = 0;
- for (i = 0; i < fan_data->num_ctrl; i++) {
+ for (i = 0; i < fan_data->num_gpios; i++) {
int value;
- value = gpio_get_value_cansleep(fan_data->ctrl[i]);
+ value = gpiod_get_value_cansleep(fan_data->gpios[i]);
ctrl_val |= (value << i);
}
return ctrl_val;
@@ -170,7 +157,7 @@ static int get_fan_speed_index(struct gpio_fan_data *fan_data)
if (fan_data->speed[i].ctrl_val == ctrl_val)
return i;
- dev_warn(&fan_data->pdev->dev,
+ dev_warn(fan_data->dev,
"missing speed array entry for GPIO value 0x%x\n", ctrl_val);
return -ENODEV;
@@ -328,9 +315,9 @@ static umode_t gpio_fan_is_visible(struct kobject *kobj,
struct device *dev = container_of(kobj, struct device, kobj);
struct gpio_fan_data *data = dev_get_drvdata(dev);
- if (index == 0 && !data->alarm)
+ if (index == 0 && !data->alarm_gpio)
return 0;
- if (index > 0 && !data->ctrl)
+ if (index > 0 && !data->gpios)
return 0;
return attr->mode;
@@ -358,30 +345,25 @@ static const struct attribute_group *gpio_fan_groups[] = {
NULL
};
-static int fan_ctrl_init(struct gpio_fan_data *fan_data,
- struct gpio_fan_platform_data *pdata)
+static int fan_ctrl_init(struct gpio_fan_data *fan_data)
{
- struct platform_device *pdev = fan_data->pdev;
- int num_ctrl = pdata->num_ctrl;
- unsigned *ctrl = pdata->ctrl;
+ int num_gpios = fan_data->num_gpios;
+ struct gpio_desc **gpios = fan_data->gpios;
int i, err;
- for (i = 0; i < num_ctrl; i++) {
- err = devm_gpio_request(&pdev->dev, ctrl[i],
- "GPIO fan control");
- if (err)
- return err;
-
- err = gpio_direction_output(ctrl[i],
- gpio_get_value_cansleep(ctrl[i]));
+ for (i = 0; i < num_gpios; i++) {
+ /*
+ * The GPIO descriptors were retrieved with GPIOD_ASIS, so here
+ * we switch each GPIO to output mode while preserving its current
+ * value, driving it to whatever level it already reads (no
+ * surprise changes in the default fan speed).
+ */
+ err = gpiod_direction_output(gpios[i],
+ gpiod_get_value_cansleep(gpios[i]));
if (err)
return err;
}
- fan_data->num_ctrl = num_ctrl;
- fan_data->ctrl = ctrl;
- fan_data->num_speed = pdata->num_speed;
- fan_data->speed = pdata->speed;
fan_data->pwm_enable = true; /* Enable manual fan speed control. */
fan_data->speed_index = get_fan_speed_index(fan_data);
if (fan_data->speed_index < 0)
@@ -432,67 +414,47 @@ static const struct thermal_cooling_device_ops gpio_fan_cool_ops = {
.set_cur_state = gpio_fan_set_cur_state,
};
-#ifdef CONFIG_OF_GPIO
/*
* Translate OpenFirmware node properties into platform_data
*/
-static int gpio_fan_get_of_pdata(struct device *dev,
- struct gpio_fan_platform_data *pdata)
+static int gpio_fan_get_of_data(struct gpio_fan_data *fan_data)
{
- struct device_node *node;
struct gpio_fan_speed *speed;
- unsigned *ctrl;
+ struct device *dev = fan_data->dev;
+ struct device_node *np = dev->of_node;
+ struct gpio_desc **gpios;
unsigned i;
u32 u;
struct property *prop;
const __be32 *p;
- node = dev->of_node;
-
/* Alarm GPIO if one exists */
- if (of_gpio_named_count(node, "alarm-gpios") > 0) {
- struct gpio_fan_alarm *alarm;
- int val;
- enum of_gpio_flags flags;
-
- alarm = devm_kzalloc(dev, sizeof(struct gpio_fan_alarm),
- GFP_KERNEL);
- if (!alarm)
- return -ENOMEM;
-
- val = of_get_named_gpio_flags(node, "alarm-gpios", 0, &flags);
- if (val < 0)
- return val;
- alarm->gpio = val;
- alarm->active_low = flags & OF_GPIO_ACTIVE_LOW;
-
- pdata->alarm = alarm;
- }
+ fan_data->alarm_gpio = devm_gpiod_get_optional(dev, "alarm", GPIOD_IN);
+ if (IS_ERR(fan_data->alarm_gpio))
+ return PTR_ERR(fan_data->alarm_gpio);
/* Fill GPIO pin array */
- pdata->num_ctrl = of_gpio_count(node);
- if (pdata->num_ctrl <= 0) {
- if (pdata->alarm)
+ fan_data->num_gpios = gpiod_count(dev, NULL);
+ if (fan_data->num_gpios <= 0) {
+ if (fan_data->alarm_gpio)
return 0;
dev_err(dev, "DT properties empty / missing");
return -ENODEV;
}
- ctrl = devm_kzalloc(dev, pdata->num_ctrl * sizeof(unsigned),
- GFP_KERNEL);
- if (!ctrl)
+ gpios = devm_kzalloc(dev,
+ fan_data->num_gpios * sizeof(struct gpio_desc *),
+ GFP_KERNEL);
+ if (!gpios)
return -ENOMEM;
- for (i = 0; i < pdata->num_ctrl; i++) {
- int val;
-
- val = of_get_gpio(node, i);
- if (val < 0)
- return val;
- ctrl[i] = val;
+ for (i = 0; i < fan_data->num_gpios; i++) {
+ gpios[i] = devm_gpiod_get_index(dev, NULL, i, GPIOD_ASIS);
+ if (IS_ERR(gpios[i]))
+ return PTR_ERR(gpios[i]);
}
- pdata->ctrl = ctrl;
+ fan_data->gpios = gpios;
/* Get number of RPM/ctrl_val pairs in speed map */
- prop = of_find_property(node, "gpio-fan,speed-map", &i);
+ prop = of_find_property(np, "gpio-fan,speed-map", &i);
if (!prop) {
dev_err(dev, "gpio-fan,speed-map DT property missing");
return -ENODEV;
@@ -502,7 +464,7 @@ static int gpio_fan_get_of_pdata(struct device *dev,
dev_err(dev, "gpio-fan,speed-map contains zero/odd number of entries");
return -ENODEV;
}
- pdata->num_speed = i / 2;
+ fan_data->num_speed = i / 2;
/*
* Populate speed map
@@ -510,12 +472,12 @@ static int gpio_fan_get_of_pdata(struct device *dev,
* this needs splitting into pairs to create gpio_fan_speed structs
*/
speed = devm_kzalloc(dev,
- pdata->num_speed * sizeof(struct gpio_fan_speed),
+ fan_data->num_speed * sizeof(struct gpio_fan_speed),
GFP_KERNEL);
if (!speed)
return -ENOMEM;
p = NULL;
- for (i = 0; i < pdata->num_speed; i++) {
+ for (i = 0; i < fan_data->num_speed; i++) {
p = of_prop_next_u32(prop, p, &u);
if (!p)
return -ENODEV;
@@ -525,7 +487,7 @@ static int gpio_fan_get_of_pdata(struct device *dev,
return -ENODEV;
speed[i].ctrl_val = u;
}
- pdata->speed = speed;
+ fan_data->speed = speed;
return 0;
}
@@ -535,76 +497,58 @@ static const struct of_device_id of_gpio_fan_match[] = {
{},
};
MODULE_DEVICE_TABLE(of, of_gpio_fan_match);
-#endif /* CONFIG_OF_GPIO */
static int gpio_fan_probe(struct platform_device *pdev)
{
int err;
struct gpio_fan_data *fan_data;
- struct gpio_fan_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
- fan_data = devm_kzalloc(&pdev->dev, sizeof(struct gpio_fan_data),
+ fan_data = devm_kzalloc(dev, sizeof(struct gpio_fan_data),
GFP_KERNEL);
if (!fan_data)
return -ENOMEM;
-#ifdef CONFIG_OF_GPIO
- if (!pdata) {
- pdata = devm_kzalloc(&pdev->dev,
- sizeof(struct gpio_fan_platform_data),
- GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
-
- err = gpio_fan_get_of_pdata(&pdev->dev, pdata);
- if (err)
- return err;
- }
-#else /* CONFIG_OF_GPIO */
- if (!pdata)
- return -EINVAL;
-#endif /* CONFIG_OF_GPIO */
+ fan_data->dev = dev;
+ err = gpio_fan_get_of_data(fan_data);
+ if (err)
+ return err;
- fan_data->pdev = pdev;
platform_set_drvdata(pdev, fan_data);
mutex_init(&fan_data->lock);
/* Configure alarm GPIO if available. */
- if (pdata->alarm) {
- err = fan_alarm_init(fan_data, pdata->alarm);
+ if (fan_data->alarm_gpio) {
+ err = fan_alarm_init(fan_data);
if (err)
return err;
}
/* Configure control GPIOs if available. */
- if (pdata->ctrl && pdata->num_ctrl > 0) {
- if (!pdata->speed || pdata->num_speed <= 1)
+ if (fan_data->gpios && fan_data->num_gpios > 0) {
+ if (!fan_data->speed || fan_data->num_speed <= 1)
return -EINVAL;
- err = fan_ctrl_init(fan_data, pdata);
+ err = fan_ctrl_init(fan_data);
if (err)
return err;
}
/* Make this driver part of hwmon class. */
fan_data->hwmon_dev =
- devm_hwmon_device_register_with_groups(&pdev->dev,
+ devm_hwmon_device_register_with_groups(dev,
"gpio_fan", fan_data,
gpio_fan_groups);
if (IS_ERR(fan_data->hwmon_dev))
return PTR_ERR(fan_data->hwmon_dev);
-#ifdef CONFIG_OF_GPIO
+
/* Optional cooling device register for Device tree platforms */
- fan_data->cdev = thermal_of_cooling_device_register(pdev->dev.of_node,
+ fan_data->cdev = thermal_of_cooling_device_register(np,
"gpio-fan",
fan_data,
&gpio_fan_cool_ops);
-#else /* CONFIG_OF_GPIO */
- /* Optional cooling device register for non Device tree platforms */
- fan_data->cdev = thermal_cooling_device_register("gpio-fan", fan_data,
- &gpio_fan_cool_ops);
-#endif /* CONFIG_OF_GPIO */
- dev_info(&pdev->dev, "GPIO fan initialized\n");
+ dev_info(dev, "GPIO fan initialized\n");
return 0;
}
@@ -616,7 +560,7 @@ static int gpio_fan_remove(struct platform_device *pdev)
if (!IS_ERR(fan_data->cdev))
thermal_cooling_device_unregister(fan_data->cdev);
- if (fan_data->ctrl)
+ if (fan_data->gpios)
set_fan_speed(fan_data, 0);
return 0;
@@ -632,7 +576,7 @@ static int gpio_fan_suspend(struct device *dev)
{
struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
- if (fan_data->ctrl) {
+ if (fan_data->gpios) {
fan_data->resume_speed = fan_data->speed_index;
set_fan_speed(fan_data, 0);
}
@@ -644,7 +588,7 @@ static int gpio_fan_resume(struct device *dev)
{
struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
- if (fan_data->ctrl)
+ if (fan_data->gpios)
set_fan_speed(fan_data, fan_data->resume_speed);
return 0;
@@ -663,9 +607,7 @@ static struct platform_driver gpio_fan_driver = {
.driver = {
.name = "gpio-fan",
.pm = GPIO_FAN_PM,
-#ifdef CONFIG_OF_GPIO
.of_match_table = of_match_ptr(of_gpio_fan_match),
-#endif
},
};
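
A hedged sketch of the descriptor-based GPIO consumer calls the conversion above switches to (<linux/gpio/consumer.h>); the "alarm" con_id matches the driver, everything else is illustrative. The gpiod layer applies device-tree polarity itself, which is why the explicit active_low bookkeeping disappears from the driver.

static int example_get_gpios(struct device *dev)
{
	struct gpio_desc *alarm, *d;
	int count, i;

	/* Optional GPIO: returns NULL, not an error, when absent. */
	alarm = devm_gpiod_get_optional(dev, "alarm", GPIOD_IN);
	if (IS_ERR(alarm))
		return PTR_ERR(alarm);

	/* Unnamed "gpios" property: count the entries, then take each one. */
	count = gpiod_count(dev, NULL);
	for (i = 0; i < count; i++) {
		d = devm_gpiod_get_index(dev, NULL, i, GPIOD_ASIS);
		if (IS_ERR(d))
			return PTR_ERR(d);
		/* Logical value: active-low polarity already handled. */
		dev_info(dev, "gpio %d reads %d\n", i,
			 gpiod_get_value_cansleep(d));
	}

	return 0;
}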
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index ce3b91f22e30..46a54ed23410 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -36,6 +36,10 @@ MODULE_PARM_DESC(force, "force loading on processors with erratum 319");
/* Provide lock for writing to NB_SMU_IND_ADDR */
static DEFINE_MUTEX(nb_smu_ind_mutex);
+#ifndef PCI_DEVICE_ID_AMD_17H_DF_F3
+#define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463
+#endif
+
/* CPUID function 0x80000001, ebx */
#define CPUID_PKGTYPE_MASK 0xf0000000
#define CPUID_PKGTYPE_F 0x00000000
@@ -61,31 +65,72 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
*/
#define F15H_M60H_REPORTED_TEMP_CTRL_OFFSET 0xd8200ca4
-static void amd_nb_smu_index_read(struct pci_dev *pdev, unsigned int devfn,
- int offset, u32 *val)
+/* F17h M01h access through SMN */
+#define F17H_M01H_REPORTED_TEMP_CTRL_OFFSET 0x00059800
+
+struct k10temp_data {
+ struct pci_dev *pdev;
+ void (*read_tempreg)(struct pci_dev *pdev, u32 *regval);
+ int temp_offset;
+};
+
+struct tctl_offset {
+ u8 model;
+ char const *id;
+ int offset;
+};
+
+static const struct tctl_offset tctl_offset_table[] = {
+ { 0x17, "AMD Ryzen 7 1600X", 20000 },
+ { 0x17, "AMD Ryzen 7 1700X", 20000 },
+ { 0x17, "AMD Ryzen 7 1800X", 20000 },
+ { 0x17, "AMD Ryzen Threadripper 1950X", 27000 },
+ { 0x17, "AMD Ryzen Threadripper 1920X", 27000 },
+ { 0x17, "AMD Ryzen Threadripper 1950", 10000 },
+ { 0x17, "AMD Ryzen Threadripper 1920", 10000 },
+ { 0x17, "AMD Ryzen Threadripper 1910", 10000 },
+};
+
+static void read_tempreg_pci(struct pci_dev *pdev, u32 *regval)
+{
+ pci_read_config_dword(pdev, REG_REPORTED_TEMPERATURE, regval);
+}
+
+static void amd_nb_index_read(struct pci_dev *pdev, unsigned int devfn,
+ unsigned int base, int offset, u32 *val)
{
mutex_lock(&nb_smu_ind_mutex);
pci_bus_write_config_dword(pdev->bus, devfn,
- 0xb8, offset);
+ base, offset);
pci_bus_read_config_dword(pdev->bus, devfn,
- 0xbc, val);
+ base + 4, val);
mutex_unlock(&nb_smu_ind_mutex);
}
+static void read_tempreg_nb_f15(struct pci_dev *pdev, u32 *regval)
+{
+ amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0xb8,
+ F15H_M60H_REPORTED_TEMP_CTRL_OFFSET, regval);
+}
+
+static void read_tempreg_nb_f17(struct pci_dev *pdev, u32 *regval)
+{
+ amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0x60,
+ F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, regval);
+}
+
static ssize_t temp1_input_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct k10temp_data *data = dev_get_drvdata(dev);
u32 regval;
- struct pci_dev *pdev = dev_get_drvdata(dev);
-
- if (boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model == 0x60) {
- amd_nb_smu_index_read(pdev, PCI_DEVFN(0, 0),
- F15H_M60H_REPORTED_TEMP_CTRL_OFFSET,
- &regval);
- } else {
- pci_read_config_dword(pdev, REG_REPORTED_TEMPERATURE, &regval);
- }
- return sprintf(buf, "%u\n", (regval >> 21) * 125);
+ unsigned int temp;
+
+ data->read_tempreg(data->pdev, &regval);
+ temp = (regval >> 21) * 125;
+ temp -= data->temp_offset;
+
+ return sprintf(buf, "%u\n", temp);
}
static ssize_t temp1_max_show(struct device *dev,
@@ -98,11 +143,12 @@ static ssize_t show_temp_crit(struct device *dev,
struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct k10temp_data *data = dev_get_drvdata(dev);
int show_hyst = attr->index;
u32 regval;
int value;
- pci_read_config_dword(dev_get_drvdata(dev),
+ pci_read_config_dword(data->pdev,
REG_HARDWARE_THERMAL_CONTROL, &regval);
value = ((regval >> 16) & 0x7f) * 500 + 52000;
if (show_hyst)
@@ -119,7 +165,8 @@ static umode_t k10temp_is_visible(struct kobject *kobj,
struct attribute *attr, int index)
{
struct device *dev = container_of(kobj, struct device, kobj);
- struct pci_dev *pdev = dev_get_drvdata(dev);
+ struct k10temp_data *data = dev_get_drvdata(dev);
+ struct pci_dev *pdev = data->pdev;
if (index >= 2) {
u32 reg_caps, reg_htc;
@@ -187,7 +234,9 @@ static int k10temp_probe(struct pci_dev *pdev,
{
int unreliable = has_erratum_319(pdev);
struct device *dev = &pdev->dev;
+ struct k10temp_data *data;
struct device *hwmon_dev;
+ int i;
if (unreliable) {
if (!force) {
@@ -199,7 +248,31 @@ static int k10temp_probe(struct pci_dev *pdev,
"unreliable CPU thermal sensor; check erratum 319\n");
}
- hwmon_dev = devm_hwmon_device_register_with_groups(dev, "k10temp", pdev,
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->pdev = pdev;
+
+ if (boot_cpu_data.x86 == 0x15 && (boot_cpu_data.x86_model == 0x60 ||
+ boot_cpu_data.x86_model == 0x70))
+ data->read_tempreg = read_tempreg_nb_f15;
+ else if (boot_cpu_data.x86 == 0x17)
+ data->read_tempreg = read_tempreg_nb_f17;
+ else
+ data->read_tempreg = read_tempreg_pci;
+
+ for (i = 0; i < ARRAY_SIZE(tctl_offset_table); i++) {
+ const struct tctl_offset *entry = &tctl_offset_table[i];
+
+ if (boot_cpu_data.x86 == entry->model &&
+ strstr(boot_cpu_data.x86_model_id, entry->id)) {
+ data->temp_offset = entry->offset;
+ break;
+ }
+ }
+
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, "k10temp", data,
k10temp_groups);
return PTR_ERR_OR_ZERO(hwmon_dev);
}
@@ -214,6 +287,7 @@ static const struct pci_device_id k10temp_id_table[] = {
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
{}
};
MODULE_DEVICE_TABLE(pci, k10temp_id_table);
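
A runnable user-space sketch (values invented for the example) of the temperature math the k10temp rework above funnels through read_tempreg(): the reported CurTmp field sits in bits 31:21 in 0.125 degC steps, hence "(regval >> 21) * 125", and the table-driven Tctl offset is subtracted afterwards on the listed Ryzen/Threadripper parts.

#include <stdio.h>

static unsigned int k10temp_millicelsius(unsigned int regval, int tctl_offset)
{
	unsigned int temp = (regval >> 21) * 125;	/* 0.125 degC per LSB */

	return temp - tctl_offset;	/* e.g. 20000 on a Ryzen 7 1800X */
}

int main(void)
{
	unsigned int regval = 414u << 21;	/* hypothetical Tctl of 51.75 degC */

	printf("%u mC\n", k10temp_millicelsius(regval, 20000));	/* 31750 mC */
	return 0;
}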
diff --git a/drivers/hwmon/max1619.c b/drivers/hwmon/max1619.c
index a18278938494..76d966932941 100644
--- a/drivers/hwmon/max1619.c
+++ b/drivers/hwmon/max1619.c
@@ -303,10 +303,20 @@ static const struct i2c_device_id max1619_id[] = {
};
MODULE_DEVICE_TABLE(i2c, max1619_id);
+#ifdef CONFIG_OF
+static const struct of_device_id max1619_of_match[] = {
+ { .compatible = "maxim,max1619", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, max1619_of_match);
+#endif
+
static struct i2c_driver max1619_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "max1619",
+ .of_match_table = of_match_ptr(max1619_of_match),
},
.probe = max1619_probe,
.id_table = max1619_id,
diff --git a/drivers/hwmon/max6621.c b/drivers/hwmon/max6621.c
new file mode 100644
index 000000000000..35555f0eefb9
--- /dev/null
+++ b/drivers/hwmon/max6621.c
@@ -0,0 +1,593 @@
+/*
+ * Hardware monitoring driver for Maxim MAX6621
+ *
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Vadim Pasternak <vadimp@mellanox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+#define MAX6621_DRV_NAME "max6621"
+#define MAX6621_TEMP_INPUT_REG_NUM 9
+#define MAX6621_TEMP_INPUT_MIN -127000
+#define MAX6621_TEMP_INPUT_MAX 128000
+#define MAX6621_TEMP_ALERT_CHAN_SHIFT 1
+
+#define MAX6621_TEMP_S0D0_REG 0x00
+#define MAX6621_TEMP_S0D1_REG 0x01
+#define MAX6621_TEMP_S1D0_REG 0x02
+#define MAX6621_TEMP_S1D1_REG 0x03
+#define MAX6621_TEMP_S2D0_REG 0x04
+#define MAX6621_TEMP_S2D1_REG 0x05
+#define MAX6621_TEMP_S3D0_REG 0x06
+#define MAX6621_TEMP_S3D1_REG 0x07
+#define MAX6621_TEMP_MAX_REG 0x08
+#define MAX6621_TEMP_MAX_ADDR_REG 0x0a
+#define MAX6621_TEMP_ALERT_CAUSE_REG 0x0b
+#define MAX6621_CONFIG0_REG 0x0c
+#define MAX6621_CONFIG1_REG 0x0d
+#define MAX6621_CONFIG2_REG 0x0e
+#define MAX6621_CONFIG3_REG 0x0f
+#define MAX6621_TEMP_S0_ALERT_REG 0x10
+#define MAX6621_TEMP_S1_ALERT_REG 0x11
+#define MAX6621_TEMP_S2_ALERT_REG 0x12
+#define MAX6621_TEMP_S3_ALERT_REG 0x13
+#define MAX6621_CLEAR_ALERT_REG 0x15
+#define MAX6621_REG_MAX (MAX6621_CLEAR_ALERT_REG + 1)
+#define MAX6621_REG_TEMP_SHIFT 0x06
+
+#define MAX6621_ENABLE_TEMP_ALERTS_BIT 4
+#define MAX6621_ENABLE_I2C_CRC_BIT 5
+#define MAX6621_ENABLE_ALTERNATE_DATA 6
+#define MAX6621_ENABLE_LOCKUP_TO 7
+#define MAX6621_ENABLE_S0D0_BIT 8
+#define MAX6621_ENABLE_S3D1_BIT 15
+#define MAX6621_ENABLE_TEMP_ALL GENMASK(MAX6621_ENABLE_S3D1_BIT, \
+ MAX6621_ENABLE_S0D0_BIT)
+#define MAX6621_POLL_DELAY_MASK 0x5
+#define MAX6621_CONFIG0_INIT (MAX6621_ENABLE_TEMP_ALL | \
+ BIT(MAX6621_ENABLE_LOCKUP_TO) | \
+ BIT(MAX6621_ENABLE_I2C_CRC_BIT) | \
+ MAX6621_POLL_DELAY_MASK)
+#define MAX6621_PECI_BIT_TIME 0x2
+#define MAX6621_PECI_RETRY_NUM 0x3
+#define MAX6621_CONFIG1_INIT ((MAX6621_PECI_BIT_TIME << 8) | \
+ MAX6621_PECI_RETRY_NUM)
+
+/* Error codes */
+#define MAX6621_TRAN_FAILED 0x8100 /*
+ * PECI transaction failed for more
+ * than the configured number of
+ * consecutive retries.
+ */
+#define MAX6621_POOL_DIS 0x8101 /*
+ * Polling disabled for requested
+ * socket/domain.
+ */
+#define MAX6621_POOL_UNCOMPLETE 0x8102 /*
+ * First poll not yet completed for
+ * requested socket/domain (on
+ * startup).
+ */
+#define MAX6621_SD_DIS 0x8103 /*
+ * Read maximum temperature requested,
+ * but no sockets/domains enabled or
+ * all enabled sockets/domains have
+ * errors; or read maximum temperature
+ * address requested, but read maximum
+ * temperature was not called.
+ */
+#define MAX6621_ALERT_DIS 0x8104 /*
+ * Get alert socket/domain requested,
+ * but no alert active.
+ */
+#define MAX6621_PECI_ERR_MIN 0x8000 /* Intel spec PECI error min value. */
+#define MAX6621_PECI_ERR_MAX 0x80ff /* Intel spec PECI error max value. */
+
+static const u32 max6621_temp_regs[] = {
+ MAX6621_TEMP_MAX_REG, MAX6621_TEMP_S0D0_REG, MAX6621_TEMP_S1D0_REG,
+ MAX6621_TEMP_S2D0_REG, MAX6621_TEMP_S3D0_REG, MAX6621_TEMP_S0D1_REG,
+ MAX6621_TEMP_S1D1_REG, MAX6621_TEMP_S2D1_REG, MAX6621_TEMP_S3D1_REG,
+};
+
+static const char *const max6621_temp_labels[] = {
+ "maximum",
+ "socket0_0",
+ "socket1_0",
+ "socket2_0",
+ "socket3_0",
+ "socket0_1",
+ "socket1_1",
+ "socket2_1",
+ "socket3_1",
+};
+
+static const int max6621_temp_alert_chan2reg[] = {
+ MAX6621_TEMP_S0_ALERT_REG,
+ MAX6621_TEMP_S1_ALERT_REG,
+ MAX6621_TEMP_S2_ALERT_REG,
+ MAX6621_TEMP_S3_ALERT_REG,
+};
+
+/**
+ * struct max6621_data - private data:
+ *
+ * @client: I2C client;
+ * @regmap: register map handle;
+ * @input_chan2reg: mapping from channel to register;
+ */
+struct max6621_data {
+ struct i2c_client *client;
+ struct regmap *regmap;
+ int input_chan2reg[MAX6621_TEMP_INPUT_REG_NUM + 1];
+};
+
+static long max6621_temp_mc2reg(long val)
+{
+ return (val / 1000L) << MAX6621_REG_TEMP_SHIFT;
+}
+
+static umode_t
+max6621_is_visible(const void *data, enum hwmon_sensor_types type, u32 attr,
+ int channel)
+{
+ /* Skip channels which are not physically connected. */
+ if (((struct max6621_data *)data)->input_chan2reg[channel] < 0)
+ return 0;
+
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_label:
+ case hwmon_temp_crit_alarm:
+ return 0444;
+ case hwmon_temp_offset:
+ case hwmon_temp_crit:
+ return 0644;
+ default:
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int max6621_verify_reg_data(struct device *dev, int regval)
+{
+ if (regval >= MAX6621_PECI_ERR_MIN &&
+ regval <= MAX6621_PECI_ERR_MAX) {
+ dev_dbg(dev, "PECI error code - err 0x%04x.\n",
+ regval);
+
+ return -EIO;
+ }
+
+ switch (regval) {
+ case MAX6621_TRAN_FAILED:
+ dev_dbg(dev, "PECI transaction failed - err 0x%04x.\n",
+ regval);
+ return -EIO;
+ case MAX6621_POOL_DIS:
+ dev_dbg(dev, "Polling disabled - err 0x%04x.\n", regval);
+ return -EOPNOTSUPP;
+ case MAX6621_POOL_UNCOMPLETE:
+ dev_dbg(dev, "First poll not completed on startup - err 0x%04x.\n",
+ regval);
+ return -EIO;
+ case MAX6621_SD_DIS:
+ dev_dbg(dev, "Resource is disabled - err 0x%04x.\n", regval);
+ return -EOPNOTSUPP;
+ case MAX6621_ALERT_DIS:
+ dev_dbg(dev, "No alert active - err 0x%04x.\n", regval);
+ return -EOPNOTSUPP;
+ default:
+ return 0;
+ }
+}
+
+static int
+max6621_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, long *val)
+{
+ struct max6621_data *data = dev_get_drvdata(dev);
+ u32 regval;
+ int reg;
+ s8 temp;
+ int ret;
+
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ reg = data->input_chan2reg[channel];
+ ret = regmap_read(data->regmap, reg, &regval);
+ if (ret)
+ return ret;
+
+ ret = max6621_verify_reg_data(dev, regval);
+ if (ret)
+ return ret;
+
+ /*
+ * Bit MAX6621_REG_TEMP_SHIFT represents a 1 degree step.
+ * The temperature is given in two's complement, with 8
+ * bits used for the register conversion.
+ */
+ temp = (regval >> MAX6621_REG_TEMP_SHIFT);
+ *val = temp * 1000L;
+
+ break;
+ case hwmon_temp_offset:
+ ret = regmap_read(data->regmap, MAX6621_CONFIG2_REG,
+ &regval);
+ if (ret)
+ return ret;
+
+ ret = max6621_verify_reg_data(dev, regval);
+ if (ret)
+ return ret;
+
+ *val = (regval >> MAX6621_REG_TEMP_SHIFT) *
+ 1000L;
+
+ break;
+ case hwmon_temp_crit:
+ channel -= MAX6621_TEMP_ALERT_CHAN_SHIFT;
+ reg = max6621_temp_alert_chan2reg[channel];
+ ret = regmap_read(data->regmap, reg, &regval);
+ if (ret)
+ return ret;
+
+ ret = max6621_verify_reg_data(dev, regval);
+ if (ret)
+ return ret;
+
+ *val = regval * 1000L;
+
+ break;
+ case hwmon_temp_crit_alarm:
+ /*
+ * Set val to zero to cover the case where reading
+ * MAX6621_TEMP_ALERT_CAUSE_REG returns, for example,
+ * MAX6621_ALERT_DIS. The read will return an error,
+ * but in that case the alarm should be reported as 0.
+ */
+ *val = 0;
+ ret = regmap_read(data->regmap,
+ MAX6621_TEMP_ALERT_CAUSE_REG,
+ &regval);
+ if (ret)
+ return ret;
+
+ ret = max6621_verify_reg_data(dev, regval);
+ if (ret) {
+ /* Do not report error if alert is disabled. */
+ if (regval == MAX6621_ALERT_DIS)
+ return 0;
+ else
+ return ret;
+ }
+
+ /*
+ * Clear the alert automatically, using send-byte
+ * smbus protocol for clearing alert.
+ */
+ if (regval) {
+ ret = i2c_smbus_write_byte(data->client,
+ MAX6621_CLEAR_ALERT_REG);
+ if (ret)
+ return ret;
+ }
+
+ *val = !!regval;
+
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
+max6621_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, long val)
+{
+ struct max6621_data *data = dev_get_drvdata(dev);
+ u32 reg;
+
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_offset:
+ /* Clamp to allowed range to prevent overflow. */
+ val = clamp_val(val, MAX6621_TEMP_INPUT_MIN,
+ MAX6621_TEMP_INPUT_MAX);
+ val = max6621_temp_mc2reg(val);
+
+ return regmap_write(data->regmap,
+ MAX6621_CONFIG2_REG, val);
+ case hwmon_temp_crit:
+ channel -= MAX6621_TEMP_ALERT_CHAN_SHIFT;
+ reg = max6621_temp_alert_chan2reg[channel];
+ /* Clamp to allowed range to prevent overflow. */
+ val = clamp_val(val, MAX6621_TEMP_INPUT_MIN,
+ MAX6621_TEMP_INPUT_MAX);
+ val = val / 1000L;
+
+ return regmap_write(data->regmap, reg, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int
+max6621_read_string(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, const char **str)
+{
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_label:
+ *str = max6621_temp_labels[channel];
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static bool max6621_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MAX6621_CONFIG0_REG:
+ case MAX6621_CONFIG1_REG:
+ case MAX6621_CONFIG2_REG:
+ case MAX6621_CONFIG3_REG:
+ case MAX6621_TEMP_S0_ALERT_REG:
+ case MAX6621_TEMP_S1_ALERT_REG:
+ case MAX6621_TEMP_S2_ALERT_REG:
+ case MAX6621_TEMP_S3_ALERT_REG:
+ case MAX6621_TEMP_ALERT_CAUSE_REG:
+ return true;
+ }
+ return false;
+}
+
+static bool max6621_readable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MAX6621_TEMP_S0D0_REG:
+ case MAX6621_TEMP_S0D1_REG:
+ case MAX6621_TEMP_S1D0_REG:
+ case MAX6621_TEMP_S1D1_REG:
+ case MAX6621_TEMP_S2D0_REG:
+ case MAX6621_TEMP_S2D1_REG:
+ case MAX6621_TEMP_S3D0_REG:
+ case MAX6621_TEMP_S3D1_REG:
+ case MAX6621_TEMP_MAX_REG:
+ case MAX6621_TEMP_MAX_ADDR_REG:
+ case MAX6621_CONFIG0_REG:
+ case MAX6621_CONFIG1_REG:
+ case MAX6621_CONFIG2_REG:
+ case MAX6621_CONFIG3_REG:
+ case MAX6621_TEMP_S0_ALERT_REG:
+ case MAX6621_TEMP_S1_ALERT_REG:
+ case MAX6621_TEMP_S2_ALERT_REG:
+ case MAX6621_TEMP_S3_ALERT_REG:
+ return true;
+ }
+ return false;
+}
+
+static bool max6621_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MAX6621_TEMP_S0D0_REG:
+ case MAX6621_TEMP_S0D1_REG:
+ case MAX6621_TEMP_S1D0_REG:
+ case MAX6621_TEMP_S1D1_REG:
+ case MAX6621_TEMP_S2D0_REG:
+ case MAX6621_TEMP_S2D1_REG:
+ case MAX6621_TEMP_S3D0_REG:
+ case MAX6621_TEMP_S3D1_REG:
+ case MAX6621_TEMP_MAX_REG:
+ case MAX6621_TEMP_S0_ALERT_REG:
+ case MAX6621_TEMP_S1_ALERT_REG:
+ case MAX6621_TEMP_S2_ALERT_REG:
+ case MAX6621_TEMP_S3_ALERT_REG:
+ case MAX6621_TEMP_ALERT_CAUSE_REG:
+ return true;
+ }
+ return false;
+}
+
+static const struct reg_default max6621_regmap_default[] = {
+ { MAX6621_CONFIG0_REG, MAX6621_CONFIG0_INIT },
+ { MAX6621_CONFIG1_REG, MAX6621_CONFIG1_INIT },
+};
+
+static const struct regmap_config max6621_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .max_register = MAX6621_REG_MAX,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+ .cache_type = REGCACHE_FLAT,
+ .writeable_reg = max6621_writeable_reg,
+ .readable_reg = max6621_readable_reg,
+ .volatile_reg = max6621_volatile_reg,
+ .reg_defaults = max6621_regmap_default,
+ .num_reg_defaults = ARRAY_SIZE(max6621_regmap_default),
+};
+
+static u32 max6621_chip_config[] = {
+ HWMON_C_REGISTER_TZ,
+ 0
+};
+
+static const struct hwmon_channel_info max6621_chip = {
+ .type = hwmon_chip,
+ .config = max6621_chip_config,
+};
+
+static const u32 max6621_temp_config[] = {
+ HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_OFFSET,
+ HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ 0
+};
+
+static const struct hwmon_channel_info max6621_temp = {
+ .type = hwmon_temp,
+ .config = max6621_temp_config,
+};
+
+static const struct hwmon_channel_info *max6621_info[] = {
+ &max6621_chip,
+ &max6621_temp,
+ NULL
+};
+
+static const struct hwmon_ops max6621_hwmon_ops = {
+ .read = max6621_read,
+ .write = max6621_write,
+ .read_string = max6621_read_string,
+ .is_visible = max6621_is_visible,
+};
+
+static const struct hwmon_chip_info max6621_chip_info = {
+ .ops = &max6621_hwmon_ops,
+ .info = max6621_info,
+};
+
+static int max6621_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct max6621_data *data;
+ struct device *hwmon_dev;
+ int i;
+ int ret;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->regmap = devm_regmap_init_i2c(client, &max6621_regmap_config);
+ if (IS_ERR(data->regmap))
+ return PTR_ERR(data->regmap);
+
+ i2c_set_clientdata(client, data);
+ data->client = client;
+
+ /* Set CONFIG0 register masking temperature alerts and PEC. */
+ ret = regmap_write(data->regmap, MAX6621_CONFIG0_REG,
+ MAX6621_CONFIG0_INIT);
+ if (ret)
+ return ret;
+
+ /* Set CONFIG1 register for PEC access retry number. */
+ ret = regmap_write(data->regmap, MAX6621_CONFIG1_REG,
+ MAX6621_CONFIG1_INIT);
+ if (ret)
+ return ret;
+
+ /* Sync registers with hardware. */
+ regcache_mark_dirty(data->regmap);
+ ret = regcache_sync(data->regmap);
+ if (ret)
+ return ret;
+
+ /* Verify which temperature input registers are enabled. */
+ for (i = 0; i < MAX6621_TEMP_INPUT_REG_NUM; i++) {
+ ret = i2c_smbus_read_word_data(client, max6621_temp_regs[i]);
+ if (ret < 0)
+ return ret;
+ ret = max6621_verify_reg_data(dev, ret);
+ if (ret) {
+ data->input_chan2reg[i] = -1;
+ continue;
+ }
+
+ data->input_chan2reg[i] = max6621_temp_regs[i];
+ }
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+ data,
+ &max6621_chip_info,
+ NULL);
+
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct i2c_device_id max6621_id[] = {
+ { MAX6621_DRV_NAME, 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max6621_id);
+
+static const struct of_device_id max6621_of_match[] = {
+ { .compatible = "maxim,max6621" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max6621_of_match);
+
+static struct i2c_driver max6621_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = MAX6621_DRV_NAME,
+ .of_match_table = of_match_ptr(max6621_of_match),
+ },
+ .probe = max6621_probe,
+ .id_table = max6621_id,
+};
+
+module_i2c_driver(max6621_driver);
+
+MODULE_AUTHOR("Vadim Pasternak <vadimp@mellanox.com>");
+MODULE_DESCRIPTION("Driver for Maxim MAX6621");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index 40019325b517..08479006c7f9 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -114,6 +114,16 @@ config SENSORS_MAX20751
This driver can also be built as a module. If so, the module will
be called max20751.
+config SENSORS_MAX31785
+ tristate "Maxim MAX31785 and compatibles"
+ default n
+ help
+ If you say yes here you get hardware monitoring support for Maxim
+ MAX31785.
+
+ This driver can also be built as a module. If so, the module will
+ be called max31785.
+
config SENSORS_MAX34440
tristate "Maxim MAX34440 and compatibles"
default n
diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
index e9364420a512..ea0e39518c21 100644
--- a/drivers/hwmon/pmbus/Makefile
+++ b/drivers/hwmon/pmbus/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_SENSORS_LTC2978) += ltc2978.o
obj-$(CONFIG_SENSORS_LTC3815) += ltc3815.o
obj-$(CONFIG_SENSORS_MAX16064) += max16064.o
obj-$(CONFIG_SENSORS_MAX20751) += max20751.o
+obj-$(CONFIG_SENSORS_MAX31785) += max31785.o
obj-$(CONFIG_SENSORS_MAX34440) += max34440.o
obj-$(CONFIG_SENSORS_MAX8688) += max8688.o
obj-$(CONFIG_SENSORS_TPS40422) += tps40422.o
diff --git a/drivers/hwmon/pmbus/max31785.c b/drivers/hwmon/pmbus/max31785.c
new file mode 100644
index 000000000000..9313849d5160
--- /dev/null
+++ b/drivers/hwmon/pmbus/max31785.c
@@ -0,0 +1,116 @@
+/*
+ * Copyright (C) 2017 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include "pmbus.h"
+
+enum max31785_regs {
+ MFR_REVISION = 0x9b,
+};
+
+#define MAX31785_NR_PAGES 23
+
+#define MAX31785_FAN_FUNCS \
+ (PMBUS_HAVE_FAN12 | PMBUS_HAVE_STATUS_FAN12)
+
+#define MAX31785_TEMP_FUNCS \
+ (PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP)
+
+#define MAX31785_VOUT_FUNCS \
+ (PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT)
+
+static const struct pmbus_driver_info max31785_info = {
+ .pages = MAX31785_NR_PAGES,
+
+ /* RPM */
+ .format[PSC_FAN] = direct,
+ .m[PSC_FAN] = 1,
+ .b[PSC_FAN] = 0,
+ .R[PSC_FAN] = 0,
+ .func[0] = MAX31785_FAN_FUNCS,
+ .func[1] = MAX31785_FAN_FUNCS,
+ .func[2] = MAX31785_FAN_FUNCS,
+ .func[3] = MAX31785_FAN_FUNCS,
+ .func[4] = MAX31785_FAN_FUNCS,
+ .func[5] = MAX31785_FAN_FUNCS,
+
+ .format[PSC_TEMPERATURE] = direct,
+ .m[PSC_TEMPERATURE] = 1,
+ .b[PSC_TEMPERATURE] = 0,
+ .R[PSC_TEMPERATURE] = 2,
+ .func[6] = MAX31785_TEMP_FUNCS,
+ .func[7] = MAX31785_TEMP_FUNCS,
+ .func[8] = MAX31785_TEMP_FUNCS,
+ .func[9] = MAX31785_TEMP_FUNCS,
+ .func[10] = MAX31785_TEMP_FUNCS,
+ .func[11] = MAX31785_TEMP_FUNCS,
+ .func[12] = MAX31785_TEMP_FUNCS,
+ .func[13] = MAX31785_TEMP_FUNCS,
+ .func[14] = MAX31785_TEMP_FUNCS,
+ .func[15] = MAX31785_TEMP_FUNCS,
+ .func[16] = MAX31785_TEMP_FUNCS,
+
+ .format[PSC_VOLTAGE_OUT] = direct,
+ .m[PSC_VOLTAGE_OUT] = 1,
+ .b[PSC_VOLTAGE_OUT] = 0,
+ .R[PSC_VOLTAGE_OUT] = 0,
+ .func[17] = MAX31785_VOUT_FUNCS,
+ .func[18] = MAX31785_VOUT_FUNCS,
+ .func[19] = MAX31785_VOUT_FUNCS,
+ .func[20] = MAX31785_VOUT_FUNCS,
+ .func[21] = MAX31785_VOUT_FUNCS,
+ .func[22] = MAX31785_VOUT_FUNCS,
+};
+
+static int max31785_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct pmbus_driver_info *info;
+ s64 ret;
+
+ info = devm_kzalloc(dev, sizeof(struct pmbus_driver_info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ *info = max31785_info;
+
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, 255);
+ if (ret < 0)
+ return ret;
+
+ return pmbus_do_probe(client, id, info);
+}
+
+static const struct i2c_device_id max31785_id[] = {
+ { "max31785", 0 },
+ { "max31785a", 0 },
+ { },
+};
+
+MODULE_DEVICE_TABLE(i2c, max31785_id);
+
+static struct i2c_driver max31785_driver = {
+ .driver = {
+ .name = "max31785",
+ },
+ .probe = max31785_probe,
+ .remove = pmbus_do_remove,
+ .id_table = max31785_id,
+};
+
+module_i2c_driver(max31785_driver);
+
+MODULE_AUTHOR("Andrew Jeffery <andrew@aj.id.au>");
+MODULE_DESCRIPTION("PMBus driver for the Maxim MAX31785");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h
index 4efa2bd4f6d8..fa613bd209e3 100644
--- a/drivers/hwmon/pmbus/pmbus.h
+++ b/drivers/hwmon/pmbus/pmbus.h
@@ -404,9 +404,9 @@ extern const struct regulator_ops pmbus_regulator_ops;
/* Function declarations */
void pmbus_clear_cache(struct i2c_client *client);
-int pmbus_set_page(struct i2c_client *client, u8 page);
-int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg);
-int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg, u16 word);
+int pmbus_set_page(struct i2c_client *client, int page);
+int pmbus_read_word_data(struct i2c_client *client, int page, u8 reg);
+int pmbus_write_word_data(struct i2c_client *client, int page, u8 reg, u16 word);
int pmbus_read_byte_data(struct i2c_client *client, int page, u8 reg);
int pmbus_write_byte(struct i2c_client *client, int page, u8 value);
int pmbus_write_byte_data(struct i2c_client *client, int page, u8 reg,
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index 302f0aef59de..52a58b8b6e1b 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -136,13 +136,13 @@ void pmbus_clear_cache(struct i2c_client *client)
}
EXPORT_SYMBOL_GPL(pmbus_clear_cache);
-int pmbus_set_page(struct i2c_client *client, u8 page)
+int pmbus_set_page(struct i2c_client *client, int page)
{
struct pmbus_data *data = i2c_get_clientdata(client);
int rv = 0;
int newpage;
- if (page != data->currpage) {
+ if (page >= 0 && page != data->currpage) {
rv = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
newpage = i2c_smbus_read_byte_data(client, PMBUS_PAGE);
if (newpage != page)
@@ -158,11 +158,9 @@ int pmbus_write_byte(struct i2c_client *client, int page, u8 value)
{
int rv;
- if (page >= 0) {
- rv = pmbus_set_page(client, page);
- if (rv < 0)
- return rv;
- }
+ rv = pmbus_set_page(client, page);
+ if (rv < 0)
+ return rv;
return i2c_smbus_write_byte(client, value);
}
@@ -186,7 +184,8 @@ static int _pmbus_write_byte(struct i2c_client *client, int page, u8 value)
return pmbus_write_byte(client, page, value);
}
-int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg, u16 word)
+int pmbus_write_word_data(struct i2c_client *client, int page, u8 reg,
+ u16 word)
{
int rv;
@@ -219,7 +218,7 @@ static int _pmbus_write_word_data(struct i2c_client *client, int page, int reg,
return pmbus_write_word_data(client, page, reg, word);
}
-int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg)
+int pmbus_read_word_data(struct i2c_client *client, int page, u8 reg)
{
int rv;
@@ -255,11 +254,9 @@ int pmbus_read_byte_data(struct i2c_client *client, int page, u8 reg)
{
int rv;
- if (page >= 0) {
- rv = pmbus_set_page(client, page);
- if (rv < 0)
- return rv;
- }
+ rv = pmbus_set_page(client, page);
+ if (rv < 0)
+ return rv;
return i2c_smbus_read_byte_data(client, reg);
}
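Annotation (not part of the patch): the net effect of these hunks is that pmbus_set_page() now accepts an int page and simply skips the PAGE write for negative values, so callers that access non-paged registers no longer open-code the "page >= 0" guard removed above. A minimal illustrative caller; PMBUS_CAPABILITY is a standard non-paged byte register already defined in pmbus.h.

static int example_read_capability(struct i2c_client *client)
{
	/* page -1: pmbus_set_page() issues no PAGE command before the read */
	return pmbus_read_byte_data(client, -1, PMBUS_CAPABILITY);
}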
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
index e4d642b673c6..25d28343ba93 100644
--- a/drivers/hwmon/sht15.c
+++ b/drivers/hwmon/sht15.c
@@ -18,13 +18,11 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/mutex.h>
-#include <linux/platform_data/sht15.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/delay.h>
@@ -34,7 +32,8 @@
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/bitrev.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of.h>
/* Commands */
#define SHT15_MEASURE_TEMP 0x03
@@ -122,7 +121,8 @@ static const u8 sht15_crc8_table[] = {
/**
* struct sht15_data - device instance specific data
- * @pdata: platform data (gpio's etc).
+ * @sck: clock GPIO line
+ * @data: data GPIO line
* @read_work: bh of interrupt handler.
* @wait_queue: wait queue for getting values from device.
* @val_temp: last temperature value read from device.
@@ -150,7 +150,8 @@ static const u8 sht15_crc8_table[] = {
* @interrupt_handled: flag used to indicate a handler has been scheduled.
*/
struct sht15_data {
- struct sht15_platform_data *pdata;
+ struct gpio_desc *sck;
+ struct gpio_desc *data;
struct work_struct read_work;
wait_queue_head_t wait_queue;
uint16_t val_temp;
@@ -205,16 +206,16 @@ static int sht15_connection_reset(struct sht15_data *data)
{
int i, err;
- err = gpio_direction_output(data->pdata->gpio_data, 1);
+ err = gpiod_direction_output(data->data, 1);
if (err)
return err;
ndelay(SHT15_TSCKL);
- gpio_set_value(data->pdata->gpio_sck, 0);
+ gpiod_set_value(data->sck, 0);
ndelay(SHT15_TSCKL);
for (i = 0; i < 9; ++i) {
- gpio_set_value(data->pdata->gpio_sck, 1);
+ gpiod_set_value(data->sck, 1);
ndelay(SHT15_TSCKH);
- gpio_set_value(data->pdata->gpio_sck, 0);
+ gpiod_set_value(data->sck, 0);
ndelay(SHT15_TSCKL);
}
return 0;
@@ -227,11 +228,11 @@ static int sht15_connection_reset(struct sht15_data *data)
*/
static inline void sht15_send_bit(struct sht15_data *data, int val)
{
- gpio_set_value(data->pdata->gpio_data, val);
+ gpiod_set_value(data->data, val);
ndelay(SHT15_TSU);
- gpio_set_value(data->pdata->gpio_sck, 1);
+ gpiod_set_value(data->sck, 1);
ndelay(SHT15_TSCKH);
- gpio_set_value(data->pdata->gpio_sck, 0);
+ gpiod_set_value(data->sck, 0);
ndelay(SHT15_TSCKL); /* clock low time */
}
@@ -248,23 +249,23 @@ static int sht15_transmission_start(struct sht15_data *data)
int err;
/* ensure data is high and output */
- err = gpio_direction_output(data->pdata->gpio_data, 1);
+ err = gpiod_direction_output(data->data, 1);
if (err)
return err;
ndelay(SHT15_TSU);
- gpio_set_value(data->pdata->gpio_sck, 0);
+ gpiod_set_value(data->sck, 0);
ndelay(SHT15_TSCKL);
- gpio_set_value(data->pdata->gpio_sck, 1);
+ gpiod_set_value(data->sck, 1);
ndelay(SHT15_TSCKH);
- gpio_set_value(data->pdata->gpio_data, 0);
+ gpiod_set_value(data->data, 0);
ndelay(SHT15_TSU);
- gpio_set_value(data->pdata->gpio_sck, 0);
+ gpiod_set_value(data->sck, 0);
ndelay(SHT15_TSCKL);
- gpio_set_value(data->pdata->gpio_sck, 1);
+ gpiod_set_value(data->sck, 1);
ndelay(SHT15_TSCKH);
- gpio_set_value(data->pdata->gpio_data, 1);
+ gpiod_set_value(data->data, 1);
ndelay(SHT15_TSU);
- gpio_set_value(data->pdata->gpio_sck, 0);
+ gpiod_set_value(data->sck, 0);
ndelay(SHT15_TSCKL);
return 0;
}
@@ -292,20 +293,20 @@ static int sht15_wait_for_response(struct sht15_data *data)
{
int err;
- err = gpio_direction_input(data->pdata->gpio_data);
+ err = gpiod_direction_input(data->data);
if (err)
return err;
- gpio_set_value(data->pdata->gpio_sck, 1);
+ gpiod_set_value(data->sck, 1);
ndelay(SHT15_TSCKH);
- if (gpio_get_value(data->pdata->gpio_data)) {
- gpio_set_value(data->pdata->gpio_sck, 0);
+ if (gpiod_get_value(data->data)) {
+ gpiod_set_value(data->sck, 0);
dev_err(data->dev, "Command not acknowledged\n");
err = sht15_connection_reset(data);
if (err)
return err;
return -EIO;
}
- gpio_set_value(data->pdata->gpio_sck, 0);
+ gpiod_set_value(data->sck, 0);
ndelay(SHT15_TSCKL);
return 0;
}
@@ -360,17 +361,17 @@ static int sht15_ack(struct sht15_data *data)
{
int err;
- err = gpio_direction_output(data->pdata->gpio_data, 0);
+ err = gpiod_direction_output(data->data, 0);
if (err)
return err;
ndelay(SHT15_TSU);
- gpio_set_value(data->pdata->gpio_sck, 1);
+ gpiod_set_value(data->sck, 1);
ndelay(SHT15_TSU);
- gpio_set_value(data->pdata->gpio_sck, 0);
+ gpiod_set_value(data->sck, 0);
ndelay(SHT15_TSU);
- gpio_set_value(data->pdata->gpio_data, 1);
+ gpiod_set_value(data->data, 1);
- return gpio_direction_input(data->pdata->gpio_data);
+ return gpiod_direction_input(data->data);
}
/**
@@ -383,13 +384,13 @@ static int sht15_end_transmission(struct sht15_data *data)
{
int err;
- err = gpio_direction_output(data->pdata->gpio_data, 1);
+ err = gpiod_direction_output(data->data, 1);
if (err)
return err;
ndelay(SHT15_TSU);
- gpio_set_value(data->pdata->gpio_sck, 1);
+ gpiod_set_value(data->sck, 1);
ndelay(SHT15_TSCKH);
- gpio_set_value(data->pdata->gpio_sck, 0);
+ gpiod_set_value(data->sck, 0);
ndelay(SHT15_TSCKL);
return 0;
}
@@ -405,10 +406,10 @@ static u8 sht15_read_byte(struct sht15_data *data)
for (i = 0; i < 8; ++i) {
byte <<= 1;
- gpio_set_value(data->pdata->gpio_sck, 1);
+ gpiod_set_value(data->sck, 1);
ndelay(SHT15_TSCKH);
- byte |= !!gpio_get_value(data->pdata->gpio_data);
- gpio_set_value(data->pdata->gpio_sck, 0);
+ byte |= !!gpiod_get_value(data->data);
+ gpiod_set_value(data->sck, 0);
ndelay(SHT15_TSCKL);
}
return byte;
@@ -428,7 +429,7 @@ static int sht15_send_status(struct sht15_data *data, u8 status)
err = sht15_send_cmd(data, SHT15_WRITE_STATUS);
if (err)
return err;
- err = gpio_direction_output(data->pdata->gpio_data, 1);
+ err = gpiod_direction_output(data->data, 1);
if (err)
return err;
ndelay(SHT15_TSU);
@@ -528,14 +529,14 @@ static int sht15_measurement(struct sht15_data *data,
if (ret)
return ret;
- ret = gpio_direction_input(data->pdata->gpio_data);
+ ret = gpiod_direction_input(data->data);
if (ret)
return ret;
atomic_set(&data->interrupt_handled, 0);
- enable_irq(gpio_to_irq(data->pdata->gpio_data));
- if (gpio_get_value(data->pdata->gpio_data) == 0) {
- disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
+ enable_irq(gpiod_to_irq(data->data));
+ if (gpiod_get_value(data->data) == 0) {
+ disable_irq_nosync(gpiod_to_irq(data->data));
/* Only relevant if the interrupt hasn't occurred. */
if (!atomic_read(&data->interrupt_handled))
schedule_work(&data->read_work);
@@ -547,7 +548,7 @@ static int sht15_measurement(struct sht15_data *data,
data->state = SHT15_READING_NOTHING;
return -EIO;
} else if (ret == 0) { /* timeout occurred */
- disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
+ disable_irq_nosync(gpiod_to_irq(data->data));
ret = sht15_connection_reset(data);
if (ret)
return ret;
@@ -826,15 +827,15 @@ static void sht15_bh_read_data(struct work_struct *work_s)
read_work);
/* Firstly, verify the line is low */
- if (gpio_get_value(data->pdata->gpio_data)) {
+ if (gpiod_get_value(data->data)) {
/*
* If not, then start the interrupt again - care here as could
* have gone low in meantime so verify it hasn't!
*/
atomic_set(&data->interrupt_handled, 0);
- enable_irq(gpio_to_irq(data->pdata->gpio_data));
+ enable_irq(gpiod_to_irq(data->data));
/* If still not occurred or another handler was scheduled */
- if (gpio_get_value(data->pdata->gpio_data)
+ if (gpiod_get_value(data->data)
|| atomic_read(&data->interrupt_handled))
return;
}
@@ -918,53 +919,12 @@ static const struct of_device_id sht15_dt_match[] = {
{ },
};
MODULE_DEVICE_TABLE(of, sht15_dt_match);
-
-/*
- * This function returns NULL if pdev isn't a device instatiated by dt,
- * a pointer to pdata if it could successfully get all information
- * from dt or a negative ERR_PTR() on error.
- */
-static struct sht15_platform_data *sht15_probe_dt(struct device *dev)
-{
- struct device_node *np = dev->of_node;
- struct sht15_platform_data *pdata;
-
- /* no device tree device */
- if (!np)
- return NULL;
-
- pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return ERR_PTR(-ENOMEM);
-
- pdata->gpio_data = of_get_named_gpio(np, "data-gpios", 0);
- if (pdata->gpio_data < 0) {
- if (pdata->gpio_data != -EPROBE_DEFER)
- dev_err(dev, "data-gpios not found\n");
- return ERR_PTR(pdata->gpio_data);
- }
-
- pdata->gpio_sck = of_get_named_gpio(np, "clk-gpios", 0);
- if (pdata->gpio_sck < 0) {
- if (pdata->gpio_sck != -EPROBE_DEFER)
- dev_err(dev, "clk-gpios not found\n");
- return ERR_PTR(pdata->gpio_sck);
- }
-
- return pdata;
-}
-#else
-static inline struct sht15_platform_data *sht15_probe_dt(struct device *dev)
-{
- return NULL;
-}
#endif
static int sht15_probe(struct platform_device *pdev)
{
int ret;
struct sht15_data *data;
- u8 status = 0;
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
if (!data)
@@ -977,25 +937,6 @@ static int sht15_probe(struct platform_device *pdev)
data->dev = &pdev->dev;
init_waitqueue_head(&data->wait_queue);
- data->pdata = sht15_probe_dt(&pdev->dev);
- if (IS_ERR(data->pdata))
- return PTR_ERR(data->pdata);
- if (data->pdata == NULL) {
- data->pdata = dev_get_platdata(&pdev->dev);
- if (data->pdata == NULL) {
- dev_err(&pdev->dev, "no platform data supplied\n");
- return -EINVAL;
- }
- }
-
- data->supply_uv = data->pdata->supply_mv * 1000;
- if (data->pdata->checksum)
- data->checksumming = true;
- if (data->pdata->no_otp_reload)
- status |= SHT15_STATUS_NO_OTP_RELOAD;
- if (data->pdata->low_resolution)
- status |= SHT15_STATUS_LOW_RESOLUTION;
-
/*
* If a regulator is available,
* query what the supply voltage actually is!
@@ -1030,21 +971,20 @@ static int sht15_probe(struct platform_device *pdev)
}
/* Try requesting the GPIOs */
- ret = devm_gpio_request_one(&pdev->dev, data->pdata->gpio_sck,
- GPIOF_OUT_INIT_LOW, "SHT15 sck");
- if (ret) {
+ data->sck = devm_gpiod_get(&pdev->dev, "clk", GPIOD_OUT_LOW);
+ if (IS_ERR(data->sck)) {
+ ret = PTR_ERR(data->sck);
dev_err(&pdev->dev, "clock line GPIO request failed\n");
goto err_release_reg;
}
-
- ret = devm_gpio_request(&pdev->dev, data->pdata->gpio_data,
- "SHT15 data");
- if (ret) {
+ data->data = devm_gpiod_get(&pdev->dev, "data", GPIOD_IN);
+ if (IS_ERR(data->data)) {
+ ret = PTR_ERR(data->data);
dev_err(&pdev->dev, "data line GPIO request failed\n");
goto err_release_reg;
}
- ret = devm_request_irq(&pdev->dev, gpio_to_irq(data->pdata->gpio_data),
+ ret = devm_request_irq(&pdev->dev, gpiod_to_irq(data->data),
sht15_interrupt_fired,
IRQF_TRIGGER_FALLING,
"sht15 data",
@@ -1053,7 +993,7 @@ static int sht15_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "failed to get irq for data line\n");
goto err_release_reg;
}
- disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
+ disable_irq_nosync(gpiod_to_irq(data->data));
ret = sht15_connection_reset(data);
if (ret)
goto err_release_reg;
@@ -1061,13 +1001,6 @@ static int sht15_probe(struct platform_device *pdev)
if (ret)
goto err_release_reg;
- /* write status with platform data options */
- if (status) {
- ret = sht15_send_status(data, status);
- if (ret)
- goto err_release_reg;
- }
-
ret = sysfs_create_group(&pdev->dev.kobj, &sht15_attr_group);
if (ret) {
dev_err(&pdev->dev, "sysfs create failed\n");
diff --git a/drivers/hwmon/stts751.c b/drivers/hwmon/stts751.c
index 3f940fb67dc6..7fe152d92350 100644
--- a/drivers/hwmon/stts751.c
+++ b/drivers/hwmon/stts751.c
@@ -396,7 +396,7 @@ static ssize_t show_max_alarm(struct device *dev, struct device_attribute *attr,
if (ret < 0)
return ret;
- return snprintf(buf, PAGE_SIZE - 1, "%d\n", priv->max_alert);
+ return snprintf(buf, PAGE_SIZE, "%d\n", priv->max_alert);
}
static ssize_t show_min_alarm(struct device *dev, struct device_attribute *attr,
@@ -413,7 +413,7 @@ static ssize_t show_min_alarm(struct device *dev, struct device_attribute *attr,
if (ret < 0)
return ret;
- return snprintf(buf, PAGE_SIZE - 1, "%d\n", priv->min_alert);
+ return snprintf(buf, PAGE_SIZE, "%d\n", priv->min_alert);
}
static ssize_t show_input(struct device *dev, struct device_attribute *attr,
@@ -428,7 +428,7 @@ static ssize_t show_input(struct device *dev, struct device_attribute *attr,
if (ret < 0)
return ret;
- return snprintf(buf, PAGE_SIZE - 1, "%d\n", priv->temp);
+ return snprintf(buf, PAGE_SIZE, "%d\n", priv->temp);
}
static ssize_t show_therm(struct device *dev, struct device_attribute *attr,
@@ -436,7 +436,7 @@ static ssize_t show_therm(struct device *dev, struct device_attribute *attr,
{
struct stts751_priv *priv = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE - 1, "%d\n", priv->therm);
+ return snprintf(buf, PAGE_SIZE, "%d\n", priv->therm);
}
static ssize_t set_therm(struct device *dev, struct device_attribute *attr,
@@ -478,7 +478,7 @@ static ssize_t show_hyst(struct device *dev, struct device_attribute *attr,
{
struct stts751_priv *priv = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE - 1, "%d\n", priv->hyst);
+ return snprintf(buf, PAGE_SIZE, "%d\n", priv->hyst);
}
static ssize_t set_hyst(struct device *dev, struct device_attribute *attr,
@@ -518,7 +518,7 @@ static ssize_t show_therm_trip(struct device *dev,
if (ret < 0)
return ret;
- return snprintf(buf, PAGE_SIZE - 1, "%d\n", priv->therm_trip);
+ return snprintf(buf, PAGE_SIZE, "%d\n", priv->therm_trip);
}
static ssize_t show_max(struct device *dev, struct device_attribute *attr,
@@ -526,7 +526,7 @@ static ssize_t show_max(struct device *dev, struct device_attribute *attr,
{
struct stts751_priv *priv = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE - 1, "%d\n", priv->event_max);
+ return snprintf(buf, PAGE_SIZE, "%d\n", priv->event_max);
}
static ssize_t set_max(struct device *dev, struct device_attribute *attr,
@@ -560,7 +560,7 @@ static ssize_t show_min(struct device *dev, struct device_attribute *attr,
{
struct stts751_priv *priv = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE - 1, "%d\n", priv->event_min);
+ return snprintf(buf, PAGE_SIZE, "%d\n", priv->event_min);
}
static ssize_t set_min(struct device *dev, struct device_attribute *attr,
@@ -594,7 +594,7 @@ static ssize_t show_interval(struct device *dev, struct device_attribute *attr,
{
struct stts751_priv *priv = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE - 1, "%d\n",
+ return snprintf(buf, PAGE_SIZE, "%d\n",
stts751_intervals[priv->interval]);
}
diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
index dab5c515d5a3..5ba9d9f1daa1 100644
--- a/drivers/hwmon/w83793.c
+++ b/drivers/hwmon/w83793.c
@@ -1676,7 +1676,9 @@ static int w83793_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
- const int watchdog_minors[] = { WATCHDOG_MINOR, 212, 213, 214, 215 };
+ static const int watchdog_minors[] = {
+ WATCHDOG_MINOR, 212, 213, 214, 215
+ };
struct w83793_data *data;
int i, tmp, val, err;
int files_fan = ARRAY_SIZE(w83793_left_fan) / 7;
diff --git a/drivers/hwmon/xgene-hwmon.c b/drivers/hwmon/xgene-hwmon.c
index e1be61095532..a3cd91f23267 100644
--- a/drivers/hwmon/xgene-hwmon.c
+++ b/drivers/hwmon/xgene-hwmon.c
@@ -91,6 +91,11 @@
#define to_xgene_hwmon_dev(cl) \
container_of(cl, struct xgene_hwmon_dev, mbox_client)
+enum xgene_hwmon_version {
+ XGENE_HWMON_V1 = 0,
+ XGENE_HWMON_V2 = 1,
+};
+
struct slimpro_resp_msg {
u32 msg;
u32 param1;
@@ -609,6 +614,15 @@ static void xgene_hwmon_tx_done(struct mbox_client *cl, void *msg, int ret)
}
}
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id xgene_hwmon_acpi_match[] = {
+ {"APMC0D29", XGENE_HWMON_V1},
+ {"APMC0D8A", XGENE_HWMON_V2},
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, xgene_hwmon_acpi_match);
+#endif
+
static int xgene_hwmon_probe(struct platform_device *pdev)
{
struct xgene_hwmon_dev *ctx;
@@ -651,6 +665,15 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
}
} else {
struct acpi_pcct_hw_reduced *cppc_ss;
+ const struct acpi_device_id *acpi_id;
+ int version;
+
+ acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
+ &pdev->dev);
+ if (!acpi_id)
+ return -EINVAL;
+
+ version = (int)acpi_id->driver_data;
if (device_property_read_u32(&pdev->dev, "pcc-channel",
&ctx->mbox_idx)) {
@@ -693,7 +716,13 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
*/
ctx->comm_base_addr = cppc_ss->base_address;
if (ctx->comm_base_addr) {
- ctx->pcc_comm_addr = memremap(ctx->comm_base_addr,
+ if (version == XGENE_HWMON_V2)
+ ctx->pcc_comm_addr = (void __force *)ioremap(
+ ctx->comm_base_addr,
+ cppc_ss->length);
+ else
+ ctx->pcc_comm_addr = memremap(
+ ctx->comm_base_addr,
cppc_ss->length,
MEMREMAP_WB);
} else {
@@ -761,14 +790,6 @@ static int xgene_hwmon_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_ACPI
-static const struct acpi_device_id xgene_hwmon_acpi_match[] = {
- {"APMC0D29", 0},
- {},
-};
-MODULE_DEVICE_TABLE(acpi, xgene_hwmon_acpi_match);
-#endif
-
static const struct of_device_id xgene_hwmon_of_match[] = {
{.compatible = "apm,xgene-slimpro-hwmon"},
{}
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index d9a1e9893136..97bea2e1aa6a 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -380,7 +380,7 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
if (sc->flags & SCF_FROZEN) {
wait_event_interruptible_timeout(
dd->event_queue,
- !(ACCESS_ONCE(dd->flags) & HFI1_FROZEN),
+ !(READ_ONCE(dd->flags) & HFI1_FROZEN),
msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
if (dd->flags & HFI1_FROZEN)
return -ENOLCK;
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index 7108a4b5e94c..75e740780285 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -1423,14 +1423,14 @@ retry:
goto done;
}
/* copy from receiver cache line and recalculate */
- sc->alloc_free = ACCESS_ONCE(sc->free);
+ sc->alloc_free = READ_ONCE(sc->free);
avail =
(unsigned long)sc->credits -
(sc->fill - sc->alloc_free);
if (blocks > avail) {
/* still no room, actively update */
sc_release_update(sc);
- sc->alloc_free = ACCESS_ONCE(sc->free);
+ sc->alloc_free = READ_ONCE(sc->free);
trycount++;
goto retry;
}
@@ -1667,7 +1667,7 @@ void sc_release_update(struct send_context *sc)
/* call sent buffer callbacks */
code = -1; /* code not yet set */
- head = ACCESS_ONCE(sc->sr_head); /* snapshot the head */
+ head = READ_ONCE(sc->sr_head); /* snapshot the head */
tail = sc->sr_tail;
while (head != tail) {
pbuf = &sc->sr[tail].pbuf;
diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c
index b3291f0fde9a..a7fc664f0d4e 100644
--- a/drivers/infiniband/hw/hfi1/ruc.c
+++ b/drivers/infiniband/hw/hfi1/ruc.c
@@ -363,7 +363,7 @@ static void ruc_loopback(struct rvt_qp *sqp)
again:
smp_read_barrier_depends(); /* see post_one_send() */
- if (sqp->s_last == ACCESS_ONCE(sqp->s_head))
+ if (sqp->s_last == READ_ONCE(sqp->s_head))
goto clr_busy;
wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 6781bcdb10b3..08346d25441c 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -1725,7 +1725,7 @@ retry:
swhead = sde->descq_head & sde->sdma_mask;
/* this code is really bad for cache line trading */
- swtail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
+ swtail = READ_ONCE(sde->descq_tail) & sde->sdma_mask;
cnt = sde->descq_cnt;
if (swhead < swtail)
@@ -1872,7 +1872,7 @@ retry:
if ((status & sde->idle_mask) && !idle_check_done) {
u16 swtail;
- swtail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
+ swtail = READ_ONCE(sde->descq_tail) & sde->sdma_mask;
if (swtail != hwhead) {
hwhead = (u16)read_sde_csr(sde, SD(HEAD));
idle_check_done = 1;
@@ -2222,7 +2222,7 @@ void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde)
u16 len;
head = sde->descq_head & sde->sdma_mask;
- tail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
+ tail = READ_ONCE(sde->descq_tail) & sde->sdma_mask;
seq_printf(s, SDE_FMT, sde->this_idx,
sde->cpu,
sdma_state_name(sde->state.current_state),
@@ -3305,7 +3305,7 @@ int sdma_ahg_alloc(struct sdma_engine *sde)
return -EINVAL;
}
while (1) {
- nr = ffz(ACCESS_ONCE(sde->ahg_bits));
+ nr = ffz(READ_ONCE(sde->ahg_bits));
if (nr > 31) {
trace_hfi1_ahg_allocate(sde, -ENOSPC);
return -ENOSPC;
diff --git a/drivers/infiniband/hw/hfi1/sdma.h b/drivers/infiniband/hw/hfi1/sdma.h
index 107011d8613b..374c59784950 100644
--- a/drivers/infiniband/hw/hfi1/sdma.h
+++ b/drivers/infiniband/hw/hfi1/sdma.h
@@ -445,7 +445,7 @@ static inline u16 sdma_descq_freecnt(struct sdma_engine *sde)
{
return sde->descq_cnt -
(sde->descq_tail -
- ACCESS_ONCE(sde->descq_head)) - 1;
+ READ_ONCE(sde->descq_head)) - 1;
}
static inline u16 sdma_descq_inprocess(struct sdma_engine *sde)
diff --git a/drivers/infiniband/hw/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c
index 0b646173ca22..9a31c585427f 100644
--- a/drivers/infiniband/hw/hfi1/uc.c
+++ b/drivers/infiniband/hw/hfi1/uc.c
@@ -80,7 +80,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
goto bail;
/* We are in the error state, flush the work request. */
smp_read_barrier_depends(); /* see post_one_send() */
- if (qp->s_last == ACCESS_ONCE(qp->s_head))
+ if (qp->s_last == READ_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
if (iowait_sdma_pending(&priv->s_iowait)) {
@@ -121,7 +121,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
goto bail;
/* Check if send work queue is empty. */
smp_read_barrier_depends(); /* see post_one_send() */
- if (qp->s_cur == ACCESS_ONCE(qp->s_head)) {
+ if (qp->s_cur == READ_ONCE(qp->s_head)) {
clear_ahg(qp);
goto bail;
}
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
index 2ba74fdd6f15..7fec6b984e3e 100644
--- a/drivers/infiniband/hw/hfi1/ud.c
+++ b/drivers/infiniband/hw/hfi1/ud.c
@@ -487,7 +487,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
goto bail;
/* We are in the error state, flush the work request. */
smp_read_barrier_depends(); /* see post_one_send */
- if (qp->s_last == ACCESS_ONCE(qp->s_head))
+ if (qp->s_last == READ_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
if (iowait_sdma_pending(&priv->s_iowait)) {
@@ -501,7 +501,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
/* see post_one_send() */
smp_read_barrier_depends();
- if (qp->s_cur == ACCESS_ONCE(qp->s_head))
+ if (qp->s_cur == READ_ONCE(qp->s_head))
goto bail;
wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index c0c0e0445cbf..8ec6e8a8d6f7 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -276,7 +276,7 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
/* Wait until all requests have been freed. */
wait_event_interruptible(
pq->wait,
- (ACCESS_ONCE(pq->state) == SDMA_PKT_Q_INACTIVE));
+ (READ_ONCE(pq->state) == SDMA_PKT_Q_INACTIVE));
kfree(pq->reqs);
kfree(pq->req_in_use);
kmem_cache_destroy(pq->txreq_cache);
@@ -591,7 +591,7 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
if (ret != -EBUSY) {
req->status = ret;
WRITE_ONCE(req->has_error, 1);
- if (ACCESS_ONCE(req->seqcomp) ==
+ if (READ_ONCE(req->seqcomp) ==
req->seqsubmitted - 1)
goto free_req;
return ret;
@@ -825,7 +825,7 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
*/
if (req->data_len) {
iovec = &req->iovs[req->iov_idx];
- if (ACCESS_ONCE(iovec->offset) == iovec->iov.iov_len) {
+ if (READ_ONCE(iovec->offset) == iovec->iov.iov_len) {
if (++req->iov_idx == req->data_iovs) {
ret = -EFAULT;
goto free_txreq;
@@ -1390,7 +1390,7 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
} else {
if (status != SDMA_TXREQ_S_OK)
req->status = status;
- if (req->seqcomp == (ACCESS_ONCE(req->seqsubmitted) - 1) &&
+ if (req->seqcomp == (READ_ONCE(req->seqsubmitted) - 1) &&
(READ_ONCE(req->done) ||
READ_ONCE(req->has_error))) {
user_sdma_free_request(req, false);
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
index 53efbb0b40c4..9a37e844d4c8 100644
--- a/drivers/infiniband/hw/qib/qib_ruc.c
+++ b/drivers/infiniband/hw/qib/qib_ruc.c
@@ -368,7 +368,7 @@ static void qib_ruc_loopback(struct rvt_qp *sqp)
again:
smp_read_barrier_depends(); /* see post_one_send() */
- if (sqp->s_last == ACCESS_ONCE(sqp->s_head))
+ if (sqp->s_last == READ_ONCE(sqp->s_head))
goto clr_busy;
wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
index 498e2202e72c..bddcc37ace44 100644
--- a/drivers/infiniband/hw/qib/qib_uc.c
+++ b/drivers/infiniband/hw/qib/qib_uc.c
@@ -61,7 +61,7 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags)
goto bail;
/* We are in the error state, flush the work request. */
smp_read_barrier_depends(); /* see post_one_send() */
- if (qp->s_last == ACCESS_ONCE(qp->s_head))
+ if (qp->s_last == READ_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
if (atomic_read(&priv->s_dma_busy)) {
@@ -91,7 +91,7 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags)
goto bail;
/* Check if send work queue is empty. */
smp_read_barrier_depends(); /* see post_one_send() */
- if (qp->s_cur == ACCESS_ONCE(qp->s_head))
+ if (qp->s_cur == READ_ONCE(qp->s_head))
goto bail;
/*
* Start a new request.
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index be4907453ac4..15962ed193ce 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -253,7 +253,7 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
goto bail;
/* We are in the error state, flush the work request. */
smp_read_barrier_depends(); /* see post_one_send */
- if (qp->s_last == ACCESS_ONCE(qp->s_head))
+ if (qp->s_last == READ_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
if (atomic_read(&priv->s_dma_busy)) {
@@ -267,7 +267,7 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
/* see post_one_send() */
smp_read_barrier_depends();
- if (qp->s_cur == ACCESS_ONCE(qp->s_head))
+ if (qp->s_cur == READ_ONCE(qp->s_head))
goto bail;
wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 22df09ae809e..b670cb9d2006 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -1073,7 +1073,7 @@ int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
rdi->driver_f.notify_error_qp(qp);
/* Schedule the sending tasklet to drain the send work queue. */
- if (ACCESS_ONCE(qp->s_last) != qp->s_head)
+ if (READ_ONCE(qp->s_last) != qp->s_head)
rdi->driver_f.schedule_send(qp);
rvt_clear_mr_refs(qp, 0);
@@ -1686,7 +1686,7 @@ static inline int rvt_qp_is_avail(
if (likely(qp->s_avail))
return 0;
smp_read_barrier_depends(); /* see rc.c */
- slast = ACCESS_ONCE(qp->s_last);
+ slast = READ_ONCE(qp->s_last);
if (qp->s_head >= slast)
avail = qp->s_size - (qp->s_head - slast);
else
@@ -1917,7 +1917,7 @@ int rvt_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
* ahead and kick the send engine into gear. Otherwise we will always
* just schedule the send to happen later.
*/
- call_send = qp->s_head == ACCESS_ONCE(qp->s_last) && !wr->next;
+ call_send = qp->s_head == READ_ONCE(qp->s_last) && !wr->next;
for (; wr; wr = wr->next) {
err = rvt_post_one_wr(qp, wr, &call_send);
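Annotation (not part of the patch): the ACCESS_ONCE() conversions in the hfi1, qib and rdmavt hunks above are mechanical; READ_ONCE()/WRITE_ONCE() give the same "exactly one access, no tearing or re-fetching" guarantee while also being well defined for non-scalar types, which is why ACCESS_ONCE() was retired. A minimal sketch of the paired pattern, with hypothetical names:

/* hypothetical producer index shared with a lock-free reader */
static unsigned int example_head;

static void example_publish_head(unsigned int new_head)
{
	WRITE_ONCE(example_head, new_head);	/* one untorn store */
}

static bool example_ring_empty(unsigned int tail)
{
	return tail == READ_ONCE(example_head);	/* one load, never re-fetched */
}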
diff --git a/drivers/input/misc/regulator-haptic.c b/drivers/input/misc/regulator-haptic.c
index 2e8f801932be..a1db1e5040dc 100644
--- a/drivers/input/misc/regulator-haptic.c
+++ b/drivers/input/misc/regulator-haptic.c
@@ -233,7 +233,7 @@ static int __maybe_unused regulator_haptic_resume(struct device *dev)
haptic->suspended = false;
- magnitude = ACCESS_ONCE(haptic->magnitude);
+ magnitude = READ_ONCE(haptic->magnitude);
if (magnitude)
regulator_haptic_set_voltage(haptic, magnitude);
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 6d6b092e2da9..d6135900da64 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1258,6 +1258,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
{ "ELAN0605", 0 },
{ "ELAN0609", 0 },
{ "ELAN060B", 0 },
+ { "ELAN060C", 0 },
{ "ELAN0611", 0 },
{ "ELAN1000", 0 },
{ }
diff --git a/drivers/input/rmi4/rmi_smbus.c b/drivers/input/rmi4/rmi_smbus.c
index 225025a0940c..b6ccf39c6a7b 100644
--- a/drivers/input/rmi4/rmi_smbus.c
+++ b/drivers/input/rmi4/rmi_smbus.c
@@ -312,7 +312,7 @@ static int rmi_smb_probe(struct i2c_client *client,
rmi_smb->xport.dev = &client->dev;
rmi_smb->xport.pdata = *pdata;
rmi_smb->xport.pdata.irq = client->irq;
- rmi_smb->xport.proto_name = "smb2";
+ rmi_smb->xport.proto_name = "smb";
rmi_smb->xport.ops = &rmi_smb_ops;
smbus_version = rmi_smb_get_version(rmi_smb);
@@ -322,7 +322,7 @@ static int rmi_smb_probe(struct i2c_client *client,
rmi_dbg(RMI_DEBUG_XPORT, &client->dev, "Smbus version is %d",
smbus_version);
- if (smbus_version != 2) {
+ if (smbus_version != 2 && smbus_version != 3) {
dev_err(&client->dev, "Unrecognized SMB version %d\n",
smbus_version);
return -ENODEV;
diff --git a/drivers/input/touchscreen/tsc200x-core.c b/drivers/input/touchscreen/tsc200x-core.c
index 88ea5e1b72ae..abf27578beb1 100644
--- a/drivers/input/touchscreen/tsc200x-core.c
+++ b/drivers/input/touchscreen/tsc200x-core.c
@@ -531,6 +531,7 @@ int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id,
input_set_drvdata(input_dev, ts);
+ __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
input_set_capability(input_dev, EV_KEY, BTN_TOUCH);
input_set_abs_params(input_dev, ABS_X,
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 9d8a1dd2e2c2..a2ca82f6c2dd 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -151,6 +151,9 @@ config CLPS711X_IRQCHIP
select SPARSE_IRQ
default y
+config OMPIC
+ bool
+
config OR1K_PIC
bool
select IRQ_DOMAIN
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index b842dfdc903f..046df81c402a 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_DW_APB_ICTL) += irq-dw-apb-ictl.o
obj-$(CONFIG_METAG) += irq-metag-ext.o
obj-$(CONFIG_METAG_PERFCOUNTER_IRQS) += irq-metag.o
obj-$(CONFIG_CLPS711X_IRQCHIP) += irq-clps711x.o
+obj-$(CONFIG_OMPIC) += irq-ompic.o
obj-$(CONFIG_OR1K_PIC) += irq-or1k-pic.o
obj-$(CONFIG_ORION_IRQCHIP) += irq-orion.o
obj-$(CONFIG_OMAP_IRQCHIP) += irq-omap-intc.o
diff --git a/drivers/irqchip/irq-ompic.c b/drivers/irqchip/irq-ompic.c
new file mode 100644
index 000000000000..cf6d0c455518
--- /dev/null
+++ b/drivers/irqchip/irq-ompic.c
@@ -0,0 +1,202 @@
+/*
+ * Open Multi-Processor Interrupt Controller driver
+ *
+ * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
+ * Copyright (C) 2017 Stafford Horne <shorne@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ * The ompic device handles IPI communication between cores in multi-core
+ * OpenRISC systems.
+ *
+ * Registers
+ *
+ * For each CPU the ompic has 2 registers. The control register for sending
+ * and acking IPIs and the status register for receiving IPIs. The register
+ * layouts are as follows:
+ *
+ * Control register
+ * +---------+---------+----------+---------+
+ * | 31 | 30 | 29 .. 16 | 15 .. 0 |
+ * ----------+---------+----------+----------
+ * | IRQ ACK | IRQ GEN | DST CORE | DATA |
+ * +---------+---------+----------+---------+
+ *
+ * Status register
+ * +----------+-------------+----------+---------+
+ * | 31 | 30 | 29 .. 16 | 15 .. 0 |
+ * -----------+-------------+----------+---------+
+ * | Reserved | IRQ Pending | SRC CORE | DATA |
+ * +----------+-------------+----------+---------+
+ *
+ * Architecture
+ *
+ * - The ompic generates a level interrupt to the CPU PIC when a message is
+ * ready. Messages are delivered via the memory bus.
+ * - The ompic does not have any interrupt input lines.
+ * - The ompic is wired to the same irq line on each core.
+ * - Devices are wired to the same irq line on each core.
+ *
+ * +---------+ +---------+
+ * | CPU | | CPU |
+ * | Core 0 |<==\ (memory access) /==>| Core 1 |
+ * | [ PIC ]| | | | [ PIC ]|
+ * +----^-^--+ | | +----^-^--+
+ * | | v v | |
+ * <====|=|=================================|=|==> (memory bus)
+ * | | ^ ^ | |
+ * (ipi | +------|---------+--------|-------|-+ (device irq)
+ * irq | | | | |
+ * core0)| +------|---------|--------|-------+ (ipi irq core1)
+ * | | | | |
+ * +----o-o-+ | +--------+ |
+ * | ompic |<===/ | Device |<===/
+ * | IPI | +--------+
+ * +--------+
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/smp.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+
+#include <linux/irqchip.h>
+
+#define OMPIC_CPUBYTES 8
+#define OMPIC_CTRL(cpu) (0x0 + (cpu * OMPIC_CPUBYTES))
+#define OMPIC_STAT(cpu) (0x4 + (cpu * OMPIC_CPUBYTES))
+
+#define OMPIC_CTRL_IRQ_ACK (1 << 31)
+#define OMPIC_CTRL_IRQ_GEN (1 << 30)
+#define OMPIC_CTRL_DST(cpu) (((cpu) & 0x3fff) << 16)
+
+#define OMPIC_STAT_IRQ_PENDING (1 << 30)
+
+#define OMPIC_DATA(x) ((x) & 0xffff)
+
+DEFINE_PER_CPU(unsigned long, ops);
+
+static void __iomem *ompic_base;
+
+static inline u32 ompic_readreg(void __iomem *base, loff_t offset)
+{
+ return ioread32be(base + offset);
+}
+
+static void ompic_writereg(void __iomem *base, loff_t offset, u32 data)
+{
+ iowrite32be(data, base + offset);
+}
+
+static void ompic_raise_softirq(const struct cpumask *mask,
+ unsigned int ipi_msg)
+{
+ unsigned int dst_cpu;
+ unsigned int src_cpu = smp_processor_id();
+
+ for_each_cpu(dst_cpu, mask) {
+ set_bit(ipi_msg, &per_cpu(ops, dst_cpu));
+
+ /*
+ * On OpenRISC the atomic set_bit() call implies a memory
+ * barrier. Otherwise we would need: smp_wmb(); paired
+ * with the read in ompic_ipi_handler.
+ */
+
+ ompic_writereg(ompic_base, OMPIC_CTRL(src_cpu),
+ OMPIC_CTRL_IRQ_GEN |
+ OMPIC_CTRL_DST(dst_cpu) |
+ OMPIC_DATA(1));
+ }
+}
+
+static irqreturn_t ompic_ipi_handler(int irq, void *dev_id)
+{
+ unsigned int cpu = smp_processor_id();
+ unsigned long *pending_ops = &per_cpu(ops, cpu);
+ unsigned long ops;
+
+ ompic_writereg(ompic_base, OMPIC_CTRL(cpu), OMPIC_CTRL_IRQ_ACK);
+ while ((ops = xchg(pending_ops, 0)) != 0) {
+
+ /*
+ * On OpenRISC the atomic xchg() call implies a memory
+ * barrier. Otherwise we may need an smp_rmb(); paired
+ * with the write in ompic_raise_softirq.
+ */
+
+ do {
+ unsigned long ipi_msg;
+
+ ipi_msg = __ffs(ops);
+ ops &= ~(1UL << ipi_msg);
+
+ handle_IPI(ipi_msg);
+ } while (ops);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int __init ompic_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct resource res;
+ int irq;
+ int ret;
+
+ /* Validate the DT */
+ if (ompic_base) {
+ pr_err("ompic: duplicate ompic's are not supported");
+ return -EEXIST;
+ }
+
+ if (of_address_to_resource(node, 0, &res)) {
+ pr_err("ompic: reg property requires an address and size");
+ return -EINVAL;
+ }
+
+ if (resource_size(&res) < (num_possible_cpus() * OMPIC_CPUBYTES)) {
+ pr_err("ompic: reg size, currently %d must be at least %d",
+ resource_size(&res),
+ (num_possible_cpus() * OMPIC_CPUBYTES));
+ return -EINVAL;
+ }
+
+ /* Setup the device */
+ ompic_base = ioremap(res.start, resource_size(&res));
+ if (!ompic_base) {
+ pr_err("ompic: unable to map registers");
+ return -ENOMEM;
+ }
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (irq <= 0) {
+ pr_err("ompic: unable to parse device irq");
+ ret = -EINVAL;
+ goto out_unmap;
+ }
+
+ ret = request_irq(irq, ompic_ipi_handler, IRQF_PERCPU,
+ "ompic_ipi", NULL);
+ if (ret)
+ goto out_irq_disp;
+
+ set_smp_cross_call(ompic_raise_softirq);
+
+ return 0;
+
+out_irq_disp:
+ irq_dispose_mapping(irq);
+out_unmap:
+ iounmap(ompic_base);
+ ompic_base = NULL;
+ return ret;
+}
+IRQCHIP_DECLARE(ompic, "openrisc,ompic", ompic_of_init);
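Annotation (not part of the patch): to make the register layout documented in the driver's header comment concrete, the illustrative macros below decode a raw status word according to that layout; they are not part of the driver.

#define EXAMPLE_OMPIC_STAT_PENDING(stat)	(!!((stat) & (1 << 30)))	/* bit 30 */
#define EXAMPLE_OMPIC_STAT_SRC(stat)		(((stat) >> 16) & 0x3fff)	/* bits 29..16 */
#define EXAMPLE_OMPIC_STAT_DATA(stat)		((stat) & 0xffff)		/* bits 15..0 */

/*
 * e.g. a raw status of 0x40020001 decodes as: IPI pending, sent by core 2,
 * data field 1 - consistent with ompic_raise_softirq() sending OMPIC_DATA(1).
 */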
diff --git a/drivers/macintosh/adb-iop.c b/drivers/macintosh/adb-iop.c
index dbc4a3e63396..15db69d8ba69 100644
--- a/drivers/macintosh/adb-iop.c
+++ b/drivers/macintosh/adb-iop.c
@@ -30,8 +30,6 @@
/*#define DEBUG_ADB_IOP*/
-extern void iop_ism_irq(int, void *);
-
static struct adb_request *current_req;
static struct adb_request *last_req;
#if 0
@@ -266,7 +264,7 @@ int adb_iop_autopoll(int devs)
void adb_iop_poll(void)
{
if (adb_iop_state == idle) adb_iop_start();
- iop_ism_irq(0, (void *) ADB_IOP);
+ iop_ism_irq_poll(ADB_IOP);
}
int adb_iop_reset_bus(void)
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index d216a8f7bc22..33bb074d6941 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -347,7 +347,7 @@ static void __cache_size_refresh(void)
BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
BUG_ON(dm_bufio_client_count < 0);
- dm_bufio_cache_size_latch = ACCESS_ONCE(dm_bufio_cache_size);
+ dm_bufio_cache_size_latch = READ_ONCE(dm_bufio_cache_size);
/*
* Use default if set to 0 and report the actual cache size used.
@@ -960,7 +960,7 @@ static void __get_memory_limit(struct dm_bufio_client *c,
{
unsigned long buffers;
- if (unlikely(ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch)) {
+ if (unlikely(READ_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch)) {
if (mutex_trylock(&dm_bufio_clients_lock)) {
__cache_size_refresh();
mutex_unlock(&dm_bufio_clients_lock);
@@ -1600,7 +1600,7 @@ static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
static unsigned long get_retain_buffers(struct dm_bufio_client *c)
{
- unsigned long retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes);
+ unsigned long retain_bytes = READ_ONCE(dm_bufio_retain_bytes);
return retain_bytes >> (c->sectors_per_block_bits + SECTOR_SHIFT);
}
@@ -1647,7 +1647,7 @@ dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker);
- return ACCESS_ONCE(c->n_buffers[LIST_CLEAN]) + ACCESS_ONCE(c->n_buffers[LIST_DIRTY]);
+ return READ_ONCE(c->n_buffers[LIST_CLEAN]) + READ_ONCE(c->n_buffers[LIST_DIRTY]);
}
/*
@@ -1818,7 +1818,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset);
static unsigned get_max_age_hz(void)
{
- unsigned max_age = ACCESS_ONCE(dm_bufio_max_age);
+ unsigned max_age = READ_ONCE(dm_bufio_max_age);
if (max_age > UINT_MAX / HZ)
max_age = UINT_MAX / HZ;
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 096fe9b66c50..8c5756e1df94 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -6,6 +6,7 @@
* This file is released under the GPL.
*/
+#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
@@ -80,13 +81,13 @@ struct journal_entry {
#define journal_entry_tag(ic, je) ((__u8 *)&(je)->last_bytes[(ic)->sectors_per_block])
#if BITS_PER_LONG == 64
-#define journal_entry_set_sector(je, x) do { smp_wmb(); ACCESS_ONCE((je)->u.sector) = cpu_to_le64(x); } while (0)
+#define journal_entry_set_sector(je, x) do { smp_wmb(); WRITE_ONCE((je)->u.sector, cpu_to_le64(x)); } while (0)
#define journal_entry_get_sector(je) le64_to_cpu((je)->u.sector)
#elif defined(CONFIG_LBDAF)
-#define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); ACCESS_ONCE((je)->u.s.sector_hi) = cpu_to_le32((x) >> 32); } while (0)
+#define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32((x) >> 32)); } while (0)
#define journal_entry_get_sector(je) le64_to_cpu((je)->u.sector)
#else
-#define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); ACCESS_ONCE((je)->u.s.sector_hi) = cpu_to_le32(0); } while (0)
+#define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32(0)); } while (0)
#define journal_entry_get_sector(je) le32_to_cpu((je)->u.s.sector_lo)
#endif
#define journal_entry_is_unused(je) ((je)->u.s.sector_hi == cpu_to_le32(-1))
@@ -320,7 +321,7 @@ static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, in
static int dm_integrity_failed(struct dm_integrity_c *ic)
{
- return ACCESS_ONCE(ic->failed);
+ return READ_ONCE(ic->failed);
}
static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i,
@@ -1545,7 +1546,7 @@ retry_kmap:
smp_mb();
if (unlikely(waitqueue_active(&ic->copy_to_journal_wait)))
wake_up(&ic->copy_to_journal_wait);
- if (ACCESS_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) {
+ if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) {
queue_work(ic->commit_wq, &ic->commit_work);
} else {
schedule_autocommit(ic);
@@ -1798,7 +1799,7 @@ static void integrity_commit(struct work_struct *w)
ic->n_committed_sections += commit_sections;
spin_unlock_irq(&ic->endio_wait.lock);
- if (ACCESS_ONCE(ic->free_sectors) <= ic->free_sectors_threshold)
+ if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold)
queue_work(ic->writer_wq, &ic->writer_work);
release_flush_bios:
@@ -1980,7 +1981,7 @@ static void integrity_writer(struct work_struct *w)
unsigned prev_free_sectors;
/* the following test is not needed, but it tests the replay code */
- if (ACCESS_ONCE(ic->suspending))
+ if (READ_ONCE(ic->suspending))
return;
spin_lock_irq(&ic->endio_wait.lock);
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index cf2c67e35eaf..eb45cc3df31d 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -107,7 +107,7 @@ static void io_job_start(struct dm_kcopyd_throttle *t)
try_again:
spin_lock_irq(&throttle_spinlock);
- throttle = ACCESS_ONCE(t->throttle);
+ throttle = READ_ONCE(t->throttle);
if (likely(throttle >= 100))
goto skip_limit;
@@ -157,7 +157,7 @@ static void io_job_finish(struct dm_kcopyd_throttle *t)
t->num_io_jobs--;
- if (likely(ACCESS_ONCE(t->throttle) >= 100))
+ if (likely(READ_ONCE(t->throttle) >= 100))
goto skip_limit;
if (!t->num_io_jobs) {
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 11f273d2f018..3f88c9d32f7e 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -366,7 +366,7 @@ static struct pgpath *choose_path_in_pg(struct multipath *m,
pgpath = path_to_pgpath(path);
- if (unlikely(lockless_dereference(m->current_pg) != pg)) {
+ if (unlikely(READ_ONCE(m->current_pg) != pg)) {
/* Only update current_pgpath if pg changed */
spin_lock_irqsave(&m->lock, flags);
m->current_pgpath = pgpath;
@@ -390,7 +390,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
}
/* Were we instructed to switch PG? */
- if (lockless_dereference(m->next_pg)) {
+ if (READ_ONCE(m->next_pg)) {
spin_lock_irqsave(&m->lock, flags);
pg = m->next_pg;
if (!pg) {
@@ -406,7 +406,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
/* Don't change PG until it has no remaining paths */
check_current_pg:
- pg = lockless_dereference(m->current_pg);
+ pg = READ_ONCE(m->current_pg);
if (pg) {
pgpath = choose_path_in_pg(m, pg, nr_bytes);
if (!IS_ERR_OR_NULL(pgpath))
@@ -473,7 +473,7 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
struct request *clone;
/* Do we need to select a new pgpath? */
- pgpath = lockless_dereference(m->current_pgpath);
+ pgpath = READ_ONCE(m->current_pgpath);
if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
pgpath = choose_pgpath(m, nr_bytes);
@@ -535,7 +535,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
bool queue_io;
/* Do we need to select a new pgpath? */
- pgpath = lockless_dereference(m->current_pgpath);
+ pgpath = READ_ONCE(m->current_pgpath);
queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
if (!pgpath || !queue_io)
pgpath = choose_pgpath(m, nr_bytes);
@@ -1804,7 +1804,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
struct pgpath *current_pgpath;
int r;
- current_pgpath = lockless_dereference(m->current_pgpath);
+ current_pgpath = READ_ONCE(m->current_pgpath);
if (!current_pgpath)
current_pgpath = choose_pgpath(m, 0);
@@ -1826,7 +1826,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
}
if (r == -ENOTCONN) {
- if (!lockless_dereference(m->current_pg)) {
+ if (!READ_ONCE(m->current_pg)) {
/* Path status changed, redo selection */
(void) choose_pgpath(m, 0);
}
@@ -1895,9 +1895,9 @@ static int multipath_busy(struct dm_target *ti)
return (m->queue_mode != DM_TYPE_MQ_REQUEST_BASED);
/* Guess which priority_group will be used at next mapping time */
- pg = lockless_dereference(m->current_pg);
- next_pg = lockless_dereference(m->next_pg);
- if (unlikely(!lockless_dereference(m->current_pgpath) && next_pg))
+ pg = READ_ONCE(m->current_pg);
+ next_pg = READ_ONCE(m->next_pg);
+ if (unlikely(!READ_ONCE(m->current_pgpath) && next_pg))
pg = next_pg;
if (!pg) {
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index a7868503d135..29bc51084c82 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -432,7 +432,7 @@ do_sync_free:
synchronize_rcu_expedited();
dm_stat_free(&s->rcu_head);
} else {
- ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
+ WRITE_ONCE(dm_stat_need_rcu_barrier, 1);
call_rcu(&s->rcu_head, dm_stat_free);
}
return 0;
@@ -640,12 +640,12 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
*/
last = raw_cpu_ptr(stats->last);
stats_aux->merged =
- (bi_sector == (ACCESS_ONCE(last->last_sector) &&
+ (bi_sector == (READ_ONCE(last->last_sector) &&
((bi_rw == WRITE) ==
- (ACCESS_ONCE(last->last_rw) == WRITE))
+ (READ_ONCE(last->last_rw) == WRITE))
));
- ACCESS_ONCE(last->last_sector) = end_sector;
- ACCESS_ONCE(last->last_rw) = bi_rw;
+ WRITE_ONCE(last->last_sector, end_sector);
+ WRITE_ONCE(last->last_rw, bi_rw);
}
rcu_read_lock();
@@ -694,22 +694,22 @@ static void __dm_stat_init_temporary_percpu_totals(struct dm_stat_shared *shared
for_each_possible_cpu(cpu) {
p = &s->stat_percpu[cpu][x];
- shared->tmp.sectors[READ] += ACCESS_ONCE(p->sectors[READ]);
- shared->tmp.sectors[WRITE] += ACCESS_ONCE(p->sectors[WRITE]);
- shared->tmp.ios[READ] += ACCESS_ONCE(p->ios[READ]);
- shared->tmp.ios[WRITE] += ACCESS_ONCE(p->ios[WRITE]);
- shared->tmp.merges[READ] += ACCESS_ONCE(p->merges[READ]);
- shared->tmp.merges[WRITE] += ACCESS_ONCE(p->merges[WRITE]);
- shared->tmp.ticks[READ] += ACCESS_ONCE(p->ticks[READ]);
- shared->tmp.ticks[WRITE] += ACCESS_ONCE(p->ticks[WRITE]);
- shared->tmp.io_ticks[READ] += ACCESS_ONCE(p->io_ticks[READ]);
- shared->tmp.io_ticks[WRITE] += ACCESS_ONCE(p->io_ticks[WRITE]);
- shared->tmp.io_ticks_total += ACCESS_ONCE(p->io_ticks_total);
- shared->tmp.time_in_queue += ACCESS_ONCE(p->time_in_queue);
+ shared->tmp.sectors[READ] += READ_ONCE(p->sectors[READ]);
+ shared->tmp.sectors[WRITE] += READ_ONCE(p->sectors[WRITE]);
+ shared->tmp.ios[READ] += READ_ONCE(p->ios[READ]);
+ shared->tmp.ios[WRITE] += READ_ONCE(p->ios[WRITE]);
+ shared->tmp.merges[READ] += READ_ONCE(p->merges[READ]);
+ shared->tmp.merges[WRITE] += READ_ONCE(p->merges[WRITE]);
+ shared->tmp.ticks[READ] += READ_ONCE(p->ticks[READ]);
+ shared->tmp.ticks[WRITE] += READ_ONCE(p->ticks[WRITE]);
+ shared->tmp.io_ticks[READ] += READ_ONCE(p->io_ticks[READ]);
+ shared->tmp.io_ticks[WRITE] += READ_ONCE(p->io_ticks[WRITE]);
+ shared->tmp.io_ticks_total += READ_ONCE(p->io_ticks_total);
+ shared->tmp.time_in_queue += READ_ONCE(p->time_in_queue);
if (s->n_histogram_entries) {
unsigned i;
for (i = 0; i < s->n_histogram_entries + 1; i++)
- shared->tmp.histogram[i] += ACCESS_ONCE(p->histogram[i]);
+ shared->tmp.histogram[i] += READ_ONCE(p->histogram[i]);
}
}
}
diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c
index 4c8de1ff78ca..8d0ba879777e 100644
--- a/drivers/md/dm-switch.c
+++ b/drivers/md/dm-switch.c
@@ -144,7 +144,7 @@ static unsigned switch_region_table_read(struct switch_ctx *sctx, unsigned long
switch_get_position(sctx, region_nr, &region_index, &bit);
- return (ACCESS_ONCE(sctx->region_table[region_index]) >> bit) &
+ return (READ_ONCE(sctx->region_table[region_index]) >> bit) &
((1 << sctx->region_table_entry_bits) - 1);
}
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 1e25705209c2..89e5dff9b4cf 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -2431,7 +2431,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
struct pool_c *pt = pool->ti->private;
bool needs_check = dm_pool_metadata_needs_check(pool->pmd);
enum pool_mode old_mode = get_pool_mode(pool);
- unsigned long no_space_timeout = ACCESS_ONCE(no_space_timeout_secs) * HZ;
+ unsigned long no_space_timeout = READ_ONCE(no_space_timeout_secs) * HZ;
/*
* Never allow the pool to transition to PM_WRITE mode if user
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index bda3caca23ca..fba93237a780 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -589,7 +589,7 @@ static void verity_prefetch_io(struct work_struct *work)
verity_hash_at_level(v, pw->block, i, &hash_block_start, NULL);
verity_hash_at_level(v, pw->block + pw->n_blocks - 1, i, &hash_block_end, NULL);
if (!i) {
- unsigned cluster = ACCESS_ONCE(dm_verity_prefetch_cluster);
+ unsigned cluster = READ_ONCE(dm_verity_prefetch_cluster);
cluster >>= v->data_dev_block_bits;
if (unlikely(!cluster))
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 4be85324f44d..8aaffa19b29a 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -114,7 +114,7 @@ static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
static int __dm_get_module_param_int(int *module_param, int min, int max)
{
- int param = ACCESS_ONCE(*module_param);
+ int param = READ_ONCE(*module_param);
int modified_param = 0;
bool modified = true;
@@ -136,7 +136,7 @@ static int __dm_get_module_param_int(int *module_param, int min, int max)
unsigned __dm_get_module_param(unsigned *module_param,
unsigned def, unsigned max)
{
- unsigned param = ACCESS_ONCE(*module_param);
+ unsigned param = READ_ONCE(*module_param);
unsigned modified_param = 0;
if (!param)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 0ff1bbf6c90e..447ddcbc9566 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2651,7 +2651,7 @@ state_show(struct md_rdev *rdev, char *page)
{
char *sep = ",";
size_t len = 0;
- unsigned long flags = ACCESS_ONCE(rdev->flags);
+ unsigned long flags = READ_ONCE(rdev->flags);
if (test_bit(Faulty, &flags) ||
(!test_bit(ExternalBbl, &flags) &&
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 928e24a07133..7d9a50eed9db 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -6072,7 +6072,7 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n
*/
rcu_read_lock();
for (i = 0; i < conf->raid_disks; i++) {
- struct md_rdev *rdev = ACCESS_ONCE(conf->disks[i].rdev);
+ struct md_rdev *rdev = READ_ONCE(conf->disks[i].rdev);
if (rdev == NULL || test_bit(Faulty, &rdev->flags))
still_degraded = 1;
diff --git a/drivers/media/dvb-core/dvb_ringbuffer.c b/drivers/media/dvb-core/dvb_ringbuffer.c
index 2322af1b8742..53011629c9ad 100644
--- a/drivers/media/dvb-core/dvb_ringbuffer.c
+++ b/drivers/media/dvb-core/dvb_ringbuffer.c
@@ -66,12 +66,12 @@ ssize_t dvb_ringbuffer_free(struct dvb_ringbuffer *rbuf)
{
ssize_t free;
- /* ACCESS_ONCE() to load read pointer on writer side
+ /* READ_ONCE() to load read pointer on writer side
* this pairs with smp_store_release() in dvb_ringbuffer_read(),
* dvb_ringbuffer_read_user(), dvb_ringbuffer_flush(),
* or dvb_ringbuffer_reset()
*/
- free = ACCESS_ONCE(rbuf->pread) - rbuf->pwrite;
+ free = READ_ONCE(rbuf->pread) - rbuf->pwrite;
if (free <= 0)
free += rbuf->size;
return free-1;
@@ -143,7 +143,7 @@ ssize_t dvb_ringbuffer_read_user(struct dvb_ringbuffer *rbuf, u8 __user *buf, si
todo -= split;
/* smp_store_release() for read pointer update to ensure
* that buf is not overwritten until read is complete,
- * this pairs with ACCESS_ONCE() in dvb_ringbuffer_free()
+ * this pairs with READ_ONCE() in dvb_ringbuffer_free()
*/
smp_store_release(&rbuf->pread, 0);
}
@@ -168,7 +168,7 @@ void dvb_ringbuffer_read(struct dvb_ringbuffer *rbuf, u8 *buf, size_t len)
todo -= split;
/* smp_store_release() for read pointer update to ensure
* that buf is not overwritten until read is complete,
- * this pairs with ACCESS_ONCE() in dvb_ringbuffer_free()
+ * this pairs with READ_ONCE() in dvb_ringbuffer_free()
*/
smp_store_release(&rbuf->pread, 0);
}
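
The comment being updated here documents a single-producer/single-consumer protocol: the reader publishes its read pointer with smp_store_release() and the writer samples it with READ_ONCE(). A stand-alone sketch of that pairing follows, assuming exactly one reader and one writer and omitting wrap-around of the copy itself; demo_ring and its helpers are invented names, not the dvb_ringbuffer API.

#include <linux/compiler.h>
#include <linux/string.h>
#include <asm/barrier.h>	/* smp_store_release() */

struct demo_ring {
	u8	*data;
	ssize_t	size;
	ssize_t	pread;		/* advanced only by the consumer */
	ssize_t	pwrite;		/* advanced only by the producer */
};

/* Producer side: room left before unread data would be overwritten. */
static ssize_t demo_ring_free(struct demo_ring *rb)
{
	/* Pairs with the smp_store_release() in demo_ring_consume(). */
	ssize_t free = READ_ONCE(rb->pread) - rb->pwrite;

	if (free <= 0)
		free += rb->size;
	return free - 1;
}

/* Consumer side: copy the data out, then publish the new read pointer. */
static void demo_ring_consume(struct demo_ring *rb, u8 *dst, size_t len)
{
	memcpy(dst, rb->data + rb->pread, len);
	/*
	 * The release ensures the copy is complete before the producer can
	 * observe the advanced pointer and start reusing the slot.
	 */
	smp_store_release(&rb->pread, (rb->pread + len) % rb->size);
}
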
diff --git a/drivers/misc/lkdtm_core.c b/drivers/misc/lkdtm_core.c
index 981b3ef71e47..ed7f0c61c59a 100644
--- a/drivers/misc/lkdtm_core.c
+++ b/drivers/misc/lkdtm_core.c
@@ -56,122 +56,54 @@ static ssize_t direct_entry(struct file *f, const char __user *user_buf,
size_t count, loff_t *off);
#ifdef CONFIG_KPROBES
-static void lkdtm_handler(void);
+static int lkdtm_kprobe_handler(struct kprobe *kp, struct pt_regs *regs);
static ssize_t lkdtm_debugfs_entry(struct file *f,
const char __user *user_buf,
size_t count, loff_t *off);
-
-
-/* jprobe entry point handlers. */
-static unsigned int jp_do_irq(unsigned int irq)
-{
- lkdtm_handler();
- jprobe_return();
- return 0;
-}
-
-static irqreturn_t jp_handle_irq_event(unsigned int irq,
- struct irqaction *action)
-{
- lkdtm_handler();
- jprobe_return();
- return 0;
-}
-
-static void jp_tasklet_action(struct softirq_action *a)
-{
- lkdtm_handler();
- jprobe_return();
-}
-
-static void jp_ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
-{
- lkdtm_handler();
- jprobe_return();
-}
-
-struct scan_control;
-
-static unsigned long jp_shrink_inactive_list(unsigned long max_scan,
- struct zone *zone,
- struct scan_control *sc)
-{
- lkdtm_handler();
- jprobe_return();
- return 0;
-}
-
-static int jp_hrtimer_start(struct hrtimer *timer, ktime_t tim,
- const enum hrtimer_mode mode)
-{
- lkdtm_handler();
- jprobe_return();
- return 0;
-}
-
-static int jp_scsi_dispatch_cmd(struct scsi_cmnd *cmd)
-{
- lkdtm_handler();
- jprobe_return();
- return 0;
-}
-
-# ifdef CONFIG_IDE
-static int jp_generic_ide_ioctl(ide_drive_t *drive, struct file *file,
- struct block_device *bdev, unsigned int cmd,
- unsigned long arg)
-{
- lkdtm_handler();
- jprobe_return();
- return 0;
-}
-# endif
+# define CRASHPOINT_KPROBE(_symbol) \
+ .kprobe = { \
+ .symbol_name = (_symbol), \
+ .pre_handler = lkdtm_kprobe_handler, \
+ },
+# define CRASHPOINT_WRITE(_symbol) \
+ (_symbol) ? lkdtm_debugfs_entry : direct_entry
+#else
+# define CRASHPOINT_KPROBE(_symbol)
+# define CRASHPOINT_WRITE(_symbol) direct_entry
#endif
/* Crash points */
struct crashpoint {
const char *name;
const struct file_operations fops;
- struct jprobe jprobe;
+ struct kprobe kprobe;
};
-#define CRASHPOINT(_name, _write, _symbol, _entry) \
+#define CRASHPOINT(_name, _symbol) \
{ \
.name = _name, \
.fops = { \
.read = lkdtm_debugfs_read, \
.llseek = generic_file_llseek, \
.open = lkdtm_debugfs_open, \
- .write = _write, \
- }, \
- .jprobe = { \
- .kp.symbol_name = _symbol, \
- .entry = (kprobe_opcode_t *)_entry, \
+ .write = CRASHPOINT_WRITE(_symbol) \
}, \
+ CRASHPOINT_KPROBE(_symbol) \
}
/* Define the possible places where we can trigger a crash point. */
-struct crashpoint crashpoints[] = {
- CRASHPOINT("DIRECT", direct_entry,
- NULL, NULL),
+static struct crashpoint crashpoints[] = {
+ CRASHPOINT("DIRECT", NULL),
#ifdef CONFIG_KPROBES
- CRASHPOINT("INT_HARDWARE_ENTRY", lkdtm_debugfs_entry,
- "do_IRQ", jp_do_irq),
- CRASHPOINT("INT_HW_IRQ_EN", lkdtm_debugfs_entry,
- "handle_IRQ_event", jp_handle_irq_event),
- CRASHPOINT("INT_TASKLET_ENTRY", lkdtm_debugfs_entry,
- "tasklet_action", jp_tasklet_action),
- CRASHPOINT("FS_DEVRW", lkdtm_debugfs_entry,
- "ll_rw_block", jp_ll_rw_block),
- CRASHPOINT("MEM_SWAPOUT", lkdtm_debugfs_entry,
- "shrink_inactive_list", jp_shrink_inactive_list),
- CRASHPOINT("TIMERADD", lkdtm_debugfs_entry,
- "hrtimer_start", jp_hrtimer_start),
- CRASHPOINT("SCSI_DISPATCH_CMD", lkdtm_debugfs_entry,
- "scsi_dispatch_cmd", jp_scsi_dispatch_cmd),
+ CRASHPOINT("INT_HARDWARE_ENTRY", "do_IRQ"),
+ CRASHPOINT("INT_HW_IRQ_EN", "handle_IRQ_event"),
+ CRASHPOINT("INT_TASKLET_ENTRY", "tasklet_action"),
+ CRASHPOINT("FS_DEVRW", "ll_rw_block"),
+ CRASHPOINT("MEM_SWAPOUT", "shrink_inactive_list"),
+ CRASHPOINT("TIMERADD", "hrtimer_start"),
+ CRASHPOINT("SCSI_DISPATCH_CMD", "scsi_dispatch_cmd"),
# ifdef CONFIG_IDE
- CRASHPOINT("IDE_CORE_CP", lkdtm_debugfs_entry,
- "generic_ide_ioctl", jp_generic_ide_ioctl),
+ CRASHPOINT("IDE_CORE_CP", "generic_ide_ioctl"),
# endif
#endif
};
@@ -254,8 +186,8 @@ struct crashtype crashtypes[] = {
};
-/* Global jprobe entry and crashtype. */
-static struct jprobe *lkdtm_jprobe;
+/* Global kprobe entry and crashtype. */
+static struct kprobe *lkdtm_kprobe;
struct crashpoint *lkdtm_crashpoint;
struct crashtype *lkdtm_crashtype;
@@ -298,7 +230,8 @@ static struct crashtype *find_crashtype(const char *name)
*/
static noinline void lkdtm_do_action(struct crashtype *crashtype)
{
- BUG_ON(!crashtype || !crashtype->func);
+ if (WARN_ON(!crashtype || !crashtype->func))
+ return;
crashtype->func();
}
@@ -308,22 +241,22 @@ static int lkdtm_register_cpoint(struct crashpoint *crashpoint,
int ret;
/* If this doesn't have a symbol, just call immediately. */
- if (!crashpoint->jprobe.kp.symbol_name) {
+ if (!crashpoint->kprobe.symbol_name) {
lkdtm_do_action(crashtype);
return 0;
}
- if (lkdtm_jprobe != NULL)
- unregister_jprobe(lkdtm_jprobe);
+ if (lkdtm_kprobe != NULL)
+ unregister_kprobe(lkdtm_kprobe);
lkdtm_crashpoint = crashpoint;
lkdtm_crashtype = crashtype;
- lkdtm_jprobe = &crashpoint->jprobe;
- ret = register_jprobe(lkdtm_jprobe);
+ lkdtm_kprobe = &crashpoint->kprobe;
+ ret = register_kprobe(lkdtm_kprobe);
if (ret < 0) {
- pr_info("Couldn't register jprobe %s\n",
- crashpoint->jprobe.kp.symbol_name);
- lkdtm_jprobe = NULL;
+ pr_info("Couldn't register kprobe %s\n",
+ crashpoint->kprobe.symbol_name);
+ lkdtm_kprobe = NULL;
lkdtm_crashpoint = NULL;
lkdtm_crashtype = NULL;
}
@@ -336,13 +269,14 @@ static int lkdtm_register_cpoint(struct crashpoint *crashpoint,
static int crash_count = DEFAULT_COUNT;
static DEFINE_SPINLOCK(crash_count_lock);
-/* Called by jprobe entry points. */
-static void lkdtm_handler(void)
+/* Called by kprobe entry points. */
+static int lkdtm_kprobe_handler(struct kprobe *kp, struct pt_regs *regs)
{
unsigned long flags;
bool do_it = false;
- BUG_ON(!lkdtm_crashpoint || !lkdtm_crashtype);
+ if (WARN_ON(!lkdtm_crashpoint || !lkdtm_crashtype))
+ return 0;
spin_lock_irqsave(&crash_count_lock, flags);
crash_count--;
@@ -357,6 +291,8 @@ static void lkdtm_handler(void)
if (do_it)
lkdtm_do_action(lkdtm_crashtype);
+
+ return 0;
}
static ssize_t lkdtm_debugfs_entry(struct file *f,
@@ -556,8 +492,8 @@ static void __exit lkdtm_module_exit(void)
/* Handle test-specific clean-up. */
lkdtm_usercopy_exit();
- if (lkdtm_jprobe != NULL)
- unregister_jprobe(lkdtm_jprobe);
+ if (lkdtm_kprobe != NULL)
+ unregister_kprobe(lkdtm_kprobe);
pr_info("Crash point unregistered\n");
}
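
With jprobes on the way out, the crash points are re-registered as plain kprobes whose pre_handler runs on function entry and returns 0 to let the probed function continue. Here is a minimal, self-contained module using the same mechanism; the probed symbol and the demo_* names are arbitrary choices for illustration, not part of lkdtm.

#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/module.h>

static int demo_pre_handler(struct kprobe *kp, struct pt_regs *regs)
{
	pr_info("hit %s\n", kp->symbol_name);
	return 0;	/* 0 = let the probed function run normally */
}

static struct kprobe demo_kp = {
	.symbol_name	= "do_sys_open",	/* arbitrary example symbol */
	.pre_handler	= demo_pre_handler,
};

static int __init demo_init(void)
{
	return register_kprobe(&demo_kp);
}

static void __exit demo_exit(void)
{
	unregister_kprobe(&demo_kp);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
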
diff --git a/drivers/misc/mic/scif/scif_rb.c b/drivers/misc/mic/scif/scif_rb.c
index 637cc4686742..b665757ca89a 100644
--- a/drivers/misc/mic/scif/scif_rb.c
+++ b/drivers/misc/mic/scif/scif_rb.c
@@ -138,7 +138,7 @@ void scif_rb_commit(struct scif_rb *rb)
* the read barrier in scif_rb_count(..)
*/
wmb();
- ACCESS_ONCE(*rb->write_ptr) = rb->current_write_offset;
+ WRITE_ONCE(*rb->write_ptr, rb->current_write_offset);
#ifdef CONFIG_INTEL_MIC_CARD
/*
* X100 Si bug: For the case where a Core is performing an EXT_WR
@@ -147,7 +147,7 @@ void scif_rb_commit(struct scif_rb *rb)
* This way, if ordering is violated for the Interrupt Message, it will
* fall just behind the first Posted associated with the first EXT_WR.
*/
- ACCESS_ONCE(*rb->write_ptr) = rb->current_write_offset;
+ WRITE_ONCE(*rb->write_ptr, rb->current_write_offset);
#endif
}
@@ -210,7 +210,7 @@ void scif_rb_update_read_ptr(struct scif_rb *rb)
* scif_rb_space(..)
*/
mb();
- ACCESS_ONCE(*rb->read_ptr) = new_offset;
+ WRITE_ONCE(*rb->read_ptr, new_offset);
#ifdef CONFIG_INTEL_MIC_CARD
/*
* X100 Si Bug: For the case where a Core is performing an EXT_WR
@@ -219,7 +219,7 @@ void scif_rb_update_read_ptr(struct scif_rb *rb)
* This way, if ordering is violated for the Interrupt Message, it will
* fall just behind the first Posted associated with the first EXT_WR.
*/
- ACCESS_ONCE(*rb->read_ptr) = new_offset;
+ WRITE_ONCE(*rb->read_ptr, new_offset);
#endif
}
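
scif_rb_commit() and scif_rb_update_read_ptr() publish an offset to the peer only after a full write barrier has ordered the payload stores; the explicit wmb() stays and only the store macro changes from ACCESS_ONCE() to WRITE_ONCE(). A distilled sketch of that publish step; demo_commit() is an invented name.

#include <linux/compiler.h>
#include <asm/barrier.h>

/* Publish a new write offset to a header the peer polls. */
static void demo_commit(u32 *write_ptr, u32 new_offset)
{
	/*
	 * wmb() orders all preceding payload stores before the pointer
	 * update; WRITE_ONCE() keeps the compiler from tearing or
	 * reordering the store itself.
	 */
	wmb();
	WRITE_ONCE(*write_ptr, new_offset);
}
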
diff --git a/drivers/misc/mic/scif/scif_rma_list.c b/drivers/misc/mic/scif/scif_rma_list.c
index e1ef8daedd5a..a036dbb4101e 100644
--- a/drivers/misc/mic/scif/scif_rma_list.c
+++ b/drivers/misc/mic/scif/scif_rma_list.c
@@ -277,7 +277,7 @@ retry:
* Need to restart list traversal if there has been
* an asynchronous list entry deletion.
*/
- if (ACCESS_ONCE(ep->rma_info.async_list_del))
+ if (READ_ONCE(ep->rma_info.async_list_del))
goto retry;
}
mutex_unlock(&ep->rma_info.rma_lock);
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 2ad7b5c69156..ea80ff4cd7f9 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -28,6 +28,7 @@
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
+#include <linux/cdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
@@ -86,6 +87,7 @@ static int max_devices;
#define MAX_DEVICES 256
static DEFINE_IDA(mmc_blk_ida);
+static DEFINE_IDA(mmc_rpmb_ida);
/*
* There is one mmc_blk_data per slot.
@@ -96,6 +98,7 @@ struct mmc_blk_data {
struct gendisk *disk;
struct mmc_queue queue;
struct list_head part;
+ struct list_head rpmbs;
unsigned int flags;
#define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */
@@ -121,6 +124,32 @@ struct mmc_blk_data {
int area_type;
};
+/* Device type for RPMB character devices */
+static dev_t mmc_rpmb_devt;
+
+/* Bus type for RPMB character devices */
+static struct bus_type mmc_rpmb_bus_type = {
+ .name = "mmc_rpmb",
+};
+
+/**
+ * struct mmc_rpmb_data - special RPMB device type for these areas
+ * @dev: the device for the RPMB area
+ * @chrdev: character device for the RPMB area
+ * @id: unique device ID number
+ * @part_index: partition index (0 on first)
+ * @md: parent MMC block device
+ * @node: list item, so we can put this device on a list
+ */
+struct mmc_rpmb_data {
+ struct device dev;
+ struct cdev chrdev;
+ int id;
+ unsigned int part_index;
+ struct mmc_blk_data *md;
+ struct list_head node;
+};
+
static DEFINE_MUTEX(open_lock);
module_param(perdev_minors, int, 0444);
@@ -299,6 +328,7 @@ struct mmc_blk_ioc_data {
struct mmc_ioc_cmd ic;
unsigned char *buf;
u64 buf_bytes;
+ struct mmc_rpmb_data *rpmb;
};
static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
@@ -437,14 +467,25 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
struct mmc_request mrq = {};
struct scatterlist sg;
int err;
- bool is_rpmb = false;
+ unsigned int target_part;
u32 status = 0;
if (!card || !md || !idata)
return -EINVAL;
- if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
- is_rpmb = true;
+ /*
+	 * The RPMB accesses come in from the character device, so we
+	 * need to target these explicitly. Otherwise we just target the
+	 * partition type for the block device the ioctl() was issued
+ * on.
+ */
+ if (idata->rpmb) {
+ /* Support multiple RPMB partitions */
+ target_part = idata->rpmb->part_index;
+ target_part |= EXT_CSD_PART_CONFIG_ACC_RPMB;
+ } else {
+ target_part = md->part_type;
+ }
cmd.opcode = idata->ic.opcode;
cmd.arg = idata->ic.arg;
@@ -488,7 +529,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
mrq.cmd = &cmd;
- err = mmc_blk_part_switch(card, md->part_type);
+ err = mmc_blk_part_switch(card, target_part);
if (err)
return err;
@@ -498,7 +539,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
return err;
}
- if (is_rpmb) {
+ if (idata->rpmb) {
err = mmc_set_blockcount(card, data.blocks,
idata->ic.write_flag & (1 << 31));
if (err)
@@ -538,7 +579,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));
- if (is_rpmb) {
+ if (idata->rpmb) {
/*
* Ensure RPMB command has completed by polling CMD13
* "Send Status".
@@ -554,7 +595,8 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
}
static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
- struct mmc_ioc_cmd __user *ic_ptr)
+ struct mmc_ioc_cmd __user *ic_ptr,
+ struct mmc_rpmb_data *rpmb)
{
struct mmc_blk_ioc_data *idata;
struct mmc_blk_ioc_data *idatas[1];
@@ -566,6 +608,8 @@ static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
if (IS_ERR(idata))
return PTR_ERR(idata);
+	/* This will be NULL on non-RPMB ioctl()s */
+ idata->rpmb = rpmb;
card = md->queue.card;
if (IS_ERR(card)) {
@@ -581,7 +625,8 @@ static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
idata->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
__GFP_RECLAIM);
idatas[0] = idata;
- req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_IOCTL;
+ req_to_mmc_queue_req(req)->drv_op =
+ rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
req_to_mmc_queue_req(req)->drv_op_data = idatas;
req_to_mmc_queue_req(req)->ioc_count = 1;
blk_execute_rq(mq->queue, NULL, req, 0);
@@ -596,7 +641,8 @@ cmd_done:
}
static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
- struct mmc_ioc_multi_cmd __user *user)
+ struct mmc_ioc_multi_cmd __user *user,
+ struct mmc_rpmb_data *rpmb)
{
struct mmc_blk_ioc_data **idata = NULL;
struct mmc_ioc_cmd __user *cmds = user->cmds;
@@ -627,6 +673,8 @@ static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
num_of_cmds = i;
goto cmd_err;
}
+		/* This will be NULL on non-RPMB ioctl()s */
+ idata[i]->rpmb = rpmb;
}
card = md->queue.card;
@@ -643,7 +691,8 @@ static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
req = blk_get_request(mq->queue,
idata[0]->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
__GFP_RECLAIM);
- req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_IOCTL;
+ req_to_mmc_queue_req(req)->drv_op =
+ rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
req_to_mmc_queue_req(req)->drv_op_data = idata;
req_to_mmc_queue_req(req)->ioc_count = num_of_cmds;
blk_execute_rq(mq->queue, NULL, req, 0);
@@ -691,7 +740,8 @@ static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
if (!md)
return -EINVAL;
ret = mmc_blk_ioctl_cmd(md,
- (struct mmc_ioc_cmd __user *)arg);
+ (struct mmc_ioc_cmd __user *)arg,
+ NULL);
mmc_blk_put(md);
return ret;
case MMC_IOC_MULTI_CMD:
@@ -702,7 +752,8 @@ static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
if (!md)
return -EINVAL;
ret = mmc_blk_ioctl_multi_cmd(md,
- (struct mmc_ioc_multi_cmd __user *)arg);
+ (struct mmc_ioc_multi_cmd __user *)arg,
+ NULL);
mmc_blk_put(md);
return ret;
default:
@@ -1152,18 +1203,6 @@ static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
md->reset_done &= ~type;
}
-int mmc_access_rpmb(struct mmc_queue *mq)
-{
- struct mmc_blk_data *md = mq->blkdata;
- /*
- * If this is a RPMB partition access, return ture
- */
- if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
- return true;
-
- return false;
-}
-
/*
* The non-block commands come back from the block layer after it queued it and
* processed it with all other requests and then they get issued in this
@@ -1174,17 +1213,19 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
struct mmc_queue_req *mq_rq;
struct mmc_card *card = mq->card;
struct mmc_blk_data *md = mq->blkdata;
- struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);
struct mmc_blk_ioc_data **idata;
+ bool rpmb_ioctl;
u8 **ext_csd;
u32 status;
int ret;
int i;
mq_rq = req_to_mmc_queue_req(req);
+ rpmb_ioctl = (mq_rq->drv_op == MMC_DRV_OP_IOCTL_RPMB);
switch (mq_rq->drv_op) {
case MMC_DRV_OP_IOCTL:
+ case MMC_DRV_OP_IOCTL_RPMB:
idata = mq_rq->drv_op_data;
for (i = 0, ret = 0; i < mq_rq->ioc_count; i++) {
ret = __mmc_blk_ioctl_cmd(card, md, idata[i]);
@@ -1192,8 +1233,8 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
break;
}
/* Always switch back to main area after RPMB access */
- if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
- mmc_blk_part_switch(card, main_md->part_type);
+ if (rpmb_ioctl)
+ mmc_blk_part_switch(card, 0);
break;
case MMC_DRV_OP_BOOT_WP:
ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
@@ -1534,25 +1575,27 @@ static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card,
}
static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
- int disable_multi, bool *do_rel_wr,
- bool *do_data_tag)
+ int disable_multi, bool *do_rel_wr_p,
+ bool *do_data_tag_p)
{
struct mmc_blk_data *md = mq->blkdata;
struct mmc_card *card = md->queue.card;
struct mmc_blk_request *brq = &mqrq->brq;
struct request *req = mmc_queue_req_to_req(mqrq);
+ bool do_rel_wr, do_data_tag;
/*
* Reliable writes are used to implement Forced Unit Access and
* are supported only on MMCs.
*/
- *do_rel_wr = (req->cmd_flags & REQ_FUA) &&
- rq_data_dir(req) == WRITE &&
- (md->flags & MMC_BLK_REL_WR);
+ do_rel_wr = (req->cmd_flags & REQ_FUA) &&
+ rq_data_dir(req) == WRITE &&
+ (md->flags & MMC_BLK_REL_WR);
memset(brq, 0, sizeof(struct mmc_blk_request));
brq->mrq.data = &brq->data;
+ brq->mrq.tag = req->tag;
brq->stop.opcode = MMC_STOP_TRANSMISSION;
brq->stop.arg = 0;
@@ -1567,6 +1610,14 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
brq->data.blksz = 512;
brq->data.blocks = blk_rq_sectors(req);
+ brq->data.blk_addr = blk_rq_pos(req);
+
+ /*
+ * The command queue supports 2 priorities: "high" (1) and "simple" (0).
+ * The eMMC will give "high" priority tasks priority over "simple"
+ * priority tasks. Here we always set "simple" priority by not setting
+ * MMC_DATA_PRIO.
+ */
/*
* The block layer doesn't support all sector count
@@ -1596,18 +1647,23 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
brq->data.blocks);
}
- if (*do_rel_wr)
+ if (do_rel_wr) {
mmc_apply_rel_rw(brq, card, req);
+ brq->data.flags |= MMC_DATA_REL_WR;
+ }
/*
* Data tag is used only during writing meta data to speed
* up write and any subsequent read of this meta data
*/
- *do_data_tag = card->ext_csd.data_tag_unit_size &&
- (req->cmd_flags & REQ_META) &&
- (rq_data_dir(req) == WRITE) &&
- ((brq->data.blocks * brq->data.blksz) >=
- card->ext_csd.data_tag_unit_size);
+ do_data_tag = card->ext_csd.data_tag_unit_size &&
+ (req->cmd_flags & REQ_META) &&
+ (rq_data_dir(req) == WRITE) &&
+ ((brq->data.blocks * brq->data.blksz) >=
+ card->ext_csd.data_tag_unit_size);
+
+ if (do_data_tag)
+ brq->data.flags |= MMC_DATA_DAT_TAG;
mmc_set_data_timeout(&brq->data, card);
@@ -1634,6 +1690,12 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
}
mqrq->areq.mrq = &brq->mrq;
+
+ if (do_rel_wr_p)
+ *do_rel_wr_p = do_rel_wr;
+
+ if (do_data_tag_p)
+ *do_data_tag_p = do_data_tag;
}
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
@@ -1948,7 +2010,7 @@ void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
if (req && !mq->qcnt)
/* claim host only for the first request */
- mmc_get_card(card);
+ mmc_get_card(card, NULL);
ret = mmc_blk_part_switch(card, md->part_type);
if (ret) {
@@ -2011,7 +2073,7 @@ void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
out:
if (!mq->qcnt)
- mmc_put_card(card);
+ mmc_put_card(card, NULL);
}
static inline int mmc_blk_readonly(struct mmc_card *card)
@@ -2068,6 +2130,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
spin_lock_init(&md->lock);
INIT_LIST_HEAD(&md->part);
+ INIT_LIST_HEAD(&md->rpmbs);
md->usage = 1;
ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
@@ -2186,6 +2249,158 @@ static int mmc_blk_alloc_part(struct mmc_card *card,
return 0;
}
+/**
+ * mmc_rpmb_ioctl() - ioctl handler for the RPMB chardev
+ * @filp: the character device file
+ * @cmd: the ioctl() command
+ * @arg: the argument from userspace
+ *
+ * This essentially just redirects the ioctl()s coming in to the main
+ * block device that spawned the RPMB character device.
+ */
+static long mmc_rpmb_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct mmc_rpmb_data *rpmb = filp->private_data;
+ int ret;
+
+ switch (cmd) {
+ case MMC_IOC_CMD:
+ ret = mmc_blk_ioctl_cmd(rpmb->md,
+ (struct mmc_ioc_cmd __user *)arg,
+ rpmb);
+ break;
+ case MMC_IOC_MULTI_CMD:
+ ret = mmc_blk_ioctl_multi_cmd(rpmb->md,
+ (struct mmc_ioc_multi_cmd __user *)arg,
+ rpmb);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+	return ret;
+}
+
+#ifdef CONFIG_COMPAT
+static long mmc_rpmb_ioctl_compat(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ return mmc_rpmb_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
+static int mmc_rpmb_chrdev_open(struct inode *inode, struct file *filp)
+{
+ struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev,
+ struct mmc_rpmb_data, chrdev);
+
+ get_device(&rpmb->dev);
+ filp->private_data = rpmb;
+ mmc_blk_get(rpmb->md->disk);
+
+ return nonseekable_open(inode, filp);
+}
+
+static int mmc_rpmb_chrdev_release(struct inode *inode, struct file *filp)
+{
+ struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev,
+ struct mmc_rpmb_data, chrdev);
+
+ put_device(&rpmb->dev);
+ mmc_blk_put(rpmb->md);
+
+ return 0;
+}
+
+static const struct file_operations mmc_rpmb_fileops = {
+ .release = mmc_rpmb_chrdev_release,
+ .open = mmc_rpmb_chrdev_open,
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .unlocked_ioctl = mmc_rpmb_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = mmc_rpmb_ioctl_compat,
+#endif
+};
+
+static void mmc_blk_rpmb_device_release(struct device *dev)
+{
+ struct mmc_rpmb_data *rpmb = dev_get_drvdata(dev);
+
+ ida_simple_remove(&mmc_rpmb_ida, rpmb->id);
+ kfree(rpmb);
+}
+
+static int mmc_blk_alloc_rpmb_part(struct mmc_card *card,
+ struct mmc_blk_data *md,
+ unsigned int part_index,
+ sector_t size,
+ const char *subname)
+{
+ int devidx, ret;
+ char rpmb_name[DISK_NAME_LEN];
+ char cap_str[10];
+ struct mmc_rpmb_data *rpmb;
+
+ /* This creates the minor number for the RPMB char device */
+ devidx = ida_simple_get(&mmc_rpmb_ida, 0, max_devices, GFP_KERNEL);
+ if (devidx < 0)
+ return devidx;
+
+ rpmb = kzalloc(sizeof(*rpmb), GFP_KERNEL);
+ if (!rpmb) {
+ ida_simple_remove(&mmc_rpmb_ida, devidx);
+ return -ENOMEM;
+ }
+
+ snprintf(rpmb_name, sizeof(rpmb_name),
+ "mmcblk%u%s", card->host->index, subname ? subname : "");
+
+ rpmb->id = devidx;
+ rpmb->part_index = part_index;
+ rpmb->dev.init_name = rpmb_name;
+ rpmb->dev.bus = &mmc_rpmb_bus_type;
+ rpmb->dev.devt = MKDEV(MAJOR(mmc_rpmb_devt), rpmb->id);
+ rpmb->dev.parent = &card->dev;
+ rpmb->dev.release = mmc_blk_rpmb_device_release;
+ device_initialize(&rpmb->dev);
+ dev_set_drvdata(&rpmb->dev, rpmb);
+ rpmb->md = md;
+
+ cdev_init(&rpmb->chrdev, &mmc_rpmb_fileops);
+ rpmb->chrdev.owner = THIS_MODULE;
+ ret = cdev_device_add(&rpmb->chrdev, &rpmb->dev);
+ if (ret) {
+ pr_err("%s: could not add character device\n", rpmb_name);
+ goto out_put_device;
+ }
+
+ list_add(&rpmb->node, &md->rpmbs);
+
+ string_get_size((u64)size, 512, STRING_UNITS_2,
+ cap_str, sizeof(cap_str));
+
+ pr_info("%s: %s %s partition %u %s, chardev (%d:%d)\n",
+ rpmb_name, mmc_card_id(card),
+ mmc_card_name(card), EXT_CSD_PART_CONFIG_ACC_RPMB, cap_str,
+ MAJOR(mmc_rpmb_devt), rpmb->id);
+
+ return 0;
+
+out_put_device:
+ put_device(&rpmb->dev);
+ return ret;
+}
+
+static void mmc_blk_remove_rpmb_part(struct mmc_rpmb_data *rpmb)
+{
+ cdev_device_del(&rpmb->chrdev, &rpmb->dev);
+ put_device(&rpmb->dev);
+}
+
/* MMC Physical partitions consist of two boot partitions and
* up to four general purpose partitions.
 * For each partition enabled in EXT_CSD a block device will be allocated
@@ -2194,13 +2409,26 @@ static int mmc_blk_alloc_part(struct mmc_card *card,
static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
{
- int idx, ret = 0;
+ int idx, ret;
if (!mmc_card_mmc(card))
return 0;
for (idx = 0; idx < card->nr_parts; idx++) {
- if (card->part[idx].size) {
+ if (card->part[idx].area_type & MMC_BLK_DATA_AREA_RPMB) {
+ /*
+			 * RPMB partitions do not provide block access; they
+			 * are only accessed using ioctl()s. Thus create
+ * special RPMB block devices that do not have a
+ * backing block queue for these.
+ */
+ ret = mmc_blk_alloc_rpmb_part(card, md,
+ card->part[idx].part_cfg,
+ card->part[idx].size >> 9,
+ card->part[idx].name);
+ if (ret)
+ return ret;
+ } else if (card->part[idx].size) {
ret = mmc_blk_alloc_part(card, md,
card->part[idx].part_cfg,
card->part[idx].size >> 9,
@@ -2212,7 +2440,7 @@ static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
}
}
- return ret;
+ return 0;
}
static void mmc_blk_remove_req(struct mmc_blk_data *md)
@@ -2249,7 +2477,15 @@ static void mmc_blk_remove_parts(struct mmc_card *card,
{
struct list_head *pos, *q;
struct mmc_blk_data *part_md;
+ struct mmc_rpmb_data *rpmb;
+ /* Remove RPMB partitions */
+ list_for_each_safe(pos, q, &md->rpmbs) {
+ rpmb = list_entry(pos, struct mmc_rpmb_data, node);
+ list_del(pos);
+ mmc_blk_remove_rpmb_part(rpmb);
+ }
+ /* Remove block partitions */
list_for_each_safe(pos, q, &md->part) {
part_md = list_entry(pos, struct mmc_blk_data, part);
list_del(pos);
@@ -2568,6 +2804,17 @@ static int __init mmc_blk_init(void)
{
int res;
+ res = bus_register(&mmc_rpmb_bus_type);
+ if (res < 0) {
+ pr_err("mmcblk: could not register RPMB bus type\n");
+ return res;
+ }
+ res = alloc_chrdev_region(&mmc_rpmb_devt, 0, MAX_DEVICES, "rpmb");
+ if (res < 0) {
+ pr_err("mmcblk: failed to allocate rpmb chrdev region\n");
+ goto out_bus_unreg;
+ }
+
if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
pr_info("mmcblk: using %d minors per device\n", perdev_minors);
@@ -2575,16 +2822,20 @@ static int __init mmc_blk_init(void)
res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
if (res)
- goto out;
+ goto out_chrdev_unreg;
res = mmc_register_driver(&mmc_driver);
if (res)
- goto out2;
+ goto out_blkdev_unreg;
return 0;
- out2:
+
+out_blkdev_unreg:
unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
- out:
+out_chrdev_unreg:
+ unregister_chrdev_region(mmc_rpmb_devt, MAX_DEVICES);
+out_bus_unreg:
+ bus_unregister(&mmc_rpmb_bus_type);
return res;
}
@@ -2592,6 +2843,7 @@ static void __exit mmc_blk_exit(void)
{
mmc_unregister_driver(&mmc_driver);
unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
+ unregister_chrdev_region(mmc_rpmb_devt, MAX_DEVICES);
}
module_init(mmc_blk_init);
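
The new character device accepts the same MMC_IOC_CMD / MMC_IOC_MULTI_CMD ioctl()s as the block device, but mmc_rpmb_ioctl() tags them so __mmc_blk_ioctl_cmd() switches to the RPMB partition. A rough userspace sketch of exercising that path with a bare CMD13 follows; the /dev/mmcblk0rpmb node name, the RCA value and the mirrored MMC_RSP_* constants are assumptions (the response-flag macros are not exported through the uapi header), and real RPMB traffic would instead carry 512-byte RPMB frames via write_flag/blocks.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/mmc/ioctl.h>

/* Mirrored from the kernel's mmc core; not part of the uapi headers. */
#define MMC_RSP_PRESENT	(1 << 0)
#define MMC_RSP_CRC	(1 << 2)
#define MMC_RSP_OPCODE	(1 << 4)
#define MMC_RSP_R1	(MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE)
#define MMC_SEND_STATUS	13

int main(void)
{
	struct mmc_ioc_cmd idata;
	int fd = open("/dev/mmcblk0rpmb", O_RDWR);	/* node name is an assumption */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&idata, 0, sizeof(idata));
	idata.opcode = MMC_SEND_STATUS;		/* CMD13 */
	idata.arg = 1 << 16;			/* RCA assumed to be 1 */
	idata.flags = MMC_RSP_R1;

	if (ioctl(fd, MMC_IOC_CMD, &idata) < 0)
		perror("MMC_IOC_CMD");
	else
		printf("card status 0x%08x\n", idata.response[0]);

	close(fd);
	return 0;
}
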
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 301246513a37..a4b49e25fe96 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -369,10 +369,17 @@ int mmc_add_card(struct mmc_card *card)
*/
void mmc_remove_card(struct mmc_card *card)
{
+ struct mmc_host *host = card->host;
+
#ifdef CONFIG_DEBUG_FS
mmc_remove_card_debugfs(card);
#endif
+ if (host->cqe_enabled) {
+ host->cqe_ops->cqe_disable(host);
+ host->cqe_enabled = false;
+ }
+
if (mmc_card_present(card)) {
if (mmc_host_is_spi(card->host)) {
pr_info("%s: SPI card removed\n",
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 66c9cf49ad2f..1f0f44f4dd5f 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -266,7 +266,8 @@ static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
host->ops->request(host, mrq);
}
-static void mmc_mrq_pr_debug(struct mmc_host *host, struct mmc_request *mrq)
+static void mmc_mrq_pr_debug(struct mmc_host *host, struct mmc_request *mrq,
+ bool cqe)
{
if (mrq->sbc) {
pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
@@ -275,9 +276,12 @@ static void mmc_mrq_pr_debug(struct mmc_host *host, struct mmc_request *mrq)
}
if (mrq->cmd) {
- pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
- mmc_hostname(host), mrq->cmd->opcode, mrq->cmd->arg,
- mrq->cmd->flags);
+ pr_debug("%s: starting %sCMD%u arg %08x flags %08x\n",
+ mmc_hostname(host), cqe ? "CQE direct " : "",
+ mrq->cmd->opcode, mrq->cmd->arg, mrq->cmd->flags);
+ } else if (cqe) {
+ pr_debug("%s: starting CQE transfer for tag %d blkaddr %u\n",
+ mmc_hostname(host), mrq->tag, mrq->data->blk_addr);
}
if (mrq->data) {
@@ -333,7 +337,7 @@ static int mmc_mrq_prep(struct mmc_host *host, struct mmc_request *mrq)
return 0;
}
-static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
+int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
int err;
@@ -342,7 +346,7 @@ static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
if (mmc_card_removed(host->card))
return -ENOMEDIUM;
- mmc_mrq_pr_debug(host, mrq);
+ mmc_mrq_pr_debug(host, mrq, false);
WARN_ON(!host->claimed);
@@ -355,6 +359,7 @@ static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
return 0;
}
+EXPORT_SYMBOL(mmc_start_request);
/*
* mmc_wait_data_done() - done callback for data request
@@ -482,6 +487,155 @@ void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq)
}
EXPORT_SYMBOL(mmc_wait_for_req_done);
+/*
+ * mmc_cqe_start_req - Start a CQE request.
+ * @host: MMC host to start the request
+ * @mrq: request to start
+ *
+ * Start the request, re-tuning first if needed and possible. Returns an error
+ * code if the request fails to start or -EBUSY if CQE is busy.
+ */
+int mmc_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq)
+{
+ int err;
+
+ /*
+ * CQE cannot process re-tuning commands. Caller must hold retuning
+ * while CQE is in use. Re-tuning can happen here only when CQE has no
+ * active requests i.e. this is the first. Note, re-tuning will call
+ * ->cqe_off().
+ */
+ err = mmc_retune(host);
+ if (err)
+ goto out_err;
+
+ mrq->host = host;
+
+ mmc_mrq_pr_debug(host, mrq, true);
+
+ err = mmc_mrq_prep(host, mrq);
+ if (err)
+ goto out_err;
+
+ err = host->cqe_ops->cqe_request(host, mrq);
+ if (err)
+ goto out_err;
+
+ trace_mmc_request_start(host, mrq);
+
+ return 0;
+
+out_err:
+ if (mrq->cmd) {
+ pr_debug("%s: failed to start CQE direct CMD%u, error %d\n",
+ mmc_hostname(host), mrq->cmd->opcode, err);
+ } else {
+ pr_debug("%s: failed to start CQE transfer for tag %d, error %d\n",
+ mmc_hostname(host), mrq->tag, err);
+ }
+ return err;
+}
+EXPORT_SYMBOL(mmc_cqe_start_req);
+
+/**
+ * mmc_cqe_request_done - CQE has finished processing an MMC request
+ * @host: MMC host which completed request
+ * @mrq: MMC request which completed
+ *
+ * CQE drivers should call this function when they have completed
+ * their processing of a request.
+ */
+void mmc_cqe_request_done(struct mmc_host *host, struct mmc_request *mrq)
+{
+ mmc_should_fail_request(host, mrq);
+
+ /* Flag re-tuning needed on CRC errors */
+ if ((mrq->cmd && mrq->cmd->error == -EILSEQ) ||
+ (mrq->data && mrq->data->error == -EILSEQ))
+ mmc_retune_needed(host);
+
+ trace_mmc_request_done(host, mrq);
+
+ if (mrq->cmd) {
+ pr_debug("%s: CQE req done (direct CMD%u): %d\n",
+ mmc_hostname(host), mrq->cmd->opcode, mrq->cmd->error);
+ } else {
+ pr_debug("%s: CQE transfer done tag %d\n",
+ mmc_hostname(host), mrq->tag);
+ }
+
+ if (mrq->data) {
+ pr_debug("%s: %d bytes transferred: %d\n",
+ mmc_hostname(host),
+ mrq->data->bytes_xfered, mrq->data->error);
+ }
+
+ mrq->done(mrq);
+}
+EXPORT_SYMBOL(mmc_cqe_request_done);
+
+/**
+ * mmc_cqe_post_req - CQE post process of a completed MMC request
+ * @host: MMC host
+ * @mrq: MMC request to be processed
+ */
+void mmc_cqe_post_req(struct mmc_host *host, struct mmc_request *mrq)
+{
+ if (host->cqe_ops->cqe_post_req)
+ host->cqe_ops->cqe_post_req(host, mrq);
+}
+EXPORT_SYMBOL(mmc_cqe_post_req);
+
+/* Arbitrary 1 second timeout */
+#define MMC_CQE_RECOVERY_TIMEOUT 1000
+
+/*
+ * mmc_cqe_recovery - Recover from CQE errors.
+ * @host: MMC host to recover
+ *
+ * Recovery consists of stopping CQE, stopping eMMC, discarding the queue
+ * in eMMC, and discarding the queue in CQE. CQE must call
+ * mmc_cqe_request_done() on all requests. An error is returned if the eMMC
+ * fails to discard its queue.
+ */
+int mmc_cqe_recovery(struct mmc_host *host)
+{
+ struct mmc_command cmd;
+ int err;
+
+ mmc_retune_hold_now(host);
+
+ /*
+ * Recovery is expected seldom, if at all, but it reduces performance,
+ * so make sure it is not completely silent.
+ */
+ pr_warn("%s: running CQE recovery\n", mmc_hostname(host));
+
+ host->cqe_ops->cqe_recovery_start(host);
+
+ memset(&cmd, 0, sizeof(cmd));
+	cmd.opcode = MMC_STOP_TRANSMISSION;
+	cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
+	cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */
+	cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
+ mmc_wait_for_cmd(host, &cmd, 0);
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.opcode = MMC_CMDQ_TASK_MGMT;
+ cmd.arg = 1; /* Discard entire queue */
+ cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
+ cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */
+	cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
+ err = mmc_wait_for_cmd(host, &cmd, 0);
+
+ host->cqe_ops->cqe_recovery_finish(host);
+
+ mmc_retune_release(host);
+
+ return err;
+}
+EXPORT_SYMBOL(mmc_cqe_recovery);
+
/**
* mmc_is_req_done - Determine if a 'cap_cmd_during_tfr' request is done
* @host: MMC host
@@ -832,9 +986,36 @@ unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
}
EXPORT_SYMBOL(mmc_align_data_size);
+/*
+ * Allow claiming an already claimed host if the context is the same or there is
+ * no context but the task is the same.
+ */
+static inline bool mmc_ctx_matches(struct mmc_host *host, struct mmc_ctx *ctx,
+ struct task_struct *task)
+{
+ return host->claimer == ctx ||
+ (!ctx && task && host->claimer->task == task);
+}
+
+static inline void mmc_ctx_set_claimer(struct mmc_host *host,
+ struct mmc_ctx *ctx,
+ struct task_struct *task)
+{
+ if (!host->claimer) {
+ if (ctx)
+ host->claimer = ctx;
+ else
+ host->claimer = &host->default_ctx;
+ }
+ if (task)
+ host->claimer->task = task;
+}
+
/**
* __mmc_claim_host - exclusively claim a host
* @host: mmc host to claim
+ * @ctx: context that claims the host or NULL in which case the default
+ * context will be used
* @abort: whether or not the operation should be aborted
*
* Claim a host for a set of operations. If @abort is non null and
@@ -842,8 +1023,10 @@ EXPORT_SYMBOL(mmc_align_data_size);
* that non-zero value without acquiring the lock. Returns zero
* with the lock held otherwise.
*/
-int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
+int __mmc_claim_host(struct mmc_host *host, struct mmc_ctx *ctx,
+ atomic_t *abort)
{
+ struct task_struct *task = ctx ? NULL : current;
DECLARE_WAITQUEUE(wait, current);
unsigned long flags;
int stop;
@@ -856,7 +1039,7 @@ int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
while (1) {
set_current_state(TASK_UNINTERRUPTIBLE);
stop = abort ? atomic_read(abort) : 0;
- if (stop || !host->claimed || host->claimer == current)
+ if (stop || !host->claimed || mmc_ctx_matches(host, ctx, task))
break;
spin_unlock_irqrestore(&host->lock, flags);
schedule();
@@ -865,7 +1048,7 @@ int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
set_current_state(TASK_RUNNING);
if (!stop) {
host->claimed = 1;
- host->claimer = current;
+ mmc_ctx_set_claimer(host, ctx, task);
host->claim_cnt += 1;
if (host->claim_cnt == 1)
pm = true;
@@ -900,6 +1083,7 @@ void mmc_release_host(struct mmc_host *host)
spin_unlock_irqrestore(&host->lock, flags);
} else {
host->claimed = 0;
+ host->claimer->task = NULL;
host->claimer = NULL;
spin_unlock_irqrestore(&host->lock, flags);
wake_up(&host->wq);
@@ -913,10 +1097,10 @@ EXPORT_SYMBOL(mmc_release_host);
* This is a helper function, which fetches a runtime pm reference for the
* card device and also claims the host.
*/
-void mmc_get_card(struct mmc_card *card)
+void mmc_get_card(struct mmc_card *card, struct mmc_ctx *ctx)
{
pm_runtime_get_sync(&card->dev);
- mmc_claim_host(card->host);
+ __mmc_claim_host(card->host, ctx, NULL);
}
EXPORT_SYMBOL(mmc_get_card);
@@ -924,9 +1108,13 @@ EXPORT_SYMBOL(mmc_get_card);
* This is a helper function, which releases the host and drops the runtime
* pm reference for the card device.
*/
-void mmc_put_card(struct mmc_card *card)
+void mmc_put_card(struct mmc_card *card, struct mmc_ctx *ctx)
{
- mmc_release_host(card->host);
+ struct mmc_host *host = card->host;
+
+ WARN_ON(ctx && host->claimer != ctx);
+
+ mmc_release_host(host);
pm_runtime_mark_last_busy(&card->dev);
pm_runtime_put_autosuspend(&card->dev);
}
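
mmc_get_card()/mmc_put_card() now take an optional mmc_ctx, so the host can be claimed on behalf of a context (for example a block queue) rather than a single task, and any thread presenting the same context re-enters the claim instead of blocking in __mmc_claim_host(). A hedged sketch of a caller using the pair; everything except the mmc_* symbols is invented, struct mmc_ctx is the type introduced by this series, and the helpers live in the core-internal drivers/mmc/core/core.h.

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "core.h"		/* core-internal: mmc_get_card()/mmc_put_card() */

struct demo_queue {
	struct mmc_card	*card;
	struct mmc_ctx	ctx;	/* one context shared by all queue threads */
};

static void demo_issue(struct demo_queue *dq)
{
	/* Runtime-resume the card and claim the host for this context. */
	mmc_get_card(dq->card, &dq->ctx);

	/*
	 * ... issue requests; another thread passing &dq->ctx matches
	 * the claimer and is allowed in while we hold the claim ...
	 */

	/* Release the host and drop the runtime PM reference. */
	mmc_put_card(dq->card, &dq->ctx);
}
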
@@ -1400,6 +1588,16 @@ EXPORT_SYMBOL_GPL(mmc_regulator_set_vqmmc);
#endif /* CONFIG_REGULATOR */
+/**
+ * mmc_regulator_get_supply - try to get VMMC and VQMMC regulators for a host
+ * @mmc: the host to regulate
+ *
+ * Returns 0 or a negative errno. The errno should be handled; it is either a
+ * critical error or -EPROBE_DEFER. 0 means no critical error, but it does not
+ * mean all regulators have been found, because they are all optional. If you
+ * require certain regulators, you need to check separately in your driver
+ * whether they got populated after calling this function.
+ */
int mmc_regulator_get_supply(struct mmc_host *mmc)
{
struct device *dev = mmc_dev(mmc);
@@ -1484,11 +1682,33 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
}
+int mmc_host_set_uhs_voltage(struct mmc_host *host)
+{
+ u32 clock;
+
+ /*
+ * During a signal voltage level switch, the clock must be gated
+ * for 5 ms according to the SD spec
+ */
+ clock = host->ios.clock;
+ host->ios.clock = 0;
+ mmc_set_ios(host);
+
+ if (mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180))
+ return -EAGAIN;
+
+ /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
+ mmc_delay(10);
+ host->ios.clock = clock;
+ mmc_set_ios(host);
+
+ return 0;
+}
+
int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr)
{
struct mmc_command cmd = {};
int err = 0;
- u32 clock;
/*
* If we cannot switch voltages, return failure so the caller
@@ -1520,15 +1740,8 @@ int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr)
err = -EAGAIN;
goto power_cycle;
}
- /*
- * During a signal voltage level switch, the clock must be gated
- * for 5 ms according to the SD spec
- */
- clock = host->ios.clock;
- host->ios.clock = 0;
- mmc_set_ios(host);
- if (mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180)) {
+ if (mmc_host_set_uhs_voltage(host)) {
/*
* Voltages may not have been switched, but we've already
* sent CMD11, so a power cycle is required anyway
@@ -1537,11 +1750,6 @@ int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr)
goto power_cycle;
}
- /* Keep clock gated for at least 10 ms, though spec only says 5 ms */
- mmc_delay(10);
- host->ios.clock = clock;
- mmc_set_ios(host);
-
/* Wait for at least 1 ms according to spec */
mmc_delay(1);
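
For a host driver, the new CQE entry points form a simple contract: the core submits an mrq through host->cqe_ops->cqe_request(), and the driver calls mmc_cqe_request_done() once the hardware finishes, after which the core flags re-tuning on CRC errors and invokes mrq->done(). A skeletal sketch of that hand-off follows; all demo_* names are invented and the actual descriptor programming is elided.

#include <linux/mmc/host.h>

/* Completion path of a hypothetical CQE host driver. */
static void demo_cqe_irq_done(struct mmc_host *host, struct mmc_request *mrq)
{
	/* Errors are assumed to have been filled into mrq->cmd/mrq->data. */
	mmc_cqe_request_done(host, mrq);	/* core logs, flags re-tuning, calls mrq->done() */
}

static int demo_cqe_request(struct mmc_host *host, struct mmc_request *mrq)
{
	/*
	 * Program a descriptor for mrq->tag / mrq->data->blk_addr here.
	 * Returning an error makes mmc_cqe_start_req() fail the request.
	 */
	return 0;
}

static const struct mmc_cqe_ops demo_cqe_ops = {
	.cqe_request	= demo_cqe_request,
	/* .cqe_enable, .cqe_disable, .cqe_post_req, recovery hooks, ... */
};
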
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index ca861091a776..71e6c6d7ceb7 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -49,6 +49,7 @@ void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode);
void mmc_set_bus_width(struct mmc_host *host, unsigned int width);
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr);
int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr);
+int mmc_host_set_uhs_voltage(struct mmc_host *host);
int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage);
void mmc_set_timing(struct mmc_host *host, unsigned int timing);
void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type);
@@ -107,6 +108,8 @@ static inline void mmc_unregister_pm_notifier(struct mmc_host *host) { }
void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq);
bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq);
+int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq);
+
struct mmc_async_req;
struct mmc_async_req *mmc_start_areq(struct mmc_host *host,
@@ -128,10 +131,11 @@ int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen);
int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
bool is_rel_write);
-int __mmc_claim_host(struct mmc_host *host, atomic_t *abort);
+int __mmc_claim_host(struct mmc_host *host, struct mmc_ctx *ctx,
+ atomic_t *abort);
void mmc_release_host(struct mmc_host *host);
-void mmc_get_card(struct mmc_card *card);
-void mmc_put_card(struct mmc_card *card);
+void mmc_get_card(struct mmc_card *card, struct mmc_ctx *ctx);
+void mmc_put_card(struct mmc_card *card, struct mmc_ctx *ctx);
/**
* mmc_claim_host - exclusively claim a host
@@ -141,7 +145,11 @@ void mmc_put_card(struct mmc_card *card);
*/
static inline void mmc_claim_host(struct mmc_host *host)
{
- __mmc_claim_host(host, NULL);
+ __mmc_claim_host(host, NULL, NULL);
}
+int mmc_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq);
+void mmc_cqe_post_req(struct mmc_host *host, struct mmc_request *mrq);
+int mmc_cqe_recovery(struct mmc_host *host);
+
#endif
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index ad88deb2e8f3..35a9e4fd1a9f 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -111,12 +111,6 @@ void mmc_retune_hold(struct mmc_host *host)
host->hold_retune += 1;
}
-void mmc_retune_hold_now(struct mmc_host *host)
-{
- host->retune_now = 0;
- host->hold_retune += 1;
-}
-
void mmc_retune_release(struct mmc_host *host)
{
if (host->hold_retune)
@@ -124,6 +118,7 @@ void mmc_retune_release(struct mmc_host *host)
else
WARN_ON(1);
}
+EXPORT_SYMBOL(mmc_retune_release);
int mmc_retune(struct mmc_host *host)
{
@@ -184,7 +179,7 @@ static void mmc_retune_timer(unsigned long data)
int mmc_of_parse(struct mmc_host *host)
{
struct device *dev = host->parent;
- u32 bus_width;
+ u32 bus_width, drv_type;
int ret;
bool cd_cap_invert, cd_gpio_invert = false;
bool ro_cap_invert, ro_gpio_invert = false;
@@ -326,6 +321,15 @@ int mmc_of_parse(struct mmc_host *host)
if (device_property_read_bool(dev, "no-mmc"))
host->caps2 |= MMC_CAP2_NO_MMC;
+ /* Must be after "non-removable" check */
+ if (device_property_read_u32(dev, "fixed-emmc-driver-type", &drv_type) == 0) {
+ if (host->caps & MMC_CAP_NONREMOVABLE)
+ host->fixed_drv_type = drv_type;
+ else
+ dev_err(host->parent,
+ "can't use fixed driver type, media is removable\n");
+ }
+
host->dsr_req = !device_property_read_u32(dev, "dsr", &host->dsr);
if (host->dsr_req && (host->dsr & ~0xffff)) {
dev_err(host->parent,
@@ -398,6 +402,8 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
host->max_blk_size = 512;
host->max_blk_count = PAGE_SIZE / 512;
+ host->fixed_drv_type = -EINVAL;
+
return host;
}
diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h
index 77d6f60d1bf9..fb689a1065ed 100644
--- a/drivers/mmc/core/host.h
+++ b/drivers/mmc/core/host.h
@@ -19,12 +19,17 @@ void mmc_unregister_host_class(void);
void mmc_retune_enable(struct mmc_host *host);
void mmc_retune_disable(struct mmc_host *host);
void mmc_retune_hold(struct mmc_host *host);
-void mmc_retune_hold_now(struct mmc_host *host);
void mmc_retune_release(struct mmc_host *host);
int mmc_retune(struct mmc_host *host);
void mmc_retune_pause(struct mmc_host *host);
void mmc_retune_unpause(struct mmc_host *host);
+static inline void mmc_retune_hold_now(struct mmc_host *host)
+{
+ host->retune_now = 0;
+ host->hold_retune += 1;
+}
+
static inline void mmc_retune_recheck(struct mmc_host *host)
{
if (host->hold_retune <= 1)
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 36217ad5e9b1..a552f61060d2 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -780,6 +780,7 @@ MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
MMC_DEV_ATTR(prv, "0x%x\n", card->cid.prv);
+MMC_DEV_ATTR(rev, "0x%x\n", card->ext_csd.rev);
MMC_DEV_ATTR(pre_eol_info, "%02x\n", card->ext_csd.pre_eol_info);
MMC_DEV_ATTR(life_time, "0x%02x 0x%02x\n",
card->ext_csd.device_life_time_est_typ_a,
@@ -838,6 +839,7 @@ static struct attribute *mmc_std_attrs[] = {
&dev_attr_name.attr,
&dev_attr_oemid.attr,
&dev_attr_prv.attr,
+ &dev_attr_rev.attr,
&dev_attr_pre_eol_info.attr,
&dev_attr_life_time.attr,
&dev_attr_serial.attr,
@@ -1289,13 +1291,18 @@ out_err:
static void mmc_select_driver_type(struct mmc_card *card)
{
int card_drv_type, drive_strength, drv_type;
+ int fixed_drv_type = card->host->fixed_drv_type;
card_drv_type = card->ext_csd.raw_driver_strength |
mmc_driver_type_mask(0);
- drive_strength = mmc_select_drive_strength(card,
- card->ext_csd.hs200_max_dtr,
- card_drv_type, &drv_type);
+ if (fixed_drv_type >= 0)
+ drive_strength = card_drv_type & mmc_driver_type_mask(fixed_drv_type)
+ ? fixed_drv_type : 0;
+ else
+ drive_strength = mmc_select_drive_strength(card,
+ card->ext_csd.hs200_max_dtr,
+ card_drv_type, &drv_type);
card->drive_strength = drive_strength;
@@ -1786,12 +1793,41 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
}
/*
+ * Enable Command Queue if supported. Note that Packed Commands cannot
+ * be used with Command Queue.
+ */
+ card->ext_csd.cmdq_en = false;
+ if (card->ext_csd.cmdq_support && host->caps2 & MMC_CAP2_CQE) {
+ err = mmc_cmdq_enable(card);
+ if (err && err != -EBADMSG)
+ goto free_card;
+ if (err) {
+ pr_warn("%s: Enabling CMDQ failed\n",
+ mmc_hostname(card->host));
+ card->ext_csd.cmdq_support = false;
+ card->ext_csd.cmdq_depth = 0;
+ err = 0;
+ }
+ }
+ /*
* In some cases (e.g. RPMB or mmc_test), the Command Queue must be
* disabled for a time, so a flag is needed to indicate to re-enable the
* Command Queue.
*/
card->reenable_cmdq = card->ext_csd.cmdq_en;
+ if (card->ext_csd.cmdq_en && !host->cqe_enabled) {
+ err = host->cqe_ops->cqe_enable(host, card);
+ if (err) {
+ pr_err("%s: Failed to enable CQE, error %d\n",
+ mmc_hostname(host), err);
+ } else {
+ host->cqe_enabled = true;
+ pr_info("%s: Command Queue Engine enabled\n",
+ mmc_hostname(host));
+ }
+ }
+
if (!oldcard)
host->card = card;
@@ -1911,14 +1947,14 @@ static void mmc_detect(struct mmc_host *host)
{
int err;
- mmc_get_card(host->card);
+ mmc_get_card(host->card, NULL);
/*
* Just check if our card has been removed.
*/
err = _mmc_detect_card_removed(host);
- mmc_put_card(host->card);
+ mmc_put_card(host->card, NULL);
if (err) {
mmc_remove(host);
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 54686ca4bfb7..908e4db03535 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -977,7 +977,6 @@ void mmc_start_bkops(struct mmc_card *card, bool from_exception)
from_exception)
return;
- mmc_claim_host(card->host);
if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) {
timeout = MMC_OPS_TIMEOUT_MS;
use_busy_signal = true;
@@ -995,7 +994,7 @@ void mmc_start_bkops(struct mmc_card *card, bool from_exception)
pr_warn("%s: Error %d starting bkops\n",
mmc_hostname(card->host), err);
mmc_retune_release(card->host);
- goto out;
+ return;
}
/*
@@ -1007,9 +1006,8 @@ void mmc_start_bkops(struct mmc_card *card, bool from_exception)
mmc_card_set_doing_bkops(card);
else
mmc_retune_release(card->host);
-out:
- mmc_release_host(card->host);
}
+EXPORT_SYMBOL(mmc_start_bkops);
/*
* Flush the cache to the non-volatile storage.
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 0a4e77a5ba33..4f33d277b125 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -30,7 +30,7 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
{
struct mmc_queue *mq = q->queuedata;
- if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
+ if (mq && mmc_card_removed(mq->card))
return BLKPREP_KILL;
req->rq_flags |= RQF_DONTPREP;
@@ -177,6 +177,29 @@ static void mmc_exit_request(struct request_queue *q, struct request *req)
mq_rq->sg = NULL;
}
+static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ u64 limit = BLK_BOUNCE_HIGH;
+
+ if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
+ limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
+
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
+ queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
+ if (mmc_can_erase(card))
+ mmc_queue_setup_discard(mq->queue, card);
+
+ blk_queue_bounce_limit(mq->queue, limit);
+ blk_queue_max_hw_sectors(mq->queue,
+ min(host->max_blk_count, host->max_req_size / 512));
+ blk_queue_max_segments(mq->queue, host->max_segs);
+ blk_queue_max_segment_size(mq->queue, host->max_seg_size);
+
+ /* Initialize thread_sem even if it is not used */
+ sema_init(&mq->thread_sem, 1);
+}
+
/**
* mmc_init_queue - initialise a queue structure.
* @mq: mmc queue
@@ -190,12 +213,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
spinlock_t *lock, const char *subname)
{
struct mmc_host *host = card->host;
- u64 limit = BLK_BOUNCE_HIGH;
int ret = -ENOMEM;
- if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
- limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
-
mq->card = card;
mq->queue = blk_alloc_queue(GFP_KERNEL);
if (!mq->queue)
@@ -214,18 +233,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
}
blk_queue_prep_rq(mq->queue, mmc_prep_request);
- queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
- queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
- if (mmc_can_erase(card))
- mmc_queue_setup_discard(mq->queue, card);
- blk_queue_bounce_limit(mq->queue, limit);
- blk_queue_max_hw_sectors(mq->queue,
- min(host->max_blk_count, host->max_req_size / 512));
- blk_queue_max_segments(mq->queue, host->max_segs);
- blk_queue_max_segment_size(mq->queue, host->max_seg_size);
-
- sema_init(&mq->thread_sem, 1);
+ mmc_setup_queue(mq, card);
mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
host->index, subname ? subname : "");
diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h
index 6bfba32ffa66..547b457c4251 100644
--- a/drivers/mmc/core/queue.h
+++ b/drivers/mmc/core/queue.h
@@ -36,12 +36,14 @@ struct mmc_blk_request {
/**
* enum mmc_drv_op - enumerates the operations in the mmc_queue_req
* @MMC_DRV_OP_IOCTL: ioctl operation
+ * @MMC_DRV_OP_IOCTL_RPMB: RPMB-oriented ioctl operation
* @MMC_DRV_OP_BOOT_WP: write protect boot partitions
* @MMC_DRV_OP_GET_CARD_STATUS: get card status
* @MMC_DRV_OP_GET_EXT_CSD: get the EXT CSD from an eMMC card
*/
enum mmc_drv_op {
MMC_DRV_OP_IOCTL,
+ MMC_DRV_OP_IOCTL_RPMB,
MMC_DRV_OP_BOOT_WP,
MMC_DRV_OP_GET_CARD_STATUS,
MMC_DRV_OP_GET_EXT_CSD,
@@ -82,6 +84,4 @@ extern void mmc_queue_resume(struct mmc_queue *);
extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
struct mmc_queue_req *);
-extern int mmc_access_rpmb(struct mmc_queue *);
-
#endif
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 4fd1620b732d..45bf78f32716 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -908,6 +908,18 @@ unsigned mmc_sd_get_max_clock(struct mmc_card *card)
return max_dtr;
}
+static bool mmc_sd_card_using_v18(struct mmc_card *card)
+{
+ /*
+ * According to the SD spec., the Bus Speed Mode (function group 1) bits
+ * 2 to 4 are zero if the card is initialized at 3.3V signal level. Thus
+ * they can be used to determine if the card has already switched to
+ * 1.8V signaling.
+ */
+ return card->sw_caps.sd3_bus_mode &
+ (SD_MODE_UHS_SDR50 | SD_MODE_UHS_SDR104 | SD_MODE_UHS_DDR50);
+}
+
/*
* Handle the detection and initialisation of a card.
*
@@ -921,9 +933,10 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
int err;
u32 cid[4];
u32 rocr = 0;
+ bool v18_fixup_failed = false;
WARN_ON(!host->claimed);
-
+retry:
err = mmc_sd_get_cid(host, ocr, cid, &rocr);
if (err)
return err;
@@ -989,6 +1002,36 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
if (err)
goto free_card;
+ /*
+ * If the card has not been power cycled, it may still be using 1.8V
+ * signaling. Detect that situation and try to initialize a UHS-I (1.8V)
+ * transfer mode.
+ */
+ if (!v18_fixup_failed && !mmc_host_is_spi(host) && mmc_host_uhs(host) &&
+ mmc_sd_card_using_v18(card) &&
+ host->ios.signal_voltage != MMC_SIGNAL_VOLTAGE_180) {
+ /*
+ * Re-read switch information in case it has changed since
+ * oldcard was initialized.
+ */
+ if (oldcard) {
+ err = mmc_read_switch(card);
+ if (err)
+ goto free_card;
+ }
+ if (mmc_sd_card_using_v18(card)) {
+ if (mmc_host_set_uhs_voltage(host) ||
+ mmc_sd_init_uhs_card(card)) {
+ v18_fixup_failed = true;
+ mmc_power_cycle(host, ocr);
+ if (!oldcard)
+ mmc_remove_card(card);
+ goto retry;
+ }
+ goto done;
+ }
+ }
+
/* Initialization sequence for UHS-I cards */
if (rocr & SD_ROCR_S18A) {
err = mmc_sd_init_uhs_card(card);
@@ -1021,7 +1064,7 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
}
}
-
+done:
host->card = card;
return 0;
@@ -1056,14 +1099,14 @@ static void mmc_sd_detect(struct mmc_host *host)
{
int err;
- mmc_get_card(host->card);
+ mmc_get_card(host->card, NULL);
/*
* Just check if our card has been removed.
*/
err = _mmc_detect_card_removed(host);
- mmc_put_card(host->card);
+ mmc_put_card(host->card, NULL);
if (err) {
mmc_sd_remove(host);
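
The mmc_sd_init_card() hunk above adds a retry around the whole initialization
for cards that were not power cycled and therefore may still be signaling at
1.8V. A minimal stand-alone sketch of that control flow, with stub helpers in
place of the real mmc core calls (all helper names below are illustrative only,
not kernel API):

#include <stdbool.h>

static bool switch_host_to_1v8(void) { return false; }	/* stub: false = ok */
static bool init_uhs_card(void)      { return false; }	/* stub: false = ok */
static void power_cycle_card(void)   { }		/* stub */

static int sd_init_card(bool card_reports_uhs, bool host_at_3v3)
{
        bool v18_fixup_failed = false;

retry:
        /* ... CID/CSD/switch-capability initialization happens here ... */
        if (!v18_fixup_failed && card_reports_uhs && host_at_3v3) {
                if (switch_host_to_1v8() || init_uhs_card()) {
                        /* card stuck at 1.8V: power cycle and start over */
                        v18_fixup_failed = true;
                        power_cycle_card();
                        goto retry;
                }
                return 0;	/* UHS-I init finished, skip the 3.3V path */
        }
        /* ... regular 3.3V (or S18A-based UHS) initialization path ... */
        return 0;
}
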
diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
index c771843e4c15..7a2eaf8410a3 100644
--- a/drivers/mmc/core/sdio_irq.c
+++ b/drivers/mmc/core/sdio_irq.c
@@ -155,7 +155,8 @@ static int sdio_irq_thread(void *_host)
* holding of the host lock does not cover too much work
* that doesn't require that lock to be held.
*/
- ret = __mmc_claim_host(host, &host->sdio_irq_thread_abort);
+ ret = __mmc_claim_host(host, NULL,
+ &host->sdio_irq_thread_abort);
if (ret)
break;
ret = process_sdio_pending_irqs(host);
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 8c15637178ff..567028c9219a 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -352,6 +352,19 @@ config MMC_MESON_GX
If you have a controller with this interface, say Y here.
+config MMC_MESON_MX_SDIO
+ tristate "Amlogic Meson6/Meson8/Meson8b SD/MMC Host Controller support"
+ depends on ARCH_MESON || COMPILE_TEST
+ depends on COMMON_CLK
+ depends on HAS_DMA
+ depends on OF
+ help
+ This selects support for the SD/MMC Host Controller on
+ Amlogic Meson6, Meson8 and Meson8b SoCs.
+
+ If you have a controller with this interface, say Y or M here.
+ If unsure, say N.
+
config MMC_MOXART
tristate "MOXART SD/MMC Host Controller support"
depends on ARCH_MOXART && MMC
@@ -429,6 +442,7 @@ config MMC_SDHCI_MSM
tristate "Qualcomm SDHCI Controller Support"
depends on ARCH_QCOM || (ARM && COMPILE_TEST)
depends on MMC_SDHCI_PLTFM
+ select MMC_SDHCI_IO_ACCESSORS
help
This selects the Secure Digital Host Controller Interface (SDHCI)
support present in Qualcomm SOCs. The controller supports
@@ -663,7 +677,7 @@ config MMC_CAVIUM_OCTEON
config MMC_CAVIUM_THUNDERX
tristate "Cavium ThunderX SD/MMC Card Interface support"
depends on PCI && 64BIT && (ARM64 || COMPILE_TEST)
- depends on GPIOLIB
+ depends on GPIO_THUNDERX
depends on OF_ADDRESS
help
This selects Cavium ThunderX SD/MMC Card Interface.
@@ -899,3 +913,15 @@ config MMC_SDHCI_XENON
This selects Marvell Xenon eMMC/SD/SDIO SDHCI.
If you have a controller with this interface, say Y or M here.
If unsure, say N.
+
+config MMC_SDHCI_OMAP
+ tristate "TI SDHCI Controller Support"
+ depends on MMC_SDHCI_PLTFM && OF
+ help
+ This selects the Secure Digital Host Controller Interface (SDHCI)
+ support present in TI's DRA7 SOCs. The controller supports
+ SD/MMC/SDIO devices.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 7c7b29ff591a..a43cf0d5a5d3 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -65,6 +65,7 @@ obj-$(CONFIG_MMC_VUB300) += vub300.o
obj-$(CONFIG_MMC_USHC) += ushc.o
obj-$(CONFIG_MMC_WMT) += wmt-sdmmc.o
obj-$(CONFIG_MMC_MESON_GX) += meson-gx-mmc.o
+obj-$(CONFIG_MMC_MESON_MX_SDIO) += meson-mx-sdio.o
obj-$(CONFIG_MMC_MOXART) += moxart-mmc.o
obj-$(CONFIG_MMC_SUNXI) += sunxi-mmc.o
obj-$(CONFIG_MMC_USDHI6ROL0) += usdhi6rol0.o
@@ -90,6 +91,7 @@ obj-$(CONFIG_MMC_SDHCI_MSM) += sdhci-msm.o
obj-$(CONFIG_MMC_SDHCI_ST) += sdhci-st.o
obj-$(CONFIG_MMC_SDHCI_MICROCHIP_PIC32) += sdhci-pic32.o
obj-$(CONFIG_MMC_SDHCI_BRCMSTB) += sdhci-brcmstb.o
+obj-$(CONFIG_MMC_SDHCI_OMAP) += sdhci-omap.o
ifeq ($(CONFIG_CB710_DEBUG),y)
CFLAGS-cb710-mmc += -DDEBUG
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 0a0ebf3a096d..e55f3932d580 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -732,11 +732,11 @@ static inline unsigned int atmci_convert_chksize(struct atmel_mci *host,
return 0;
}
-static void atmci_timeout_timer(unsigned long data)
+static void atmci_timeout_timer(struct timer_list *t)
{
struct atmel_mci *host;
- host = (struct atmel_mci *)data;
+ host = from_timer(host, t, timer);
dev_dbg(&host->pdev->dev, "software timeout\n");
@@ -1661,9 +1661,9 @@ static void atmci_command_complete(struct atmel_mci *host,
cmd->error = 0;
}
-static void atmci_detect_change(unsigned long data)
+static void atmci_detect_change(struct timer_list *t)
{
- struct atmel_mci_slot *slot = (struct atmel_mci_slot *)data;
+ struct atmel_mci_slot *slot = from_timer(slot, t, detect_timer);
bool present;
bool present_old;
@@ -2349,8 +2349,7 @@ static int atmci_init_slot(struct atmel_mci *host,
if (gpio_is_valid(slot->detect_pin)) {
int ret;
- setup_timer(&slot->detect_timer, atmci_detect_change,
- (unsigned long)slot);
+ timer_setup(&slot->detect_timer, atmci_detect_change, 0);
ret = request_irq(gpio_to_irq(slot->detect_pin),
atmci_detect_interrupt,
@@ -2563,7 +2562,7 @@ static int atmci_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, host);
- setup_timer(&host->timer, atmci_timeout_timer, (unsigned long)host);
+ timer_setup(&host->timer, atmci_timeout_timer, 0);
pm_runtime_get_noresume(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
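
The setup_timer() to timer_setup()/from_timer() conversion applied to atmel-mci
here repeats in dw_mmc, jz4740, mvsdio and mxcmmc further down. The generic
shape of the change, sketched with a purely illustrative struct foo rather than
any of the real host structures:

#include <linux/timer.h>

struct foo {
        struct timer_list timer;
        int pending;
};

/*
 * Old API: the callback received an opaque unsigned long:
 *	setup_timer(&foo->timer, foo_timeout, (unsigned long)foo);
 * New API: the callback receives the timer itself and derives its container:
 */
static void foo_timeout(struct timer_list *t)
{
        struct foo *foo = from_timer(foo, t, timer);

        foo->pending = 0;
}

static void foo_init(struct foo *foo)
{
        timer_setup(&foo->timer, foo_timeout, 0);
}
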
diff --git a/drivers/mmc/host/cavium.c b/drivers/mmc/host/cavium.c
index fbd29f00fca0..ed5cefb83768 100644
--- a/drivers/mmc/host/cavium.c
+++ b/drivers/mmc/host/cavium.c
@@ -967,7 +967,7 @@ static int cvm_mmc_of_parse(struct device *dev, struct cvm_mmc_slot *slot)
}
ret = mmc_regulator_get_supply(mmc);
- if (ret == -EPROBE_DEFER)
+ if (ret)
return ret;
/*
* Legacy Octeon firmware has no regulator entry, fall-back to
diff --git a/drivers/mmc/host/dw_mmc-k3.c b/drivers/mmc/host/dw_mmc-k3.c
index 64cda84b2302..73fd75c3c824 100644
--- a/drivers/mmc/host/dw_mmc-k3.c
+++ b/drivers/mmc/host/dw_mmc-k3.c
@@ -75,7 +75,7 @@ struct hs_timing {
u32 smpl_phase_min;
};
-struct hs_timing hs_timing_cfg[TIMING_MODE][TIMING_CFG_NUM] = {
+static struct hs_timing hs_timing_cfg[TIMING_MODE][TIMING_CFG_NUM] = {
{ /* reserved */ },
{ /* SD */
{7, 0, 15, 15,}, /* 0: LEGACY 400k */
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 4f2806720c5c..0aa39975f33b 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -817,7 +817,7 @@ static int dw_mci_edmac_start_dma(struct dw_mci *host,
struct dma_slave_config cfg;
struct dma_async_tx_descriptor *desc = NULL;
struct scatterlist *sgl = host->data->sg;
- const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
+ static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
u32 sg_elems = host->data->sg_len;
u32 fifoth_val;
u32 fifo_offset = host->fifo_reg - host->regs;
@@ -1024,7 +1024,7 @@ static int dw_mci_get_cd(struct mmc_host *mmc)
static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
unsigned int blksz = data->blksz;
- const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
+ static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
u32 fifo_width = 1 << host->data_shift;
u32 blksz_depth = blksz / fifo_width, fifoth_val;
u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
@@ -1938,6 +1938,7 @@ static void dw_mci_set_drto(struct dw_mci *host)
unsigned int drto_clks;
unsigned int drto_div;
unsigned int drto_ms;
+ unsigned long irqflags;
drto_clks = mci_readl(host, TMOUT) >> 8;
drto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
@@ -1949,7 +1950,11 @@ static void dw_mci_set_drto(struct dw_mci *host)
/* add a bit spare time */
drto_ms += 10;
- mod_timer(&host->dto_timer, jiffies + msecs_to_jiffies(drto_ms));
+ spin_lock_irqsave(&host->irq_lock, irqflags);
+ if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events))
+ mod_timer(&host->dto_timer,
+ jiffies + msecs_to_jiffies(drto_ms));
+ spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
static bool dw_mci_clear_pending_cmd_complete(struct dw_mci *host)
@@ -1970,6 +1975,18 @@ static bool dw_mci_clear_pending_cmd_complete(struct dw_mci *host)
return true;
}
+static bool dw_mci_clear_pending_data_complete(struct dw_mci *host)
+{
+ if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events))
+ return false;
+
+ /* Extra paranoia just like dw_mci_clear_pending_cmd_complete() */
+ WARN_ON(del_timer_sync(&host->dto_timer));
+ clear_bit(EVENT_DATA_COMPLETE, &host->pending_events);
+
+ return true;
+}
+
static void dw_mci_tasklet_func(unsigned long priv)
{
struct dw_mci *host = (struct dw_mci *)priv;
@@ -2111,8 +2128,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
/* fall through */
case STATE_DATA_BUSY:
- if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
- &host->pending_events)) {
+ if (!dw_mci_clear_pending_data_complete(host)) {
/*
* If data error interrupt comes but data over
* interrupt doesn't come within the given time.
@@ -2682,6 +2698,8 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
}
if (pending & SDMMC_INT_DATA_OVER) {
+ spin_lock_irqsave(&host->irq_lock, irqflags);
+
del_timer(&host->dto_timer);
mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
@@ -2694,6 +2712,8 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
}
set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
tasklet_schedule(&host->tasklet);
+
+ spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
if (pending & SDMMC_INT_RXDR) {
@@ -2791,7 +2811,7 @@ static int dw_mci_init_slot(struct dw_mci *host)
/*if there are external regulators, get them*/
ret = mmc_regulator_get_supply(mmc);
- if (ret == -EPROBE_DEFER)
+ if (ret)
goto err_host_allocated;
if (!mmc->ocr_avail)
@@ -2971,9 +2991,9 @@ no_dma:
host->use_dma = TRANS_MODE_PIO;
}
-static void dw_mci_cmd11_timer(unsigned long arg)
+static void dw_mci_cmd11_timer(struct timer_list *t)
{
- struct dw_mci *host = (struct dw_mci *)arg;
+ struct dw_mci *host = from_timer(host, t, cmd11_timer);
if (host->state != STATE_SENDING_CMD11) {
dev_warn(host->dev, "Unexpected CMD11 timeout\n");
@@ -2985,9 +3005,9 @@ static void dw_mci_cmd11_timer(unsigned long arg)
tasklet_schedule(&host->tasklet);
}
-static void dw_mci_cto_timer(unsigned long arg)
+static void dw_mci_cto_timer(struct timer_list *t)
{
- struct dw_mci *host = (struct dw_mci *)arg;
+ struct dw_mci *host = from_timer(host, t, cto_timer);
unsigned long irqflags;
u32 pending;
@@ -3040,10 +3060,34 @@ exit:
spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
-static void dw_mci_dto_timer(unsigned long arg)
+static void dw_mci_dto_timer(struct timer_list *t)
{
- struct dw_mci *host = (struct dw_mci *)arg;
+ struct dw_mci *host = from_timer(host, t, dto_timer);
+ unsigned long irqflags;
+ u32 pending;
+
+ spin_lock_irqsave(&host->irq_lock, irqflags);
+ /*
+ * The DTO timer is much longer than the CTO timer, so it's even less
+ * likely that we'll hit these cases, but it pays to be paranoid.
+ */
+ pending = mci_readl(host, MINTSTS); /* read-only mask reg */
+ if (pending & SDMMC_INT_DATA_OVER) {
+ /* The interrupt should fire; no need to act but we can warn */
+ dev_warn(host->dev, "Unexpected data interrupt latency\n");
+ goto exit;
+ }
+ if (test_bit(EVENT_DATA_COMPLETE, &host->pending_events)) {
+ /* Presumably interrupt handler couldn't delete the timer */
+ dev_warn(host->dev, "DTO timeout when already completed\n");
+ goto exit;
+ }
+
+ /*
+ * Continued paranoia to make sure we're in the state we expect.
+ * This paranoia isn't really justified but it seems good to be safe.
+ */
switch (host->state) {
case STATE_SENDING_DATA:
case STATE_DATA_BUSY:
@@ -3058,8 +3102,13 @@ static void dw_mci_dto_timer(unsigned long arg)
tasklet_schedule(&host->tasklet);
break;
default:
+ dev_warn(host->dev, "Unexpected data timeout, state %d\n",
+ host->state);
break;
}
+
+exit:
+ spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
#ifdef CONFIG_OF
@@ -3208,14 +3257,9 @@ int dw_mci_probe(struct dw_mci *host)
}
}
- setup_timer(&host->cmd11_timer,
- dw_mci_cmd11_timer, (unsigned long)host);
-
- setup_timer(&host->cto_timer,
- dw_mci_cto_timer, (unsigned long)host);
-
- setup_timer(&host->dto_timer,
- dw_mci_dto_timer, (unsigned long)host);
+ timer_setup(&host->cmd11_timer, dw_mci_cmd11_timer, 0);
+ timer_setup(&host->cto_timer, dw_mci_cto_timer, 0);
+ timer_setup(&host->dto_timer, dw_mci_dto_timer, 0);
spin_lock_init(&host->lock);
spin_lock_init(&host->irq_lock);
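
The dw_mmc changes above close a race between the DATA_OVER interrupt and the
data timeout timer by consulting EVENT_DATA_COMPLETE under irq_lock on both
sides. Reduced to its essentials (a sketch of the locking pattern only, which
assumes the driver's internal struct dw_mci and event definitions are in scope;
it is not the full driver logic):

#include <linux/jiffies.h>
#include "dw_mmc.h"

/* arming side (cf. dw_mci_set_drto): never (re)arm once the interrupt won */
static void sketch_set_drto(struct dw_mci *host, unsigned int drto_ms)
{
        unsigned long flags;

        spin_lock_irqsave(&host->irq_lock, flags);
        if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events))
                mod_timer(&host->dto_timer,
                          jiffies + msecs_to_jiffies(drto_ms));
        spin_unlock_irqrestore(&host->irq_lock, flags);
}

/* interrupt side: delete the timer and record completion under the same lock */
static void sketch_data_over(struct dw_mci *host)
{
        unsigned long flags;

        spin_lock_irqsave(&host->irq_lock, flags);
        del_timer(&host->dto_timer);
        set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
        spin_unlock_irqrestore(&host->irq_lock, flags);
}
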
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index 34474ad731aa..e3124f06a47e 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -74,7 +74,8 @@ struct dw_mci_dma_slave {
* @stop_abort: The command currently prepared for stopping the transfer.
* @prev_blksz: The former transfer blksz record.
* @timing: Record of current ios timing.
- * @use_dma: Whether DMA channel is initialized or not.
+ * @use_dma: Which DMA channel is in use for the current transfer; zero
+ * denotes PIO mode.
* @using_dma: Whether DMA is in use for the current transfer.
* @dma_64bit_address: Whether DMA supports 64-bit address mode or not.
* @sg_dma: Bus address of DMA buffer.
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index 7db8c7a8d38d..712e08d9a45e 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -586,9 +586,9 @@ poll_timeout:
return true;
}
-static void jz4740_mmc_timeout(unsigned long data)
+static void jz4740_mmc_timeout(struct timer_list *t)
{
- struct jz4740_mmc_host *host = (struct jz4740_mmc_host *)data;
+ struct jz4740_mmc_host *host = from_timer(host, t, timeout_timer);
if (!test_and_clear_bit(0, &host->waiting))
return;
@@ -1036,8 +1036,7 @@ static int jz4740_mmc_probe(struct platform_device* pdev)
jz4740_mmc_reset(host);
jz4740_mmc_clock_disable(host);
- setup_timer(&host->timeout_timer, jz4740_mmc_timeout,
- (unsigned long)host);
+ timer_setup(&host->timeout_timer, jz4740_mmc_timeout, 0);
host->use_dma = true;
if (host->use_dma && jz4740_mmc_acquire_dma_channels(host) != 0)
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index 85745ef179e2..e0862d3f65b3 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -1190,7 +1190,7 @@ static int meson_mmc_probe(struct platform_device *pdev)
/* Get regulators and the supported OCR mask */
host->vqmmc_enabled = false;
ret = mmc_regulator_get_supply(mmc);
- if (ret == -EPROBE_DEFER)
+ if (ret)
goto free_host;
ret = mmc_of_parse(mmc);
diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c
new file mode 100644
index 000000000000..09cb89645d06
--- /dev/null
+++ b/drivers/mmc/host/meson-mx-sdio.c
@@ -0,0 +1,768 @@
+/*
+ * meson-mx-sdio.c - Meson6, Meson8 and Meson8b SDIO/MMC Host Controller
+ *
+ * Copyright (C) 2015 Endless Mobile, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com>
+ * Copyright (C) 2017 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/slot-gpio.h>
+
+#define MESON_MX_SDIO_ARGU 0x00
+
+#define MESON_MX_SDIO_SEND 0x04
+ #define MESON_MX_SDIO_SEND_COMMAND_INDEX_MASK GENMASK(7, 0)
+ #define MESON_MX_SDIO_SEND_CMD_RESP_BITS_MASK GENMASK(15, 8)
+ #define MESON_MX_SDIO_SEND_RESP_WITHOUT_CRC7 BIT(16)
+ #define MESON_MX_SDIO_SEND_RESP_HAS_DATA BIT(17)
+ #define MESON_MX_SDIO_SEND_RESP_CRC7_FROM_8 BIT(18)
+ #define MESON_MX_SDIO_SEND_CHECK_DAT0_BUSY BIT(19)
+ #define MESON_MX_SDIO_SEND_DATA BIT(20)
+ #define MESON_MX_SDIO_SEND_USE_INT_WINDOW BIT(21)
+ #define MESON_MX_SDIO_SEND_REPEAT_PACKAGE_TIMES_MASK GENMASK(31, 24)
+
+#define MESON_MX_SDIO_CONF 0x08
+ #define MESON_MX_SDIO_CONF_CMD_CLK_DIV_SHIFT 0
+ #define MESON_MX_SDIO_CONF_CMD_CLK_DIV_WIDTH 10
+ #define MESON_MX_SDIO_CONF_CMD_DISABLE_CRC BIT(10)
+ #define MESON_MX_SDIO_CONF_CMD_OUT_AT_POSITIVE_EDGE BIT(11)
+ #define MESON_MX_SDIO_CONF_CMD_ARGUMENT_BITS_MASK GENMASK(17, 12)
+ #define MESON_MX_SDIO_CONF_RESP_LATCH_AT_NEGATIVE_EDGE BIT(18)
+ #define MESON_MX_SDIO_CONF_DATA_LATCH_AT_NEGATIVE_EDGE BIT(19)
+ #define MESON_MX_SDIO_CONF_BUS_WIDTH BIT(20)
+ #define MESON_MX_SDIO_CONF_M_ENDIAN_MASK GENMASK(22, 21)
+ #define MESON_MX_SDIO_CONF_WRITE_NWR_MASK GENMASK(28, 23)
+ #define MESON_MX_SDIO_CONF_WRITE_CRC_OK_STATUS_MASK GENMASK(31, 29)
+
+#define MESON_MX_SDIO_IRQS 0x0c
+ #define MESON_MX_SDIO_IRQS_STATUS_STATE_MACHINE_MASK GENMASK(3, 0)
+ #define MESON_MX_SDIO_IRQS_CMD_BUSY BIT(4)
+ #define MESON_MX_SDIO_IRQS_RESP_CRC7_OK BIT(5)
+ #define MESON_MX_SDIO_IRQS_DATA_READ_CRC16_OK BIT(6)
+ #define MESON_MX_SDIO_IRQS_DATA_WRITE_CRC16_OK BIT(7)
+ #define MESON_MX_SDIO_IRQS_IF_INT BIT(8)
+ #define MESON_MX_SDIO_IRQS_CMD_INT BIT(9)
+ #define MESON_MX_SDIO_IRQS_STATUS_INFO_MASK GENMASK(15, 12)
+ #define MESON_MX_SDIO_IRQS_TIMING_OUT_INT BIT(16)
+ #define MESON_MX_SDIO_IRQS_AMRISC_TIMING_OUT_INT_EN BIT(17)
+ #define MESON_MX_SDIO_IRQS_ARC_TIMING_OUT_INT_EN BIT(18)
+ #define MESON_MX_SDIO_IRQS_TIMING_OUT_COUNT_MASK GENMASK(31, 19)
+
+#define MESON_MX_SDIO_IRQC 0x10
+ #define MESON_MX_SDIO_IRQC_ARC_IF_INT_EN BIT(3)
+ #define MESON_MX_SDIO_IRQC_ARC_CMD_INT_EN BIT(4)
+ #define MESON_MX_SDIO_IRQC_IF_CONFIG_MASK GENMASK(7, 6)
+ #define MESON_MX_SDIO_IRQC_FORCE_DATA_CLK BIT(8)
+ #define MESON_MX_SDIO_IRQC_FORCE_DATA_CMD BIT(9)
+ #define MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK GENMASK(13, 10)
+ #define MESON_MX_SDIO_IRQC_SOFT_RESET BIT(15)
+ #define MESON_MX_SDIO_IRQC_FORCE_HALT BIT(30)
+ #define MESON_MX_SDIO_IRQC_HALT_HOLE BIT(31)
+
+#define MESON_MX_SDIO_MULT 0x14
+ #define MESON_MX_SDIO_MULT_PORT_SEL_MASK GENMASK(1, 0)
+ #define MESON_MX_SDIO_MULT_MEMORY_STICK_ENABLE BIT(2)
+ #define MESON_MX_SDIO_MULT_MEMORY_STICK_SCLK_ALWAYS BIT(3)
+ #define MESON_MX_SDIO_MULT_STREAM_ENABLE BIT(4)
+ #define MESON_MX_SDIO_MULT_STREAM_8BITS_MODE BIT(5)
+ #define MESON_MX_SDIO_MULT_WR_RD_OUT_INDEX BIT(8)
+ #define MESON_MX_SDIO_MULT_DAT0_DAT1_SWAPPED BIT(10)
+ #define MESON_MX_SDIO_MULT_DAT1_DAT0_SWAPPED BIT(11)
+ #define MESON_MX_SDIO_MULT_RESP_READ_INDEX_MASK GENMASK(15, 12)
+
+#define MESON_MX_SDIO_ADDR 0x18
+
+#define MESON_MX_SDIO_EXT 0x1c
+ #define MESON_MX_SDIO_EXT_DATA_RW_NUMBER_MASK GENMASK(29, 16)
+
+#define MESON_MX_SDIO_BOUNCE_REQ_SIZE (128 * 1024)
+#define MESON_MX_SDIO_RESPONSE_CRC16_BITS (16 - 1)
+#define MESON_MX_SDIO_MAX_SLOTS 3
+
+struct meson_mx_mmc_host {
+ struct device *controller_dev;
+
+ struct clk *parent_clk;
+ struct clk *core_clk;
+ struct clk_divider cfg_div;
+ struct clk *cfg_div_clk;
+ struct clk_fixed_factor fixed_factor;
+ struct clk *fixed_factor_clk;
+
+ void __iomem *base;
+ int irq;
+ spinlock_t irq_lock;
+
+ struct timer_list cmd_timeout;
+
+ unsigned int slot_id;
+ struct mmc_host *mmc;
+
+ struct mmc_request *mrq;
+ struct mmc_command *cmd;
+ int error;
+};
+
+static void meson_mx_mmc_mask_bits(struct mmc_host *mmc, char reg, u32 mask,
+ u32 val)
+{
+ struct meson_mx_mmc_host *host = mmc_priv(mmc);
+ u32 regval;
+
+ regval = readl(host->base + reg);
+ regval &= ~mask;
+ regval |= (val & mask);
+
+ writel(regval, host->base + reg);
+}
+
+static void meson_mx_mmc_soft_reset(struct meson_mx_mmc_host *host)
+{
+ writel(MESON_MX_SDIO_IRQC_SOFT_RESET, host->base + MESON_MX_SDIO_IRQC);
+ udelay(2);
+}
+
+static struct mmc_command *meson_mx_mmc_get_next_cmd(struct mmc_command *cmd)
+{
+ if (cmd->opcode == MMC_SET_BLOCK_COUNT && !cmd->error)
+ return cmd->mrq->cmd;
+ else if (mmc_op_multi(cmd->opcode) &&
+ (!cmd->mrq->sbc || cmd->error || cmd->data->error))
+ return cmd->mrq->stop;
+ else
+ return NULL;
+}
+
+static void meson_mx_mmc_start_cmd(struct mmc_host *mmc,
+ struct mmc_command *cmd)
+{
+ struct meson_mx_mmc_host *host = mmc_priv(mmc);
+ unsigned int pack_size;
+ unsigned long irqflags, timeout;
+ u32 mult, send = 0, ext = 0;
+
+ host->cmd = cmd;
+
+ if (cmd->busy_timeout)
+ timeout = msecs_to_jiffies(cmd->busy_timeout);
+ else
+ timeout = msecs_to_jiffies(1000);
+
+ switch (mmc_resp_type(cmd)) {
+ case MMC_RSP_R1:
+ case MMC_RSP_R1B:
+ case MMC_RSP_R3:
+ /* 7 (CMD) + 32 (response) + 7 (CRC) -1 */
+ send |= FIELD_PREP(MESON_MX_SDIO_SEND_CMD_RESP_BITS_MASK, 45);
+ break;
+ case MMC_RSP_R2:
+ /* 7 (CMD) + 120 (response) + 7 (CRC) -1 */
+ send |= FIELD_PREP(MESON_MX_SDIO_SEND_CMD_RESP_BITS_MASK, 133);
+ send |= MESON_MX_SDIO_SEND_RESP_CRC7_FROM_8;
+ break;
+ default:
+ break;
+ }
+
+ if (!(cmd->flags & MMC_RSP_CRC))
+ send |= MESON_MX_SDIO_SEND_RESP_WITHOUT_CRC7;
+
+ if (cmd->flags & MMC_RSP_BUSY)
+ send |= MESON_MX_SDIO_SEND_CHECK_DAT0_BUSY;
+
+ if (cmd->data) {
+ send |= FIELD_PREP(MESON_MX_SDIO_SEND_REPEAT_PACKAGE_TIMES_MASK,
+ (cmd->data->blocks - 1));
+
+ pack_size = cmd->data->blksz * BITS_PER_BYTE;
+ if (mmc->ios.bus_width == MMC_BUS_WIDTH_4)
+ pack_size += MESON_MX_SDIO_RESPONSE_CRC16_BITS * 4;
+ else
+ pack_size += MESON_MX_SDIO_RESPONSE_CRC16_BITS * 1;
+
+ ext |= FIELD_PREP(MESON_MX_SDIO_EXT_DATA_RW_NUMBER_MASK,
+ pack_size);
+
+ if (cmd->data->flags & MMC_DATA_WRITE)
+ send |= MESON_MX_SDIO_SEND_DATA;
+ else
+ send |= MESON_MX_SDIO_SEND_RESP_HAS_DATA;
+
+ cmd->data->bytes_xfered = 0;
+ }
+
+ send |= FIELD_PREP(MESON_MX_SDIO_SEND_COMMAND_INDEX_MASK,
+ (0x40 | cmd->opcode));
+
+ spin_lock_irqsave(&host->irq_lock, irqflags);
+
+ mult = readl(host->base + MESON_MX_SDIO_MULT);
+ mult &= ~MESON_MX_SDIO_MULT_PORT_SEL_MASK;
+ mult |= FIELD_PREP(MESON_MX_SDIO_MULT_PORT_SEL_MASK, host->slot_id);
+ mult |= BIT(31);
+ writel(mult, host->base + MESON_MX_SDIO_MULT);
+
+ /* enable the CMD done interrupt */
+ meson_mx_mmc_mask_bits(mmc, MESON_MX_SDIO_IRQC,
+ MESON_MX_SDIO_IRQC_ARC_CMD_INT_EN,
+ MESON_MX_SDIO_IRQC_ARC_CMD_INT_EN);
+
+ /* clear pending interrupts */
+ meson_mx_mmc_mask_bits(mmc, MESON_MX_SDIO_IRQS,
+ MESON_MX_SDIO_IRQS_CMD_INT,
+ MESON_MX_SDIO_IRQS_CMD_INT);
+
+ writel(cmd->arg, host->base + MESON_MX_SDIO_ARGU);
+ writel(ext, host->base + MESON_MX_SDIO_EXT);
+ writel(send, host->base + MESON_MX_SDIO_SEND);
+
+ spin_unlock_irqrestore(&host->irq_lock, irqflags);
+
+ mod_timer(&host->cmd_timeout, jiffies + timeout);
+}
+
+static void meson_mx_mmc_request_done(struct meson_mx_mmc_host *host)
+{
+ struct mmc_request *mrq;
+
+ mrq = host->mrq;
+
+ host->mrq = NULL;
+ host->cmd = NULL;
+
+ mmc_request_done(host->mmc, mrq);
+}
+
+static void meson_mx_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct meson_mx_mmc_host *host = mmc_priv(mmc);
+ unsigned short vdd = ios->vdd;
+ unsigned long clk_rate = ios->clock;
+
+ switch (ios->bus_width) {
+ case MMC_BUS_WIDTH_1:
+ meson_mx_mmc_mask_bits(mmc, MESON_MX_SDIO_CONF,
+ MESON_MX_SDIO_CONF_BUS_WIDTH, 0);
+ break;
+
+ case MMC_BUS_WIDTH_4:
+ meson_mx_mmc_mask_bits(mmc, MESON_MX_SDIO_CONF,
+ MESON_MX_SDIO_CONF_BUS_WIDTH,
+ MESON_MX_SDIO_CONF_BUS_WIDTH);
+ break;
+
+ case MMC_BUS_WIDTH_8:
+ default:
+ dev_err(mmc_dev(mmc), "unsupported bus width: %d\n",
+ ios->bus_width);
+ host->error = -EINVAL;
+ return;
+ }
+
+ host->error = clk_set_rate(host->cfg_div_clk, ios->clock);
+ if (host->error) {
+ dev_warn(mmc_dev(mmc),
+ "failed to set MMC clock to %lu: %d\n",
+ clk_rate, host->error);
+ return;
+ }
+
+ mmc->actual_clock = clk_get_rate(host->cfg_div_clk);
+
+ switch (ios->power_mode) {
+ case MMC_POWER_OFF:
+ vdd = 0;
+ /* fall-through: */
+ case MMC_POWER_UP:
+ if (!IS_ERR(mmc->supply.vmmc)) {
+ host->error = mmc_regulator_set_ocr(mmc,
+ mmc->supply.vmmc,
+ vdd);
+ if (host->error)
+ return;
+ }
+ break;
+ }
+}
+
+static int meson_mx_mmc_map_dma(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct mmc_data *data = mrq->data;
+ int dma_len;
+ struct scatterlist *sg;
+
+ if (!data)
+ return 0;
+
+ sg = data->sg;
+ if (sg->offset & 3 || sg->length & 3) {
+ dev_err(mmc_dev(mmc),
+ "unaligned scatterlist: offset %x length %d\n",
+ sg->offset, sg->length);
+ return -EINVAL;
+ }
+
+ dma_len = dma_map_sg(mmc_dev(mmc), data->sg, data->sg_len,
+ mmc_get_dma_dir(data));
+ if (dma_len <= 0) {
+ dev_err(mmc_dev(mmc), "dma_map_sg failed\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void meson_mx_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct meson_mx_mmc_host *host = mmc_priv(mmc);
+ struct mmc_command *cmd = mrq->cmd;
+
+ if (!host->error)
+ host->error = meson_mx_mmc_map_dma(mmc, mrq);
+
+ if (host->error) {
+ cmd->error = host->error;
+ mmc_request_done(mmc, mrq);
+ return;
+ }
+
+ host->mrq = mrq;
+
+ if (mrq->data)
+ writel(sg_dma_address(mrq->data->sg),
+ host->base + MESON_MX_SDIO_ADDR);
+
+ if (mrq->sbc)
+ meson_mx_mmc_start_cmd(mmc, mrq->sbc);
+ else
+ meson_mx_mmc_start_cmd(mmc, mrq->cmd);
+}
+
+static int meson_mx_mmc_card_busy(struct mmc_host *mmc)
+{
+ struct meson_mx_mmc_host *host = mmc_priv(mmc);
+ u32 irqc = readl(host->base + MESON_MX_SDIO_IRQC);
+
+ return !!(irqc & MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK);
+}
+
+static void meson_mx_mmc_read_response(struct mmc_host *mmc,
+ struct mmc_command *cmd)
+{
+ struct meson_mx_mmc_host *host = mmc_priv(mmc);
+ u32 mult;
+ int i, resp[4];
+
+ mult = readl(host->base + MESON_MX_SDIO_MULT);
+ mult |= MESON_MX_SDIO_MULT_WR_RD_OUT_INDEX;
+ mult &= ~MESON_MX_SDIO_MULT_RESP_READ_INDEX_MASK;
+ mult |= FIELD_PREP(MESON_MX_SDIO_MULT_RESP_READ_INDEX_MASK, 0);
+ writel(mult, host->base + MESON_MX_SDIO_MULT);
+
+ if (cmd->flags & MMC_RSP_136) {
+ for (i = 0; i <= 3; i++)
+ resp[3 - i] = readl(host->base + MESON_MX_SDIO_ARGU);
+ cmd->resp[0] = (resp[0] << 8) | ((resp[1] >> 24) & 0xff);
+ cmd->resp[1] = (resp[1] << 8) | ((resp[2] >> 24) & 0xff);
+ cmd->resp[2] = (resp[2] << 8) | ((resp[3] >> 24) & 0xff);
+ cmd->resp[3] = (resp[3] << 8);
+ } else if (cmd->flags & MMC_RSP_PRESENT) {
+ cmd->resp[0] = readl(host->base + MESON_MX_SDIO_ARGU);
+ }
+}
+
+static irqreturn_t meson_mx_mmc_process_cmd_irq(struct meson_mx_mmc_host *host,
+ u32 irqs, u32 send)
+{
+ struct mmc_command *cmd = host->cmd;
+
+ /*
+ * NOTE: even though it shouldn't happen, we sometimes get command
+ * interrupts twice (at least this is what it looks like). Ideally
+ * we find out why this happens and warn here as soon as it occurs.
+ */
+ if (!cmd)
+ return IRQ_HANDLED;
+
+ cmd->error = 0;
+ meson_mx_mmc_read_response(host->mmc, cmd);
+
+ if (cmd->data) {
+ if (!((irqs & MESON_MX_SDIO_IRQS_DATA_READ_CRC16_OK) ||
+ (irqs & MESON_MX_SDIO_IRQS_DATA_WRITE_CRC16_OK)))
+ cmd->error = -EILSEQ;
+ } else {
+ if (!((irqs & MESON_MX_SDIO_IRQS_RESP_CRC7_OK) ||
+ (send & MESON_MX_SDIO_SEND_RESP_WITHOUT_CRC7)))
+ cmd->error = -EILSEQ;
+ }
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t meson_mx_mmc_irq(int irq, void *data)
+{
+ struct meson_mx_mmc_host *host = (void *) data;
+ u32 irqs, send;
+ unsigned long irqflags;
+ irqreturn_t ret;
+
+ spin_lock_irqsave(&host->irq_lock, irqflags);
+
+ irqs = readl(host->base + MESON_MX_SDIO_IRQS);
+ send = readl(host->base + MESON_MX_SDIO_SEND);
+
+ if (irqs & MESON_MX_SDIO_IRQS_CMD_INT)
+ ret = meson_mx_mmc_process_cmd_irq(host, irqs, send);
+ else
+ ret = IRQ_HANDLED;
+
+ /* finally ACK all pending interrupts */
+ writel(irqs, host->base + MESON_MX_SDIO_IRQS);
+
+ spin_unlock_irqrestore(&host->irq_lock, irqflags);
+
+ return ret;
+}
+
+static irqreturn_t meson_mx_mmc_irq_thread(int irq, void *irq_data)
+{
+ struct meson_mx_mmc_host *host = (void *) irq_data;
+ struct mmc_command *cmd = host->cmd, *next_cmd;
+
+ if (WARN_ON(!cmd))
+ return IRQ_HANDLED;
+
+ del_timer_sync(&host->cmd_timeout);
+
+ if (cmd->data) {
+ dma_unmap_sg(mmc_dev(host->mmc), cmd->data->sg,
+ cmd->data->sg_len,
+ mmc_get_dma_dir(cmd->data));
+
+ cmd->data->bytes_xfered = cmd->data->blksz * cmd->data->blocks;
+ }
+
+ next_cmd = meson_mx_mmc_get_next_cmd(cmd);
+ if (next_cmd)
+ meson_mx_mmc_start_cmd(host->mmc, next_cmd);
+ else
+ meson_mx_mmc_request_done(host);
+
+ return IRQ_HANDLED;
+}
+
+static void meson_mx_mmc_timeout(struct timer_list *t)
+{
+ struct meson_mx_mmc_host *host = from_timer(host, t, cmd_timeout);
+ unsigned long irqflags;
+ u32 irqc;
+
+ spin_lock_irqsave(&host->irq_lock, irqflags);
+
+ /* disable the CMD interrupt */
+ irqc = readl(host->base + MESON_MX_SDIO_IRQC);
+ irqc &= ~MESON_MX_SDIO_IRQC_ARC_CMD_INT_EN;
+ writel(irqc, host->base + MESON_MX_SDIO_IRQC);
+
+ spin_unlock_irqrestore(&host->irq_lock, irqflags);
+
+ /*
+ * skip the timeout handling if the interrupt handler already processed
+ * the command.
+ */
+ if (!host->cmd)
+ return;
+
+ dev_dbg(mmc_dev(host->mmc),
+ "Timeout on CMD%u (IRQS = 0x%08x, ARGU = 0x%08x)\n",
+ host->cmd->opcode, readl(host->base + MESON_MX_SDIO_IRQS),
+ readl(host->base + MESON_MX_SDIO_ARGU));
+
+ host->cmd->error = -ETIMEDOUT;
+
+ meson_mx_mmc_request_done(host);
+}
+
+static struct mmc_host_ops meson_mx_mmc_ops = {
+ .request = meson_mx_mmc_request,
+ .set_ios = meson_mx_mmc_set_ios,
+ .card_busy = meson_mx_mmc_card_busy,
+ .get_cd = mmc_gpio_get_cd,
+ .get_ro = mmc_gpio_get_ro,
+};
+
+static struct platform_device *meson_mx_mmc_slot_pdev(struct device *parent)
+{
+ struct device_node *slot_node;
+
+ /*
+ * TODO: the MMC core framework currently does not support
+ * controllers with multiple slots properly. So we only register
+ * the first slot for now
+ */
+ slot_node = of_find_compatible_node(parent->of_node, NULL, "mmc-slot");
+ if (!slot_node) {
+ dev_warn(parent, "no 'mmc-slot' sub-node found\n");
+ return ERR_PTR(-ENOENT);
+ }
+
+ return of_platform_device_create(slot_node, NULL, parent);
+}
+
+static int meson_mx_mmc_add_host(struct meson_mx_mmc_host *host)
+{
+ struct mmc_host *mmc = host->mmc;
+ struct device *slot_dev = mmc_dev(mmc);
+ int ret;
+
+ if (of_property_read_u32(slot_dev->of_node, "reg", &host->slot_id)) {
+ dev_err(slot_dev, "missing 'reg' property\n");
+ return -EINVAL;
+ }
+
+ if (host->slot_id >= MESON_MX_SDIO_MAX_SLOTS) {
+ dev_err(slot_dev, "invalid 'reg' property value %d\n",
+ host->slot_id);
+ return -EINVAL;
+ }
+
+ /* Get regulators and the supported OCR mask */
+ ret = mmc_regulator_get_supply(mmc);
+ if (ret)
+ return ret;
+
+ mmc->max_req_size = MESON_MX_SDIO_BOUNCE_REQ_SIZE;
+ mmc->max_seg_size = mmc->max_req_size;
+ mmc->max_blk_count =
+ FIELD_GET(MESON_MX_SDIO_SEND_REPEAT_PACKAGE_TIMES_MASK,
+ 0xffffffff);
+ mmc->max_blk_size = FIELD_GET(MESON_MX_SDIO_EXT_DATA_RW_NUMBER_MASK,
+ 0xffffffff);
+ mmc->max_blk_size -= (4 * MESON_MX_SDIO_RESPONSE_CRC16_BITS);
+ mmc->max_blk_size /= BITS_PER_BYTE;
+
+ /* Get the min and max supported clock rates */
+ mmc->f_min = clk_round_rate(host->cfg_div_clk, 1);
+ mmc->f_max = clk_round_rate(host->cfg_div_clk,
+ clk_get_rate(host->parent_clk));
+
+ mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23;
+ mmc->ops = &meson_mx_mmc_ops;
+
+ ret = mmc_of_parse(mmc);
+ if (ret)
+ return ret;
+
+ ret = mmc_add_host(mmc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int meson_mx_mmc_register_clks(struct meson_mx_mmc_host *host)
+{
+ struct clk_init_data init;
+ const char *clk_div_parent, *clk_fixed_factor_parent;
+
+ clk_fixed_factor_parent = __clk_get_name(host->parent_clk);
+ init.name = devm_kasprintf(host->controller_dev, GFP_KERNEL,
+ "%s#fixed_factor",
+ dev_name(host->controller_dev));
+ init.ops = &clk_fixed_factor_ops;
+ init.flags = 0;
+ init.parent_names = &clk_fixed_factor_parent;
+ init.num_parents = 1;
+ host->fixed_factor.div = 2;
+ host->fixed_factor.mult = 1;
+ host->fixed_factor.hw.init = &init;
+
+ host->fixed_factor_clk = devm_clk_register(host->controller_dev,
+ &host->fixed_factor.hw);
+ if (WARN_ON(IS_ERR(host->fixed_factor_clk)))
+ return PTR_ERR(host->fixed_factor_clk);
+
+ clk_div_parent = __clk_get_name(host->fixed_factor_clk);
+ init.name = devm_kasprintf(host->controller_dev, GFP_KERNEL,
+ "%s#div", dev_name(host->controller_dev));
+ init.ops = &clk_divider_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ init.parent_names = &clk_div_parent;
+ init.num_parents = 1;
+ host->cfg_div.reg = host->base + MESON_MX_SDIO_CONF;
+ host->cfg_div.shift = MESON_MX_SDIO_CONF_CMD_CLK_DIV_SHIFT;
+ host->cfg_div.width = MESON_MX_SDIO_CONF_CMD_CLK_DIV_WIDTH;
+ host->cfg_div.hw.init = &init;
+ host->cfg_div.flags = CLK_DIVIDER_ALLOW_ZERO;
+
+ host->cfg_div_clk = devm_clk_register(host->controller_dev,
+ &host->cfg_div.hw);
+ if (WARN_ON(IS_ERR(host->cfg_div_clk)))
+ return PTR_ERR(host->cfg_div_clk);
+
+ return 0;
+}
+
+static int meson_mx_mmc_probe(struct platform_device *pdev)
+{
+ struct platform_device *slot_pdev;
+ struct mmc_host *mmc;
+ struct meson_mx_mmc_host *host;
+ struct resource *res;
+ int ret, irq;
+ u32 conf;
+
+ slot_pdev = meson_mx_mmc_slot_pdev(&pdev->dev);
+ if (!slot_pdev)
+ return -ENODEV;
+ else if (IS_ERR(slot_pdev))
+ return PTR_ERR(slot_pdev);
+
+ mmc = mmc_alloc_host(sizeof(*host), &slot_pdev->dev);
+ if (!mmc) {
+ ret = -ENOMEM;
+ goto error_unregister_slot_pdev;
+ }
+
+ host = mmc_priv(mmc);
+ host->mmc = mmc;
+ host->controller_dev = &pdev->dev;
+
+ spin_lock_init(&host->irq_lock);
+ timer_setup(&host->cmd_timeout, meson_mx_mmc_timeout, 0);
+
+ platform_set_drvdata(pdev, host);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ host->base = devm_ioremap_resource(host->controller_dev, res);
+ if (IS_ERR(host->base)) {
+ ret = PTR_ERR(host->base);
+ goto error_free_mmc;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ ret = devm_request_threaded_irq(host->controller_dev, irq,
+ meson_mx_mmc_irq,
+ meson_mx_mmc_irq_thread, IRQF_ONESHOT,
+ NULL, host);
+ if (ret)
+ goto error_free_mmc;
+
+ host->core_clk = devm_clk_get(host->controller_dev, "core");
+ if (IS_ERR(host->core_clk)) {
+ ret = PTR_ERR(host->core_clk);
+ goto error_free_mmc;
+ }
+
+ host->parent_clk = devm_clk_get(host->controller_dev, "clkin");
+ if (IS_ERR(host->parent_clk)) {
+ ret = PTR_ERR(host->parent_clk);
+ goto error_free_mmc;
+ }
+
+ ret = meson_mx_mmc_register_clks(host);
+ if (ret)
+ goto error_free_mmc;
+
+ ret = clk_prepare_enable(host->core_clk);
+ if (ret) {
+ dev_err(host->controller_dev, "Failed to enable core clock\n");
+ goto error_free_mmc;
+ }
+
+ ret = clk_prepare_enable(host->cfg_div_clk);
+ if (ret) {
+ dev_err(host->controller_dev, "Failed to enable MMC clock\n");
+ goto error_disable_core_clk;
+ }
+
+ conf = 0;
+ conf |= FIELD_PREP(MESON_MX_SDIO_CONF_CMD_ARGUMENT_BITS_MASK, 39);
+ conf |= FIELD_PREP(MESON_MX_SDIO_CONF_M_ENDIAN_MASK, 0x3);
+ conf |= FIELD_PREP(MESON_MX_SDIO_CONF_WRITE_NWR_MASK, 0x2);
+ conf |= FIELD_PREP(MESON_MX_SDIO_CONF_WRITE_CRC_OK_STATUS_MASK, 0x2);
+ writel(conf, host->base + MESON_MX_SDIO_CONF);
+
+ meson_mx_mmc_soft_reset(host);
+
+ ret = meson_mx_mmc_add_host(host);
+ if (ret)
+ goto error_disable_clks;
+
+ return 0;
+
+error_disable_clks:
+ clk_disable_unprepare(host->cfg_div_clk);
+error_disable_core_clk:
+ clk_disable_unprepare(host->core_clk);
+error_free_mmc:
+ mmc_free_host(mmc);
+error_unregister_slot_pdev:
+ of_platform_device_destroy(&slot_pdev->dev, NULL);
+ return ret;
+}
+
+static int meson_mx_mmc_remove(struct platform_device *pdev)
+{
+ struct meson_mx_mmc_host *host = platform_get_drvdata(pdev);
+ struct device *slot_dev = mmc_dev(host->mmc);
+
+ del_timer_sync(&host->cmd_timeout);
+
+ mmc_remove_host(host->mmc);
+
+ of_platform_device_destroy(slot_dev, NULL);
+
+ clk_disable_unprepare(host->cfg_div_clk);
+ clk_disable_unprepare(host->core_clk);
+
+ mmc_free_host(host->mmc);
+
+ return 0;
+}
+
+static const struct of_device_id meson_mx_mmc_of_match[] = {
+ { .compatible = "amlogic,meson8-sdio", },
+ { .compatible = "amlogic,meson8b-sdio", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, meson_mx_mmc_of_match);
+
+static struct platform_driver meson_mx_mmc_driver = {
+ .probe = meson_mx_mmc_probe,
+ .remove = meson_mx_mmc_remove,
+ .driver = {
+ .name = "meson-mx-sdio",
+ .of_match_table = of_match_ptr(meson_mx_mmc_of_match),
+ },
+};
+
+module_platform_driver(meson_mx_mmc_driver);
+
+MODULE_DESCRIPTION("Meson6, Meson8 and Meson8b SDIO/MMC Host Driver");
+MODULE_AUTHOR("Carlo Caione <carlo@endlessm.com>");
+MODULE_AUTHOR("Martin Blumenstingl <martin.blumenstingl@googlemail.com>");
+MODULE_LICENSE("GPL v2");
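
The new driver builds its register values with the GENMASK()/FIELD_PREP()/
FIELD_GET() helpers from <linux/bitfield.h>. For readers unfamiliar with them,
a small user-space approximation using hand-rolled stand-ins (the *_U32 macros
below only mimic, and are not, the kernel helpers):

#include <stdint.h>
#include <stdio.h>

#define GENMASK_U32(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP_U32(m, v)	(((uint32_t)(v) << __builtin_ctz(m)) & (m))
#define FIELD_GET_U32(m, r)	(((r) & (m)) >> __builtin_ctz(m))

#define SEND_COMMAND_INDEX_MASK	GENMASK_U32(7, 0)
#define SEND_CMD_RESP_BITS_MASK	GENMASK_U32(15, 8)

int main(void)
{
        uint32_t send = 0;

        /* e.g. CMD17 with a 45-bit response window, as in the driver */
        send |= FIELD_PREP_U32(SEND_COMMAND_INDEX_MASK, 0x40 | 17);
        send |= FIELD_PREP_U32(SEND_CMD_RESP_BITS_MASK, 45);

        printf("SEND = 0x%08x, resp bits = %u\n", send,
               FIELD_GET_U32(SEND_CMD_RESP_BITS_MASK, send));
        return 0;
}
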
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index f1f54a818489..e8a1bb1ae694 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -1658,7 +1658,7 @@ static int mmci_probe(struct amba_device *dev,
/* Get regulators and the supported OCR mask */
ret = mmc_regulator_get_supply(mmc);
- if (ret == -EPROBE_DEFER)
+ if (ret)
goto clk_disable;
if (!mmc->ocr_avail)
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 267f7ab08420..6457a7d8880f 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -67,6 +67,7 @@
#define SDC_RESP2 0x48
#define SDC_RESP3 0x4c
#define SDC_BLK_NUM 0x50
+#define SDC_ADV_CFG0 0x64
#define EMMC_IOCON 0x7c
#define SDC_ACMD_RESP 0x80
#define MSDC_DMA_SA 0x90
@@ -74,10 +75,14 @@
#define MSDC_DMA_CFG 0x9c
#define MSDC_PATCH_BIT 0xb0
#define MSDC_PATCH_BIT1 0xb4
+#define MSDC_PATCH_BIT2 0xb8
#define MSDC_PAD_TUNE 0xec
+#define MSDC_PAD_TUNE0 0xf0
#define PAD_DS_TUNE 0x188
#define PAD_CMD_TUNE 0x18c
#define EMMC50_CFG0 0x208
+#define EMMC50_CFG3 0x220
+#define SDC_FIFO_CFG 0x228
/*--------------------------------------------------------------------------*/
/* Register Mask */
@@ -95,6 +100,9 @@
#define MSDC_CFG_CKDIV (0xff << 8) /* RW */
#define MSDC_CFG_CKMOD (0x3 << 16) /* RW */
#define MSDC_CFG_HS400_CK_MODE (0x1 << 18) /* RW */
+#define MSDC_CFG_HS400_CK_MODE_EXTRA (0x1 << 22) /* RW */
+#define MSDC_CFG_CKDIV_EXTRA (0xfff << 8) /* RW */
+#define MSDC_CFG_CKMOD_EXTRA (0x3 << 20) /* RW */
/* MSDC_IOCON mask */
#define MSDC_IOCON_SDR104CKS (0x1 << 0) /* RW */
@@ -183,6 +191,9 @@
#define SDC_STS_CMDBUSY (0x1 << 1) /* RW */
#define SDC_STS_SWR_COMPL (0x1 << 31) /* RW */
+/* SDC_ADV_CFG0 mask */
+#define SDC_RX_ENHANCE_EN (0x1 << 20) /* RW */
+
/* MSDC_DMA_CTRL mask */
#define MSDC_DMA_CTRL_START (0x1 << 0) /* W */
#define MSDC_DMA_CTRL_STOP (0x1 << 1) /* W */
@@ -212,11 +223,22 @@
#define MSDC_PATCH_BIT_SPCPUSH (0x1 << 29) /* RW */
#define MSDC_PATCH_BIT_DECRCTMO (0x1 << 30) /* RW */
+#define MSDC_PATCH_BIT1_STOP_DLY (0xf << 8) /* RW */
+
+#define MSDC_PATCH_BIT2_CFGRESP (0x1 << 15) /* RW */
+#define MSDC_PATCH_BIT2_CFGCRCSTS (0x1 << 28) /* RW */
+#define MSDC_PB2_RESPWAIT (0x3 << 2) /* RW */
+#define MSDC_PB2_RESPSTSENSEL (0x7 << 16) /* RW */
+#define MSDC_PB2_CRCSTSENSEL (0x7 << 29) /* RW */
+
#define MSDC_PAD_TUNE_DATWRDLY (0x1f << 0) /* RW */
#define MSDC_PAD_TUNE_DATRRDLY (0x1f << 8) /* RW */
#define MSDC_PAD_TUNE_CMDRDLY (0x1f << 16) /* RW */
#define MSDC_PAD_TUNE_CMDRRDLY (0x1f << 22) /* RW */
#define MSDC_PAD_TUNE_CLKTDLY (0x1f << 27) /* RW */
+#define MSDC_PAD_TUNE_RXDLYSEL (0x1 << 15) /* RW */
+#define MSDC_PAD_TUNE_RD_SEL (0x1 << 13) /* RW */
+#define MSDC_PAD_TUNE_CMD_SEL (0x1 << 21) /* RW */
#define PAD_DS_TUNE_DLY1 (0x1f << 2) /* RW */
#define PAD_DS_TUNE_DLY2 (0x1f << 7) /* RW */
@@ -228,6 +250,11 @@
#define EMMC50_CFG_CRCSTS_EDGE (0x1 << 3) /* RW */
#define EMMC50_CFG_CFCSTS_SEL (0x1 << 4) /* RW */
+#define EMMC50_CFG3_OUTS_WR (0x1f << 0) /* RW */
+
+#define SDC_FIFO_CFG_WRVALIDSEL (0x1 << 24) /* RW */
+#define SDC_FIFO_CFG_RDVALIDSEL (0x1 << 25) /* RW */
+
#define REQ_CMD_EIO (0x1 << 0)
#define REQ_CMD_TMO (0x1 << 1)
#define REQ_DAT_ERR (0x1 << 2)
@@ -290,9 +317,23 @@ struct msdc_save_para {
u32 pad_tune;
u32 patch_bit0;
u32 patch_bit1;
+ u32 patch_bit2;
u32 pad_ds_tune;
u32 pad_cmd_tune;
u32 emmc50_cfg0;
+ u32 emmc50_cfg3;
+ u32 sdc_fifo_cfg;
+};
+
+struct mtk_mmc_compatible {
+ u8 clk_div_bits;
+ bool hs400_tune; /* only used for MT8173 */
+ u32 pad_tune_reg;
+ bool async_fifo;
+ bool data_tune;
+ bool busy_check;
+ bool stop_clk_fix;
+ bool enhance_rx;
};
struct msdc_tune_para {
@@ -309,6 +350,7 @@ struct msdc_delay_phase {
struct msdc_host {
struct device *dev;
+ const struct mtk_mmc_compatible *dev_comp;
struct mmc_host *mmc; /* mmc structure */
int cmd_rsp;
@@ -334,11 +376,13 @@ struct msdc_host {
struct clk *src_clk; /* msdc source clock */
struct clk *h_clk; /* msdc h_clk */
+ struct clk *src_clk_cg; /* msdc source clock control gate */
u32 mclk; /* mmc subsystem clock frequency */
u32 src_clk_freq; /* source clock frequency */
u32 sclk; /* SD/MS bus clock frequency */
unsigned char timing;
bool vqmmc_enabled;
+ u32 latch_ck;
u32 hs400_ds_delay;
u32 hs200_cmd_int_delay; /* cmd internal delay for HS200/SDR104 */
u32 hs400_cmd_int_delay; /* cmd internal delay for HS400 */
@@ -350,6 +394,59 @@ struct msdc_host {
struct msdc_tune_para saved_tune_para; /* tune result of CMD21/CMD19 */
};
+static const struct mtk_mmc_compatible mt8135_compat = {
+ .clk_div_bits = 8,
+ .hs400_tune = false,
+ .pad_tune_reg = MSDC_PAD_TUNE,
+ .async_fifo = false,
+ .data_tune = false,
+ .busy_check = false,
+ .stop_clk_fix = false,
+ .enhance_rx = false,
+};
+
+static const struct mtk_mmc_compatible mt8173_compat = {
+ .clk_div_bits = 8,
+ .hs400_tune = true,
+ .pad_tune_reg = MSDC_PAD_TUNE,
+ .async_fifo = false,
+ .data_tune = false,
+ .busy_check = false,
+ .stop_clk_fix = false,
+ .enhance_rx = false,
+};
+
+static const struct mtk_mmc_compatible mt2701_compat = {
+ .clk_div_bits = 12,
+ .hs400_tune = false,
+ .pad_tune_reg = MSDC_PAD_TUNE0,
+ .async_fifo = true,
+ .data_tune = true,
+ .busy_check = false,
+ .stop_clk_fix = false,
+ .enhance_rx = false,
+};
+
+static const struct mtk_mmc_compatible mt2712_compat = {
+ .clk_div_bits = 12,
+ .hs400_tune = false,
+ .pad_tune_reg = MSDC_PAD_TUNE0,
+ .async_fifo = true,
+ .data_tune = true,
+ .busy_check = true,
+ .stop_clk_fix = true,
+ .enhance_rx = true,
+};
+
+static const struct of_device_id msdc_of_ids[] = {
+ { .compatible = "mediatek,mt8135-mmc", .data = &mt8135_compat},
+ { .compatible = "mediatek,mt8173-mmc", .data = &mt8173_compat},
+ { .compatible = "mediatek,mt2701-mmc", .data = &mt2701_compat},
+ { .compatible = "mediatek,mt2712-mmc", .data = &mt2712_compat},
+ {}
+};
+MODULE_DEVICE_TABLE(of, msdc_of_ids);
+
static void sdr_set_bits(void __iomem *reg, u32 bs)
{
u32 val = readl(reg);
@@ -509,7 +606,12 @@ static void msdc_set_timeout(struct msdc_host *host, u32 ns, u32 clks)
timeout = (ns + clk_ns - 1) / clk_ns + clks;
/* in 1048576 sclk cycle unit */
timeout = (timeout + (0x1 << 20) - 1) >> 20;
- sdr_get_field(host->base + MSDC_CFG, MSDC_CFG_CKMOD, &mode);
+ if (host->dev_comp->clk_div_bits == 8)
+ sdr_get_field(host->base + MSDC_CFG,
+ MSDC_CFG_CKMOD, &mode);
+ else
+ sdr_get_field(host->base + MSDC_CFG,
+ MSDC_CFG_CKMOD_EXTRA, &mode);
/*DDR mode will double the clk cycles for data timeout */
timeout = mode >= 2 ? timeout * 2 : timeout;
timeout = timeout > 1 ? timeout - 1 : 0;
@@ -520,6 +622,7 @@ static void msdc_set_timeout(struct msdc_host *host, u32 ns, u32 clks)
static void msdc_gate_clock(struct msdc_host *host)
{
+ clk_disable_unprepare(host->src_clk_cg);
clk_disable_unprepare(host->src_clk);
clk_disable_unprepare(host->h_clk);
}
@@ -528,6 +631,7 @@ static void msdc_ungate_clock(struct msdc_host *host)
{
clk_prepare_enable(host->h_clk);
clk_prepare_enable(host->src_clk);
+ clk_prepare_enable(host->src_clk_cg);
while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB))
cpu_relax();
}
@@ -538,6 +642,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
u32 flags;
u32 div;
u32 sclk;
+ u32 tune_reg = host->dev_comp->pad_tune_reg;
if (!hz) {
dev_dbg(host->dev, "set mclk to 0\n");
@@ -548,7 +653,11 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
flags = readl(host->base + MSDC_INTEN);
sdr_clr_bits(host->base + MSDC_INTEN, flags);
- sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_HS400_CK_MODE);
+ if (host->dev_comp->clk_div_bits == 8)
+ sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_HS400_CK_MODE);
+ else
+ sdr_clr_bits(host->base + MSDC_CFG,
+ MSDC_CFG_HS400_CK_MODE_EXTRA);
if (timing == MMC_TIMING_UHS_DDR50 ||
timing == MMC_TIMING_MMC_DDR52 ||
timing == MMC_TIMING_MMC_HS400) {
@@ -568,8 +677,12 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
if (timing == MMC_TIMING_MMC_HS400 &&
hz >= (host->src_clk_freq >> 1)) {
- sdr_set_bits(host->base + MSDC_CFG,
- MSDC_CFG_HS400_CK_MODE);
+ if (host->dev_comp->clk_div_bits == 8)
+ sdr_set_bits(host->base + MSDC_CFG,
+ MSDC_CFG_HS400_CK_MODE);
+ else
+ sdr_set_bits(host->base + MSDC_CFG,
+ MSDC_CFG_HS400_CK_MODE_EXTRA);
sclk = host->src_clk_freq >> 1;
div = 0; /* div is ignored when bit 18 is set */
}
@@ -587,11 +700,31 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
sclk = (host->src_clk_freq >> 2) / div;
}
}
- sdr_set_field(host->base + MSDC_CFG, MSDC_CFG_CKMOD | MSDC_CFG_CKDIV,
- (mode << 8) | div);
- sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
+ sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
+ /*
+ * As src_clk and HCLK share the same gate bit, gating only src_clk
+ * requires gating its parent (the mux) instead.
+ */
+ if (host->src_clk_cg)
+ clk_disable_unprepare(host->src_clk_cg);
+ else
+ clk_disable_unprepare(clk_get_parent(host->src_clk));
+ if (host->dev_comp->clk_div_bits == 8)
+ sdr_set_field(host->base + MSDC_CFG,
+ MSDC_CFG_CKMOD | MSDC_CFG_CKDIV,
+ (mode << 8) | div);
+ else
+ sdr_set_field(host->base + MSDC_CFG,
+ MSDC_CFG_CKMOD_EXTRA | MSDC_CFG_CKDIV_EXTRA,
+ (mode << 12) | div);
+ if (host->src_clk_cg)
+ clk_prepare_enable(host->src_clk_cg);
+ else
+ clk_prepare_enable(clk_get_parent(host->src_clk));
+
while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB))
cpu_relax();
+ sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
host->sclk = sclk;
host->mclk = hz;
host->timing = timing;
@@ -605,15 +738,16 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
*/
if (host->sclk <= 52000000) {
writel(host->def_tune_para.iocon, host->base + MSDC_IOCON);
- writel(host->def_tune_para.pad_tune, host->base + MSDC_PAD_TUNE);
+ writel(host->def_tune_para.pad_tune, host->base + tune_reg);
} else {
writel(host->saved_tune_para.iocon, host->base + MSDC_IOCON);
- writel(host->saved_tune_para.pad_tune, host->base + MSDC_PAD_TUNE);
+ writel(host->saved_tune_para.pad_tune, host->base + tune_reg);
writel(host->saved_tune_para.pad_cmd_tune,
host->base + PAD_CMD_TUNE);
}
- if (timing == MMC_TIMING_MMC_HS400)
+ if (timing == MMC_TIMING_MMC_HS400 &&
+ host->dev_comp->hs400_tune)
sdr_set_field(host->base + PAD_CMD_TUNE,
MSDC_PAD_TUNE_CMDRRDLY,
host->hs400_cmd_int_delay);
@@ -1165,6 +1299,7 @@ static irqreturn_t msdc_irq(int irq, void *dev_id)
static void msdc_init_hw(struct msdc_host *host)
{
u32 val;
+ u32 tune_reg = host->dev_comp->pad_tune_reg;
/* Configure to MMC/SD mode, clock free running */
sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_MODE | MSDC_CFG_CKPDN);
@@ -1180,14 +1315,53 @@ static void msdc_init_hw(struct msdc_host *host)
val = readl(host->base + MSDC_INT);
writel(val, host->base + MSDC_INT);
- writel(0, host->base + MSDC_PAD_TUNE);
+ writel(0, host->base + tune_reg);
writel(0, host->base + MSDC_IOCON);
sdr_set_field(host->base + MSDC_IOCON, MSDC_IOCON_DDLSEL, 0);
writel(0x403c0046, host->base + MSDC_PATCH_BIT);
sdr_set_field(host->base + MSDC_PATCH_BIT, MSDC_CKGEN_MSDC_DLY_SEL, 1);
- writel(0xffff0089, host->base + MSDC_PATCH_BIT1);
+ writel(0xffff4089, host->base + MSDC_PATCH_BIT1);
sdr_set_bits(host->base + EMMC50_CFG0, EMMC50_CFG_CFCSTS_SEL);
+ if (host->dev_comp->stop_clk_fix) {
+ sdr_set_field(host->base + MSDC_PATCH_BIT1,
+ MSDC_PATCH_BIT1_STOP_DLY, 3);
+ sdr_clr_bits(host->base + SDC_FIFO_CFG,
+ SDC_FIFO_CFG_WRVALIDSEL);
+ sdr_clr_bits(host->base + SDC_FIFO_CFG,
+ SDC_FIFO_CFG_RDVALIDSEL);
+ }
+
+ if (host->dev_comp->busy_check)
+ sdr_clr_bits(host->base + MSDC_PATCH_BIT1, (1 << 7));
+
+ if (host->dev_comp->async_fifo) {
+ sdr_set_field(host->base + MSDC_PATCH_BIT2,
+ MSDC_PB2_RESPWAIT, 3);
+ if (host->dev_comp->enhance_rx) {
+ sdr_set_bits(host->base + SDC_ADV_CFG0,
+ SDC_RX_ENHANCE_EN);
+ } else {
+ sdr_set_field(host->base + MSDC_PATCH_BIT2,
+ MSDC_PB2_RESPSTSENSEL, 2);
+ sdr_set_field(host->base + MSDC_PATCH_BIT2,
+ MSDC_PB2_CRCSTSENSEL, 2);
+ }
+ /* with the async FIFO there is no need to tune the internal delay */
+ sdr_clr_bits(host->base + MSDC_PATCH_BIT2,
+ MSDC_PATCH_BIT2_CFGRESP);
+ sdr_set_bits(host->base + MSDC_PATCH_BIT2,
+ MSDC_PATCH_BIT2_CFGCRCSTS);
+ }
+
+ if (host->dev_comp->data_tune) {
+ sdr_set_bits(host->base + tune_reg,
+ MSDC_PAD_TUNE_RD_SEL | MSDC_PAD_TUNE_CMD_SEL);
+ } else {
+ /* choose clock tune */
+ sdr_set_bits(host->base + tune_reg, MSDC_PAD_TUNE_RXDLYSEL);
+ }
+
/* Configure to enable SDIO mode.
* It is required, otherwise SDIO CMD5 fails.
*/
@@ -1200,7 +1374,9 @@ static void msdc_init_hw(struct msdc_host *host)
sdr_set_field(host->base + SDC_CFG, SDC_CFG_DTOC, 3);
host->def_tune_para.iocon = readl(host->base + MSDC_IOCON);
- host->def_tune_para.pad_tune = readl(host->base + MSDC_PAD_TUNE);
+ host->def_tune_para.pad_tune = readl(host->base + tune_reg);
+ host->saved_tune_para.iocon = readl(host->base + MSDC_IOCON);
+ host->saved_tune_para.pad_tune = readl(host->base + tune_reg);
dev_dbg(host->dev, "init hardware done!");
}
@@ -1343,18 +1519,19 @@ static int msdc_tune_response(struct mmc_host *mmc, u32 opcode)
struct msdc_delay_phase internal_delay_phase;
u8 final_delay, final_maxlen;
u32 internal_delay = 0;
+ u32 tune_reg = host->dev_comp->pad_tune_reg;
int cmd_err;
int i, j;
if (mmc->ios.timing == MMC_TIMING_MMC_HS200 ||
mmc->ios.timing == MMC_TIMING_UHS_SDR104)
- sdr_set_field(host->base + MSDC_PAD_TUNE,
+ sdr_set_field(host->base + tune_reg,
MSDC_PAD_TUNE_CMDRRDLY,
host->hs200_cmd_int_delay);
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
for (i = 0 ; i < PAD_DELAY_MAX; i++) {
- sdr_set_field(host->base + MSDC_PAD_TUNE,
+ sdr_set_field(host->base + tune_reg,
MSDC_PAD_TUNE_CMDRDLY, i);
/*
* Using the same parameters, it may sometimes pass the test,
@@ -1373,12 +1550,13 @@ static int msdc_tune_response(struct mmc_host *mmc, u32 opcode)
}
final_rise_delay = get_best_delay(host, rise_delay);
/* if rising edge has enough margin, then do not scan falling edge */
- if (final_rise_delay.maxlen >= 12 && final_rise_delay.start < 4)
+ if (final_rise_delay.maxlen >= 12 ||
+ (final_rise_delay.start == 0 && final_rise_delay.maxlen >= 4))
goto skip_fall;
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
for (i = 0; i < PAD_DELAY_MAX; i++) {
- sdr_set_field(host->base + MSDC_PAD_TUNE,
+ sdr_set_field(host->base + tune_reg,
MSDC_PAD_TUNE_CMDRDLY, i);
/*
* Using the same parameters, it may sometimes pass the test,
@@ -1403,20 +1581,20 @@ skip_fall:
final_maxlen = final_fall_delay.maxlen;
if (final_maxlen == final_rise_delay.maxlen) {
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
- sdr_set_field(host->base + MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRDLY,
+ sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRDLY,
final_rise_delay.final_phase);
final_delay = final_rise_delay.final_phase;
} else {
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
- sdr_set_field(host->base + MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRDLY,
+ sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRDLY,
final_fall_delay.final_phase);
final_delay = final_fall_delay.final_phase;
}
- if (host->hs200_cmd_int_delay)
+ if (host->dev_comp->async_fifo || host->hs200_cmd_int_delay)
goto skip_internal;
for (i = 0; i < PAD_DELAY_MAX; i++) {
- sdr_set_field(host->base + MSDC_PAD_TUNE,
+ sdr_set_field(host->base + tune_reg,
MSDC_PAD_TUNE_CMDRRDLY, i);
mmc_send_tuning(mmc, opcode, &cmd_err);
if (!cmd_err)
@@ -1424,7 +1602,7 @@ skip_fall:
}
dev_dbg(host->dev, "Final internal delay: 0x%x\n", internal_delay);
internal_delay_phase = get_best_delay(host, internal_delay);
- sdr_set_field(host->base + MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRRDLY,
+ sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRRDLY,
internal_delay_phase.final_phase);
skip_internal:
dev_dbg(host->dev, "Final cmd pad delay: %x\n", final_delay);
@@ -1486,12 +1664,15 @@ static int msdc_tune_data(struct mmc_host *mmc, u32 opcode)
u32 rise_delay = 0, fall_delay = 0;
struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,};
u8 final_delay, final_maxlen;
+ u32 tune_reg = host->dev_comp->pad_tune_reg;
int i, ret;
+ sdr_set_field(host->base + MSDC_PATCH_BIT, MSDC_INT_DAT_LATCH_CK_SEL,
+ host->latch_ck);
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
for (i = 0 ; i < PAD_DELAY_MAX; i++) {
- sdr_set_field(host->base + MSDC_PAD_TUNE,
+ sdr_set_field(host->base + tune_reg,
MSDC_PAD_TUNE_DATRRDLY, i);
ret = mmc_send_tuning(mmc, opcode, NULL);
if (!ret)
@@ -1506,7 +1687,7 @@ static int msdc_tune_data(struct mmc_host *mmc, u32 opcode)
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
for (i = 0; i < PAD_DELAY_MAX; i++) {
- sdr_set_field(host->base + MSDC_PAD_TUNE,
+ sdr_set_field(host->base + tune_reg,
MSDC_PAD_TUNE_DATRRDLY, i);
ret = mmc_send_tuning(mmc, opcode, NULL);
if (!ret)
@@ -1519,14 +1700,14 @@ skip_fall:
if (final_maxlen == final_rise_delay.maxlen) {
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
- sdr_set_field(host->base + MSDC_PAD_TUNE,
+ sdr_set_field(host->base + tune_reg,
MSDC_PAD_TUNE_DATRRDLY,
final_rise_delay.final_phase);
final_delay = final_rise_delay.final_phase;
} else {
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
- sdr_set_field(host->base + MSDC_PAD_TUNE,
+ sdr_set_field(host->base + tune_reg,
MSDC_PAD_TUNE_DATRRDLY,
final_fall_delay.final_phase);
final_delay = final_fall_delay.final_phase;
@@ -1540,8 +1721,10 @@ static int msdc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
struct msdc_host *host = mmc_priv(mmc);
int ret;
+ u32 tune_reg = host->dev_comp->pad_tune_reg;
- if (host->hs400_mode)
+ if (host->hs400_mode &&
+ host->dev_comp->hs400_tune)
ret = hs400_tune_response(mmc, opcode);
else
ret = msdc_tune_response(mmc, opcode);
@@ -1556,7 +1739,7 @@ static int msdc_execute_tuning(struct mmc_host *mmc, u32 opcode)
}
host->saved_tune_para.iocon = readl(host->base + MSDC_IOCON);
- host->saved_tune_para.pad_tune = readl(host->base + MSDC_PAD_TUNE);
+ host->saved_tune_para.pad_tune = readl(host->base + tune_reg);
host->saved_tune_para.pad_cmd_tune = readl(host->base + PAD_CMD_TUNE);
return ret;
}
@@ -1567,6 +1750,11 @@ static int msdc_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
host->hs400_mode = true;
writel(host->hs400_ds_delay, host->base + PAD_DS_TUNE);
+ /* HS400 mode requires this bit to be cleared */
+ sdr_clr_bits(host->base + MSDC_PATCH_BIT2, MSDC_PATCH_BIT2_CFGCRCSTS);
+ /* to improve read performance, set outstanding to 2 */
+ sdr_set_field(host->base + EMMC50_CFG3, EMMC50_CFG3_OUTS_WR, 2);
+
return 0;
}
@@ -1596,6 +1784,9 @@ static const struct mmc_host_ops mt_msdc_ops = {
static void msdc_of_property_parse(struct platform_device *pdev,
struct msdc_host *host)
{
+ of_property_read_u32(pdev->dev.of_node, "mediatek,latch-ck",
+ &host->latch_ck);
+
of_property_read_u32(pdev->dev.of_node, "hs400-ds-delay",
&host->hs400_ds_delay);
@@ -1617,12 +1808,17 @@ static int msdc_drv_probe(struct platform_device *pdev)
struct mmc_host *mmc;
struct msdc_host *host;
struct resource *res;
+ const struct of_device_id *of_id;
int ret;
if (!pdev->dev.of_node) {
dev_err(&pdev->dev, "No DT found\n");
return -EINVAL;
}
+
+ of_id = of_match_node(msdc_of_ids, pdev->dev.of_node);
+ if (!of_id)
+ return -EINVAL;
/* Allocate MMC host for this device */
mmc = mmc_alloc_host(sizeof(struct msdc_host), &pdev->dev);
if (!mmc)
@@ -1641,7 +1837,7 @@ static int msdc_drv_probe(struct platform_device *pdev)
}
ret = mmc_regulator_get_supply(mmc);
- if (ret == -EPROBE_DEFER)
+ if (ret)
goto host_free;
host->src_clk = devm_clk_get(&pdev->dev, "source");
@@ -1656,6 +1852,11 @@ static int msdc_drv_probe(struct platform_device *pdev)
goto host_free;
}
+ /* the source clock control gate is an optional clock */
+ host->src_clk_cg = devm_clk_get(&pdev->dev, "source_cg");
+ if (IS_ERR(host->src_clk_cg))
+ host->src_clk_cg = NULL;
+
host->irq = platform_get_irq(pdev, 0);
if (host->irq < 0) {
ret = -EINVAL;
@@ -1686,11 +1887,15 @@ static int msdc_drv_probe(struct platform_device *pdev)
msdc_of_property_parse(pdev, host);
host->dev = &pdev->dev;
+ host->dev_comp = of_id->data;
host->mmc = mmc;
host->src_clk_freq = clk_get_rate(host->src_clk);
/* Set host parameters to mmc */
mmc->ops = &mt_msdc_ops;
- mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 255);
+ if (host->dev_comp->clk_div_bits == 8)
+ mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 255);
+ else
+ mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 4095);
mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23;
/* MMC core transfer sizes tunable parameters */
@@ -1788,28 +1993,38 @@ static int msdc_drv_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
static void msdc_save_reg(struct msdc_host *host)
{
+ u32 tune_reg = host->dev_comp->pad_tune_reg;
+
host->save_para.msdc_cfg = readl(host->base + MSDC_CFG);
host->save_para.iocon = readl(host->base + MSDC_IOCON);
host->save_para.sdc_cfg = readl(host->base + SDC_CFG);
- host->save_para.pad_tune = readl(host->base + MSDC_PAD_TUNE);
+ host->save_para.pad_tune = readl(host->base + tune_reg);
host->save_para.patch_bit0 = readl(host->base + MSDC_PATCH_BIT);
host->save_para.patch_bit1 = readl(host->base + MSDC_PATCH_BIT1);
+ host->save_para.patch_bit2 = readl(host->base + MSDC_PATCH_BIT2);
host->save_para.pad_ds_tune = readl(host->base + PAD_DS_TUNE);
host->save_para.pad_cmd_tune = readl(host->base + PAD_CMD_TUNE);
host->save_para.emmc50_cfg0 = readl(host->base + EMMC50_CFG0);
+ host->save_para.emmc50_cfg3 = readl(host->base + EMMC50_CFG3);
+ host->save_para.sdc_fifo_cfg = readl(host->base + SDC_FIFO_CFG);
}
static void msdc_restore_reg(struct msdc_host *host)
{
+ u32 tune_reg = host->dev_comp->pad_tune_reg;
+
writel(host->save_para.msdc_cfg, host->base + MSDC_CFG);
writel(host->save_para.iocon, host->base + MSDC_IOCON);
writel(host->save_para.sdc_cfg, host->base + SDC_CFG);
- writel(host->save_para.pad_tune, host->base + MSDC_PAD_TUNE);
+ writel(host->save_para.pad_tune, host->base + tune_reg);
writel(host->save_para.patch_bit0, host->base + MSDC_PATCH_BIT);
writel(host->save_para.patch_bit1, host->base + MSDC_PATCH_BIT1);
+ writel(host->save_para.patch_bit2, host->base + MSDC_PATCH_BIT2);
writel(host->save_para.pad_ds_tune, host->base + PAD_DS_TUNE);
writel(host->save_para.pad_cmd_tune, host->base + PAD_CMD_TUNE);
writel(host->save_para.emmc50_cfg0, host->base + EMMC50_CFG0);
+ writel(host->save_para.emmc50_cfg3, host->base + EMMC50_CFG3);
+ writel(host->save_para.sdc_fifo_cfg, host->base + SDC_FIFO_CFG);
}
static int msdc_runtime_suspend(struct device *dev)
@@ -1839,12 +2054,6 @@ static const struct dev_pm_ops msdc_dev_pm_ops = {
SET_RUNTIME_PM_OPS(msdc_runtime_suspend, msdc_runtime_resume, NULL)
};
-static const struct of_device_id msdc_of_ids[] = {
- { .compatible = "mediatek,mt8135-mmc", },
- {}
-};
-MODULE_DEVICE_TABLE(of, msdc_of_ids);
-
static struct platform_driver mt_msdc_driver = {
.probe = msdc_drv_probe,
.remove = msdc_drv_remove,
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index 58d74b8d6c79..210247b3d11a 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -508,9 +508,9 @@ static irqreturn_t mvsd_irq(int irq, void *dev)
return IRQ_NONE;
}
-static void mvsd_timeout_timer(unsigned long data)
+static void mvsd_timeout_timer(struct timer_list *t)
{
- struct mvsd_host *host = (struct mvsd_host *)data;
+ struct mvsd_host *host = from_timer(host, t, timer);
void __iomem *iobase = host->base;
struct mmc_request *mrq;
unsigned long flags;
@@ -776,7 +776,7 @@ static int mvsd_probe(struct platform_device *pdev)
goto out;
}
- setup_timer(&host->timer, mvsd_timeout_timer, (unsigned long)host);
+ timer_setup(&host->timer, mvsd_timeout_timer, 0);
platform_set_drvdata(pdev, mmc);
ret = mmc_add_host(mmc);
if (ret)
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 1d5418e4efae..5ff8ef7223cc 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -963,10 +963,9 @@ static bool filter(struct dma_chan *chan, void *param)
return true;
}
-static void mxcmci_watchdog(unsigned long data)
+static void mxcmci_watchdog(struct timer_list *t)
{
- struct mmc_host *mmc = (struct mmc_host *)data;
- struct mxcmci_host *host = mmc_priv(mmc);
+ struct mxcmci_host *host = from_timer(host, t, watchdog);
struct mmc_request *req = host->req;
unsigned int stat = mxcmci_readl(host, MMC_REG_STATUS);
@@ -1075,7 +1074,7 @@ static int mxcmci_probe(struct platform_device *pdev)
dat3_card_detect = true;
ret = mmc_regulator_get_supply(mmc);
- if (ret == -EPROBE_DEFER)
+ if (ret)
goto out_free;
if (!mmc->ocr_avail) {
@@ -1165,9 +1164,7 @@ static int mxcmci_probe(struct platform_device *pdev)
goto out_free_dma;
}
- init_timer(&host->watchdog);
- host->watchdog.function = &mxcmci_watchdog;
- host->watchdog.data = (unsigned long)mmc;
+ timer_setup(&host->watchdog, mxcmci_watchdog, 0);
mmc_add_host(mmc);
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index bd49f34d7654..adf32682f27a 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -625,9 +625,9 @@ static void mmc_omap_abort_command(struct work_struct *work)
}
static void
-mmc_omap_cmd_timer(unsigned long data)
+mmc_omap_cmd_timer(struct timer_list *t)
{
- struct mmc_omap_host *host = (struct mmc_omap_host *) data;
+ struct mmc_omap_host *host = from_timer(host, t, cmd_abort_timer);
unsigned long flags;
spin_lock_irqsave(&host->slot_lock, flags);
@@ -654,9 +654,9 @@ mmc_omap_sg_to_buf(struct mmc_omap_host *host)
}
static void
-mmc_omap_clk_timer(unsigned long data)
+mmc_omap_clk_timer(struct timer_list *t)
{
- struct mmc_omap_host *host = (struct mmc_omap_host *) data;
+ struct mmc_omap_host *host = from_timer(host, t, clk_timer);
mmc_omap_fclk_enable(host, 0);
}
@@ -874,9 +874,9 @@ void omap_mmc_notify_cover_event(struct device *dev, int num, int is_closed)
tasklet_hi_schedule(&slot->cover_tasklet);
}
-static void mmc_omap_cover_timer(unsigned long arg)
+static void mmc_omap_cover_timer(struct timer_list *t)
{
- struct mmc_omap_slot *slot = (struct mmc_omap_slot *) arg;
+ struct mmc_omap_slot *slot = from_timer(slot, t, cover_timer);
tasklet_schedule(&slot->cover_tasklet);
}
@@ -1264,8 +1264,7 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id)
mmc->max_seg_size = mmc->max_req_size;
if (slot->pdata->get_cover_state != NULL) {
- setup_timer(&slot->cover_timer, mmc_omap_cover_timer,
- (unsigned long)slot);
+ timer_setup(&slot->cover_timer, mmc_omap_cover_timer, 0);
tasklet_init(&slot->cover_tasklet, mmc_omap_cover_handler,
(unsigned long)slot);
}
@@ -1352,11 +1351,10 @@ static int mmc_omap_probe(struct platform_device *pdev)
INIT_WORK(&host->send_stop_work, mmc_omap_send_stop_work);
INIT_WORK(&host->cmd_abort_work, mmc_omap_abort_command);
- setup_timer(&host->cmd_abort_timer, mmc_omap_cmd_timer,
- (unsigned long) host);
+ timer_setup(&host->cmd_abort_timer, mmc_omap_cmd_timer, 0);
spin_lock_init(&host->clk_lock);
- setup_timer(&host->clk_timer, mmc_omap_clk_timer, (unsigned long) host);
+ timer_setup(&host->clk_timer, mmc_omap_clk_timer, 0);
spin_lock_init(&host->dma_lock);
spin_lock_init(&host->slot_lock);
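The mvsdio, mxcmmc and omap hunks above all apply the same timer API conversion: the callback now receives the timer_list pointer and recovers its container with from_timer(), and setup_timer()/init_timer() become timer_setup(). A minimal sketch of that pattern, not part of these patches, with hypothetical names (my_host, my_timeout):

#include <linux/timer.h>

struct my_host {
	struct timer_list timer;
	int pending;
};

/* New-style callback: gets the timer itself instead of an opaque cookie. */
static void my_timeout(struct timer_list *t)
{
	/* from_timer() is container_of() on the timer_list member. */
	struct my_host *host = from_timer(host, t, timer);

	host->pending = 0;
}

static void my_host_init(struct my_host *host)
{
	/* Replaces setup_timer(&host->timer, fn, (unsigned long)host). */
	timer_setup(&host->timer, my_timeout, 0);
}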
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 3b5e6d11069b..071693ebfe18 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -147,10 +147,6 @@
#define OMAP_MMC_MAX_CLOCK 52000000
#define DRIVER_NAME "omap_hsmmc"
-#define VDD_1V8 1800000 /* 180000 uV */
-#define VDD_3V0 3000000 /* 300000 uV */
-#define VDD_165_195 (ffs(MMC_VDD_165_195) - 1)
-
/*
* One controller can have multiple slots, like on some omap boards using
* omap.c controller driver. Luckily this is not currently done on any known
@@ -308,8 +304,7 @@ err_set_ocr:
return ret;
}
-static int omap_hsmmc_set_pbias(struct omap_hsmmc_host *host, bool power_on,
- int vdd)
+static int omap_hsmmc_set_pbias(struct omap_hsmmc_host *host, bool power_on)
{
int ret;
@@ -317,17 +312,6 @@ static int omap_hsmmc_set_pbias(struct omap_hsmmc_host *host, bool power_on,
return 0;
if (power_on) {
- if (vdd <= VDD_165_195)
- ret = regulator_set_voltage(host->pbias, VDD_1V8,
- VDD_1V8);
- else
- ret = regulator_set_voltage(host->pbias, VDD_3V0,
- VDD_3V0);
- if (ret < 0) {
- dev_err(host->dev, "pbias set voltage fail\n");
- return ret;
- }
-
if (host->pbias_enabled == 0) {
ret = regulator_enable(host->pbias);
if (ret) {
@@ -350,8 +334,7 @@ static int omap_hsmmc_set_pbias(struct omap_hsmmc_host *host, bool power_on,
return 0;
}
-static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on,
- int vdd)
+static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on)
{
struct mmc_host *mmc = host->mmc;
int ret = 0;
@@ -363,7 +346,7 @@ static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on,
if (IS_ERR(mmc->supply.vmmc))
return 0;
- ret = omap_hsmmc_set_pbias(host, false, 0);
+ ret = omap_hsmmc_set_pbias(host, false);
if (ret)
return ret;
@@ -385,7 +368,7 @@ static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on,
if (ret)
return ret;
- ret = omap_hsmmc_set_pbias(host, true, vdd);
+ ret = omap_hsmmc_set_pbias(host, true);
if (ret)
goto err_set_voltage;
} else {
@@ -462,7 +445,7 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
ret = mmc_regulator_get_supply(mmc);
- if (ret == -EPROBE_DEFER)
+ if (ret)
return ret;
/* Allow an aux regulator */
@@ -1220,11 +1203,11 @@ static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd)
clk_disable_unprepare(host->dbclk);
/* Turn the power off */
- ret = omap_hsmmc_set_power(host, 0, 0);
+ ret = omap_hsmmc_set_power(host, 0);
/* Turn the power ON with given VDD 1.8 or 3.0v */
if (!ret)
- ret = omap_hsmmc_set_power(host, 1, vdd);
+ ret = omap_hsmmc_set_power(host, 1);
if (host->dbclk)
clk_prepare_enable(host->dbclk);
@@ -1621,10 +1604,10 @@ static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (ios->power_mode != host->power_mode) {
switch (ios->power_mode) {
case MMC_POWER_OFF:
- omap_hsmmc_set_power(host, 0, 0);
+ omap_hsmmc_set_power(host, 0);
break;
case MMC_POWER_UP:
- omap_hsmmc_set_power(host, 1, ios->vdd);
+ omap_hsmmc_set_power(host, 1);
break;
case MMC_POWER_ON:
do_send_init_stream = 1;
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
index 8bae88a150fd..41cbe84c1d18 100644
--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
@@ -88,6 +88,7 @@ static const struct renesas_sdhi_of_data of_rcar_gen3_compatible = {
static const struct of_device_id renesas_sdhi_internal_dmac_of_match[] = {
{ .compatible = "renesas,sdhi-r8a7795", .data = &of_rcar_gen3_compatible, },
{ .compatible = "renesas,sdhi-r8a7796", .data = &of_rcar_gen3_compatible, },
+ { .compatible = "renesas,rcar-gen3-sdhi", .data = &of_rcar_gen3_compatible, },
{},
};
MODULE_DEVICE_TABLE(of, renesas_sdhi_internal_dmac_of_match);
diff --git a/drivers/mmc/host/renesas_sdhi_sys_dmac.c b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
index df4465439e13..9ab10436e4b8 100644
--- a/drivers/mmc/host/renesas_sdhi_sys_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
@@ -91,7 +91,6 @@ static const struct renesas_sdhi_of_data of_rcar_gen3_compatible = {
};
static const struct of_device_id renesas_sdhi_sys_dmac_of_match[] = {
- { .compatible = "renesas,sdhi-shmobile" },
{ .compatible = "renesas,sdhi-sh73a0", .data = &of_default_cfg, },
{ .compatible = "renesas,sdhi-r8a73a4", .data = &of_default_cfg, },
{ .compatible = "renesas,sdhi-r8a7740", .data = &of_default_cfg, },
@@ -107,6 +106,10 @@ static const struct of_device_id renesas_sdhi_sys_dmac_of_match[] = {
{ .compatible = "renesas,sdhi-r8a7794", .data = &of_rcar_gen2_compatible, },
{ .compatible = "renesas,sdhi-r8a7795", .data = &of_rcar_gen3_compatible, },
{ .compatible = "renesas,sdhi-r8a7796", .data = &of_rcar_gen3_compatible, },
+ { .compatible = "renesas,rcar-gen1-sdhi", .data = &of_rcar_gen1_compatible, },
+ { .compatible = "renesas,rcar-gen2-sdhi", .data = &of_rcar_gen2_compatible, },
+ { .compatible = "renesas,rcar-gen3-sdhi", .data = &of_rcar_gen3_compatible, },
+ { .compatible = "renesas,sdhi-shmobile" },
{},
};
MODULE_DEVICE_TABLE(of, renesas_sdhi_sys_dmac_of_match);
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
index 41b57713b620..0848dc0f882e 100644
--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
+++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
@@ -618,29 +618,22 @@ static int sd_change_phase(struct realtek_pci_sdmmc *host,
u8 sample_point, bool rx)
{
struct rtsx_pcr *pcr = host->pcr;
- int err;
dev_dbg(sdmmc_dev(host), "%s(%s): sample_point = %d\n",
__func__, rx ? "RX" : "TX", sample_point);
- rtsx_pci_init_cmd(pcr);
-
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, CHANGE_CLK, CHANGE_CLK);
+ rtsx_pci_write_register(pcr, CLK_CTL, CHANGE_CLK, CHANGE_CLK);
if (rx)
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
- SD_VPRX_CTL, 0x1F, sample_point);
+ rtsx_pci_write_register(pcr, SD_VPRX_CTL,
+ PHASE_SELECT_MASK, sample_point);
else
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
- SD_VPTX_CTL, 0x1F, sample_point);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL, PHASE_NOT_RESET, 0);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
- PHASE_NOT_RESET, PHASE_NOT_RESET);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, CHANGE_CLK, 0);
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG1, SD_ASYNC_FIFO_NOT_RST, 0);
-
- err = rtsx_pci_send_cmd(pcr, 100);
- if (err < 0)
- return err;
+ rtsx_pci_write_register(pcr, SD_VPTX_CTL,
+ PHASE_SELECT_MASK, sample_point);
+ rtsx_pci_write_register(pcr, SD_VPCLK0_CTL, PHASE_NOT_RESET, 0);
+ rtsx_pci_write_register(pcr, SD_VPCLK0_CTL, PHASE_NOT_RESET,
+ PHASE_NOT_RESET);
+ rtsx_pci_write_register(pcr, CLK_CTL, CHANGE_CLK, 0);
+ rtsx_pci_write_register(pcr, SD_CFG1, SD_ASYNC_FIFO_NOT_RST, 0);
return 0;
}
@@ -708,10 +701,12 @@ static int sd_tuning_rx_cmd(struct realtek_pci_sdmmc *host,
{
int err;
struct mmc_command cmd = {};
+ struct rtsx_pcr *pcr = host->pcr;
- err = sd_change_phase(host, sample_point, true);
- if (err < 0)
- return err;
+ sd_change_phase(host, sample_point, true);
+
+ rtsx_pci_write_register(pcr, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN,
+ SD_RSP_80CLK_TIMEOUT_EN);
cmd.opcode = opcode;
err = sd_read_data(host, &cmd, 0x40, NULL, 0, 100);
@@ -719,9 +714,12 @@ static int sd_tuning_rx_cmd(struct realtek_pci_sdmmc *host,
/* Wait till SD DATA IDLE */
sd_wait_data_idle(host);
sd_clear_error(host);
+ rtsx_pci_write_register(pcr, SD_CFG3,
+ SD_RSP_80CLK_TIMEOUT_EN, 0);
return err;
}
+ rtsx_pci_write_register(pcr, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN, 0);
return 0;
}
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 08ae0ff13513..b988997a1e80 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -73,6 +73,7 @@ struct sdhci_acpi_slot {
unsigned int caps2;
mmc_pm_flag_t pm_caps;
unsigned int flags;
+ size_t priv_size;
int (*probe_slot)(struct platform_device *, const char *, const char *);
int (*remove_slot)(struct platform_device *);
};
@@ -82,13 +83,118 @@ struct sdhci_acpi_host {
const struct sdhci_acpi_slot *slot;
struct platform_device *pdev;
bool use_runtime_pm;
+ unsigned long private[0] ____cacheline_aligned;
};
+static inline void *sdhci_acpi_priv(struct sdhci_acpi_host *c)
+{
+ return (void *)c->private;
+}
+
static inline bool sdhci_acpi_flag(struct sdhci_acpi_host *c, unsigned int flag)
{
return c->slot && (c->slot->flags & flag);
}
+enum {
+ INTEL_DSM_FNS = 0,
+ INTEL_DSM_V18_SWITCH = 3,
+ INTEL_DSM_V33_SWITCH = 4,
+};
+
+struct intel_host {
+ u32 dsm_fns;
+};
+
+static const guid_t intel_dsm_guid =
+ GUID_INIT(0xF6C13EA5, 0x65CD, 0x461F,
+ 0xAB, 0x7A, 0x29, 0xF7, 0xE8, 0xD5, 0xBD, 0x61);
+
+static int __intel_dsm(struct intel_host *intel_host, struct device *dev,
+ unsigned int fn, u32 *result)
+{
+ union acpi_object *obj;
+ int err = 0;
+
+ obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &intel_dsm_guid, 0, fn, NULL);
+ if (!obj)
+ return -EOPNOTSUPP;
+
+ if (obj->type == ACPI_TYPE_INTEGER) {
+ *result = obj->integer.value;
+ } else if (obj->type == ACPI_TYPE_BUFFER && obj->buffer.length > 0) {
+ size_t len = min_t(size_t, obj->buffer.length, 4);
+
+ *result = 0;
+ memcpy(result, obj->buffer.pointer, len);
+ } else {
+ dev_err(dev, "%s DSM fn %u obj->type %d obj->buffer.length %d\n",
+ __func__, fn, obj->type, obj->buffer.length);
+ err = -EINVAL;
+ }
+
+ ACPI_FREE(obj);
+
+ return err;
+}
+
+static int intel_dsm(struct intel_host *intel_host, struct device *dev,
+ unsigned int fn, u32 *result)
+{
+ if (fn > 31 || !(intel_host->dsm_fns & (1 << fn)))
+ return -EOPNOTSUPP;
+
+ return __intel_dsm(intel_host, dev, fn, result);
+}
+
+static void intel_dsm_init(struct intel_host *intel_host, struct device *dev,
+ struct mmc_host *mmc)
+{
+ int err;
+
+ err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns);
+ if (err) {
+ pr_debug("%s: DSM not supported, error %d\n",
+ mmc_hostname(mmc), err);
+ return;
+ }
+
+ pr_debug("%s: DSM function mask %#x\n",
+ mmc_hostname(mmc), intel_host->dsm_fns);
+}
+
+static int intel_start_signal_voltage_switch(struct mmc_host *mmc,
+ struct mmc_ios *ios)
+{
+ struct device *dev = mmc_dev(mmc);
+ struct sdhci_acpi_host *c = dev_get_drvdata(dev);
+ struct intel_host *intel_host = sdhci_acpi_priv(c);
+ unsigned int fn;
+ u32 result = 0;
+ int err;
+
+ err = sdhci_start_signal_voltage_switch(mmc, ios);
+ if (err)
+ return err;
+
+ switch (ios->signal_voltage) {
+ case MMC_SIGNAL_VOLTAGE_330:
+ fn = INTEL_DSM_V33_SWITCH;
+ break;
+ case MMC_SIGNAL_VOLTAGE_180:
+ fn = INTEL_DSM_V18_SWITCH;
+ break;
+ default:
+ return 0;
+ }
+
+ err = intel_dsm(intel_host, dev, fn, &result);
+ pr_debug("%s: %s DSM fn %u error %d result %u\n",
+ mmc_hostname(mmc), __func__, fn, err, result);
+
+ return 0;
+}
+
static void sdhci_acpi_int_hw_reset(struct sdhci_host *host)
{
u8 reg;
@@ -269,56 +375,26 @@ out:
return ret;
}
-static int sdhci_acpi_emmc_probe_slot(struct platform_device *pdev,
- const char *hid, const char *uid)
+static int intel_probe_slot(struct platform_device *pdev, const char *hid,
+ const char *uid)
{
struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
- struct sdhci_host *host;
-
- if (!c || !c->host)
- return 0;
-
- host = c->host;
-
- /* Platform specific code during emmc probe slot goes here */
+ struct intel_host *intel_host = sdhci_acpi_priv(c);
+ struct sdhci_host *host = c->host;
if (hid && uid && !strcmp(hid, "80860F14") && !strcmp(uid, "1") &&
sdhci_readl(host, SDHCI_CAPABILITIES) == 0x446cc8b2 &&
sdhci_readl(host, SDHCI_CAPABILITIES_1) == 0x00000807)
host->timeout_clk = 1000; /* 1000 kHz i.e. 1 MHz */
- return 0;
-}
-
-static int sdhci_acpi_sdio_probe_slot(struct platform_device *pdev,
- const char *hid, const char *uid)
-{
- struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
-
- if (!c || !c->host)
- return 0;
-
- /* Platform specific code during sdio probe slot goes here */
-
- return 0;
-}
-
-static int sdhci_acpi_sd_probe_slot(struct platform_device *pdev,
- const char *hid, const char *uid)
-{
- struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
- struct sdhci_host *host;
-
- if (!c || !c->host || !c->slot)
- return 0;
-
- host = c->host;
-
- /* Platform specific code during sd probe slot goes here */
-
if (hid && !strcmp(hid, "80865ACA"))
host->mmc_host_ops.get_cd = bxt_get_cd;
+ intel_dsm_init(intel_host, &pdev->dev, host->mmc);
+
+ host->mmc_host_ops.start_signal_voltage_switch =
+ intel_start_signal_voltage_switch;
+
return 0;
}
@@ -332,7 +408,8 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = {
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
SDHCI_QUIRK2_STOP_WITH_TC |
SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400,
- .probe_slot = sdhci_acpi_emmc_probe_slot,
+ .probe_slot = intel_probe_slot,
+ .priv_size = sizeof(struct intel_host),
};
static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = {
@@ -343,7 +420,8 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = {
MMC_CAP_WAIT_WHILE_BUSY,
.flags = SDHCI_ACPI_RUNTIME_PM,
.pm_caps = MMC_PM_KEEP_POWER,
- .probe_slot = sdhci_acpi_sdio_probe_slot,
+ .probe_slot = intel_probe_slot,
+ .priv_size = sizeof(struct intel_host),
};
static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sd = {
@@ -353,7 +431,8 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sd = {
.quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON |
SDHCI_QUIRK2_STOP_WITH_TC,
.caps = MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_AGGRESSIVE_PM,
- .probe_slot = sdhci_acpi_sd_probe_slot,
+ .probe_slot = intel_probe_slot,
+ .priv_size = sizeof(struct intel_host),
};
static const struct sdhci_acpi_slot sdhci_acpi_slot_qcom_sd_3v = {
@@ -429,11 +508,13 @@ static const struct sdhci_acpi_slot *sdhci_acpi_get_slot(const char *hid,
static int sdhci_acpi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ const struct sdhci_acpi_slot *slot;
struct acpi_device *device, *child;
struct sdhci_acpi_host *c;
struct sdhci_host *host;
struct resource *iomem;
resource_size_t len;
+ size_t priv_size;
const char *hid;
const char *uid;
int err;
@@ -443,7 +524,9 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
return -ENODEV;
hid = acpi_device_hid(device);
- uid = device->pnp.unique_id;
+ uid = acpi_device_uid(device);
+
+ slot = sdhci_acpi_get_slot(hid, uid);
/* Power on the SDHCI controller and its children */
acpi_device_fix_up_power(device);
@@ -467,13 +550,14 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
if (!devm_request_mem_region(dev, iomem->start, len, dev_name(dev)))
return -ENOMEM;
- host = sdhci_alloc_host(dev, sizeof(struct sdhci_acpi_host));
+ priv_size = slot ? slot->priv_size : 0;
+ host = sdhci_alloc_host(dev, sizeof(struct sdhci_acpi_host) + priv_size);
if (IS_ERR(host))
return PTR_ERR(host);
c = sdhci_priv(host);
c->host = host;
- c->slot = sdhci_acpi_get_slot(hid, uid);
+ c->slot = slot;
c->pdev = pdev;
c->use_runtime_pm = sdhci_acpi_flag(c, SDHCI_ACPI_RUNTIME_PM);
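The Intel DSM support added to sdhci-acpi above first queries ACPI _DSM function 0 for a bitmap of implemented functions before calling any other function. A rough standalone sketch of that query using the same acpi_evaluate_dsm() API; the GUID and names below are placeholders, not the real Intel GUID:

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/string.h>

/* Placeholder GUID for illustration only. */
static const guid_t example_dsm_guid =
	GUID_INIT(0x00000000, 0x0000, 0x0000,
		  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01);

static u32 example_dsm_fn_mask(struct device *dev)
{
	union acpi_object *obj;
	u32 fns = 0;

	/* Function 0 conventionally reports which functions are implemented. */
	obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &example_dsm_guid, 0, 0, NULL);
	if (!obj)
		return 0;

	if (obj->type == ACPI_TYPE_INTEGER)
		fns = obj->integer.value;
	else if (obj->type == ACPI_TYPE_BUFFER && obj->buffer.length > 0)
		memcpy(&fns, obj->buffer.pointer,
		       min_t(size_t, obj->buffer.length, sizeof(fns)));

	ACPI_FREE(obj);

	return fns;
}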
diff --git a/drivers/mmc/host/sdhci-cadence.c b/drivers/mmc/host/sdhci-cadence.c
index 56529c3d389a..0f589e26ee63 100644
--- a/drivers/mmc/host/sdhci-cadence.c
+++ b/drivers/mmc/host/sdhci-cadence.c
@@ -13,6 +13,7 @@
* GNU General Public License for more details.
*/
+#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/iopoll.h>
#include <linux/module.h>
@@ -27,15 +28,14 @@
#define SDHCI_CDNS_HRS04_ACK BIT(26)
#define SDHCI_CDNS_HRS04_RD BIT(25)
#define SDHCI_CDNS_HRS04_WR BIT(24)
-#define SDHCI_CDNS_HRS04_RDATA_SHIFT 16
-#define SDHCI_CDNS_HRS04_WDATA_SHIFT 8
-#define SDHCI_CDNS_HRS04_ADDR_SHIFT 0
+#define SDHCI_CDNS_HRS04_RDATA GENMASK(23, 16)
+#define SDHCI_CDNS_HRS04_WDATA GENMASK(15, 8)
+#define SDHCI_CDNS_HRS04_ADDR GENMASK(5, 0)
#define SDHCI_CDNS_HRS06 0x18 /* eMMC control */
#define SDHCI_CDNS_HRS06_TUNE_UP BIT(15)
-#define SDHCI_CDNS_HRS06_TUNE_SHIFT 8
-#define SDHCI_CDNS_HRS06_TUNE_MASK 0x3f
-#define SDHCI_CDNS_HRS06_MODE_MASK 0x7
+#define SDHCI_CDNS_HRS06_TUNE GENMASK(13, 8)
+#define SDHCI_CDNS_HRS06_MODE GENMASK(2, 0)
#define SDHCI_CDNS_HRS06_MODE_SD 0x0
#define SDHCI_CDNS_HRS06_MODE_MMC_SDR 0x2
#define SDHCI_CDNS_HRS06_MODE_MMC_DDR 0x3
@@ -105,8 +105,8 @@ static int sdhci_cdns_write_phy_reg(struct sdhci_cdns_priv *priv,
u32 tmp;
int ret;
- tmp = (data << SDHCI_CDNS_HRS04_WDATA_SHIFT) |
- (addr << SDHCI_CDNS_HRS04_ADDR_SHIFT);
+ tmp = FIELD_PREP(SDHCI_CDNS_HRS04_WDATA, data) |
+ FIELD_PREP(SDHCI_CDNS_HRS04_ADDR, addr);
writel(tmp, reg);
tmp |= SDHCI_CDNS_HRS04_WR;
@@ -189,8 +189,8 @@ static void sdhci_cdns_set_emmc_mode(struct sdhci_cdns_priv *priv, u32 mode)
/* The speed mode for eMMC is selected by HRS06 register */
tmp = readl(priv->hrs_addr + SDHCI_CDNS_HRS06);
- tmp &= ~SDHCI_CDNS_HRS06_MODE_MASK;
- tmp |= mode;
+ tmp &= ~SDHCI_CDNS_HRS06_MODE;
+ tmp |= FIELD_PREP(SDHCI_CDNS_HRS06_MODE, mode);
writel(tmp, priv->hrs_addr + SDHCI_CDNS_HRS06);
}
@@ -199,7 +199,7 @@ static u32 sdhci_cdns_get_emmc_mode(struct sdhci_cdns_priv *priv)
u32 tmp;
tmp = readl(priv->hrs_addr + SDHCI_CDNS_HRS06);
- return tmp & SDHCI_CDNS_HRS06_MODE_MASK;
+ return FIELD_GET(SDHCI_CDNS_HRS06_MODE, tmp);
}
static void sdhci_cdns_set_uhs_signaling(struct sdhci_host *host,
@@ -254,12 +254,12 @@ static int sdhci_cdns_set_tune_val(struct sdhci_host *host, unsigned int val)
void __iomem *reg = priv->hrs_addr + SDHCI_CDNS_HRS06;
u32 tmp;
- if (WARN_ON(val > SDHCI_CDNS_HRS06_TUNE_MASK))
+ if (WARN_ON(!FIELD_FIT(SDHCI_CDNS_HRS06_TUNE, val)))
return -EINVAL;
tmp = readl(reg);
- tmp &= ~(SDHCI_CDNS_HRS06_TUNE_MASK << SDHCI_CDNS_HRS06_TUNE_SHIFT);
- tmp |= val << SDHCI_CDNS_HRS06_TUNE_SHIFT;
+ tmp &= ~SDHCI_CDNS_HRS06_TUNE;
+ tmp |= FIELD_PREP(SDHCI_CDNS_HRS06_TUNE, val);
tmp |= SDHCI_CDNS_HRS06_TUNE_UP;
writel(tmp, reg);
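The sdhci-cadence hunks replace open-coded shift/mask pairs with GENMASK() plus the FIELD_PREP()/FIELD_GET() helpers from linux/bitfield.h. A small illustrative sketch; the register layout here is made up for the example:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EX_REG_MODE	GENMASK(2, 0)	/* bits 2..0 */
#define EX_REG_TUNE	GENMASK(13, 8)	/* bits 13..8 */

static u32 ex_pack(u32 mode, u32 tune)
{
	/* FIELD_PREP() shifts each value into its mask's bit position. */
	return FIELD_PREP(EX_REG_MODE, mode) | FIELD_PREP(EX_REG_TUNE, tune);
}

static u32 ex_unpack_mode(u32 reg)
{
	/* FIELD_GET() masks the field and shifts it back down to bit 0. */
	return FIELD_GET(EX_REG_MODE, reg);
}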
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index fc73e56eb1e2..3fb7d2eec93f 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -123,14 +123,17 @@
#define CMUX_SHIFT_PHASE_MASK (7 << CMUX_SHIFT_PHASE_SHIFT)
#define MSM_MMC_AUTOSUSPEND_DELAY_MS 50
+
+/* Timeout value to avoid infinite waiting for pwr_irq */
+#define MSM_PWR_IRQ_TIMEOUT_MS 5000
+
struct sdhci_msm_host {
struct platform_device *pdev;
void __iomem *core_mem; /* MSM SDCC mapped address */
int pwr_irq; /* power irq */
- struct clk *clk; /* main SD/MMC bus clock */
- struct clk *pclk; /* SDHC peripheral bus clock */
struct clk *bus_clk; /* SDHC bus voter clock */
struct clk *xo_clk; /* TCXO clk needed for FLL feature of cm_dll*/
+ struct clk_bulk_data bulk_clks[4]; /* core, iface, cal, sleep clocks */
unsigned long clk_rate;
struct mmc_host *mmc;
bool use_14lpp_dll_reset;
@@ -138,6 +141,10 @@ struct sdhci_msm_host {
bool calibration_done;
u8 saved_tuning_phase;
bool use_cdclp533;
+ u32 curr_pwr_state;
+ u32 curr_io_level;
+ wait_queue_head_t pwr_irq_wait;
+ bool pwr_irq_flag;
};
static unsigned int msm_get_clock_rate_for_bus_mode(struct sdhci_host *host,
@@ -164,10 +171,11 @@ static void msm_set_clock_rate_for_bus_mode(struct sdhci_host *host,
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
struct mmc_ios curr_ios = host->mmc->ios;
+ struct clk *core_clk = msm_host->bulk_clks[0].clk;
int rc;
clock = msm_get_clock_rate_for_bus_mode(host, clock);
- rc = clk_set_rate(msm_host->clk, clock);
+ rc = clk_set_rate(core_clk, clock);
if (rc) {
pr_err("%s: Failed to set clock at rate %u at timing %d\n",
mmc_hostname(host->mmc), clock,
@@ -176,7 +184,7 @@ static void msm_set_clock_rate_for_bus_mode(struct sdhci_host *host,
}
msm_host->clk_rate = clock;
pr_debug("%s: Setting clock at rate %lu at timing %d\n",
- mmc_hostname(host->mmc), clk_get_rate(msm_host->clk),
+ mmc_hostname(host->mmc), clk_get_rate(core_clk),
curr_ios.timing);
}
@@ -995,21 +1003,142 @@ static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
sdhci_msm_hs400(host, &mmc->ios);
}
-static void sdhci_msm_voltage_switch(struct sdhci_host *host)
+static inline void sdhci_msm_init_pwr_irq_wait(struct sdhci_msm_host *msm_host)
+{
+ init_waitqueue_head(&msm_host->pwr_irq_wait);
+}
+
+static inline void sdhci_msm_complete_pwr_irq_wait(
+ struct sdhci_msm_host *msm_host)
+{
+ wake_up(&msm_host->pwr_irq_wait);
+}
+
+/*
+ * The sdhci_msm_check_power_status API should be called after register writes
+ * that can toggle the sdhci IO bus ON/OFF or change IO lines HIGH/LOW happen.
+ * The state to which the register writes will change the IO lines should be
+ * passed as the argument req_type. This API checks whether the IO line's
+ * state is already the expected one, and waits for the power irq only if the
+ * power irq is expected to be triggered based on the current and expected
+ * IO line states.
+ */
+static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ bool done = false;
+
+ pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
+ mmc_hostname(host->mmc), __func__, req_type,
+ msm_host->curr_pwr_state, msm_host->curr_io_level);
+
+ /*
+ * The IRQ for the IO HIGH/LOW request types is generated only when
+ * there is a state change in the 1.8V enable bit (bit 3) of the
+ * SDHCI_HOST_CONTROL2 register. The reset state of that bit is 0,
+ * which indicates 3.3V IO voltage. So, when the MMC core layer tries
+ * to set it to 3.3V before card detection happens, the
+ * IRQ doesn't get triggered as there is no state change in this bit.
+ * The driver already handles this case by changing the IO voltage
+ * level to high as part of the controller power up sequence. Hence,
+ * check host->pwr to handle the case where an IO voltage high request
+ * is issued even before the controller is powered up.
+ */
+ if ((req_type & REQ_IO_HIGH) && !host->pwr) {
+ pr_debug("%s: do not wait for power IRQ that never comes, req_type: %d\n",
+ mmc_hostname(host->mmc), req_type);
+ return;
+ }
+ if ((req_type & msm_host->curr_pwr_state) ||
+ (req_type & msm_host->curr_io_level))
+ done = true;
+ /*
+ * This is needed here to handle cases where register writes will
+ * not change the current bus state or io level of the controller.
+ * In this case, no power irq will be triggered and we should
+ * not wait.
+ */
+ if (!done) {
+ if (!wait_event_timeout(msm_host->pwr_irq_wait,
+ msm_host->pwr_irq_flag,
+ msecs_to_jiffies(MSM_PWR_IRQ_TIMEOUT_MS)))
+ dev_warn(&msm_host->pdev->dev,
+ "%s: pwr_irq for req: (%d) timed out\n",
+ mmc_hostname(host->mmc), req_type);
+ }
+ pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
+ __func__, req_type);
+}
+
+static void sdhci_msm_dump_pwr_ctrl_regs(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+
+ pr_err("%s: PWRCTL_STATUS: 0x%08x | PWRCTL_MASK: 0x%08x | PWRCTL_CTL: 0x%08x\n",
+ mmc_hostname(host->mmc),
+ readl_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS),
+ readl_relaxed(msm_host->core_mem + CORE_PWRCTL_MASK),
+ readl_relaxed(msm_host->core_mem + CORE_PWRCTL_CTL));
+}
+
+static void sdhci_msm_handle_pwr_irq(struct sdhci_host *host, int irq)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
u32 irq_status, irq_ack = 0;
+ int retry = 10;
+ int pwr_state = 0, io_level = 0;
+
irq_status = readl_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS);
irq_status &= INT_MASK;
writel_relaxed(irq_status, msm_host->core_mem + CORE_PWRCTL_CLEAR);
- if (irq_status & (CORE_PWRCTL_BUS_ON | CORE_PWRCTL_BUS_OFF))
+ /*
+ * There is a rare HW scenario where the first clear pulse could be
+ * lost when the actual reset and the clear/read of the status register
+ * happen at the same time. Hence, retry up to 10 times to make
+ * sure the status register is cleared. Otherwise, this will result in
+ * a spurious power IRQ and system instability.
+ */
+ while (irq_status & readl_relaxed(msm_host->core_mem +
+ CORE_PWRCTL_STATUS)) {
+ if (retry == 0) {
+ pr_err("%s: Timed out clearing (0x%x) pwrctl status register\n",
+ mmc_hostname(host->mmc), irq_status);
+ sdhci_msm_dump_pwr_ctrl_regs(host);
+ WARN_ON(1);
+ break;
+ }
+ writel_relaxed(irq_status,
+ msm_host->core_mem + CORE_PWRCTL_CLEAR);
+ retry--;
+ udelay(10);
+ }
+
+ /* Handle BUS ON/OFF */
+ if (irq_status & CORE_PWRCTL_BUS_ON) {
+ pwr_state = REQ_BUS_ON;
+ io_level = REQ_IO_HIGH;
+ irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
+ }
+ if (irq_status & CORE_PWRCTL_BUS_OFF) {
+ pwr_state = REQ_BUS_OFF;
+ io_level = REQ_IO_LOW;
irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
- if (irq_status & (CORE_PWRCTL_IO_LOW | CORE_PWRCTL_IO_HIGH))
+ }
+ /* Handle IO LOW/HIGH */
+ if (irq_status & CORE_PWRCTL_IO_LOW) {
+ io_level = REQ_IO_LOW;
+ irq_ack |= CORE_PWRCTL_IO_SUCCESS;
+ }
+ if (irq_status & CORE_PWRCTL_IO_HIGH) {
+ io_level = REQ_IO_HIGH;
irq_ack |= CORE_PWRCTL_IO_SUCCESS;
+ }
/*
* The driver has to acknowledge the interrupt, switch voltages and
@@ -1017,13 +1146,27 @@ static void sdhci_msm_voltage_switch(struct sdhci_host *host)
* switches are handled by the sdhci core, so just report success.
*/
writel_relaxed(irq_ack, msm_host->core_mem + CORE_PWRCTL_CTL);
+
+ if (pwr_state)
+ msm_host->curr_pwr_state = pwr_state;
+ if (io_level)
+ msm_host->curr_io_level = io_level;
+
+ pr_debug("%s: %s: Handled IRQ(%d), irq_status=0x%x, ack=0x%x\n",
+ mmc_hostname(msm_host->mmc), __func__, irq, irq_status,
+ irq_ack);
}
static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
{
struct sdhci_host *host = (struct sdhci_host *)data;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+
+ sdhci_msm_handle_pwr_irq(host, irq);
+ msm_host->pwr_irq_flag = 1;
+ sdhci_msm_complete_pwr_irq_wait(msm_host);
- sdhci_msm_voltage_switch(host);
return IRQ_HANDLED;
}
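The power-irq rework above pairs wait_event_timeout() in the register-write path with wake_up() from the irq handler. A stripped-down sketch of that pairing, with illustrative names only:

#include <linux/jiffies.h>
#include <linux/printk.h>
#include <linux/wait.h>

struct ex_host {
	wait_queue_head_t irq_wait;
	bool irq_flag;
};

static void ex_host_init(struct ex_host *h)
{
	init_waitqueue_head(&h->irq_wait);
}

/* Register-write path: sleep until the irq handler runs, or time out. */
static void ex_wait_for_irq(struct ex_host *h, unsigned int timeout_ms)
{
	h->irq_flag = false;
	if (!wait_event_timeout(h->irq_wait, h->irq_flag,
				msecs_to_jiffies(timeout_ms)))
		pr_warn("example: irq wait timed out\n");
}

/* Irq handler path: record completion and wake the waiter. */
static void ex_irq_done(struct ex_host *h)
{
	h->irq_flag = true;
	wake_up(&h->irq_wait);
}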
@@ -1032,8 +1175,9 @@ static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ struct clk *core_clk = msm_host->bulk_clks[0].clk;
- return clk_round_rate(msm_host->clk, ULONG_MAX);
+ return clk_round_rate(core_clk, ULONG_MAX);
}
static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host)
@@ -1092,6 +1236,69 @@ out:
__sdhci_msm_set_clock(host, clock);
}
+/*
+ * Platform specific register write functions. If any register write needs
+ * to be followed up by platform specific actions, they can be added here.
+ * These functions can go to sleep when writes to certain registers are
+ * done.
+ * These functions rely on sdhci_set_ios not taking a spinlock.
+ */
+static int __sdhci_msm_check_write(struct sdhci_host *host, u16 val, int reg)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ u32 req_type = 0;
+
+ switch (reg) {
+ case SDHCI_HOST_CONTROL2:
+ req_type = (val & SDHCI_CTRL_VDD_180) ? REQ_IO_LOW :
+ REQ_IO_HIGH;
+ break;
+ case SDHCI_SOFTWARE_RESET:
+ if (host->pwr && (val & SDHCI_RESET_ALL))
+ req_type = REQ_BUS_OFF;
+ break;
+ case SDHCI_POWER_CONTROL:
+ req_type = !val ? REQ_BUS_OFF : REQ_BUS_ON;
+ break;
+ }
+
+ if (req_type) {
+ msm_host->pwr_irq_flag = 0;
+ /*
+ * Since this register write may trigger a power irq, ensure
+ * all previous register writes are complete by this point.
+ */
+ mb();
+ }
+ return req_type;
+}
+
+/* This function may sleep */
+static void sdhci_msm_writew(struct sdhci_host *host, u16 val, int reg)
+{
+ u32 req_type = 0;
+
+ req_type = __sdhci_msm_check_write(host, val, reg);
+ writew_relaxed(val, host->ioaddr + reg);
+
+ if (req_type)
+ sdhci_msm_check_power_status(host, req_type);
+}
+
+/* This function may sleep */
+static void sdhci_msm_writeb(struct sdhci_host *host, u8 val, int reg)
+{
+ u32 req_type = 0;
+
+ req_type = __sdhci_msm_check_write(host, val, reg);
+
+ writeb_relaxed(val, host->ioaddr + reg);
+
+ if (req_type)
+ sdhci_msm_check_power_status(host, req_type);
+}
+
static const struct of_device_id sdhci_msm_dt_match[] = {
{ .compatible = "qcom,sdhci-msm-v4" },
{},
@@ -1106,7 +1313,8 @@ static const struct sdhci_ops sdhci_msm_ops = {
.get_max_clock = sdhci_msm_get_max_clock,
.set_bus_width = sdhci_set_bus_width,
.set_uhs_signaling = sdhci_msm_set_uhs_signaling,
- .voltage_switch = sdhci_msm_voltage_switch,
+ .write_w = sdhci_msm_writew,
+ .write_b = sdhci_msm_writeb,
};
static const struct sdhci_pltfm_data sdhci_msm_pdata = {
@@ -1124,6 +1332,7 @@ static int sdhci_msm_probe(struct platform_device *pdev)
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_msm_host *msm_host;
struct resource *core_memres;
+ struct clk *clk;
int ret;
u16 host_version, core_minor;
u32 core_version, config;
@@ -1160,24 +1369,42 @@ static int sdhci_msm_probe(struct platform_device *pdev)
}
/* Setup main peripheral bus clock */
- msm_host->pclk = devm_clk_get(&pdev->dev, "iface");
- if (IS_ERR(msm_host->pclk)) {
- ret = PTR_ERR(msm_host->pclk);
+ clk = devm_clk_get(&pdev->dev, "iface");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
dev_err(&pdev->dev, "Peripheral clk setup failed (%d)\n", ret);
goto bus_clk_disable;
}
-
- ret = clk_prepare_enable(msm_host->pclk);
- if (ret)
- goto bus_clk_disable;
+ msm_host->bulk_clks[1].clk = clk;
/* Setup SDC MMC clock */
- msm_host->clk = devm_clk_get(&pdev->dev, "core");
- if (IS_ERR(msm_host->clk)) {
- ret = PTR_ERR(msm_host->clk);
+ clk = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
dev_err(&pdev->dev, "SDC MMC clk setup failed (%d)\n", ret);
- goto pclk_disable;
+ goto bus_clk_disable;
}
+ msm_host->bulk_clks[0].clk = clk;
+
+ /* Vote for maximum clock rate for maximum performance */
+ ret = clk_set_rate(clk, INT_MAX);
+ if (ret)
+ dev_warn(&pdev->dev, "core clock boost failed\n");
+
+ clk = devm_clk_get(&pdev->dev, "cal");
+ if (IS_ERR(clk))
+ clk = NULL;
+ msm_host->bulk_clks[2].clk = clk;
+
+ clk = devm_clk_get(&pdev->dev, "sleep");
+ if (IS_ERR(clk))
+ clk = NULL;
+ msm_host->bulk_clks[3].clk = clk;
+
+ ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
+ msm_host->bulk_clks);
+ if (ret)
+ goto bus_clk_disable;
/*
* xo clock is needed for FLL feature of cm_dll.
@@ -1189,15 +1416,6 @@ static int sdhci_msm_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "TCXO clk not present (%d)\n", ret);
}
- /* Vote for maximum clock rate for maximum performance */
- ret = clk_set_rate(msm_host->clk, INT_MAX);
- if (ret)
- dev_warn(&pdev->dev, "core clock boost failed\n");
-
- ret = clk_prepare_enable(msm_host->clk);
- if (ret)
- goto pclk_disable;
-
core_memres = platform_get_resource(pdev, IORESOURCE_MEM, 1);
msm_host->core_mem = devm_ioremap_resource(&pdev->dev, core_memres);
@@ -1251,6 +1469,21 @@ static int sdhci_msm_probe(struct platform_device *pdev)
CORE_VENDOR_SPEC_CAPABILITIES0);
}
+ /*
+ * The power-on reset state may trigger a power irq if the previous status
+ * of PWRCTL was either BUS_ON or IO_HIGH_V. So before enabling the pwr irq
+ * interrupt in the GIC, any pending power irq interrupt should be
+ * acknowledged. Otherwise the power irq interrupt handler would fire
+ * prematurely.
+ */
+ sdhci_msm_handle_pwr_irq(host, 0);
+
+ /*
+ * Ensure that the above writes are propagated before the interrupt is
+ * enabled in the GIC.
+ */
+ mb();
+
/* Setup IRQ for handling power/voltage tasks with PMIC */
msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
if (msm_host->pwr_irq < 0) {
@@ -1260,6 +1493,10 @@ static int sdhci_msm_probe(struct platform_device *pdev)
goto clk_disable;
}
+ sdhci_msm_init_pwr_irq_wait(msm_host);
+ /* Enable pwr irq interrupts */
+ writel_relaxed(INT_MASK, msm_host->core_mem + CORE_PWRCTL_MASK);
+
ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
sdhci_msm_pwr_irq, IRQF_ONESHOT,
dev_name(&pdev->dev), host);
@@ -1290,9 +1527,8 @@ pm_runtime_disable:
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
clk_disable:
- clk_disable_unprepare(msm_host->clk);
-pclk_disable:
- clk_disable_unprepare(msm_host->pclk);
+ clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
+ msm_host->bulk_clks);
bus_clk_disable:
if (!IS_ERR(msm_host->bus_clk))
clk_disable_unprepare(msm_host->bus_clk);
@@ -1315,8 +1551,8 @@ static int sdhci_msm_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
- clk_disable_unprepare(msm_host->clk);
- clk_disable_unprepare(msm_host->pclk);
+ clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
+ msm_host->bulk_clks);
if (!IS_ERR(msm_host->bus_clk))
clk_disable_unprepare(msm_host->bus_clk);
sdhci_pltfm_free(pdev);
@@ -1330,8 +1566,8 @@ static int sdhci_msm_runtime_suspend(struct device *dev)
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
- clk_disable_unprepare(msm_host->clk);
- clk_disable_unprepare(msm_host->pclk);
+ clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
+ msm_host->bulk_clks);
return 0;
}
@@ -1341,21 +1577,9 @@ static int sdhci_msm_runtime_resume(struct device *dev)
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
- int ret;
- ret = clk_prepare_enable(msm_host->clk);
- if (ret) {
- dev_err(dev, "clk_enable failed for core_clk: %d\n", ret);
- return ret;
- }
- ret = clk_prepare_enable(msm_host->pclk);
- if (ret) {
- dev_err(dev, "clk_enable failed for iface_clk: %d\n", ret);
- clk_disable_unprepare(msm_host->clk);
- return ret;
- }
-
- return 0;
+ return clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
+ msm_host->bulk_clks);
}
#endif
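The sdhci-msm clock handling now keeps its clocks in a clk_bulk_data array so that they can be enabled and disabled in a single call. The patch fills the .clk members by hand because two of the clocks are optional; a simpler sketch using devm_clk_bulk_get() with made-up clock names:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/kernel.h>

static struct clk_bulk_data ex_clks[] = {
	{ .id = "core" },
	{ .id = "iface" },
};

static int ex_clocks_on(struct device *dev)
{
	int ret;

	/* Fetch every named clock at once; all of them must exist here. */
	ret = devm_clk_bulk_get(dev, ARRAY_SIZE(ex_clks), ex_clks);
	if (ret)
		return ret;

	/* Prepare and enable the whole array in one call. */
	return clk_bulk_prepare_enable(ARRAY_SIZE(ex_clks), ex_clks);
}

static void ex_clocks_off(void)
{
	clk_bulk_disable_unprepare(ARRAY_SIZE(ex_clks), ex_clks);
}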
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index 4e47ed6bc716..682c573e20a7 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -114,7 +114,8 @@ static void sdhci_at91_set_power(struct sdhci_host *host, unsigned char mode,
sdhci_set_power_noreg(host, mode, vdd);
}
-void sdhci_at91_set_uhs_signaling(struct sdhci_host *host, unsigned int timing)
+static void sdhci_at91_set_uhs_signaling(struct sdhci_host *host,
+ unsigned int timing)
{
if (timing == MMC_TIMING_MMC_DDR52)
sdhci_writeb(host, SDMMC_MC1R_DDR, SDMMC_MC1R);
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index d96a057a7db8..1f424374bbbb 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -458,6 +458,33 @@ static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
return clock / 256 / 16;
}
+static void esdhc_clock_enable(struct sdhci_host *host, bool enable)
+{
+ u32 val;
+ ktime_t timeout;
+
+ val = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
+
+ if (enable)
+ val |= ESDHC_CLOCK_SDCLKEN;
+ else
+ val &= ~ESDHC_CLOCK_SDCLKEN;
+
+ sdhci_writel(host, val, ESDHC_SYSTEM_CONTROL);
+
+ /* Wait max 20 ms */
+ timeout = ktime_add_ms(ktime_get(), 20);
+ val = ESDHC_CLOCK_STABLE;
+ while (!(sdhci_readl(host, ESDHC_PRSSTAT) & val)) {
+ if (ktime_after(ktime_get(), timeout)) {
+ pr_err("%s: Internal clock never stabilised.\n",
+ mmc_hostname(host->mmc));
+ break;
+ }
+ udelay(10);
+ }
+}
+
static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -469,8 +496,10 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
host->mmc->actual_clock = 0;
- if (clock == 0)
+ if (clock == 0) {
+ esdhc_clock_enable(host, false);
return;
+ }
/* Workaround to start pre_div at 2 for VNN < VENDOR_V_23 */
if (esdhc->vendor_ver < VENDOR_V_23)
@@ -558,33 +587,6 @@ static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
sdhci_writel(host, ctrl, ESDHC_PROCTL);
}
-static void esdhc_clock_enable(struct sdhci_host *host, bool enable)
-{
- u32 val;
- ktime_t timeout;
-
- val = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
-
- if (enable)
- val |= ESDHC_CLOCK_SDCLKEN;
- else
- val &= ~ESDHC_CLOCK_SDCLKEN;
-
- sdhci_writel(host, val, ESDHC_SYSTEM_CONTROL);
-
- /* Wait max 20 ms */
- timeout = ktime_add_ms(ktime_get(), 20);
- val = ESDHC_CLOCK_STABLE;
- while (!(sdhci_readl(host, ESDHC_PRSSTAT) & val)) {
- if (ktime_after(ktime_get(), timeout)) {
- pr_err("%s: Internal clock never stabilised.\n",
- mmc_hostname(host->mmc));
- break;
- }
- udelay(10);
- }
-}
-
static void esdhc_reset(struct sdhci_host *host, u8 mask)
{
sdhci_reset(host, mask);
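esdhc_clock_enable(), moved above its first user in the hunks above, polls a status bit with a ktime-based deadline. A generic sketch of that poll loop; the register and bit names are placeholders:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/ktime.h>

static int ex_wait_for_bit(void __iomem *reg, u32 bit, unsigned int ms)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), ms);

	/* Poll with a small delay until the bit sets or the deadline passes. */
	while (!(readl(reg) & bit)) {
		if (ktime_after(ktime_get(), timeout))
			return -ETIMEDOUT;
		udelay(10);
	}

	return 0;
}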
diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
new file mode 100644
index 000000000000..628bfe9a3d17
--- /dev/null
+++ b/drivers/mmc/host/sdhci-omap.c
@@ -0,0 +1,607 @@
+/**
+ * SDHCI Controller driver for TI's OMAP SoCs
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/delay.h>
+#include <linux/mmc/slot-gpio.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+
+#include "sdhci-pltfm.h"
+
+#define SDHCI_OMAP_CON 0x12c
+#define CON_DW8 BIT(5)
+#define CON_DMA_MASTER BIT(20)
+#define CON_INIT BIT(1)
+#define CON_OD BIT(0)
+
+#define SDHCI_OMAP_CMD 0x20c
+
+#define SDHCI_OMAP_HCTL 0x228
+#define HCTL_SDBP BIT(8)
+#define HCTL_SDVS_SHIFT 9
+#define HCTL_SDVS_MASK (0x7 << HCTL_SDVS_SHIFT)
+#define HCTL_SDVS_33 (0x7 << HCTL_SDVS_SHIFT)
+#define HCTL_SDVS_30 (0x6 << HCTL_SDVS_SHIFT)
+#define HCTL_SDVS_18 (0x5 << HCTL_SDVS_SHIFT)
+
+#define SDHCI_OMAP_SYSCTL 0x22c
+#define SYSCTL_CEN BIT(2)
+#define SYSCTL_CLKD_SHIFT 6
+#define SYSCTL_CLKD_MASK 0x3ff
+
+#define SDHCI_OMAP_STAT 0x230
+
+#define SDHCI_OMAP_IE 0x234
+#define INT_CC_EN BIT(0)
+
+#define SDHCI_OMAP_AC12 0x23c
+#define AC12_V1V8_SIGEN BIT(19)
+
+#define SDHCI_OMAP_CAPA 0x240
+#define CAPA_VS33 BIT(24)
+#define CAPA_VS30 BIT(25)
+#define CAPA_VS18 BIT(26)
+
+#define SDHCI_OMAP_TIMEOUT 1 /* 1 msec */
+
+#define SYSCTL_CLKD_MAX 0x3FF
+
+#define IOV_1V8 1800000 /* 1800000 uV */
+#define IOV_3V0 3000000 /* 3000000 uV */
+#define IOV_3V3 3300000 /* 3300000 uV */
+
+struct sdhci_omap_data {
+ u32 offset;
+};
+
+struct sdhci_omap_host {
+ void __iomem *base;
+ struct device *dev;
+ struct regulator *pbias;
+ bool pbias_enabled;
+ struct sdhci_host *host;
+ u8 bus_mode;
+ u8 power_mode;
+};
+
+static inline u32 sdhci_omap_readl(struct sdhci_omap_host *host,
+ unsigned int offset)
+{
+ return readl(host->base + offset);
+}
+
+static inline void sdhci_omap_writel(struct sdhci_omap_host *host,
+ unsigned int offset, u32 data)
+{
+ writel(data, host->base + offset);
+}
+
+static int sdhci_omap_set_pbias(struct sdhci_omap_host *omap_host,
+ bool power_on, unsigned int iov)
+{
+ int ret;
+ struct device *dev = omap_host->dev;
+
+ if (IS_ERR(omap_host->pbias))
+ return 0;
+
+ if (power_on) {
+ ret = regulator_set_voltage(omap_host->pbias, iov, iov);
+ if (ret) {
+ dev_err(dev, "pbias set voltage failed\n");
+ return ret;
+ }
+
+ if (omap_host->pbias_enabled)
+ return 0;
+
+ ret = regulator_enable(omap_host->pbias);
+ if (ret) {
+ dev_err(dev, "pbias reg enable fail\n");
+ return ret;
+ }
+
+ omap_host->pbias_enabled = true;
+ } else {
+ if (!omap_host->pbias_enabled)
+ return 0;
+
+ ret = regulator_disable(omap_host->pbias);
+ if (ret) {
+ dev_err(dev, "pbias reg disable fail\n");
+ return ret;
+ }
+ omap_host->pbias_enabled = false;
+ }
+
+ return 0;
+}
+
+static int sdhci_omap_enable_iov(struct sdhci_omap_host *omap_host,
+ unsigned int iov)
+{
+ int ret;
+ struct sdhci_host *host = omap_host->host;
+ struct mmc_host *mmc = host->mmc;
+
+ ret = sdhci_omap_set_pbias(omap_host, false, 0);
+ if (ret)
+ return ret;
+
+ if (!IS_ERR(mmc->supply.vqmmc)) {
+ ret = regulator_set_voltage(mmc->supply.vqmmc, iov, iov);
+ if (ret) {
+ dev_err(mmc_dev(mmc), "vqmmc set voltage failed\n");
+ return ret;
+ }
+ }
+
+ ret = sdhci_omap_set_pbias(omap_host, true, iov);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void sdhci_omap_conf_bus_power(struct sdhci_omap_host *omap_host,
+ unsigned char signal_voltage)
+{
+ u32 reg;
+ ktime_t timeout;
+
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_HCTL);
+ reg &= ~HCTL_SDVS_MASK;
+
+ if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
+ reg |= HCTL_SDVS_33;
+ else
+ reg |= HCTL_SDVS_18;
+
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_HCTL, reg);
+
+ reg |= HCTL_SDBP;
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_HCTL, reg);
+
+ /* wait 1ms */
+ timeout = ktime_add_ms(ktime_get(), SDHCI_OMAP_TIMEOUT);
+ while (!(sdhci_omap_readl(omap_host, SDHCI_OMAP_HCTL) & HCTL_SDBP)) {
+ if (WARN_ON(ktime_after(ktime_get(), timeout)))
+ return;
+ usleep_range(5, 10);
+ }
+}
+
+static int sdhci_omap_start_signal_voltage_switch(struct mmc_host *mmc,
+ struct mmc_ios *ios)
+{
+ u32 reg;
+ int ret;
+ unsigned int iov;
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct sdhci_pltfm_host *pltfm_host;
+ struct sdhci_omap_host *omap_host;
+ struct device *dev;
+
+ pltfm_host = sdhci_priv(host);
+ omap_host = sdhci_pltfm_priv(pltfm_host);
+ dev = omap_host->dev;
+
+ if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CAPA);
+ if (!(reg & CAPA_VS33))
+ return -EOPNOTSUPP;
+
+ sdhci_omap_conf_bus_power(omap_host, ios->signal_voltage);
+
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_AC12);
+ reg &= ~AC12_V1V8_SIGEN;
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_AC12, reg);
+
+ iov = IOV_3V3;
+ } else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CAPA);
+ if (!(reg & CAPA_VS18))
+ return -EOPNOTSUPP;
+
+ sdhci_omap_conf_bus_power(omap_host, ios->signal_voltage);
+
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_AC12);
+ reg |= AC12_V1V8_SIGEN;
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_AC12, reg);
+
+ iov = IOV_1V8;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ ret = sdhci_omap_enable_iov(omap_host, iov);
+ if (ret) {
+ dev_err(dev, "failed to switch IO voltage to %duV\n", iov);
+ return ret;
+ }
+
+ dev_dbg(dev, "IO voltage switched to %duV\n", iov);
+ return 0;
+}
+
+static void sdhci_omap_set_bus_mode(struct sdhci_omap_host *omap_host,
+ unsigned int mode)
+{
+ u32 reg;
+
+ if (omap_host->bus_mode == mode)
+ return;
+
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON);
+ if (mode == MMC_BUSMODE_OPENDRAIN)
+ reg |= CON_OD;
+ else
+ reg &= ~CON_OD;
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg);
+
+ omap_host->bus_mode = mode;
+}
+
+static void sdhci_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct sdhci_pltfm_host *pltfm_host;
+ struct sdhci_omap_host *omap_host;
+
+ pltfm_host = sdhci_priv(host);
+ omap_host = sdhci_pltfm_priv(pltfm_host);
+
+ sdhci_omap_set_bus_mode(omap_host, ios->bus_mode);
+ sdhci_set_ios(mmc, ios);
+}
+
+static u16 sdhci_omap_calc_divisor(struct sdhci_pltfm_host *host,
+ unsigned int clock)
+{
+ u16 dsor;
+
+ dsor = DIV_ROUND_UP(clk_get_rate(host->clk), clock);
+ if (dsor > SYSCTL_CLKD_MAX)
+ dsor = SYSCTL_CLKD_MAX;
+
+ return dsor;
+}
+
+static void sdhci_omap_start_clock(struct sdhci_omap_host *omap_host)
+{
+ u32 reg;
+
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_SYSCTL);
+ reg |= SYSCTL_CEN;
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_SYSCTL, reg);
+}
+
+static void sdhci_omap_stop_clock(struct sdhci_omap_host *omap_host)
+{
+ u32 reg;
+
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_SYSCTL);
+ reg &= ~SYSCTL_CEN;
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_SYSCTL, reg);
+}
+
+static void sdhci_omap_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
+ unsigned long clkdiv;
+
+ sdhci_omap_stop_clock(omap_host);
+
+ if (!clock)
+ return;
+
+ clkdiv = sdhci_omap_calc_divisor(pltfm_host, clock);
+ clkdiv = (clkdiv & SYSCTL_CLKD_MASK) << SYSCTL_CLKD_SHIFT;
+ sdhci_enable_clk(host, clkdiv);
+
+ sdhci_omap_start_clock(omap_host);
+}
+
+static void sdhci_omap_set_power(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd)
+{
+ struct mmc_host *mmc = host->mmc;
+
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
+}
+
+static int sdhci_omap_enable_dma(struct sdhci_host *host)
+{
+ u32 reg;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
+
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON);
+ reg |= CON_DMA_MASTER;
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg);
+
+ return 0;
+}
+
+static unsigned int sdhci_omap_get_min_clock(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+
+ return clk_get_rate(pltfm_host->clk) / SYSCTL_CLKD_MAX;
+}
+
+static void sdhci_omap_set_bus_width(struct sdhci_host *host, int width)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
+ u32 reg;
+
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON);
+ if (width == MMC_BUS_WIDTH_8)
+ reg |= CON_DW8;
+ else
+ reg &= ~CON_DW8;
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg);
+
+ sdhci_set_bus_width(host, width);
+}
+
+static void sdhci_omap_init_74_clocks(struct sdhci_host *host, u8 power_mode)
+{
+ u32 reg;
+ ktime_t timeout;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
+
+ if (omap_host->power_mode == power_mode)
+ return;
+
+ if (power_mode != MMC_POWER_ON)
+ return;
+
+ disable_irq(host->irq);
+
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON);
+ reg |= CON_INIT;
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg);
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_CMD, 0x0);
+
+ /* wait 1ms */
+ timeout = ktime_add_ms(ktime_get(), SDHCI_OMAP_TIMEOUT);
+ while (!(sdhci_omap_readl(omap_host, SDHCI_OMAP_STAT) & INT_CC_EN)) {
+ if (WARN_ON(ktime_after(ktime_get(), timeout)))
+ return;
+ usleep_range(5, 10);
+ }
+
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON);
+ reg &= ~CON_INIT;
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg);
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_STAT, INT_CC_EN);
+
+ enable_irq(host->irq);
+
+ omap_host->power_mode = power_mode;
+}
+
+static struct sdhci_ops sdhci_omap_ops = {
+ .set_clock = sdhci_omap_set_clock,
+ .set_power = sdhci_omap_set_power,
+ .enable_dma = sdhci_omap_enable_dma,
+ .get_max_clock = sdhci_pltfm_clk_get_max_clock,
+ .get_min_clock = sdhci_omap_get_min_clock,
+ .set_bus_width = sdhci_omap_set_bus_width,
+ .platform_send_init_74_clocks = sdhci_omap_init_74_clocks,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
+static int sdhci_omap_set_capabilities(struct sdhci_omap_host *omap_host)
+{
+ u32 reg;
+ int ret = 0;
+ struct device *dev = omap_host->dev;
+ struct regulator *vqmmc;
+
+ vqmmc = regulator_get(dev, "vqmmc");
+ if (IS_ERR(vqmmc)) {
+ ret = PTR_ERR(vqmmc);
+ goto reg_put;
+ }
+
+ /* voltage capabilities might be set by the boot loader, clear them */
+ reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CAPA);
+ reg &= ~(CAPA_VS18 | CAPA_VS30 | CAPA_VS33);
+
+ if (regulator_is_supported_voltage(vqmmc, IOV_3V3, IOV_3V3))
+ reg |= CAPA_VS33;
+ if (regulator_is_supported_voltage(vqmmc, IOV_1V8, IOV_1V8))
+ reg |= CAPA_VS18;
+
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_CAPA, reg);
+
+reg_put:
+ regulator_put(vqmmc);
+
+ return ret;
+}
+
+static const struct sdhci_pltfm_data sdhci_omap_pdata = {
+ .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
+ SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
+ SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
+ SDHCI_QUIRK_NO_HISPD_BIT |
+ SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC,
+ .quirks2 = SDHCI_QUIRK2_NO_1_8_V |
+ SDHCI_QUIRK2_ACMD23_BROKEN |
+ SDHCI_QUIRK2_RSP_136_HAS_CRC,
+ .ops = &sdhci_omap_ops,
+};
+
+static const struct sdhci_omap_data dra7_data = {
+ .offset = 0x200,
+};
+
+static const struct of_device_id omap_sdhci_match[] = {
+ { .compatible = "ti,dra7-sdhci", .data = &dra7_data },
+ {},
+};
+MODULE_DEVICE_TABLE(of, omap_sdhci_match);
+
+static int sdhci_omap_probe(struct platform_device *pdev)
+{
+ int ret;
+ u32 offset;
+ struct device *dev = &pdev->dev;
+ struct sdhci_host *host;
+ struct sdhci_pltfm_host *pltfm_host;
+ struct sdhci_omap_host *omap_host;
+ struct mmc_host *mmc;
+ const struct of_device_id *match;
+ struct sdhci_omap_data *data;
+
+ match = of_match_device(omap_sdhci_match, dev);
+ if (!match)
+ return -EINVAL;
+
+ data = (struct sdhci_omap_data *)match->data;
+ if (!data) {
+ dev_err(dev, "no sdhci omap data\n");
+ return -EINVAL;
+ }
+ offset = data->offset;
+
+ host = sdhci_pltfm_init(pdev, &sdhci_omap_pdata,
+ sizeof(*omap_host));
+ if (IS_ERR(host)) {
+ dev_err(dev, "Failed sdhci_pltfm_init\n");
+ return PTR_ERR(host);
+ }
+
+ pltfm_host = sdhci_priv(host);
+ omap_host = sdhci_pltfm_priv(pltfm_host);
+ omap_host->host = host;
+ omap_host->base = host->ioaddr;
+ omap_host->dev = dev;
+ host->ioaddr += offset;
+
+ mmc = host->mmc;
+ ret = mmc_of_parse(mmc);
+ if (ret)
+ goto err_pltfm_free;
+
+ pltfm_host->clk = devm_clk_get(dev, "fck");
+ if (IS_ERR(pltfm_host->clk)) {
+ ret = PTR_ERR(pltfm_host->clk);
+ goto err_pltfm_free;
+ }
+
+ ret = clk_set_rate(pltfm_host->clk, mmc->f_max);
+ if (ret) {
+ dev_err(dev, "failed to set clock to %d\n", mmc->f_max);
+ goto err_pltfm_free;
+ }
+
+ omap_host->pbias = devm_regulator_get_optional(dev, "pbias");
+ if (IS_ERR(omap_host->pbias)) {
+ ret = PTR_ERR(omap_host->pbias);
+ if (ret != -ENODEV)
+ goto err_pltfm_free;
+ dev_dbg(dev, "unable to get pbias regulator %d\n", ret);
+ }
+ omap_host->pbias_enabled = false;
+
+ /*
+ * omap_device_pm_domain has callbacks to enable the main
+ * functional clock, interface clock and also configure the
+ * SYSCONFIG register of omap devices. The callback will be invoked
+ * as part of pm_runtime_get_sync.
+ */
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "pm_runtime_get_sync failed\n");
+ pm_runtime_put_noidle(dev);
+ goto err_rpm_disable;
+ }
+
+ ret = sdhci_omap_set_capabilities(omap_host);
+ if (ret) {
+ dev_err(dev, "failed to set system capabilities\n");
+ goto err_put_sync;
+ }
+
+ host->mmc_host_ops.get_ro = mmc_gpio_get_ro;
+ host->mmc_host_ops.start_signal_voltage_switch =
+ sdhci_omap_start_signal_voltage_switch;
+ host->mmc_host_ops.set_ios = sdhci_omap_set_ios;
+
+ sdhci_read_caps(host);
+ host->caps |= SDHCI_CAN_DO_ADMA2;
+
+ ret = sdhci_add_host(host);
+ if (ret)
+ goto err_put_sync;
+
+ return 0;
+
+err_put_sync:
+ pm_runtime_put_sync(dev);
+
+err_rpm_disable:
+ pm_runtime_disable(dev);
+
+err_pltfm_free:
+ sdhci_pltfm_free(pdev);
+ return ret;
+}
+
+static int sdhci_omap_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+
+ sdhci_remove_host(host, true);
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+ sdhci_pltfm_free(pdev);
+
+ return 0;
+}
+
+static struct platform_driver sdhci_omap_driver = {
+ .probe = sdhci_omap_probe,
+ .remove = sdhci_omap_remove,
+ .driver = {
+ .name = "sdhci-omap",
+ .of_match_table = omap_sdhci_match,
+ },
+};
+
+module_platform_driver(sdhci_omap_driver);
+
+MODULE_DESCRIPTION("SDHCI driver for OMAP SoCs");
+MODULE_AUTHOR("Texas Instruments Inc.");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:sdhci_omap");
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 67d787fa3306..3e4f04fd5175 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -32,7 +32,6 @@
#include "sdhci.h"
#include "sdhci-pci.h"
-#include "sdhci-pci-o2micro.h"
static int sdhci_pci_enable_dma(struct sdhci_host *host);
static void sdhci_pci_hw_reset(struct sdhci_host *host);
@@ -798,15 +797,6 @@ static const struct sdhci_pci_fixes sdhci_intel_mrfld_mmc = {
.probe_slot = intel_mrfld_mmc_probe_slot,
};
-/* O2Micro extra registers */
-#define O2_SD_LOCK_WP 0xD3
-#define O2_SD_MULTI_VCC3V 0xEE
-#define O2_SD_CLKREQ 0xEC
-#define O2_SD_CAPS 0xE0
-#define O2_SD_ADMA1 0xE2
-#define O2_SD_ADMA2 0xE7
-#define O2_SD_INF_MOD 0xF1
-
static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
{
u8 scratch;
@@ -1290,6 +1280,7 @@ static const struct pci_device_id pci_ids[] = {
SDHCI_PCI_DEVICE(INTEL, SPT_SDIO, intel_byt_sdio),
SDHCI_PCI_DEVICE(INTEL, SPT_SD, intel_byt_sd),
SDHCI_PCI_DEVICE(INTEL, DNV_EMMC, intel_byt_emmc),
+ SDHCI_PCI_DEVICE(INTEL, CDF_EMMC, intel_glk_emmc),
SDHCI_PCI_DEVICE(INTEL, BXT_EMMC, intel_byt_emmc),
SDHCI_PCI_DEVICE(INTEL, BXT_SDIO, intel_byt_sdio),
SDHCI_PCI_DEVICE(INTEL, BXT_SD, intel_byt_sd),
diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
index 14273ca00641..555970a29c94 100644
--- a/drivers/mmc/host/sdhci-pci-o2micro.c
+++ b/drivers/mmc/host/sdhci-pci-o2micro.c
@@ -19,7 +19,40 @@
#include "sdhci.h"
#include "sdhci-pci.h"
-#include "sdhci-pci-o2micro.h"
+
+/*
+ * O2Micro device registers
+ */
+
+#define O2_SD_MISC_REG5 0x64
+#define O2_SD_LD0_CTRL 0x68
+#define O2_SD_DEV_CTRL 0x88
+#define O2_SD_LOCK_WP 0xD3
+#define O2_SD_TEST_REG 0xD4
+#define O2_SD_FUNC_REG0 0xDC
+#define O2_SD_MULTI_VCC3V 0xEE
+#define O2_SD_CLKREQ 0xEC
+#define O2_SD_CAPS 0xE0
+#define O2_SD_ADMA1 0xE2
+#define O2_SD_ADMA2 0xE7
+#define O2_SD_INF_MOD 0xF1
+#define O2_SD_MISC_CTRL4 0xFC
+#define O2_SD_TUNING_CTRL 0x300
+#define O2_SD_PLL_SETTING 0x304
+#define O2_SD_CLK_SETTING 0x328
+#define O2_SD_CAP_REG2 0x330
+#define O2_SD_CAP_REG0 0x334
+#define O2_SD_UHS1_CAP_SETTING 0x33C
+#define O2_SD_DELAY_CTRL 0x350
+#define O2_SD_UHS2_L1_CTRL 0x35C
+#define O2_SD_FUNC_REG3 0x3E0
+#define O2_SD_FUNC_REG4 0x3E4
+#define O2_SD_LED_ENABLE BIT(6)
+#define O2_SD_FREG0_LEDOFF BIT(13)
+#define O2_SD_FREG4_ENABLE_CLK_SET BIT(22)
+
+#define O2_SD_VENDOR_SETTING 0x110
+#define O2_SD_VENDOR_SETTING2 0x1C8
static void o2_pci_set_baseclk(struct sdhci_pci_chip *chip, u32 value)
{
diff --git a/drivers/mmc/host/sdhci-pci-o2micro.h b/drivers/mmc/host/sdhci-pci-o2micro.h
deleted file mode 100644
index 770f53857211..000000000000
--- a/drivers/mmc/host/sdhci-pci-o2micro.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright (C) 2013 BayHub Technology Ltd.
- *
- * Authors: Peter Guo <peter.guo@bayhubtech.com>
- * Adam Lee <adam.lee@canonical.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef __SDHCI_PCI_O2MICRO_H
-#define __SDHCI_PCI_O2MICRO_H
-
-#include "sdhci-pci.h"
-
-/*
- * O2Micro device IDs
- */
-
-#define PCI_DEVICE_ID_O2_SDS0 0x8420
-#define PCI_DEVICE_ID_O2_SDS1 0x8421
-#define PCI_DEVICE_ID_O2_FUJIN2 0x8520
-#define PCI_DEVICE_ID_O2_SEABIRD0 0x8620
-#define PCI_DEVICE_ID_O2_SEABIRD1 0x8621
-
-/*
- * O2Micro device registers
- */
-
-#define O2_SD_MISC_REG5 0x64
-#define O2_SD_LD0_CTRL 0x68
-#define O2_SD_DEV_CTRL 0x88
-#define O2_SD_LOCK_WP 0xD3
-#define O2_SD_TEST_REG 0xD4
-#define O2_SD_FUNC_REG0 0xDC
-#define O2_SD_MULTI_VCC3V 0xEE
-#define O2_SD_CLKREQ 0xEC
-#define O2_SD_CAPS 0xE0
-#define O2_SD_ADMA1 0xE2
-#define O2_SD_ADMA2 0xE7
-#define O2_SD_INF_MOD 0xF1
-#define O2_SD_MISC_CTRL4 0xFC
-#define O2_SD_TUNING_CTRL 0x300
-#define O2_SD_PLL_SETTING 0x304
-#define O2_SD_CLK_SETTING 0x328
-#define O2_SD_CAP_REG2 0x330
-#define O2_SD_CAP_REG0 0x334
-#define O2_SD_UHS1_CAP_SETTING 0x33C
-#define O2_SD_DELAY_CTRL 0x350
-#define O2_SD_UHS2_L1_CTRL 0x35C
-#define O2_SD_FUNC_REG3 0x3E0
-#define O2_SD_FUNC_REG4 0x3E4
-#define O2_SD_LED_ENABLE BIT(6)
-#define O2_SD_FREG0_LEDOFF BIT(13)
-#define O2_SD_FREG4_ENABLE_CLK_SET BIT(22)
-
-#define O2_SD_VENDOR_SETTING 0x110
-#define O2_SD_VENDOR_SETTING2 0x1C8
-
-extern int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot);
-
-extern int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip);
-
-extern int sdhci_pci_o2_resume(struct sdhci_pci_chip *chip);
-
-#endif /* __SDHCI_PCI_O2MICRO_H */
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index 3e8ea3e566f6..0056f08a29cc 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -6,6 +6,12 @@
* PCI device IDs, sub IDs
*/
+#define PCI_DEVICE_ID_O2_SDS0 0x8420
+#define PCI_DEVICE_ID_O2_SDS1 0x8421
+#define PCI_DEVICE_ID_O2_FUJIN2 0x8520
+#define PCI_DEVICE_ID_O2_SEABIRD0 0x8620
+#define PCI_DEVICE_ID_O2_SEABIRD1 0x8621
+
#define PCI_DEVICE_ID_INTEL_PCH_SDIO0 0x8809
#define PCI_DEVICE_ID_INTEL_PCH_SDIO1 0x880a
#define PCI_DEVICE_ID_INTEL_BYT_EMMC 0x0f14
@@ -26,6 +32,7 @@
#define PCI_DEVICE_ID_INTEL_SPT_SDIO 0x9d2c
#define PCI_DEVICE_ID_INTEL_SPT_SD 0x9d2d
#define PCI_DEVICE_ID_INTEL_DNV_EMMC 0x19db
+#define PCI_DEVICE_ID_INTEL_CDF_EMMC 0x18db
#define PCI_DEVICE_ID_INTEL_BXT_SD 0x0aca
#define PCI_DEVICE_ID_INTEL_BXT_EMMC 0x0acc
#define PCI_DEVICE_ID_INTEL_BXT_SDIO 0x0ad0
@@ -164,4 +171,10 @@ static inline void *sdhci_pci_priv(struct sdhci_pci_slot *slot)
int sdhci_pci_resume_host(struct sdhci_pci_chip *chip);
#endif
+int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot);
+int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip);
+#ifdef CONFIG_PM_SLEEP
+int sdhci_pci_o2_resume(struct sdhci_pci_chip *chip);
+#endif
+
#endif /* __SDHCI_PCI_H */
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index d328fcf284d1..cda83ccb2702 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -761,32 +761,24 @@ static const struct dev_pm_ops sdhci_s3c_pmops = {
NULL)
};
-#if defined(CONFIG_CPU_EXYNOS4210) || defined(CONFIG_SOC_EXYNOS4212)
-static struct sdhci_s3c_drv_data exynos4_sdhci_drv_data = {
- .no_divider = true,
-};
-#define EXYNOS4_SDHCI_DRV_DATA ((kernel_ulong_t)&exynos4_sdhci_drv_data)
-#else
-#define EXYNOS4_SDHCI_DRV_DATA ((kernel_ulong_t)NULL)
-#endif
-
static const struct platform_device_id sdhci_s3c_driver_ids[] = {
{
.name = "s3c-sdhci",
.driver_data = (kernel_ulong_t)NULL,
- }, {
- .name = "exynos4-sdhci",
- .driver_data = EXYNOS4_SDHCI_DRV_DATA,
},
{ }
};
MODULE_DEVICE_TABLE(platform, sdhci_s3c_driver_ids);
#ifdef CONFIG_OF
+static struct sdhci_s3c_drv_data exynos4_sdhci_drv_data = {
+ .no_divider = true,
+};
+
static const struct of_device_id sdhci_s3c_dt_match[] = {
{ .compatible = "samsung,s3c6410-sdhci", },
{ .compatible = "samsung,exynos4210-sdhci",
- .data = (void *)EXYNOS4_SDHCI_DRV_DATA },
+ .data = &exynos4_sdhci_drv_data },
{},
};
MODULE_DEVICE_TABLE(of, sdhci_s3c_dt_match);
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 0cd6fa80db66..b877c13184c2 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -422,7 +422,15 @@ static const struct sdhci_pltfm_data sdhci_tegra186_pdata = {
SDHCI_QUIRK_NO_HISPD_BIT |
SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
- .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ /* SDHCI controllers on Tegra186 support 40-bit addressing,
+ * while IOVA addresses on Tegra186 are 48 bits wide. With a
+ * 64-bit DMA mask, SDHCI accesses can therefore be broken.
+ * Disable 64-bit DMA so the driver falls back to a 32-bit
+ * DMA mask; ideally a 40-bit DMA mask would be used, but
+ * that is not supported yet.
+ */
+ SDHCI_QUIRK2_BROKEN_64_BIT_DMA,
.ops = &tegra114_sdhci_ops,
};
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 0d5fcca18c9e..2f14334e42df 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -2407,12 +2407,12 @@ static void sdhci_tasklet_finish(unsigned long param)
;
}
-static void sdhci_timeout_timer(unsigned long data)
+static void sdhci_timeout_timer(struct timer_list *t)
{
struct sdhci_host *host;
unsigned long flags;
- host = (struct sdhci_host*)data;
+ host = from_timer(host, t, timer);
spin_lock_irqsave(&host->lock, flags);
@@ -2429,12 +2429,12 @@ static void sdhci_timeout_timer(unsigned long data)
spin_unlock_irqrestore(&host->lock, flags);
}
-static void sdhci_timeout_data_timer(unsigned long data)
+static void sdhci_timeout_data_timer(struct timer_list *t)
{
struct sdhci_host *host;
unsigned long flags;
- host = (struct sdhci_host *)data;
+ host = from_timer(host, t, data_timer);
spin_lock_irqsave(&host->lock, flags);
@@ -3238,7 +3238,7 @@ int sdhci_setup_host(struct sdhci_host *host)
* available.
*/
ret = mmc_regulator_get_supply(mmc);
- if (ret == -EPROBE_DEFER)
+ if (ret)
return ret;
DBG("Version: 0x%08x | Present: 0x%08x\n",
@@ -3749,9 +3749,8 @@ int __sdhci_add_host(struct sdhci_host *host)
tasklet_init(&host->finish_tasklet,
sdhci_tasklet_finish, (unsigned long)host);
- setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);
- setup_timer(&host->data_timer, sdhci_timeout_data_timer,
- (unsigned long)host);
+ timer_setup(&host->timer, sdhci_timeout_timer, 0);
+ timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0);
init_waitqueue_head(&host->buf_ready_int);
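
The sdhci.c hunks above, like the later tifm_sd.c, via-sdmmc.c, vub300.c and wbsd.c hunks, all apply the same timer API conversion: the callback now takes a struct timer_list * and recovers its containing structure with from_timer(), while setup_timer()/init_timer() plus manual .data/.function assignment becomes timer_setup(). A minimal sketch of the pattern, using hypothetical names (struct my_host, my_timeout, a "timer" member):

static void my_timeout(struct timer_list *t)
{
	/* from_timer() maps the timer_list pointer back to the structure
	 * that embeds it, here through the hypothetical "timer" member.
	 */
	struct my_host *host = from_timer(host, t, timer);

	/* ... handle the timeout using host ... */
}

/* At init time the callback and flags are passed directly; there is no
 * longer a .data/.function assignment.
 */
timer_setup(&host->timer, my_timeout, 0);
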
diff --git a/drivers/mmc/host/sdhci_f_sdh30.c b/drivers/mmc/host/sdhci_f_sdh30.c
index 111b66f5439b..04ca0d33a521 100644
--- a/drivers/mmc/host/sdhci_f_sdh30.c
+++ b/drivers/mmc/host/sdhci_f_sdh30.c
@@ -13,6 +13,7 @@
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/module.h>
+#include <linux/property.h>
#include <linux/clk.h>
#include "sdhci-pltfm.h"
@@ -47,6 +48,7 @@ struct f_sdhost_priv {
struct clk *clk;
u32 vendor_hs200;
struct device *dev;
+ bool enable_cmd_dat_delay;
};
static void sdhci_f_sdh30_soft_voltage_switch(struct sdhci_host *host)
@@ -84,10 +86,19 @@ static unsigned int sdhci_f_sdh30_get_min_clock(struct sdhci_host *host)
static void sdhci_f_sdh30_reset(struct sdhci_host *host, u8 mask)
{
+ struct f_sdhost_priv *priv = sdhci_priv(host);
+ u32 ctl;
+
if (sdhci_readw(host, SDHCI_CLOCK_CONTROL) == 0)
sdhci_writew(host, 0xBC01, SDHCI_CLOCK_CONTROL);
sdhci_reset(host, mask);
+
+ if (priv->enable_cmd_dat_delay) {
+ ctl = sdhci_readl(host, F_SDH30_ESD_CONTROL);
+ ctl |= F_SDH30_CMD_DAT_DELAY;
+ sdhci_writel(host, ctl, F_SDH30_ESD_CONTROL);
+ }
}
static const struct sdhci_ops sdhci_f_sdh30_ops = {
@@ -126,6 +137,9 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev)
host->quirks2 = SDHCI_QUIRK2_SUPPORT_SINGLE |
SDHCI_QUIRK2_TUNING_WORK_AROUND;
+ priv->enable_cmd_dat_delay = device_property_read_bool(dev,
+ "fujitsu,cmd-dat-delay-select");
+
ret = mmc_of_parse(host->mmc);
if (ret)
goto err;
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index 53c970fe0873..cc98355dbdb9 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -1175,11 +1175,8 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
return -EINVAL;
ret = mmc_regulator_get_supply(host->mmc);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Could not get vmmc supply\n");
+ if (ret)
return ret;
- }
host->reg_base = devm_ioremap_resource(&pdev->dev,
platform_get_resource(pdev, IORESOURCE_MEM, 0));
diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c
index 93c4b40df90a..a3d8380ab480 100644
--- a/drivers/mmc/host/tifm_sd.c
+++ b/drivers/mmc/host/tifm_sd.c
@@ -783,9 +783,9 @@ static void tifm_sd_end_cmd(unsigned long data)
mmc_request_done(mmc, mrq);
}
-static void tifm_sd_abort(unsigned long data)
+static void tifm_sd_abort(struct timer_list *t)
{
- struct tifm_sd *host = (struct tifm_sd*)data;
+ struct tifm_sd *host = from_timer(host, t, timer);
pr_err("%s : card failed to respond for a long period of time "
"(%x, %x)\n",
@@ -968,7 +968,7 @@ static int tifm_sd_probe(struct tifm_dev *sock)
tasklet_init(&host->finish_tasklet, tifm_sd_end_cmd,
(unsigned long)host);
- setup_timer(&host->timer, tifm_sd_abort, (unsigned long)host);
+ timer_setup(&host->timer, tifm_sd_abort, 0);
mmc->ops = &tifm_sd_ops;
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index 9c4e6199b854..583bf3262df5 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -167,11 +167,11 @@ static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
/* HW engineers overrode docs: no sleep needed on R-Car2+ */
if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
- msleep(10);
+ usleep_range(10000, 11000);
if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
- msleep(10);
+ usleep_range(10000, 11000);
}
}
@@ -179,7 +179,7 @@ static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
{
if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
- msleep(10);
+ usleep_range(10000, 11000);
}
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
@@ -187,7 +187,7 @@ static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
/* HW engineers overrode docs: no sleep needed on R-Car2+ */
if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
- msleep(10);
+ usleep_range(10000, 11000);
}
static void tmio_mmc_set_clock(struct tmio_mmc_host *host,
@@ -219,7 +219,7 @@ static void tmio_mmc_set_clock(struct tmio_mmc_host *host,
sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & CLK_CTL_DIV_MASK);
if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
- msleep(10);
+ usleep_range(10000, 11000);
tmio_mmc_clk_start(host);
}
@@ -230,11 +230,11 @@ static void tmio_mmc_reset(struct tmio_mmc_host *host)
sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
- msleep(10);
+ usleep_range(10000, 11000);
sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
- msleep(10);
+ usleep_range(10000, 11000);
if (host->pdata->flags & TMIO_MMC_SDIO_IRQ) {
sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
@@ -1113,8 +1113,11 @@ static int tmio_mmc_init_ocr(struct tmio_mmc_host *host)
{
struct tmio_mmc_data *pdata = host->pdata;
struct mmc_host *mmc = host->mmc;
+ int err;
- mmc_regulator_get_supply(mmc);
+ err = mmc_regulator_get_supply(mmc);
+ if (err)
+ return err;
/* use ocr_mask if no regulator */
if (!mmc->ocr_avail)
@@ -1299,23 +1302,24 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
pm_runtime_enable(&pdev->dev);
ret = mmc_add_host(mmc);
- if (ret < 0) {
- tmio_mmc_host_remove(_host);
- return ret;
- }
+ if (ret)
+ goto remove_host;
dev_pm_qos_expose_latency_limit(&pdev->dev, 100);
if (pdata->flags & TMIO_MMC_USE_GPIO_CD) {
ret = mmc_gpio_request_cd(mmc, pdata->cd_gpio, 0);
- if (ret < 0) {
- tmio_mmc_host_remove(_host);
- return ret;
- }
+ if (ret)
+ goto remove_host;
+
mmc_gpiod_request_cd_irq(mmc);
}
return 0;
+
+remove_host:
+ tmio_mmc_host_remove(_host);
+ return ret;
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_probe);
diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c
index 64da6a88cfb9..cdfeb15b6f05 100644
--- a/drivers/mmc/host/usdhi6rol0.c
+++ b/drivers/mmc/host/usdhi6rol0.c
@@ -1757,7 +1757,7 @@ static int usdhi6_probe(struct platform_device *pdev)
return -ENOMEM;
ret = mmc_regulator_get_supply(mmc);
- if (ret == -EPROBE_DEFER)
+ if (ret)
goto e_free_mmc;
ret = mmc_of_parse(mmc);
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
index a838bf5480d8..32c4211506fc 100644
--- a/drivers/mmc/host/via-sdmmc.c
+++ b/drivers/mmc/host/via-sdmmc.c
@@ -932,12 +932,12 @@ out:
return result;
}
-static void via_sdc_timeout(unsigned long ulongdata)
+static void via_sdc_timeout(struct timer_list *t)
{
struct via_crdr_mmc_host *sdhost;
unsigned long flags;
- sdhost = (struct via_crdr_mmc_host *)ulongdata;
+ sdhost = from_timer(sdhost, t, timer);
spin_lock_irqsave(&sdhost->lock, flags);
@@ -1036,9 +1036,7 @@ static void via_init_mmc_host(struct via_crdr_mmc_host *host)
u32 lenreg;
u32 status;
- init_timer(&host->timer);
- host->timer.data = (unsigned long)host;
- host->timer.function = via_sdc_timeout;
+ timer_setup(&host->timer, via_sdc_timeout, 0);
spin_lock_init(&host->lock);
diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
index 8f569d257405..1fe68137a30f 100644
--- a/drivers/mmc/host/vub300.c
+++ b/drivers/mmc/host/vub300.c
@@ -741,9 +741,10 @@ static void vub300_deadwork_thread(struct work_struct *work)
kref_put(&vub300->kref, vub300_delete);
}
-static void vub300_inactivity_timer_expired(unsigned long data)
+static void vub300_inactivity_timer_expired(struct timer_list *t)
{ /* softirq */
- struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)data;
+ struct vub300_mmc_host *vub300 = from_timer(vub300, t,
+ inactivity_timer);
if (!vub300->interface) {
kref_put(&vub300->kref, vub300_delete);
} else if (vub300->cmd) {
@@ -1180,9 +1181,10 @@ static void send_command(struct vub300_mmc_host *vub300)
* timer callback runs in atomic mode
* so it cannot call usb_kill_urb()
*/
-static void vub300_sg_timed_out(unsigned long data)
+static void vub300_sg_timed_out(struct timer_list *t)
{
- struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)data;
+ struct vub300_mmc_host *vub300 = from_timer(vub300, t,
+ sg_transfer_timer);
vub300->usb_timed_out = 1;
usb_sg_cancel(&vub300->sg_request);
usb_unlink_urb(vub300->command_out_urb);
@@ -1244,12 +1246,8 @@ static void __download_offload_pseudocode(struct vub300_mmc_host *vub300,
USB_RECIP_DEVICE, 0x0000, 0x0000,
xfer_buffer, xfer_length, HZ);
kfree(xfer_buffer);
- if (retval < 0) {
- strncpy(vub300->vub_name,
- "SDIO pseudocode download failed",
- sizeof(vub300->vub_name));
- return;
- }
+ if (retval < 0)
+ goto copy_error_message;
} else {
dev_err(&vub300->udev->dev,
"not enough memory for xfer buffer to send"
@@ -1291,12 +1289,8 @@ static void __download_offload_pseudocode(struct vub300_mmc_host *vub300,
USB_RECIP_DEVICE, 0x0000, 0x0000,
xfer_buffer, xfer_length, HZ);
kfree(xfer_buffer);
- if (retval < 0) {
- strncpy(vub300->vub_name,
- "SDIO pseudocode download failed",
- sizeof(vub300->vub_name));
- return;
- }
+ if (retval < 0)
+ goto copy_error_message;
} else {
dev_err(&vub300->udev->dev,
"not enough memory for xfer buffer to send"
@@ -1349,6 +1343,12 @@ static void __download_offload_pseudocode(struct vub300_mmc_host *vub300,
sizeof(vub300->vub_name));
return;
}
+
+ return;
+
+copy_error_message:
+ strncpy(vub300->vub_name, "SDIO pseudocode download failed",
+ sizeof(vub300->vub_name));
}
/*
@@ -2323,13 +2323,10 @@ static int vub300_probe(struct usb_interface *interface,
INIT_WORK(&vub300->cmndwork, vub300_cmndwork_thread);
INIT_WORK(&vub300->deadwork, vub300_deadwork_thread);
kref_init(&vub300->kref);
- init_timer(&vub300->sg_transfer_timer);
- vub300->sg_transfer_timer.data = (unsigned long)vub300;
- vub300->sg_transfer_timer.function = vub300_sg_timed_out;
+ timer_setup(&vub300->sg_transfer_timer, vub300_sg_timed_out, 0);
kref_get(&vub300->kref);
- init_timer(&vub300->inactivity_timer);
- vub300->inactivity_timer.data = (unsigned long)vub300;
- vub300->inactivity_timer.function = vub300_inactivity_timer_expired;
+ timer_setup(&vub300->inactivity_timer,
+ vub300_inactivity_timer_expired, 0);
vub300->inactivity_timer.expires = jiffies + HZ;
add_timer(&vub300->inactivity_timer);
if (vub300->card_present)
diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c
index 546aaf8d1507..f4233576153b 100644
--- a/drivers/mmc/host/wbsd.c
+++ b/drivers/mmc/host/wbsd.c
@@ -956,9 +956,9 @@ static const struct mmc_host_ops wbsd_ops = {
* Helper function to reset detection ignore
*/
-static void wbsd_reset_ignore(unsigned long data)
+static void wbsd_reset_ignore(struct timer_list *t)
{
- struct wbsd_host *host = (struct wbsd_host *)data;
+ struct wbsd_host *host = from_timer(host, t, ignore_timer);
BUG_ON(host == NULL);
@@ -1224,9 +1224,7 @@ static int wbsd_alloc_mmc(struct device *dev)
/*
* Set up timers
*/
- init_timer(&host->ignore_timer);
- host->ignore_timer.data = (unsigned long)host;
- host->ignore_timer.function = wbsd_reset_ignore;
+ timer_setup(&host->ignore_timer, wbsd_reset_ignore, 0);
/*
* Maximum number of segments. Worst case is one sector per segment
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index c02cc817a490..1ed9529e7bd1 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -1378,7 +1378,7 @@ int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
unsigned int count;
slaves = rcu_dereference(bond->slave_arr);
- count = slaves ? ACCESS_ONCE(slaves->count) : 0;
+ count = slaves ? READ_ONCE(slaves->count) : 0;
if (likely(count))
tx_slave = slaves->arr[hash_index %
count];
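
This bond_alb.c hunk is the first of many below (bond_main.c, cxgb4, benet, hip04, i40e, igb, ixgbe, ixgbevf, mlx4, sfc, vxge) that replace ACCESS_ONCE() with READ_ONCE() for loads and WRITE_ONCE() for stores. A minimal sketch of the conversion, using a hypothetical ring->head field:

/* Old style: ACCESS_ONCE() covered both the load and the store. */
val = ACCESS_ONCE(ring->head);
ACCESS_ONCE(ring->head) = val + 1;

/* New style: READ_ONCE()/WRITE_ONCE() make the direction explicit and
 * still prevent the compiler from tearing, fusing or re-reading the
 * access.
 */
val = READ_ONCE(ring->head);
WRITE_ONCE(ring->head, val + 1);
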
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index c99dc59d729b..08a4f57cf409 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1167,7 +1167,7 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
slave = bond_slave_get_rcu(skb->dev);
bond = slave->bond;
- recv_probe = ACCESS_ONCE(bond->recv_probe);
+ recv_probe = READ_ONCE(bond->recv_probe);
if (recv_probe) {
ret = recv_probe(skb, bond, slave);
if (ret == RX_HANDLER_CONSUMED) {
@@ -2042,6 +2042,7 @@ static int bond_miimon_inspect(struct bonding *bond)
bond_for_each_slave_rcu(bond, slave, iter) {
slave->new_link = BOND_LINK_NOCHANGE;
+ slave->link_new_state = slave->link;
link_state = bond_check_dev_link(bond, slave->dev, 0);
@@ -3253,7 +3254,7 @@ u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
hash ^= (hash >> 16);
hash ^= (hash >> 8);
- return hash;
+ return hash >> 1;
}
/*-------------------------- Device entry points ----------------------------*/
@@ -3810,7 +3811,7 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
else
bond_xmit_slave_id(bond, skb, 0);
} else {
- int slave_cnt = ACCESS_ONCE(bond->slave_cnt);
+ int slave_cnt = READ_ONCE(bond->slave_cnt);
if (likely(slave_cnt)) {
slave_id = bond_rr_gen_slave_id(bond);
@@ -3972,7 +3973,7 @@ static int bond_3ad_xor_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int count;
slaves = rcu_dereference(bond->slave_arr);
- count = slaves ? ACCESS_ONCE(slaves->count) : 0;
+ count = slaves ? READ_ONCE(slaves->count) : 0;
if (likely(count)) {
slave = slaves->arr[bond_xmit_hash(bond, skb) % count];
bond_dev_queue_xmit(bond, skb, slave->dev);
diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
index cf7c18947189..d065c0e2d18e 100644
--- a/drivers/net/can/c_can/c_can_pci.c
+++ b/drivers/net/can/c_can/c_can_pci.c
@@ -178,7 +178,6 @@ static int c_can_pci_probe(struct pci_dev *pdev,
break;
case BOSCH_D_CAN:
priv->regs = reg_map_d_can;
- priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
break;
default:
ret = -EINVAL;
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index 46a746ee80bb..b5145a7f874c 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -320,7 +320,6 @@ static int c_can_plat_probe(struct platform_device *pdev)
break;
case BOSCH_D_CAN:
priv->regs = reg_map_d_can;
- priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
priv->read_reg32 = d_can_plat_read_reg32;
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
index 4d1fe8d95042..2772d05ff11c 100644
--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
+++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
@@ -670,9 +670,9 @@ static void ifi_canfd_set_bittiming(struct net_device *ndev)
priv->base + IFI_CANFD_FTIME);
/* Configure transmitter delay */
- tdc = (dbt->brp * (dbt->phase_seg1 + 1)) & IFI_CANFD_TDELAY_MASK;
- writel(IFI_CANFD_TDELAY_EN | IFI_CANFD_TDELAY_ABS | tdc,
- priv->base + IFI_CANFD_TDELAY);
+ tdc = dbt->brp * (dbt->prop_seg + dbt->phase_seg1);
+ tdc &= IFI_CANFD_TDELAY_MASK;
+ writel(IFI_CANFD_TDELAY_EN | tdc, priv->base + IFI_CANFD_TDELAY);
}
static void ifi_canfd_set_filter(struct net_device *ndev, const u32 id,
diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c
index 51c2d182a33a..b4efd711f824 100644
--- a/drivers/net/can/peak_canfd/peak_pciefd_main.c
+++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c
@@ -29,14 +29,19 @@
#include "peak_canfd_user.h"
MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>");
-MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCIe FD family cards");
-MODULE_SUPPORTED_DEVICE("PEAK PCAN PCIe FD CAN cards");
+MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCIe/M.2 FD family cards");
+MODULE_SUPPORTED_DEVICE("PEAK PCAN PCIe/M.2 FD CAN cards");
MODULE_LICENSE("GPL v2");
#define PCIEFD_DRV_NAME "peak_pciefd"
#define PEAK_PCI_VENDOR_ID 0x001c /* The PCI device and vendor IDs */
#define PEAK_PCIEFD_ID 0x0013 /* for PCIe slot cards */
+#define PCAN_CPCIEFD_ID 0x0014 /* for Compact-PCI Serial slot cards */
+#define PCAN_PCIE104FD_ID 0x0017 /* for PCIe-104 Express slot cards */
+#define PCAN_MINIPCIEFD_ID 0x0018 /* for mini-PCIe slot cards */
+#define PCAN_PCIEFD_OEM_ID 0x0019 /* for PCIe slot OEM cards */
+#define PCAN_M2_ID 0x001a /* for M2 slot cards */
/* PEAK PCIe board access description */
#define PCIEFD_BAR0_SIZE (64 * 1024)
@@ -203,6 +208,11 @@ struct pciefd_board {
/* supported device ids. */
static const struct pci_device_id peak_pciefd_tbl[] = {
{PEAK_PCI_VENDOR_ID, PEAK_PCIEFD_ID, PCI_ANY_ID, PCI_ANY_ID,},
+ {PEAK_PCI_VENDOR_ID, PCAN_CPCIEFD_ID, PCI_ANY_ID, PCI_ANY_ID,},
+ {PEAK_PCI_VENDOR_ID, PCAN_PCIE104FD_ID, PCI_ANY_ID, PCI_ANY_ID,},
+ {PEAK_PCI_VENDOR_ID, PCAN_MINIPCIEFD_ID, PCI_ANY_ID, PCI_ANY_ID,},
+ {PEAK_PCI_VENDOR_ID, PCAN_PCIEFD_OEM_ID, PCI_ANY_ID, PCI_ANY_ID,},
+ {PEAK_PCI_VENDOR_ID, PCAN_M2_ID, PCI_ANY_ID, PCI_ANY_ID,},
{0,}
};
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
index b0c80859f746..1ac2090a1721 100644
--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -539,6 +539,13 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
}
stats->rx_over_errors++;
stats->rx_errors++;
+
+ /* reset the CAN IP by entering reset mode,
+ * ignoring any timeout error
+ */
+ set_reset_mode(dev);
+ set_normal_mode(dev);
+
/* clear bit */
sun4i_can_write_cmdreg(priv, SUN4I_CMD_CLEAR_OR_FLAG);
}
@@ -653,8 +660,9 @@ static irqreturn_t sun4i_can_interrupt(int irq, void *dev_id)
netif_wake_queue(dev);
can_led_event(dev, CAN_LED_EVENT_TX);
}
- if (isrc & SUN4I_INT_RBUF_VLD) {
- /* receive interrupt */
+ if ((isrc & SUN4I_INT_RBUF_VLD) &&
+ !(isrc & SUN4I_INT_DATA_OR)) {
+ /* receive interrupt - don't read if overrun occurred */
while (status & SUN4I_STA_RBUF_RDY) {
/* RX buffer is not empty */
sun4i_can_rx(dev);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 4ef68f69b58c..43f52a8fe708 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -405,7 +405,7 @@ void free_tx_desc(struct adapter *adap, struct sge_txq *q,
*/
static inline int reclaimable(const struct sge_txq *q)
{
- int hw_cidx = ntohs(ACCESS_ONCE(q->stat->cidx));
+ int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
hw_cidx -= q->cidx;
return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
}
@@ -1375,7 +1375,7 @@ out_free: dev_kfree_skb_any(skb);
*/
static inline void reclaim_completed_tx_imm(struct sge_txq *q)
{
- int hw_cidx = ntohs(ACCESS_ONCE(q->stat->cidx));
+ int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
int reclaim = hw_cidx - q->cidx;
if (reclaim < 0)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
index f2d623a7aee0..123e2c1b65f5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
@@ -37,7 +37,7 @@
#define T4FW_VERSION_MAJOR 0x01
#define T4FW_VERSION_MINOR 0x10
-#define T4FW_VERSION_MICRO 0x2D
+#define T4FW_VERSION_MICRO 0x3F
#define T4FW_VERSION_BUILD 0x00
#define T4FW_MIN_VERSION_MAJOR 0x01
@@ -46,7 +46,7 @@
#define T5FW_VERSION_MAJOR 0x01
#define T5FW_VERSION_MINOR 0x10
-#define T5FW_VERSION_MICRO 0x2D
+#define T5FW_VERSION_MICRO 0x3F
#define T5FW_VERSION_BUILD 0x00
#define T5FW_MIN_VERSION_MAJOR 0x00
@@ -55,7 +55,7 @@
#define T6FW_VERSION_MAJOR 0x01
#define T6FW_VERSION_MINOR 0x10
-#define T6FW_VERSION_MICRO 0x2D
+#define T6FW_VERSION_MICRO 0x3F
#define T6FW_VERSION_BUILD 0x00
#define T6FW_MIN_VERSION_MAJOR 0x00
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 0e3d9f39a807..c6e859a27ee6 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -605,7 +605,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
if (wrapped)
newacc += 65536;
- ACCESS_ONCE(*acc) = newacc;
+ WRITE_ONCE(*acc, newacc);
}
static void populate_erx_stats(struct be_adapter *adapter,
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index 0cec06bec63e..340e28211135 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -373,7 +373,7 @@ static int hip04_tx_reclaim(struct net_device *ndev, bool force)
unsigned int count;
smp_rmb();
- count = tx_count(ACCESS_ONCE(priv->tx_head), tx_tail);
+ count = tx_count(READ_ONCE(priv->tx_head), tx_tail);
if (count == 0)
goto out;
@@ -431,7 +431,7 @@ static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
dma_addr_t phys;
smp_rmb();
- count = tx_count(tx_head, ACCESS_ONCE(priv->tx_tail));
+ count = tx_count(tx_head, READ_ONCE(priv->tx_tail));
if (count == (TX_DESC_NUM - 1)) {
netif_stop_queue(ndev);
return NETDEV_TX_BUSY;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 8f326f87a815..2cb9539c931e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -264,7 +264,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
vsi->rx_buf_failed, vsi->rx_page_failed);
rcu_read_lock();
for (i = 0; i < vsi->num_queue_pairs; i++) {
- struct i40e_ring *rx_ring = ACCESS_ONCE(vsi->rx_rings[i]);
+ struct i40e_ring *rx_ring = READ_ONCE(vsi->rx_rings[i]);
if (!rx_ring)
continue;
@@ -320,7 +320,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
ITR_IS_DYNAMIC(rx_ring->rx_itr_setting) ? "dynamic" : "fixed");
}
for (i = 0; i < vsi->num_queue_pairs; i++) {
- struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
+ struct i40e_ring *tx_ring = READ_ONCE(vsi->tx_rings[i]);
if (!tx_ring)
continue;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 05e89864f781..e9e04a485e0a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -1570,7 +1570,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
}
rcu_read_lock();
for (j = 0; j < vsi->num_queue_pairs; j++) {
- tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);
+ tx_ring = READ_ONCE(vsi->tx_rings[j]);
if (!tx_ring)
continue;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 6498da8806cb..de1fcac7834d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -455,7 +455,7 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev,
u64 bytes, packets;
unsigned int start;
- tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
+ tx_ring = READ_ONCE(vsi->tx_rings[i]);
if (!tx_ring)
continue;
i40e_get_netdev_stats_struct_tx(tx_ring, stats);
@@ -791,7 +791,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
rcu_read_lock();
for (q = 0; q < vsi->num_queue_pairs; q++) {
/* locate Tx ring */
- p = ACCESS_ONCE(vsi->tx_rings[q]);
+ p = READ_ONCE(vsi->tx_rings[q]);
do {
start = u64_stats_fetch_begin_irq(&p->syncp);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index d8456c381c99..97381238eb7c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -130,7 +130,7 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
}
smp_mb(); /* Force any pending update before accessing. */
- adj = ACCESS_ONCE(pf->ptp_base_adj);
+ adj = READ_ONCE(pf->ptp_base_adj);
freq = adj;
freq *= ppb;
@@ -499,7 +499,7 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32);
/* Update the base adjustement value. */
- ACCESS_ONCE(pf->ptp_base_adj) = incval;
+ WRITE_ONCE(pf->ptp_base_adj, incval);
smp_mb(); /* Force the above update. */
}
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index 58adbf234e07..31a3f09df9f7 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -375,7 +375,7 @@ u32 igb_rd32(struct e1000_hw *hw, u32 reg);
/* write operations, indexed using DWORDS */
#define wr32(reg, val) \
do { \
- u8 __iomem *hw_addr = ACCESS_ONCE((hw)->hw_addr); \
+ u8 __iomem *hw_addr = READ_ONCE((hw)->hw_addr); \
if (!E1000_REMOVED(hw_addr)) \
writel((val), &hw_addr[(reg)]); \
} while (0)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index ea69af267d63..18b6c25d4705 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -750,7 +750,7 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
u32 igb_rd32(struct e1000_hw *hw, u32 reg)
{
struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw);
- u8 __iomem *hw_addr = ACCESS_ONCE(hw->hw_addr);
+ u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
u32 value = 0;
if (E1000_REMOVED(hw_addr))
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index e083732adf64..a01409e2e06c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -161,7 +161,7 @@ static inline bool ixgbe_removed(void __iomem *addr)
static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value)
{
- u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+ u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
if (ixgbe_removed(reg_addr))
return;
@@ -180,7 +180,7 @@ static inline void writeq(u64 val, void __iomem *addr)
static inline void ixgbe_write_reg64(struct ixgbe_hw *hw, u32 reg, u64 value)
{
- u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+ u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
if (ixgbe_removed(reg_addr))
return;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 6d5f31e94358..935a2f15b0b0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -380,7 +380,7 @@ static void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
*/
u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
{
- u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+ u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
u32 value;
if (ixgbe_removed(reg_addr))
@@ -8624,7 +8624,7 @@ static void ixgbe_get_stats64(struct net_device *netdev,
rcu_read_lock();
for (i = 0; i < adapter->num_rx_queues; i++) {
- struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
+ struct ixgbe_ring *ring = READ_ONCE(adapter->rx_ring[i]);
u64 bytes, packets;
unsigned int start;
@@ -8640,12 +8640,12 @@ static void ixgbe_get_stats64(struct net_device *netdev,
}
for (i = 0; i < adapter->num_tx_queues; i++) {
- struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]);
+ struct ixgbe_ring *ring = READ_ONCE(adapter->tx_ring[i]);
ixgbe_get_ring_stats64(stats, ring);
}
for (i = 0; i < adapter->num_xdp_queues; i++) {
- struct ixgbe_ring *ring = ACCESS_ONCE(adapter->xdp_ring[i]);
+ struct ixgbe_ring *ring = READ_ONCE(adapter->xdp_ring[i]);
ixgbe_get_ring_stats64(stats, ring);
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 86d6924a2b71..ae312c45696a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -378,7 +378,7 @@ static int ixgbe_ptp_adjfreq_82599(struct ptp_clock_info *ptp, s32 ppb)
}
smp_mb();
- incval = ACCESS_ONCE(adapter->base_incval);
+ incval = READ_ONCE(adapter->base_incval);
freq = incval;
freq *= ppb;
@@ -1159,7 +1159,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
}
/* update the base incval used to calculate frequency adjustment */
- ACCESS_ONCE(adapter->base_incval) = incval;
+ WRITE_ONCE(adapter->base_incval, incval);
smp_mb();
/* need lock to prevent incorrect read while modifying cyclecounter */
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 032f8ac06357..cacb30682434 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -164,7 +164,7 @@ static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg)
u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg)
{
- u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+ u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
u32 value;
if (IXGBE_REMOVED(reg_addr))
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index 04d8d4ee4f04..c651fefcc3d2 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -182,7 +182,7 @@ struct ixgbevf_info {
static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value)
{
- u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+ u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
if (IXGBE_REMOVED(reg_addr))
return;
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index a37af5813f33..fcf9ba5eb8d1 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -6747,6 +6747,9 @@ static int mvpp2_irqs_init(struct mvpp2_port *port)
for (i = 0; i < port->nqvecs; i++) {
struct mvpp2_queue_vector *qv = port->qvecs + i;
+ if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE)
+ irq_set_status_flags(qv->irq, IRQ_NO_BALANCING);
+
err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv);
if (err)
goto err;
@@ -6776,6 +6779,7 @@ static void mvpp2_irqs_deinit(struct mvpp2_port *port)
struct mvpp2_queue_vector *qv = port->qvecs + i;
irq_set_affinity_hint(qv->irq, NULL);
+ irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING);
free_irq(qv->irq, qv);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 8a32a8f7f9c0..3541a7f9d12e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -414,8 +414,8 @@ bool mlx4_en_process_tx_cq(struct net_device *dev,
index = cons_index & size_mask;
cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor;
- last_nr_txbb = ACCESS_ONCE(ring->last_nr_txbb);
- ring_cons = ACCESS_ONCE(ring->cons);
+ last_nr_txbb = READ_ONCE(ring->last_nr_txbb);
+ ring_cons = READ_ONCE(ring->cons);
ring_index = ring_cons & size_mask;
stamp_index = ring_index;
@@ -479,8 +479,8 @@ bool mlx4_en_process_tx_cq(struct net_device *dev,
wmb();
/* we want to dirty this cache line once */
- ACCESS_ONCE(ring->last_nr_txbb) = last_nr_txbb;
- ACCESS_ONCE(ring->cons) = ring_cons + txbbs_skipped;
+ WRITE_ONCE(ring->last_nr_txbb, last_nr_txbb);
+ WRITE_ONCE(ring->cons, ring_cons + txbbs_skipped);
if (cq->type == TX_XDP)
return done < budget;
@@ -858,7 +858,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
goto tx_drop;
/* fetch ring->cons far ahead before needing it to avoid stall */
- ring_cons = ACCESS_ONCE(ring->cons);
+ ring_cons = READ_ONCE(ring->cons);
real_size = get_real_size(skb, shinfo, dev, &lso_header_size,
&inline_ok, &fragptr);
@@ -1066,7 +1066,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
*/
smp_rmb();
- ring_cons = ACCESS_ONCE(ring->cons);
+ ring_cons = READ_ONCE(ring->cons);
if (unlikely(!mlx4_en_is_tx_ring_full(ring))) {
netif_tx_wake_queue(ring->tx_queue);
ring->wake_queue++;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index fc281712869b..17b723218b0c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -93,7 +93,7 @@ static void delayed_event_release(struct mlx5_device_context *dev_ctx,
list_splice_init(&priv->waiting_events_list, &temp);
if (!dev_ctx->context)
goto out;
- list_for_each_entry_safe(de, n, &priv->waiting_events_list, list)
+ list_for_each_entry_safe(de, n, &temp, list)
dev_ctx->intf->event(dev, dev_ctx->context, de->event, de->param);
out:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index cc13d3dbd366..13b5ef9d8703 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -67,7 +67,7 @@
#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE 0xd
-#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x1
+#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x2
#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW 0x3
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW 0x6
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 850cdc980ab5..4837045ffba3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -365,21 +365,24 @@ static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
struct mlx5e_l2_hash_node *hn)
{
u8 action = hn->action;
+ u8 mac_addr[ETH_ALEN];
int l2_err = 0;
+ ether_addr_copy(mac_addr, hn->ai.addr);
+
switch (action) {
case MLX5E_ACTION_ADD:
mlx5e_add_l2_flow_rule(priv, &hn->ai, MLX5E_FULLMATCH);
- if (!is_multicast_ether_addr(hn->ai.addr)) {
- l2_err = mlx5_mpfs_add_mac(priv->mdev, hn->ai.addr);
+ if (!is_multicast_ether_addr(mac_addr)) {
+ l2_err = mlx5_mpfs_add_mac(priv->mdev, mac_addr);
hn->mpfs = !l2_err;
}
hn->action = MLX5E_ACTION_NONE;
break;
case MLX5E_ACTION_DEL:
- if (!is_multicast_ether_addr(hn->ai.addr) && hn->mpfs)
- l2_err = mlx5_mpfs_del_mac(priv->mdev, hn->ai.addr);
+ if (!is_multicast_ether_addr(mac_addr) && hn->mpfs)
+ l2_err = mlx5_mpfs_del_mac(priv->mdev, mac_addr);
mlx5e_del_l2_flow_rule(priv, &hn->ai);
mlx5e_del_l2_from_hash(hn);
break;
@@ -387,7 +390,7 @@ static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
if (l2_err)
netdev_warn(priv->netdev, "MPFS, failed to %s mac %pM, err(%d)\n",
- action == MLX5E_ACTION_ADD ? "add" : "del", hn->ai.addr, l2_err);
+ action == MLX5E_ACTION_ADD ? "add" : "del", mac_addr, l2_err);
}
static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 15a1687483cc..91b1b0938931 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -215,22 +215,20 @@ static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq,
static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq,
struct mlx5e_dma_info *dma_info)
{
- struct page *page;
-
if (mlx5e_rx_cache_get(rq, dma_info))
return 0;
- page = dev_alloc_pages(rq->buff.page_order);
- if (unlikely(!page))
+ dma_info->page = dev_alloc_pages(rq->buff.page_order);
+ if (unlikely(!dma_info->page))
return -ENOMEM;
- dma_info->addr = dma_map_page(rq->pdev, page, 0,
+ dma_info->addr = dma_map_page(rq->pdev, dma_info->page, 0,
RQ_PAGE_SIZE(rq), rq->buff.map_dir);
if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) {
- put_page(page);
+ put_page(dma_info->page);
+ dma_info->page = NULL;
return -ENOMEM;
}
- dma_info->page = page;
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index e906b754415c..ab92298eafc3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -49,7 +49,7 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
napi);
bool busy = false;
- int work_done;
+ int work_done = 0;
int i;
for (i = 0; i < c->num_tc; i++)
@@ -58,15 +58,17 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
if (c->xdp)
busy |= mlx5e_poll_xdpsq_cq(&c->rq.xdpsq.cq);
- work_done = mlx5e_poll_rx_cq(&c->rq.cq, budget);
- busy |= work_done == budget;
+ if (likely(budget)) { /* budget=0 means: don't poll rx rings */
+ work_done = mlx5e_poll_rx_cq(&c->rq.cq, budget);
+ busy |= work_done == budget;
+ }
busy |= c->rq.post_wqes(&c->rq);
if (busy) {
if (likely(mlx5e_channel_no_affinity_change(c)))
return budget;
- if (work_done == budget)
+ if (budget && work_done == budget)
work_done--;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 0d2c8dcd6eae..06562c9a6b9c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1482,9 +1482,16 @@ static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
return -EAGAIN;
}
+ /* The panic teardown FW command will stop the PCI bus communication
+ * with the HCA, so the health poll is no longer needed.
+ */
+ mlx5_drain_health_wq(dev);
+ mlx5_stop_health_poll(dev);
+
ret = mlx5_cmd_force_teardown_hca(dev);
if (ret) {
mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", ret);
+ mlx5_start_health_poll(dev);
return ret;
}
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 50ea69d88480..5dd5f61e1114 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -2629,7 +2629,7 @@ static void vxge_poll_vp_lockup(unsigned long data)
ring = &vdev->vpaths[i].ring;
/* Truncated to machine word size number of frames */
- rx_frms = ACCESS_ONCE(ring->stats.rx_frms);
+ rx_frms = READ_ONCE(ring->stats.rx_frms);
/* Did this vpath received any packets */
if (ring->stats.prev_rx_frms == rx_frms) {
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 13f72f5b18d2..a95a46bcd339 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -2073,7 +2073,7 @@ static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id)
netif_vdbg(efx, intr, efx->net_dev,
"IRQ %d on CPU %d\n", irq, raw_smp_processor_id());
- if (likely(ACCESS_ONCE(efx->irq_soft_enabled))) {
+ if (likely(READ_ONCE(efx->irq_soft_enabled))) {
/* Note test interrupts */
if (context->index == efx->irq_level)
efx->last_irq_cpu = raw_smp_processor_id();
@@ -2088,7 +2088,7 @@ static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id)
static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id)
{
struct efx_nic *efx = dev_id;
- bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
+ bool soft_enabled = READ_ONCE(efx->irq_soft_enabled);
struct efx_channel *channel;
efx_dword_t reg;
u32 queues;
@@ -3291,7 +3291,7 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel,
bool rx_cont;
u16 flags = 0;
- if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+ if (unlikely(READ_ONCE(efx->reset_pending)))
return 0;
/* Basic packet information */
@@ -3428,7 +3428,7 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
unsigned int tx_ev_q_label;
int tx_descs = 0;
- if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+ if (unlikely(READ_ONCE(efx->reset_pending)))
return 0;
if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT)))
@@ -5316,7 +5316,7 @@ static void efx_ef10_filter_remove_old(struct efx_nic *efx)
int i;
for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
- if (ACCESS_ONCE(table->entry[i].spec) &
+ if (READ_ONCE(table->entry[i].spec) &
EFX_EF10_FILTER_FLAG_AUTO_OLD) {
rc = efx_ef10_filter_remove_internal(efx,
1U << EFX_FILTER_PRI_AUTO, i, true);
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index b9cb697b2818..016616a63880 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -2809,7 +2809,7 @@ static void efx_reset_work(struct work_struct *data)
unsigned long pending;
enum reset_type method;
- pending = ACCESS_ONCE(efx->reset_pending);
+ pending = READ_ONCE(efx->reset_pending);
method = fls(pending) - 1;
if (method == RESET_TYPE_MC_BIST)
@@ -2874,7 +2874,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
/* If we're not READY then just leave the flags set as the cue
* to abort probing or reschedule the reset later.
*/
- if (ACCESS_ONCE(efx->state) != STATE_READY)
+ if (READ_ONCE(efx->state) != STATE_READY)
return;
/* efx_process_channel() will no longer read events once a
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index 29614da91cbf..7263275fde4a 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -2545,7 +2545,7 @@ static void ef4_reset_work(struct work_struct *data)
unsigned long pending;
enum reset_type method;
- pending = ACCESS_ONCE(efx->reset_pending);
+ pending = READ_ONCE(efx->reset_pending);
method = fls(pending) - 1;
if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
@@ -2605,7 +2605,7 @@ void ef4_schedule_reset(struct ef4_nic *efx, enum reset_type type)
/* If we're not READY then just leave the flags set as the cue
* to abort probing or reschedule the reset later.
*/
- if (ACCESS_ONCE(efx->state) != STATE_READY)
+ if (READ_ONCE(efx->state) != STATE_READY)
return;
queue_work(reset_workqueue, &efx->reset_work);
diff --git a/drivers/net/ethernet/sfc/falcon/falcon.c b/drivers/net/ethernet/sfc/falcon/falcon.c
index 93c713c1f627..cd8bb472d758 100644
--- a/drivers/net/ethernet/sfc/falcon/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon/falcon.c
@@ -452,7 +452,7 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
"IRQ %d on CPU %d status " EF4_OWORD_FMT "\n",
irq, raw_smp_processor_id(), EF4_OWORD_VAL(*int_ker));
- if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
+ if (!likely(READ_ONCE(efx->irq_soft_enabled)))
return IRQ_HANDLED;
/* Check to see if we have a serious error condition */
@@ -1372,7 +1372,7 @@ static void falcon_reconfigure_mac_wrapper(struct ef4_nic *efx)
ef4_oword_t reg;
int link_speed, isolate;
- isolate = !!ACCESS_ONCE(efx->reset_pending);
+ isolate = !!READ_ONCE(efx->reset_pending);
switch (link_state->speed) {
case 10000: link_speed = 3; break;
diff --git a/drivers/net/ethernet/sfc/falcon/farch.c b/drivers/net/ethernet/sfc/falcon/farch.c
index 05916c710d8c..494884f6af4a 100644
--- a/drivers/net/ethernet/sfc/falcon/farch.c
+++ b/drivers/net/ethernet/sfc/falcon/farch.c
@@ -834,7 +834,7 @@ ef4_farch_handle_tx_event(struct ef4_channel *channel, ef4_qword_t *event)
struct ef4_nic *efx = channel->efx;
int tx_packets = 0;
- if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+ if (unlikely(READ_ONCE(efx->reset_pending)))
return 0;
if (likely(EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
@@ -990,7 +990,7 @@ ef4_farch_handle_rx_event(struct ef4_channel *channel, const ef4_qword_t *event)
struct ef4_rx_queue *rx_queue;
struct ef4_nic *efx = channel->efx;
- if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+ if (unlikely(READ_ONCE(efx->reset_pending)))
return;
rx_ev_cont = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
@@ -1504,7 +1504,7 @@ irqreturn_t ef4_farch_fatal_interrupt(struct ef4_nic *efx)
irqreturn_t ef4_farch_legacy_interrupt(int irq, void *dev_id)
{
struct ef4_nic *efx = dev_id;
- bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
+ bool soft_enabled = READ_ONCE(efx->irq_soft_enabled);
ef4_oword_t *int_ker = efx->irq_status.addr;
irqreturn_t result = IRQ_NONE;
struct ef4_channel *channel;
@@ -1596,7 +1596,7 @@ irqreturn_t ef4_farch_msi_interrupt(int irq, void *dev_id)
"IRQ %d on CPU %d status " EF4_OWORD_FMT "\n",
irq, raw_smp_processor_id(), EF4_OWORD_VAL(*int_ker));
- if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
+ if (!likely(READ_ONCE(efx->irq_soft_enabled)))
return IRQ_HANDLED;
/* Handle non-event-queue sources */
diff --git a/drivers/net/ethernet/sfc/falcon/nic.h b/drivers/net/ethernet/sfc/falcon/nic.h
index a4c4592f6023..54ca457cdb15 100644
--- a/drivers/net/ethernet/sfc/falcon/nic.h
+++ b/drivers/net/ethernet/sfc/falcon/nic.h
@@ -83,7 +83,7 @@ static inline struct ef4_tx_queue *ef4_tx_queue_partner(struct ef4_tx_queue *tx_
static inline bool __ef4_nic_tx_is_empty(struct ef4_tx_queue *tx_queue,
unsigned int write_count)
{
- unsigned int empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
+ unsigned int empty_read_count = READ_ONCE(tx_queue->empty_read_count);
if (empty_read_count == 0)
return false;
@@ -464,11 +464,11 @@ irqreturn_t ef4_farch_fatal_interrupt(struct ef4_nic *efx);
static inline int ef4_nic_event_test_irq_cpu(struct ef4_channel *channel)
{
- return ACCESS_ONCE(channel->event_test_cpu);
+ return READ_ONCE(channel->event_test_cpu);
}
static inline int ef4_nic_irq_test_irq_cpu(struct ef4_nic *efx)
{
- return ACCESS_ONCE(efx->last_irq_cpu);
+ return READ_ONCE(efx->last_irq_cpu);
}
/* Global Resources */
diff --git a/drivers/net/ethernet/sfc/falcon/tx.c b/drivers/net/ethernet/sfc/falcon/tx.c
index 6a75f4140a4b..6486814e97dc 100644
--- a/drivers/net/ethernet/sfc/falcon/tx.c
+++ b/drivers/net/ethernet/sfc/falcon/tx.c
@@ -134,8 +134,8 @@ static void ef4_tx_maybe_stop_queue(struct ef4_tx_queue *txq1)
*/
netif_tx_stop_queue(txq1->core_txq);
smp_mb();
- txq1->old_read_count = ACCESS_ONCE(txq1->read_count);
- txq2->old_read_count = ACCESS_ONCE(txq2->read_count);
+ txq1->old_read_count = READ_ONCE(txq1->read_count);
+ txq2->old_read_count = READ_ONCE(txq2->read_count);
fill_level = max(txq1->insert_count - txq1->old_read_count,
txq2->insert_count - txq2->old_read_count);
@@ -524,7 +524,7 @@ void ef4_xmit_done(struct ef4_tx_queue *tx_queue, unsigned int index)
/* Check whether the hardware queue is now empty */
if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
- tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
+ tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
if (tx_queue->read_count == tx_queue->old_write_count) {
smp_mb();
tx_queue->empty_read_count =
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index ba45150f53c7..86454d25a405 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -827,7 +827,7 @@ efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
struct efx_nic *efx = channel->efx;
int tx_packets = 0;
- if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+ if (unlikely(READ_ONCE(efx->reset_pending)))
return 0;
if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
@@ -979,7 +979,7 @@ efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
struct efx_rx_queue *rx_queue;
struct efx_nic *efx = channel->efx;
- if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+ if (unlikely(READ_ONCE(efx->reset_pending)))
return;
rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
@@ -1520,7 +1520,7 @@ irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx)
irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id)
{
struct efx_nic *efx = dev_id;
- bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
+ bool soft_enabled = READ_ONCE(efx->irq_soft_enabled);
efx_oword_t *int_ker = efx->irq_status.addr;
irqreturn_t result = IRQ_NONE;
struct efx_channel *channel;
@@ -1612,7 +1612,7 @@ irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id)
"IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
- if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
+ if (!likely(READ_ONCE(efx->irq_soft_enabled)))
return IRQ_HANDLED;
/* Handle non-event-queue sources */
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 4d7fb8af880d..7b51b6371724 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -81,7 +81,7 @@ static struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue)
static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue,
unsigned int write_count)
{
- unsigned int empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
+ unsigned int empty_read_count = READ_ONCE(tx_queue->empty_read_count);
if (empty_read_count == 0)
return false;
@@ -617,11 +617,11 @@ irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx);
static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel)
{
- return ACCESS_ONCE(channel->event_test_cpu);
+ return READ_ONCE(channel->event_test_cpu);
}
static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx)
{
- return ACCESS_ONCE(efx->last_irq_cpu);
+ return READ_ONCE(efx->last_irq_cpu);
}
/* Global Resources */
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index 60cdb97f58e2..56c2db398def 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -658,7 +658,7 @@ static void efx_ptp_send_times(struct efx_nic *efx,
/* Write host time for specified period or until MC is done */
while ((timespec64_compare(&now.ts_real, &limit) < 0) &&
- ACCESS_ONCE(*mc_running)) {
+ READ_ONCE(*mc_running)) {
struct timespec64 update_time;
unsigned int host_time;
@@ -668,7 +668,7 @@ static void efx_ptp_send_times(struct efx_nic *efx,
do {
pps_get_ts(&now);
} while ((timespec64_compare(&now.ts_real, &update_time) < 0) &&
- ACCESS_ONCE(*mc_running));
+ READ_ONCE(*mc_running));
/* Synchronise NIC with single word of time only */
host_time = (now.ts_real.tv_sec << MC_NANOSECOND_BITS |
@@ -832,14 +832,14 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
ptp->start.dma_addr);
/* Clear flag that signals MC ready */
- ACCESS_ONCE(*start) = 0;
+ WRITE_ONCE(*start, 0);
rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
EFX_WARN_ON_ONCE_PARANOID(rc);
/* Wait for start from MCDI (or timeout) */
timeout = jiffies + msecs_to_jiffies(MAX_SYNCHRONISE_WAIT_MS);
- while (!ACCESS_ONCE(*start) && (time_before(jiffies, timeout))) {
+ while (!READ_ONCE(*start) && (time_before(jiffies, timeout))) {
udelay(20); /* Usually start MCDI execution quickly */
loops++;
}
@@ -849,7 +849,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
if (!time_before(jiffies, timeout))
++ptp->sync_timeouts;
- if (ACCESS_ONCE(*start))
+ if (READ_ONCE(*start))
efx_ptp_send_times(efx, &last_time);
/* Collect results */
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 32bf1fecf864..efb66ea21f27 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -136,8 +136,8 @@ static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
*/
netif_tx_stop_queue(txq1->core_txq);
smp_mb();
- txq1->old_read_count = ACCESS_ONCE(txq1->read_count);
- txq2->old_read_count = ACCESS_ONCE(txq2->read_count);
+ txq1->old_read_count = READ_ONCE(txq1->read_count);
+ txq2->old_read_count = READ_ONCE(txq2->read_count);
fill_level = max(txq1->insert_count - txq1->old_read_count,
txq2->insert_count - txq2->old_read_count);
@@ -752,7 +752,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
/* Check whether the hardware queue is now empty */
if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
- tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
+ tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
if (tx_queue->read_count == tx_queue->old_write_count) {
smp_mb();
tx_queue->empty_read_count =
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 6a4e8e1bbd90..8ab0fb6892d5 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6245,7 +6245,7 @@ static void niu_get_rx_stats(struct niu *np,
pkts = dropped = errors = bytes = 0;
- rx_rings = ACCESS_ONCE(np->rx_rings);
+ rx_rings = READ_ONCE(np->rx_rings);
if (!rx_rings)
goto no_rings;
@@ -6276,7 +6276,7 @@ static void niu_get_tx_stats(struct niu *np,
pkts = errors = bytes = 0;
- tx_rings = ACCESS_ONCE(np->tx_rings);
+ tx_rings = READ_ONCE(np->tx_rings);
if (!tx_rings)
goto no_rings;
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index 6c0c84c33e1f..b13890953ebb 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -257,7 +257,7 @@ static struct tap_queue *tap_get_queue(struct tap_dev *tap,
* and validate that the result isn't NULL - in case we are
* racing against queue removal.
*/
- int numvtaps = ACCESS_ONCE(tap->numvtaps);
+ int numvtaps = READ_ONCE(tap->numvtaps);
__u32 rxq;
if (!numvtaps)
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 42bb820a56c9..c1685a6d7883 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -469,7 +469,7 @@ static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
u32 numqueues = 0;
rcu_read_lock();
- numqueues = ACCESS_ONCE(tun->numqueues);
+ numqueues = READ_ONCE(tun->numqueues);
txq = __skb_get_hash_symmetric(skb);
if (txq) {
@@ -864,7 +864,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
rcu_read_lock();
tfile = rcu_dereference(tun->tfiles[txq]);
- numqueues = ACCESS_ONCE(tun->numqueues);
+ numqueues = READ_ONCE(tun->numqueues);
/* Drop packet if interface is not attached */
if (txq >= numqueues)
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index b2ff88e69a81..3d4f7959dabb 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -626,7 +626,7 @@ static int asix_suspend(struct usb_interface *intf, pm_message_t message)
struct usbnet *dev = usb_get_intfdata(intf);
struct asix_common_private *priv = dev->driver_priv;
- if (priv->suspend)
+ if (priv && priv->suspend)
priv->suspend(dev);
return usbnet_suspend(intf, message);
@@ -678,7 +678,7 @@ static int asix_resume(struct usb_interface *intf)
struct usbnet *dev = usb_get_intfdata(intf);
struct asix_common_private *priv = dev->driver_priv;
- if (priv->resume)
+ if (priv && priv->resume)
priv->resume(dev);
return usbnet_resume(intf);
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 3e7a3ac3a362..05dca3e5c93d 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -230,7 +230,7 @@ skip:
goto bad_desc;
}
- if (header.usb_cdc_ether_desc) {
+ if (header.usb_cdc_ether_desc && info->ether->wMaxSegmentSize) {
dev->hard_mtu = le16_to_cpu(info->ether->wMaxSegmentSize);
/* because of Zaurus, we may be ignoring the host
* side link address we were given.
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 8c3733608271..8d4a6f7cba61 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -499,6 +499,7 @@ static int qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
return 1;
}
if (rawip) {
+ skb_reset_mac_header(skb);
skb->dev = dev->net; /* normally set by eth_type_trans */
skb->protocol = proto;
return 1;
@@ -681,7 +682,7 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
}
/* errors aren't fatal - we can live with the dynamic address */
- if (cdc_ether) {
+ if (cdc_ether && cdc_ether->wMaxSegmentSize) {
dev->hard_mtu = le16_to_cpu(cdc_ether->wMaxSegmentSize);
usbnet_get_ethernet_addr(dev, cdc_ether->iMACAddress);
}
diff --git a/drivers/net/wireless/ath/ath5k/desc.c b/drivers/net/wireless/ath/ath5k/desc.c
index bd8d4392d68b..80f75139495f 100644
--- a/drivers/net/wireless/ath/ath5k/desc.c
+++ b/drivers/net/wireless/ath/ath5k/desc.c
@@ -500,13 +500,13 @@ ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
tx_status = &desc->ud.ds_tx5212.tx_stat;
- txstat1 = ACCESS_ONCE(tx_status->tx_status_1);
+ txstat1 = READ_ONCE(tx_status->tx_status_1);
/* No frame has been sent or an error occurred */
if (unlikely(!(txstat1 & AR5K_DESC_TX_STATUS1_DONE)))
return -EINPROGRESS;
- txstat0 = ACCESS_ONCE(tx_status->tx_status_0);
+ txstat0 = READ_ONCE(tx_status->tx_status_0);
/*
* Get descriptor status
@@ -700,14 +700,14 @@ ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
u32 rxstat0, rxstat1;
rx_status = &desc->ud.ds_rx.rx_stat;
- rxstat1 = ACCESS_ONCE(rx_status->rx_status_1);
+ rxstat1 = READ_ONCE(rx_status->rx_status_1);
/* No frame received / not ready */
if (unlikely(!(rxstat1 & AR5K_5212_RX_DESC_STATUS1_DONE)))
return -EINPROGRESS;
memset(rs, 0, sizeof(struct ath5k_rx_status));
- rxstat0 = ACCESS_ONCE(rx_status->rx_status_0);
+ rxstat0 = READ_ONCE(rx_status->rx_status_0);
/*
* Frame receive status
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index 613caca7dc02..785a0f33b7e6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -3628,7 +3628,7 @@ static void brcmf_sdio_dataworker(struct work_struct *work)
bus->dpc_running = true;
wmb();
- while (ACCESS_ONCE(bus->dpc_triggered)) {
+ while (READ_ONCE(bus->dpc_triggered)) {
bus->dpc_triggered = false;
brcmf_sdio_dpc(bus);
bus->idlecount = 0;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 231878969332..0f45f34e39d3 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -1118,7 +1118,7 @@ void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
- bool calibrating = ACCESS_ONCE(mvm->calibrating);
+ bool calibrating = READ_ONCE(mvm->calibrating);
if (state)
set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 6f2e2af23219..6e9d3289b9d0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -652,7 +652,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
return -1;
} else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
is_multicast_ether_addr(hdr->addr1)) {
- u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id);
+ u8 ap_sta_id = READ_ONCE(mvmvif->ap_sta_id);
if (ap_sta_id != IWL_MVM_INVALID_STA)
sta_id = ap_sta_id;
@@ -700,7 +700,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) +
tcp_hdrlen(skb);
- dbg_max_amsdu_len = ACCESS_ONCE(mvm->max_amsdu_len);
+ dbg_max_amsdu_len = READ_ONCE(mvm->max_amsdu_len);
if (!sta->max_amsdu_len ||
!ieee80211_is_data_qos(hdr->frame_control) ||
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index a06b6612b658..f25ce3a1ea50 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -1247,7 +1247,7 @@ restart:
spin_lock(&rxq->lock);
/* uCode's read index (stored in shared DRAM) indicates the last Rx
* buffer that the driver may process (last buffer filled by ucode). */
- r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
+ r = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
i = rxq->read;
/* W/A 9000 device step A0 wrap-around bug */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 2e3e013ec95a..9ad3f4fe5894 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -2076,12 +2076,12 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx);
txq = trans_pcie->txq[txq_idx];
- wr_ptr = ACCESS_ONCE(txq->write_ptr);
+ wr_ptr = READ_ONCE(txq->write_ptr);
- while (txq->read_ptr != ACCESS_ONCE(txq->write_ptr) &&
+ while (txq->read_ptr != READ_ONCE(txq->write_ptr) &&
!time_after(jiffies,
now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
- u8 write_ptr = ACCESS_ONCE(txq->write_ptr);
+ u8 write_ptr = READ_ONCE(txq->write_ptr);
if (WARN_ONCE(wr_ptr != write_ptr,
"WR pointer moved while flushing %d -> %d\n",
@@ -2553,7 +2553,7 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
spin_lock(&rxq->lock);
- r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
+ r = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
for (i = rxq->read, j = 0;
i != r && j < allocated_rb_nums;
@@ -2814,7 +2814,7 @@ static struct iwl_trans_dump_data
/* Dump RBs is supported only for pre-9000 devices (1 queue) */
struct iwl_rxq *rxq = &trans_pcie->rxq[0];
/* RBs */
- num_rbs = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num))
+ num_rbs = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num))
& 0x0FFF;
num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK;
len += num_rbs * (sizeof(*data) +
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 6467ffac9811..d2b3d6177a55 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -1380,7 +1380,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
mac80211_hwsim_monitor_rx(hw, skb, channel);
/* wmediumd mode check */
- _portid = ACCESS_ONCE(data->wmediumd);
+ _portid = READ_ONCE(data->wmediumd);
if (_portid)
return mac80211_hwsim_tx_frame_nl(hw, skb, _portid);
@@ -1477,7 +1477,7 @@ static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
struct ieee80211_channel *chan)
{
struct mac80211_hwsim_data *data = hw->priv;
- u32 _pid = ACCESS_ONCE(data->wmediumd);
+ u32 _pid = READ_ONCE(data->wmediumd);
if (ieee80211_hw_check(hw, SUPPORTS_RC_TABLE)) {
struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb);
diff --git a/drivers/nubus/nubus.c b/drivers/nubus/nubus.c
index 8ce69c833362..b793727cd4f7 100644
--- a/drivers/nubus/nubus.c
+++ b/drivers/nubus/nubus.c
@@ -19,11 +19,6 @@
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/hwtest.h>
-#include <asm/mac_via.h>
-#include <asm/mac_oss.h>
-
-extern void via_nubus_init(void);
-extern void oss_nubus_init(void);
/* Constants */
@@ -841,14 +836,6 @@ static int __init nubus_init(void)
if (!MACH_IS_MAC)
return 0;
- /* Initialize the NuBus interrupts */
- if (oss_present) {
- oss_nubus_init();
- } else {
- via_nubus_init();
- }
-
- /* And probe */
pr_info("NuBus: Scanning NuBus slots.\n");
nubus_devices = NULL;
nubus_boards = NULL;
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 0fd6195601ba..96cd55f9e3c5 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -244,7 +244,7 @@ config REGULATOR_DA9210
interface.
config REGULATOR_DA9211
- tristate "Dialog Semiconductor DA9211/DA9212/DA9213/DA9214/DA9215 regulator"
+ tristate "Dialog Semiconductor DA9211/DA9212/DA9213/DA9223/DA9214/DA9224/DA9215/DA9225 regulator"
depends on I2C
select REGMAP_I2C
help
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index 376a99b7cf5d..181622b2813d 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -244,6 +244,7 @@ static const struct regulator_desc axp22x_drivevbus_regulator = {
.ops = &axp20x_ops_sw,
};
+/* DCDC ranges shared with AXP813 */
static const struct regulator_linear_range axp803_dcdc234_ranges[] = {
REGULATOR_LINEAR_RANGE(500000, 0x0, 0x46, 10000),
REGULATOR_LINEAR_RANGE(1220000, 0x47, 0x4b, 20000),
@@ -426,6 +427,69 @@ static const struct regulator_desc axp809_regulators[] = {
AXP_DESC_SW(AXP809, SW, "sw", "swin", AXP22X_PWR_OUT_CTRL2, BIT(6)),
};
+static const struct regulator_desc axp813_regulators[] = {
+ AXP_DESC(AXP813, DCDC1, "dcdc1", "vin1", 1600, 3400, 100,
+ AXP803_DCDC1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(0)),
+ AXP_DESC_RANGES(AXP813, DCDC2, "dcdc2", "vin2", axp803_dcdc234_ranges,
+ 76, AXP803_DCDC2_V_OUT, 0x7f, AXP22X_PWR_OUT_CTRL1,
+ BIT(1)),
+ AXP_DESC_RANGES(AXP813, DCDC3, "dcdc3", "vin3", axp803_dcdc234_ranges,
+ 76, AXP803_DCDC3_V_OUT, 0x7f, AXP22X_PWR_OUT_CTRL1,
+ BIT(2)),
+ AXP_DESC_RANGES(AXP813, DCDC4, "dcdc4", "vin4", axp803_dcdc234_ranges,
+ 76, AXP803_DCDC4_V_OUT, 0x7f, AXP22X_PWR_OUT_CTRL1,
+ BIT(3)),
+ AXP_DESC_RANGES(AXP813, DCDC5, "dcdc5", "vin5", axp803_dcdc5_ranges,
+ 68, AXP803_DCDC5_V_OUT, 0x7f, AXP22X_PWR_OUT_CTRL1,
+ BIT(4)),
+ AXP_DESC_RANGES(AXP813, DCDC6, "dcdc6", "vin6", axp803_dcdc6_ranges,
+ 72, AXP803_DCDC6_V_OUT, 0x7f, AXP22X_PWR_OUT_CTRL1,
+ BIT(5)),
+ AXP_DESC_RANGES(AXP813, DCDC7, "dcdc7", "vin7", axp803_dcdc6_ranges,
+ 72, AXP813_DCDC7_V_OUT, 0x7f, AXP22X_PWR_OUT_CTRL1,
+ BIT(6)),
+ AXP_DESC(AXP813, ALDO1, "aldo1", "aldoin", 700, 3300, 100,
+ AXP22X_ALDO1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL3, BIT(5)),
+ AXP_DESC(AXP813, ALDO2, "aldo2", "aldoin", 700, 3300, 100,
+ AXP22X_ALDO2_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL3, BIT(6)),
+ AXP_DESC(AXP813, ALDO3, "aldo3", "aldoin", 700, 3300, 100,
+ AXP22X_ALDO3_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL3, BIT(7)),
+ AXP_DESC(AXP813, DLDO1, "dldo1", "dldoin", 700, 3300, 100,
+ AXP22X_DLDO1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(3)),
+ AXP_DESC_RANGES(AXP813, DLDO2, "dldo2", "dldoin", axp803_dldo2_ranges,
+ 32, AXP22X_DLDO2_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2,
+ BIT(4)),
+ AXP_DESC(AXP813, DLDO3, "dldo3", "dldoin", 700, 3300, 100,
+ AXP22X_DLDO3_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(5)),
+ AXP_DESC(AXP813, DLDO4, "dldo4", "dldoin", 700, 3300, 100,
+ AXP22X_DLDO4_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(6)),
+ AXP_DESC(AXP813, ELDO1, "eldo1", "eldoin", 700, 1900, 50,
+ AXP22X_ELDO1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(0)),
+ AXP_DESC(AXP813, ELDO2, "eldo2", "eldoin", 700, 1900, 50,
+ AXP22X_ELDO2_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(1)),
+ AXP_DESC(AXP813, ELDO3, "eldo3", "eldoin", 700, 1900, 50,
+ AXP22X_ELDO3_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(2)),
+ /* to do / check ... */
+ AXP_DESC(AXP813, FLDO1, "fldo1", "fldoin", 700, 1450, 50,
+ AXP803_FLDO1_V_OUT, 0x0f, AXP22X_PWR_OUT_CTRL3, BIT(2)),
+ AXP_DESC(AXP813, FLDO2, "fldo2", "fldoin", 700, 1450, 50,
+ AXP803_FLDO2_V_OUT, 0x0f, AXP22X_PWR_OUT_CTRL3, BIT(3)),
+ /*
+ * TODO: FLDO3 = {DCDC5, FLDOIN} / 2
+ *
+ * This means FLDO3 effectively switches supplies at runtime,
+ * something the regulator subsystem does not support.
+ */
+ AXP_DESC_FIXED(AXP813, RTC_LDO, "rtc-ldo", "ips", 1800),
+ AXP_DESC_IO(AXP813, LDO_IO0, "ldo-io0", "ips", 700, 3300, 100,
+ AXP22X_LDO_IO0_V_OUT, 0x1f, AXP20X_GPIO0_CTRL, 0x07,
+ AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
+ AXP_DESC_IO(AXP813, LDO_IO1, "ldo-io1", "ips", 700, 3300, 100,
+ AXP22X_LDO_IO1_V_OUT, 0x1f, AXP20X_GPIO1_CTRL, 0x07,
+ AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
+ AXP_DESC_SW(AXP813, SW, "sw", "swin", AXP22X_PWR_OUT_CTRL2, BIT(7)),
+};
+
static int axp20x_set_dcdc_freq(struct platform_device *pdev, u32 dcdcfreq)
{
struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
@@ -441,9 +505,10 @@ static int axp20x_set_dcdc_freq(struct platform_device *pdev, u32 dcdcfreq)
step = 75;
break;
case AXP803_ID:
+ case AXP813_ID:
/*
- * AXP803 DCDC work frequency setting has the same range and
- * step as AXP22X, but at a different register.
+ * AXP803/AXP813 DCDC work frequency setting has the same
+ * range and step as AXP22X, but at a different register.
* Fall through to the check below.
* (See include/linux/mfd/axp20x.h)
*/
@@ -561,6 +626,14 @@ static int axp20x_set_dcdc_workmode(struct regulator_dev *rdev, int id, u32 work
workmode <<= id - AXP803_DCDC1;
break;
+ case AXP813_ID:
+ if (id < AXP813_DCDC1 || id > AXP813_DCDC7)
+ return -EINVAL;
+
+ mask = AXP22X_WORKMODE_DCDCX_MASK(id - AXP813_DCDC1);
+ workmode <<= id - AXP813_DCDC1;
+ break;
+
default:
/* should not happen */
WARN_ON(1);
@@ -579,11 +652,12 @@ static bool axp20x_is_polyphase_slave(struct axp20x_dev *axp20x, int id)
u32 reg = 0;
/*
- * Currently in our supported AXP variants, only AXP803 and AXP806
- * have polyphase regulators.
+ * Currently in our supported AXP variants, only AXP803, AXP806,
+ * and AXP813 have polyphase regulators.
*/
switch (axp20x->variant) {
case AXP803_ID:
+ case AXP813_ID:
regmap_read(axp20x->regmap, AXP803_POLYPHASE_CTRL, &reg);
switch (id) {
@@ -656,6 +730,12 @@ static int axp20x_regulator_probe(struct platform_device *pdev)
regulators = axp809_regulators;
nregulators = AXP809_REG_ID_MAX;
break;
+ case AXP813_ID:
+ regulators = axp813_regulators;
+ nregulators = AXP813_REG_ID_MAX;
+ drivevbus = of_property_read_bool(pdev->dev.parent->of_node,
+ "x-powers,drive-vbus-en");
+ break;
default:
dev_err(&pdev->dev, "Unsupported AXP variant: %ld\n",
axp20x->variant);
@@ -677,6 +757,10 @@ static int axp20x_regulator_probe(struct platform_device *pdev)
if (axp20x_is_polyphase_slave(axp20x, i))
continue;
+ /* Support for AXP813's FLDO3 is not implemented */
+ if (axp20x->variant == AXP813_ID && i == AXP813_FLDO3)
+ continue;
+
/*
* Regulators DC1SW and DC5LDO are connected internally,
* so we have to handle their supply names separately.
diff --git a/drivers/regulator/da9211-regulator.c b/drivers/regulator/da9211-regulator.c
index aa47280efd32..9b8f47617724 100644
--- a/drivers/regulator/da9211-regulator.c
+++ b/drivers/regulator/da9211-regulator.c
@@ -1,6 +1,6 @@
/*
* da9211-regulator.c - Regulator device driver for DA9211/DA9212
- * /DA9213/DA9214/DA9215
+ * /DA9213/DA9223/DA9214/DA9224/DA9215/DA9225
* Copyright (C) 2015 Dialog Semiconductor Ltd.
*
* This library is free software; you can redistribute it and/or
@@ -496,8 +496,11 @@ static const struct i2c_device_id da9211_i2c_id[] = {
{"da9211", DA9211},
{"da9212", DA9212},
{"da9213", DA9213},
+ {"da9223", DA9223},
{"da9214", DA9214},
+ {"da9224", DA9224},
{"da9215", DA9215},
+ {"da9225", DA9225},
{},
};
MODULE_DEVICE_TABLE(i2c, da9211_i2c_id);
@@ -507,8 +510,11 @@ static const struct of_device_id da9211_dt_ids[] = {
{ .compatible = "dlg,da9211", .data = &da9211_i2c_id[0] },
{ .compatible = "dlg,da9212", .data = &da9211_i2c_id[1] },
{ .compatible = "dlg,da9213", .data = &da9211_i2c_id[2] },
- { .compatible = "dlg,da9214", .data = &da9211_i2c_id[3] },
- { .compatible = "dlg,da9215", .data = &da9211_i2c_id[4] },
+ { .compatible = "dlg,da9223", .data = &da9211_i2c_id[3] },
+ { .compatible = "dlg,da9214", .data = &da9211_i2c_id[4] },
+ { .compatible = "dlg,da9224", .data = &da9211_i2c_id[5] },
+ { .compatible = "dlg,da9215", .data = &da9211_i2c_id[6] },
+ { .compatible = "dlg,da9225", .data = &da9211_i2c_id[7] },
{},
};
MODULE_DEVICE_TABLE(of, da9211_dt_ids);
@@ -526,5 +532,5 @@ static struct i2c_driver da9211_regulator_driver = {
module_i2c_driver(da9211_regulator_driver);
MODULE_AUTHOR("James Ban <James.Ban.opensource@diasemi.com>");
-MODULE_DESCRIPTION("DA9211/DA9212/DA9213/DA9214/DA9215 regulator driver");
+MODULE_DESCRIPTION("DA9211/DA9212/DA9213/DA9223/DA9214/DA9224/DA9215/DA9225 regulator driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/da9211-regulator.h b/drivers/regulator/da9211-regulator.h
index b841bbf330cc..2cb32aab4f82 100644
--- a/drivers/regulator/da9211-regulator.h
+++ b/drivers/regulator/da9211-regulator.h
@@ -1,6 +1,6 @@
/*
* da9211-regulator.h - Regulator definitions for DA9211/DA9212
- * /DA9213/DA9214/DA9215
+ * /DA9213/DA9223/DA9214/DA9224/DA9215/DA9225
* Copyright (C) 2015 Dialog Semiconductor Ltd.
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/regulator/pbias-regulator.c b/drivers/regulator/pbias-regulator.c
index 0cb76ba29e84..8f782d22fdbe 100644
--- a/drivers/regulator/pbias-regulator.c
+++ b/drivers/regulator/pbias-regulator.c
@@ -34,6 +34,8 @@ struct pbias_reg_info {
u32 vmode;
unsigned int enable_time;
char *name;
+ const unsigned int *pbias_volt_table;
+ int n_voltages;
};
struct pbias_regulator_data {
@@ -49,11 +51,16 @@ struct pbias_of_data {
unsigned int offset;
};
-static const unsigned int pbias_volt_table[] = {
+static const unsigned int pbias_volt_table_3_0V[] = {
1800000,
3000000
};
+static const unsigned int pbias_volt_table_3_3V[] = {
+ 1800000,
+ 3300000
+};
+
static const struct regulator_ops pbias_regulator_voltage_ops = {
.list_voltage = regulator_list_voltage_table,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
@@ -69,6 +76,8 @@ static const struct pbias_reg_info pbias_mmc_omap2430 = {
.vmode = BIT(0),
.disable_val = 0,
.enable_time = 100,
+ .pbias_volt_table = pbias_volt_table_3_0V,
+ .n_voltages = 2,
.name = "pbias_mmc_omap2430"
};
@@ -77,6 +86,8 @@ static const struct pbias_reg_info pbias_sim_omap3 = {
.enable_mask = BIT(9),
.vmode = BIT(8),
.enable_time = 100,
+ .pbias_volt_table = pbias_volt_table_3_0V,
+ .n_voltages = 2,
.name = "pbias_sim_omap3"
};
@@ -86,6 +97,8 @@ static const struct pbias_reg_info pbias_mmc_omap4 = {
.disable_val = BIT(25),
.vmode = BIT(21),
.enable_time = 100,
+ .pbias_volt_table = pbias_volt_table_3_0V,
+ .n_voltages = 2,
.name = "pbias_mmc_omap4"
};
@@ -95,6 +108,8 @@ static const struct pbias_reg_info pbias_mmc_omap5 = {
.disable_val = BIT(25),
.vmode = BIT(21),
.enable_time = 100,
+ .pbias_volt_table = pbias_volt_table_3_3V,
+ .n_voltages = 2,
.name = "pbias_mmc_omap5"
};
@@ -199,8 +214,8 @@ static int pbias_regulator_probe(struct platform_device *pdev)
drvdata[data_idx].desc.owner = THIS_MODULE;
drvdata[data_idx].desc.type = REGULATOR_VOLTAGE;
drvdata[data_idx].desc.ops = &pbias_regulator_voltage_ops;
- drvdata[data_idx].desc.volt_table = pbias_volt_table;
- drvdata[data_idx].desc.n_voltages = 2;
+ drvdata[data_idx].desc.volt_table = info->pbias_volt_table;
+ drvdata[data_idx].desc.n_voltages = info->n_voltages;
drvdata[data_idx].desc.enable_time = info->enable_time;
drvdata[data_idx].desc.vsel_reg = offset;
drvdata[data_idx].desc.vsel_mask = info->vmode;
diff --git a/drivers/regulator/qcom_spmi-regulator.c b/drivers/regulator/qcom_spmi-regulator.c
index 16c5f84e06a7..0241ada47d04 100644
--- a/drivers/regulator/qcom_spmi-regulator.c
+++ b/drivers/regulator/qcom_spmi-regulator.c
@@ -593,13 +593,20 @@ static int spmi_sw_selector_to_hw(struct spmi_regulator *vreg,
u8 *voltage_sel)
{
const struct spmi_voltage_range *range, *end;
+ unsigned offset;
range = vreg->set_points->range;
end = range + vreg->set_points->count;
for (; range < end; range++) {
if (selector < range->n_voltages) {
- *voltage_sel = selector;
+ /*
+ * hardware selectors between set point min and real
+ * min are invalid so we ignore them
+ */
+ offset = range->set_point_min_uV - range->min_uV;
+ offset /= range->step_uV;
+ *voltage_sel = selector + offset;
*range_sel = range->range_sel;
return 0;
}
@@ -613,15 +620,35 @@ static int spmi_sw_selector_to_hw(struct spmi_regulator *vreg,
static int spmi_hw_selector_to_sw(struct spmi_regulator *vreg, u8 hw_sel,
const struct spmi_voltage_range *range)
{
- int sw_sel = hw_sel;
+ unsigned sw_sel = 0;
+ unsigned offset, max_hw_sel;
const struct spmi_voltage_range *r = vreg->set_points->range;
-
- while (r != range) {
+ const struct spmi_voltage_range *end = r + vreg->set_points->count;
+
+ for (; r < end; r++) {
+ if (r == range && range->n_voltages) {
+ /*
+ * hardware selectors between set point min and real
+ * min and between set point max and real max are
+ * invalid so we return an error if they're
+ * programmed into the hardware
+ */
+ offset = range->set_point_min_uV - range->min_uV;
+ offset /= range->step_uV;
+ if (hw_sel < offset)
+ return -EINVAL;
+
+ max_hw_sel = range->set_point_max_uV - range->min_uV;
+ max_hw_sel /= range->step_uV;
+ if (hw_sel > max_hw_sel)
+ return -EINVAL;
+
+ return sw_sel + hw_sel - offset;
+ }
sw_sel += r->n_voltages;
- r++;
}
- return sw_sel;
+ return -EINVAL;
}
static const struct spmi_voltage_range *
@@ -1619,11 +1646,20 @@ static const struct spmi_regulator_data pm8994_regulators[] = {
{ }
};
+static const struct spmi_regulator_data pmi8994_regulators[] = {
+ { "s1", 0x1400, "vdd_s1", },
+ { "s2", 0x1700, "vdd_s2", },
+ { "s3", 0x1a00, "vdd_s3", },
+ { "l1", 0x4000, "vdd_l1", },
+ { }
+};
+
static const struct of_device_id qcom_spmi_regulator_match[] = {
{ .compatible = "qcom,pm8841-regulators", .data = &pm8841_regulators },
{ .compatible = "qcom,pm8916-regulators", .data = &pm8916_regulators },
{ .compatible = "qcom,pm8941-regulators", .data = &pm8941_regulators },
{ .compatible = "qcom,pm8994-regulators", .data = &pm8994_regulators },
+ { .compatible = "qcom,pmi8994-regulators", .data = &pmi8994_regulators },
{ }
};
MODULE_DEVICE_TABLE(of, qcom_spmi_regulator_match);
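
A worked example of the selector-offset arithmetic the qcom_spmi hunks introduce: hardware selectors below the driver's set point minimum are skipped on the way in and rejected on the way back. The range values below (min_uV, set_point_min_uV, step_uV) are hypothetical, not taken from a real PMIC table.

	#include <stdio.h>

	struct range {
		unsigned int min_uV;           /* lowest voltage the hardware encodes */
		unsigned int set_point_min_uV; /* lowest voltage the driver exposes   */
		unsigned int step_uV;
	};

	int main(void)
	{
		struct range r = { .min_uV = 375000, .set_point_min_uV = 400000,
				   .step_uV = 12500 };
		unsigned int offset = (r.set_point_min_uV - r.min_uV) / r.step_uV;
		unsigned int sw_sel = 0;       /* first selector the regulator core hands us */
		unsigned int hw_sel = sw_sel + offset;

		/* offset = 2, so software selector 0 programs hardware selector 2 */
		printf("offset=%u sw_sel=%u -> hw_sel=%u (%u uV)\n",
		       offset, sw_sel, hw_sel, r.min_uV + hw_sel * r.step_uV);

		/* hardware selectors below the offset are rejected on read-back */
		printf("hw_sel=1 valid? %s\n", 1 < offset ? "no (-EINVAL)" : "yes");
		return 0;
	}
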
diff --git a/drivers/regulator/tps65218-regulator.c b/drivers/regulator/tps65218-regulator.c
index 9aafbb03482d..bc489958fed7 100644
--- a/drivers/regulator/tps65218-regulator.c
+++ b/drivers/regulator/tps65218-regulator.c
@@ -154,7 +154,7 @@ static int tps65218_pmic_set_suspend_disable(struct regulator_dev *dev)
if (!tps->strobes[rid]) {
if (rid == TPS65218_DCDC_3)
- tps->info[rid]->strobe = 3;
+ tps->strobes[rid] = 3;
else
return -EINVAL;
}
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index 4630782b5456..a7917d473774 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -296,7 +296,7 @@ static void dasd_eer_write_standard_trigger(struct dasd_device *device,
{
struct dasd_ccw_req *temp_cqr;
int data_size;
- struct timeval tv;
+ struct timespec64 ts;
struct dasd_eer_header header;
unsigned long flags;
struct eerbuffer *eerb;
@@ -310,9 +310,9 @@ static void dasd_eer_write_standard_trigger(struct dasd_device *device,
header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
header.trigger = trigger;
- do_gettimeofday(&tv);
- header.tv_sec = tv.tv_sec;
- header.tv_usec = tv.tv_usec;
+ ktime_get_real_ts64(&ts);
+ header.tv_sec = ts.tv_sec;
+ header.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
strncpy(header.busid, dev_name(&device->cdev->dev),
DASD_EER_BUSID_SIZE);
@@ -340,7 +340,7 @@ static void dasd_eer_write_snss_trigger(struct dasd_device *device,
{
int data_size;
int snss_rc;
- struct timeval tv;
+ struct timespec64 ts;
struct dasd_eer_header header;
unsigned long flags;
struct eerbuffer *eerb;
@@ -353,9 +353,9 @@ static void dasd_eer_write_snss_trigger(struct dasd_device *device,
header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
header.trigger = DASD_EER_STATECHANGE;
- do_gettimeofday(&tv);
- header.tv_sec = tv.tv_sec;
- header.tv_usec = tv.tv_usec;
+ ktime_get_real_ts64(&ts);
+ header.tv_sec = ts.tv_sec;
+ header.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
strncpy(header.busid, dev_name(&device->cdev->dev),
DASD_EER_BUSID_SIZE);
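
A userspace sketch of the seconds/microseconds split the dasd_eer hunks adopt once struct timeval is dropped in favour of the y2038-safe 64-bit type. Here clock_gettime() and struct demo_header stand in for ktime_get_real_ts64() and struct dasd_eer_header; both are assumptions for illustration, not the driver's code.

	#include <stdio.h>
	#include <time.h>

	#define NSEC_PER_USEC 1000L

	struct demo_header {          /* stands in for struct dasd_eer_header */
		long long tv_sec;
		long long tv_usec;
	};

	int main(void)
	{
		struct timespec ts;
		struct demo_header header;

		clock_gettime(CLOCK_REALTIME, &ts);
		header.tv_sec = ts.tv_sec;
		header.tv_usec = ts.tv_nsec / NSEC_PER_USEC;  /* ns -> us */

		printf("%lld.%06lld\n", header.tv_sec, header.tv_usec);
		return 0;
	}
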
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index e94080a5196f..b095a23bcc0c 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -96,14 +96,6 @@ do { \
d_data); \
} while(0)
-#define DBF_DEV_EXC(d_level, d_device, d_str, d_data...) \
-do { \
- debug_sprintf_exception(d_device->debug_area, \
- d_level, \
- d_str "\n", \
- d_data); \
-} while(0)
-
#define DBF_EVENT(d_level, d_str, d_data...)\
do { \
debug_sprintf_event(dasd_debug_area, \
@@ -122,14 +114,6 @@ do { \
__dev_id.ssid, __dev_id.devno, d_data); \
} while (0)
-#define DBF_EXC(d_level, d_str, d_data...)\
-do { \
- debug_sprintf_exception(dasd_debug_area, \
- d_level,\
- d_str "\n", \
- d_data); \
-} while(0)
-
/* limit size for an errorstring */
#define ERRORLENGTH 30
diff --git a/drivers/s390/block/scm_blk.h b/drivers/s390/block/scm_blk.h
index aa42c3a2c90a..a05a4297cfae 100644
--- a/drivers/s390/block/scm_blk.h
+++ b/drivers/s390/block/scm_blk.h
@@ -56,13 +56,7 @@ extern debug_info_t *scm_debug;
static inline void SCM_LOG_HEX(int level, void *data, int length)
{
- if (!debug_level_enabled(scm_debug, level))
- return;
- while (length > 0) {
- debug_event(scm_debug, level, data, length);
- length -= scm_debug->buf_size;
- data += scm_debug->buf_size;
- }
+ debug_event(scm_debug, level, data, length);
}
static inline void SCM_LOG_STATE(int level, struct scm_device *scmdev)
diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c
index d247f238faf8..7027e61a6931 100644
--- a/drivers/s390/char/sclp_con.c
+++ b/drivers/s390/char/sclp_con.c
@@ -211,11 +211,8 @@ sclp_console_write(struct console *console, const char *message,
/* Setup timer to output current console buffer after 1/10 second */
if (sclp_conbuf != NULL && sclp_chars_in_buffer(sclp_conbuf) != 0 &&
!timer_pending(&sclp_con_timer)) {
- init_timer(&sclp_con_timer);
- sclp_con_timer.function = sclp_console_timeout;
- sclp_con_timer.data = 0UL;
- sclp_con_timer.expires = jiffies + HZ/10;
- add_timer(&sclp_con_timer);
+ setup_timer(&sclp_con_timer, sclp_console_timeout, 0UL);
+ mod_timer(&sclp_con_timer, jiffies + HZ / 10);
}
out:
spin_unlock_irqrestore(&sclp_con_lock, flags);
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c
index 875628dab419..1cceefdc03e0 100644
--- a/drivers/s390/char/sclp_tty.c
+++ b/drivers/s390/char/sclp_tty.c
@@ -218,11 +218,8 @@ static int sclp_tty_write_string(const unsigned char *str, int count, int may_fa
/* Setup timer to output current console buffer after 1/10 second */
if (sclp_ttybuf && sclp_chars_in_buffer(sclp_ttybuf) &&
!timer_pending(&sclp_tty_timer)) {
- init_timer(&sclp_tty_timer);
- sclp_tty_timer.function = sclp_tty_timeout;
- sclp_tty_timer.data = 0UL;
- sclp_tty_timer.expires = jiffies + HZ/10;
- add_timer(&sclp_tty_timer);
+ setup_timer(&sclp_tty_timer, sclp_tty_timeout, 0UL);
+ mod_timer(&sclp_tty_timer, jiffies + HZ / 10);
}
spin_unlock_irqrestore(&sclp_tty_lock, flags);
out:
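
The two sclp hunks above replace the open-coded init_timer() sequence with setup_timer() plus mod_timer(). A kernel-module-style sketch of that idiom as it existed before timer_setup(); the demo_* names are invented and the snippet is illustrative only, not part of the patch.

	#include <linux/module.h>
	#include <linux/timer.h>
	#include <linux/jiffies.h>

	static struct timer_list demo_timer;

	static void demo_timeout(unsigned long data)
	{
		pr_info("demo timer fired (data=%lu)\n", data);
	}

	static int __init demo_init(void)
	{
		/* Bind handler and data once... */
		setup_timer(&demo_timer, demo_timeout, 0UL);
		/* ...then arm (or re-arm) in one call; mod_timer() also activates it. */
		mod_timer(&demo_timer, jiffies + HZ / 10);
		return 0;
	}

	static void __exit demo_exit(void)
	{
		del_timer_sync(&demo_timer);
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");
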
diff --git a/drivers/s390/char/tape_class.c b/drivers/s390/char/tape_class.c
index 91c3c642c76e..e7d23048d3f0 100644
--- a/drivers/s390/char/tape_class.c
+++ b/drivers/s390/char/tape_class.c
@@ -68,9 +68,8 @@ struct tape_class_device *register_tape_dev(
tcd->char_device->owner = fops->owner;
tcd->char_device->ops = fops;
- tcd->char_device->dev = dev;
- rc = cdev_add(tcd->char_device, tcd->char_device->dev, 1);
+ rc = cdev_add(tcd->char_device, dev, 1);
if (rc)
goto fail_with_cdev;
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index b19020b9efff..62559dc0169f 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -812,8 +812,7 @@ static int vmlogrdr_register_cdev(dev_t dev)
}
vmlogrdr_cdev->owner = THIS_MODULE;
vmlogrdr_cdev->ops = &vmlogrdr_fops;
- vmlogrdr_cdev->dev = dev;
- rc = cdev_add(vmlogrdr_cdev, vmlogrdr_cdev->dev, MAXMINOR);
+ rc = cdev_add(vmlogrdr_cdev, dev, MAXMINOR);
if (!rc)
return 0;
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index 04aceb694d51..fa90ef05afc0 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -110,7 +110,7 @@ static struct urdev *urdev_alloc(struct ccw_device *cdev)
mutex_init(&urd->io_mutex);
init_waitqueue_head(&urd->wait);
spin_lock_init(&urd->open_lock);
- atomic_set(&urd->ref_count, 1);
+ refcount_set(&urd->ref_count, 1);
urd->cdev = cdev;
get_device(&cdev->dev);
return urd;
@@ -126,7 +126,7 @@ static void urdev_free(struct urdev *urd)
static void urdev_get(struct urdev *urd)
{
- atomic_inc(&urd->ref_count);
+ refcount_inc(&urd->ref_count);
}
static struct urdev *urdev_get_from_cdev(struct ccw_device *cdev)
@@ -159,7 +159,7 @@ static struct urdev *urdev_get_from_devno(u16 devno)
static void urdev_put(struct urdev *urd)
{
- if (atomic_dec_and_test(&urd->ref_count))
+ if (refcount_dec_and_test(&urd->ref_count))
urdev_free(urd);
}
@@ -892,10 +892,9 @@ static int ur_set_online(struct ccw_device *cdev)
}
urd->char_device->ops = &ur_fops;
- urd->char_device->dev = MKDEV(major, minor);
urd->char_device->owner = ur_fops.owner;
- rc = cdev_add(urd->char_device, urd->char_device->dev, 1);
+ rc = cdev_add(urd->char_device, MKDEV(major, minor), 1);
if (rc)
goto fail_free_cdev;
if (urd->cdev->id.cu_type == READER_PUNCH_DEVTYPE) {
@@ -946,7 +945,7 @@ static int ur_set_offline_force(struct ccw_device *cdev, int force)
rc = -EBUSY;
goto fail_urdev_put;
}
- if (!force && (atomic_read(&urd->ref_count) > 2)) {
+ if (!force && (refcount_read(&urd->ref_count) > 2)) {
/* There is still a user of urd (e.g. ur_open) */
TRACE("ur_set_offline: BUSY\n");
rc = -EBUSY;
diff --git a/drivers/s390/char/vmur.h b/drivers/s390/char/vmur.h
index 67164ba22f11..608b0719ce17 100644
--- a/drivers/s390/char/vmur.h
+++ b/drivers/s390/char/vmur.h
@@ -12,6 +12,8 @@
#ifndef _VMUR_H_
#define _VMUR_H_
+#include <linux/refcount.h>
+
#define DEV_CLASS_UR_I 0x20 /* diag210 unit record input device class */
#define DEV_CLASS_UR_O 0x10 /* diag210 unit record output device class */
/*
@@ -70,7 +72,7 @@ struct urdev {
size_t reclen; /* Record length for *write* CCWs */
int class; /* VM device class */
int io_request_rc; /* return code from I/O request */
- atomic_t ref_count; /* reference counter */
+ refcount_t ref_count; /* reference counter */
wait_queue_head_t wait; /* wait queue to serialize open */
int open_flag; /* "urdev is open" flag */
spinlock_t open_lock; /* serialize critical sections */
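
The vmur hunks above switch urdev reference counting from atomic_t to refcount_t, which saturates instead of wrapping on overflow or underflow. A kernel-style sketch of the same get/put lifecycle; demo_obj and its helpers are invented names, shown only to illustrate the conversion.

	#include <linux/refcount.h>
	#include <linux/slab.h>

	struct demo_obj {
		refcount_t ref;
		/* ... payload ... */
	};

	static struct demo_obj *demo_alloc(void)
	{
		struct demo_obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

		if (o)
			refcount_set(&o->ref, 1);	/* creator holds one reference */
		return o;
	}

	static void demo_get(struct demo_obj *o)
	{
		refcount_inc(&o->ref);			/* warns if the count was already 0 */
	}

	static void demo_put(struct demo_obj *o)
	{
		if (refcount_dec_and_test(&o->ref))	/* last reference frees the object */
			kfree(o);
	}
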
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 34b9ad6b3143..e2f7b6e93efd 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -373,6 +373,12 @@ int ccwgroup_create_dev(struct device *parent, struct ccwgroup_driver *gdrv,
rc = -EINVAL;
goto error;
}
+ /* Check if the devices are bound to the required ccw driver. */
+ if (gdev->count && gdrv && gdrv->ccw_driver &&
+ gdev->cdev[0]->drv != gdrv->ccw_driver) {
+ rc = -EINVAL;
+ goto error;
+ }
dev_set_name(&gdev->dev, "%s", dev_name(&gdev->cdev[0]->dev));
gdev->dev.groups = ccwgroup_attr_groups;
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index 735052ecd3e5..8e7e19b9e92c 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -43,11 +43,7 @@ static DEFINE_MUTEX(on_close_mutex);
static void CHSC_LOG_HEX(int level, void *data, int length)
{
- while (length > 0) {
- debug_event(chsc_debug_log_id, level, data, length);
- length -= chsc_debug_log_id->buf_size;
- data += chsc_debug_log_id->buf_size;
- }
+ debug_event(chsc_debug_log_id, level, data, length);
}
MODULE_AUTHOR("IBM Corporation");
diff --git a/drivers/s390/cio/cio_debug.h b/drivers/s390/cio/cio_debug.h
index fa817efcec8f..7bdbe73707c2 100644
--- a/drivers/s390/cio/cio_debug.h
+++ b/drivers/s390/cio/cio_debug.h
@@ -23,13 +23,7 @@ extern debug_info_t *cio_debug_crw_id;
static inline void CIO_HEX_EVENT(int level, void *data, int length)
{
- if (unlikely(!cio_debug_trace_id))
- return;
- while (length > 0) {
- debug_event(cio_debug_trace_id, level, data, length);
- length -= cio_debug_trace_id->buf_size;
- data += cio_debug_trace_id->buf_size;
- }
+ debug_event(cio_debug_trace_id, level, data, length);
}
#endif
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index 220491d27ef4..7d59230e88bb 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -58,8 +58,9 @@
/* indices for READCMB */
enum cmb_index {
+ avg_utilization = -1,
	/* basic and extended format: */
- cmb_ssch_rsch_count,
+ cmb_ssch_rsch_count = 0,
cmb_sample_count,
cmb_device_connect_time,
cmb_function_pending_time,
@@ -215,71 +216,52 @@ struct set_schib_struct {
unsigned long address;
wait_queue_head_t wait;
int ret;
- struct kref kref;
};
-static void cmf_set_schib_release(struct kref *kref)
-{
- struct set_schib_struct *set_data;
-
- set_data = container_of(kref, struct set_schib_struct, kref);
- kfree(set_data);
-}
-
#define CMF_PENDING 1
+#define SET_SCHIB_TIMEOUT (10 * HZ)
static int set_schib_wait(struct ccw_device *cdev, u32 mme,
- int mbfc, unsigned long address)
+ int mbfc, unsigned long address)
{
- struct set_schib_struct *set_data;
- int ret;
+ struct set_schib_struct set_data;
+ int ret = -ENODEV;
spin_lock_irq(cdev->ccwlock);
- if (!cdev->private->cmb) {
- ret = -ENODEV;
+ if (!cdev->private->cmb)
goto out;
- }
- set_data = kzalloc(sizeof(struct set_schib_struct), GFP_ATOMIC);
- if (!set_data) {
- ret = -ENOMEM;
- goto out;
- }
- init_waitqueue_head(&set_data->wait);
- kref_init(&set_data->kref);
- set_data->mme = mme;
- set_data->mbfc = mbfc;
- set_data->address = address;
ret = set_schib(cdev, mme, mbfc, address);
if (ret != -EBUSY)
- goto out_put;
+ goto out;
- if (cdev->private->state != DEV_STATE_ONLINE) {
- /* if the device is not online, don't even try again */
- ret = -EBUSY;
- goto out_put;
- }
+ /* if the device is not online, don't even try again */
+ if (cdev->private->state != DEV_STATE_ONLINE)
+ goto out;
- cdev->private->state = DEV_STATE_CMFCHANGE;
- set_data->ret = CMF_PENDING;
- cdev->private->cmb_wait = set_data;
+ init_waitqueue_head(&set_data.wait);
+ set_data.mme = mme;
+ set_data.mbfc = mbfc;
+ set_data.address = address;
+ set_data.ret = CMF_PENDING;
+ cdev->private->state = DEV_STATE_CMFCHANGE;
+ cdev->private->cmb_wait = &set_data;
spin_unlock_irq(cdev->ccwlock);
- if (wait_event_interruptible(set_data->wait,
- set_data->ret != CMF_PENDING)) {
- spin_lock_irq(cdev->ccwlock);
- if (set_data->ret == CMF_PENDING) {
- set_data->ret = -ERESTARTSYS;
+
+ ret = wait_event_interruptible_timeout(set_data.wait,
+ set_data.ret != CMF_PENDING,
+ SET_SCHIB_TIMEOUT);
+ spin_lock_irq(cdev->ccwlock);
+ if (ret <= 0) {
+ if (set_data.ret == CMF_PENDING) {
+ set_data.ret = (ret == 0) ? -ETIME : ret;
if (cdev->private->state == DEV_STATE_CMFCHANGE)
cdev->private->state = DEV_STATE_ONLINE;
}
- spin_unlock_irq(cdev->ccwlock);
}
- spin_lock_irq(cdev->ccwlock);
cdev->private->cmb_wait = NULL;
- ret = set_data->ret;
-out_put:
- kref_put(&set_data->kref, cmf_set_schib_release);
+ ret = set_data.ret;
out:
spin_unlock_irq(cdev->ccwlock);
return ret;
@@ -287,28 +269,21 @@ out:
void retry_set_schib(struct ccw_device *cdev)
{
- struct set_schib_struct *set_data;
+ struct set_schib_struct *set_data = cdev->private->cmb_wait;
- set_data = cdev->private->cmb_wait;
- if (!set_data) {
- WARN_ON(1);
+ if (!set_data)
return;
- }
- kref_get(&set_data->kref);
+
set_data->ret = set_schib(cdev, set_data->mme, set_data->mbfc,
set_data->address);
wake_up(&set_data->wait);
- kref_put(&set_data->kref, cmf_set_schib_release);
}
static int cmf_copy_block(struct ccw_device *cdev)
{
- struct subchannel *sch;
- void *reference_buf;
- void *hw_block;
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct cmb_data *cmb_data;
-
- sch = to_subchannel(cdev->dev.parent);
+ void *hw_block;
if (cio_update_schib(sch))
return -ENODEV;
@@ -323,102 +298,65 @@ static int cmf_copy_block(struct ccw_device *cdev)
}
cmb_data = cdev->private->cmb;
hw_block = cmb_data->hw_block;
- if (!memcmp(cmb_data->last_block, hw_block, cmb_data->size))
- /* No need to copy. */
- return 0;
- reference_buf = kzalloc(cmb_data->size, GFP_ATOMIC);
- if (!reference_buf)
- return -ENOMEM;
- /* Ensure consistency of block copied from hardware. */
- do {
- memcpy(cmb_data->last_block, hw_block, cmb_data->size);
- memcpy(reference_buf, hw_block, cmb_data->size);
- } while (memcmp(cmb_data->last_block, reference_buf, cmb_data->size));
+ memcpy(cmb_data->last_block, hw_block, cmb_data->size);
cmb_data->last_update = get_tod_clock();
- kfree(reference_buf);
return 0;
}
struct copy_block_struct {
wait_queue_head_t wait;
int ret;
- struct kref kref;
};
-static void cmf_copy_block_release(struct kref *kref)
-{
- struct copy_block_struct *copy_block;
-
- copy_block = container_of(kref, struct copy_block_struct, kref);
- kfree(copy_block);
-}
-
static int cmf_cmb_copy_wait(struct ccw_device *cdev)
{
- struct copy_block_struct *copy_block;
- int ret;
- unsigned long flags;
+ struct copy_block_struct copy_block;
+ int ret = -ENODEV;
- spin_lock_irqsave(cdev->ccwlock, flags);
- if (!cdev->private->cmb) {
- ret = -ENODEV;
- goto out;
- }
- copy_block = kzalloc(sizeof(struct copy_block_struct), GFP_ATOMIC);
- if (!copy_block) {
- ret = -ENOMEM;
+ spin_lock_irq(cdev->ccwlock);
+ if (!cdev->private->cmb)
goto out;
- }
- init_waitqueue_head(&copy_block->wait);
- kref_init(&copy_block->kref);
ret = cmf_copy_block(cdev);
if (ret != -EBUSY)
- goto out_put;
+ goto out;
- if (cdev->private->state != DEV_STATE_ONLINE) {
- ret = -EBUSY;
- goto out_put;
- }
+ if (cdev->private->state != DEV_STATE_ONLINE)
+ goto out;
+
+ init_waitqueue_head(&copy_block.wait);
+ copy_block.ret = CMF_PENDING;
cdev->private->state = DEV_STATE_CMFUPDATE;
- copy_block->ret = CMF_PENDING;
- cdev->private->cmb_wait = copy_block;
+ cdev->private->cmb_wait = &copy_block;
+ spin_unlock_irq(cdev->ccwlock);
- spin_unlock_irqrestore(cdev->ccwlock, flags);
- if (wait_event_interruptible(copy_block->wait,
- copy_block->ret != CMF_PENDING)) {
- spin_lock_irqsave(cdev->ccwlock, flags);
- if (copy_block->ret == CMF_PENDING) {
- copy_block->ret = -ERESTARTSYS;
+ ret = wait_event_interruptible(copy_block.wait,
+ copy_block.ret != CMF_PENDING);
+ spin_lock_irq(cdev->ccwlock);
+ if (ret) {
+ if (copy_block.ret == CMF_PENDING) {
+ copy_block.ret = -ERESTARTSYS;
if (cdev->private->state == DEV_STATE_CMFUPDATE)
cdev->private->state = DEV_STATE_ONLINE;
}
- spin_unlock_irqrestore(cdev->ccwlock, flags);
}
- spin_lock_irqsave(cdev->ccwlock, flags);
cdev->private->cmb_wait = NULL;
- ret = copy_block->ret;
-out_put:
- kref_put(&copy_block->kref, cmf_copy_block_release);
+ ret = copy_block.ret;
out:
- spin_unlock_irqrestore(cdev->ccwlock, flags);
+ spin_unlock_irq(cdev->ccwlock);
return ret;
}
void cmf_retry_copy_block(struct ccw_device *cdev)
{
- struct copy_block_struct *copy_block;
+ struct copy_block_struct *copy_block = cdev->private->cmb_wait;
- copy_block = cdev->private->cmb_wait;
- if (!copy_block) {
- WARN_ON(1);
+ if (!copy_block)
return;
- }
- kref_get(&copy_block->kref);
+
copy_block->ret = cmf_copy_block(cdev);
wake_up(&copy_block->wait);
- kref_put(&copy_block->kref, cmf_copy_block_release);
}
static void cmf_generic_reset(struct ccw_device *cdev)
@@ -650,25 +588,44 @@ static int set_cmb(struct ccw_device *cdev, u32 mme)
return set_schib_wait(cdev, mme, 0, offset);
}
+/* calculate utilization in 0.1 percent units */
+static u64 __cmb_utilization(u64 device_connect_time, u64 function_pending_time,
+ u64 device_disconnect_time, u64 start_time)
+{
+ u64 utilization, elapsed_time;
+
+ utilization = time_to_nsec(device_connect_time +
+ function_pending_time +
+ device_disconnect_time);
+
+ elapsed_time = get_tod_clock() - start_time;
+ elapsed_time = tod_to_ns(elapsed_time);
+ elapsed_time /= 1000;
+
+ return elapsed_time ? (utilization / elapsed_time) : 0;
+}
+
static u64 read_cmb(struct ccw_device *cdev, int index)
{
+ struct cmb_data *cmb_data;
+ unsigned long flags;
struct cmb *cmb;
+ u64 ret = 0;
u32 val;
- int ret;
- unsigned long flags;
-
- ret = cmf_cmb_copy_wait(cdev);
- if (ret < 0)
- return 0;
spin_lock_irqsave(cdev->ccwlock, flags);
- if (!cdev->private->cmb) {
- ret = 0;
+ cmb_data = cdev->private->cmb;
+ if (!cmb_data)
goto out;
- }
- cmb = ((struct cmb_data *)cdev->private->cmb)->last_block;
+ cmb = cmb_data->hw_block;
switch (index) {
+ case avg_utilization:
+ ret = __cmb_utilization(cmb->device_connect_time,
+ cmb->function_pending_time,
+ cmb->device_disconnect_time,
+ cdev->private->cmb_start_time);
+ goto out;
case cmb_ssch_rsch_count:
ret = cmb->ssch_rsch_count;
goto out;
@@ -691,7 +648,6 @@ static u64 read_cmb(struct ccw_device *cdev, int index)
val = cmb->device_active_only_time;
break;
default:
- ret = 0;
goto out;
}
ret = time_to_avg_nsec(val, cmb->sample_count);
@@ -729,8 +685,7 @@ static int readall_cmb(struct ccw_device *cdev, struct cmbdata *data)
/* we only know values before device_busy_time */
data->size = offsetof(struct cmbdata, device_busy_time);
- /* convert to nanoseconds */
- data->elapsed_time = (time * 1000) >> 12;
+ data->elapsed_time = tod_to_ns(time);
/* copy data to new structure */
data->ssch_rsch_count = cmb->ssch_rsch_count;
@@ -904,28 +859,27 @@ static int set_cmbe(struct ccw_device *cdev, u32 mme)
return set_schib_wait(cdev, mme, 1, mba);
}
-
static u64 read_cmbe(struct ccw_device *cdev, int index)
{
- struct cmbe *cmb;
struct cmb_data *cmb_data;
- u32 val;
- int ret;
unsigned long flags;
-
- ret = cmf_cmb_copy_wait(cdev);
- if (ret < 0)
- return 0;
+ struct cmbe *cmb;
+ u64 ret = 0;
+ u32 val;
spin_lock_irqsave(cdev->ccwlock, flags);
cmb_data = cdev->private->cmb;
- if (!cmb_data) {
- ret = 0;
+ if (!cmb_data)
goto out;
- }
- cmb = cmb_data->last_block;
+ cmb = cmb_data->hw_block;
switch (index) {
+ case avg_utilization:
+ ret = __cmb_utilization(cmb->device_connect_time,
+ cmb->function_pending_time,
+ cmb->device_disconnect_time,
+ cdev->private->cmb_start_time);
+ goto out;
case cmb_ssch_rsch_count:
ret = cmb->ssch_rsch_count;
goto out;
@@ -954,7 +908,6 @@ static u64 read_cmbe(struct ccw_device *cdev, int index)
val = cmb->initial_command_response_time;
break;
default:
- ret = 0;
goto out;
}
ret = time_to_avg_nsec(val, cmb->sample_count);
@@ -991,8 +944,7 @@ static int readall_cmbe(struct ccw_device *cdev, struct cmbdata *data)
/* we only know values before device_busy_time */
data->size = offsetof(struct cmbdata, device_busy_time);
- /* conver to nanoseconds */
- data->elapsed_time = (time * 1000) >> 12;
+ data->elapsed_time = tod_to_ns(time);
cmb = cmb_data->last_block;
/* copy data to new structure */
@@ -1045,19 +997,15 @@ static ssize_t cmb_show_avg_sample_interval(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct ccw_device *cdev;
- long interval;
+ struct ccw_device *cdev = to_ccwdev(dev);
unsigned long count;
- struct cmb_data *cmb_data;
+ long interval;
- cdev = to_ccwdev(dev);
count = cmf_read(cdev, cmb_sample_count);
spin_lock_irq(cdev->ccwlock);
- cmb_data = cdev->private->cmb;
if (count) {
- interval = cmb_data->last_update -
- cdev->private->cmb_start_time;
- interval = (interval * 1000) >> 12;
+ interval = get_tod_clock() - cdev->private->cmb_start_time;
+ interval = tod_to_ns(interval);
interval /= count;
} else
interval = -1;
@@ -1069,27 +1017,9 @@ static ssize_t cmb_show_avg_utilization(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct cmbdata data;
- u64 utilization;
- unsigned long t, u;
- int ret;
-
- ret = cmf_readall(to_ccwdev(dev), &data);
- if (ret == -EAGAIN || ret == -ENODEV)
- /* No data (yet/currently) available to use for calculation. */
- return sprintf(buf, "n/a\n");
- else if (ret)
- return ret;
-
- utilization = data.device_connect_time +
- data.function_pending_time +
- data.device_disconnect_time;
-
- /* calculate value in 0.1 percent units */
- t = data.elapsed_time / 1000;
- u = utilization / t;
+ unsigned long u = cmf_read(to_ccwdev(dev), avg_utilization);
- return sprintf(buf, "%02ld.%01ld%%\n", u/ 10, u - (u/ 10) * 10);
+ return sprintf(buf, "%02lu.%01lu%%\n", u / 10, u % 10);
}
#define cmf_attr(name) \
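
A worked example of the 0.1-percent units produced by __cmb_utilization() above and consumed by the "%02lu.%01lu%%" format in cmb_show_avg_utilization(): busy time in nanoseconds divided by elapsed time in microseconds yields a value of 237 for 23.7% utilization. The input numbers below are made up.

	#include <stdio.h>

	int main(void)
	{
		unsigned long busy_ns = 237000;   /* connect + pending + disconnect time */
		unsigned long elapsed_us = 1000;  /* elapsed time, converted to microseconds */
		unsigned long u = busy_ns / elapsed_us;   /* 237 -> 23.7% */

		printf("%02lu.%01lu%%\n", u / 10, u % 10);
		return 0;
	}
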
diff --git a/drivers/s390/cio/eadm_sch.c b/drivers/s390/cio/eadm_sch.c
index 0f11f3bcac82..d14795f7110b 100644
--- a/drivers/s390/cio/eadm_sch.c
+++ b/drivers/s390/cio/eadm_sch.c
@@ -43,13 +43,7 @@ static debug_info_t *eadm_debug;
static void EADM_LOG_HEX(int level, void *data, int length)
{
- if (!debug_level_enabled(eadm_debug, level))
- return;
- while (length > 0) {
- debug_event(eadm_debug, level, data, length);
- length -= eadm_debug->buf_size;
- data += eadm_debug->buf_size;
- }
+ debug_event(eadm_debug, level, data, length);
}
static void orb_init(union orb *orb)
diff --git a/drivers/s390/cio/qdio_debug.h b/drivers/s390/cio/qdio_debug.h
index e06496ab0036..f85f5fa7cefc 100644
--- a/drivers/s390/cio/qdio_debug.h
+++ b/drivers/s390/cio/qdio_debug.h
@@ -34,11 +34,7 @@ extern debug_info_t *qdio_dbf_error;
static inline void DBF_HEX(void *addr, int len)
{
- while (len > 0) {
- debug_event(qdio_dbf_setup, DBF_ERR, addr, len);
- len -= qdio_dbf_setup->buf_size;
- addr += qdio_dbf_setup->buf_size;
- }
+ debug_event(qdio_dbf_setup, DBF_ERR, addr, len);
}
#define DBF_ERROR(text...) \
@@ -50,11 +46,7 @@ static inline void DBF_HEX(void *addr, int len)
static inline void DBF_ERROR_HEX(void *addr, int len)
{
- while (len > 0) {
- debug_event(qdio_dbf_error, DBF_ERR, addr, len);
- len -= qdio_dbf_error->buf_size;
- addr += qdio_dbf_error->buf_size;
- }
+ debug_event(qdio_dbf_error, DBF_ERR, addr, len);
}
#define DBF_DEV_EVENT(level, device, text...) \
@@ -69,11 +61,7 @@ static inline void DBF_ERROR_HEX(void *addr, int len)
static inline void DBF_DEV_HEX(struct qdio_irq *dev, void *addr,
int len, int level)
{
- while (len > 0) {
- debug_event(dev->debug_area, level, addr, len);
- len -= dev->debug_area->buf_size;
- addr += dev->debug_area->buf_size;
- }
+ debug_event(dev->debug_area, level, addr, len);
}
int qdio_allocate_dbf(struct qdio_initialize *init_data,
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index a739bdf9630e..0787b587e4b8 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -57,10 +57,8 @@ static u32 *get_indicator(void)
int i;
for (i = 0; i < TIQDIO_NR_NONSHARED_IND; i++)
- if (!atomic_read(&q_indicators[i].count)) {
- atomic_set(&q_indicators[i].count, 1);
+ if (!atomic_cmpxchg(&q_indicators[i].count, 0, 1))
return &q_indicators[i].ind;
- }
/* use the shared indicator */
atomic_inc(&q_indicators[TIQDIO_SHARED_IND].count);
@@ -69,13 +67,11 @@ static u32 *get_indicator(void)
static void put_indicator(u32 *addr)
{
- int i;
+ struct indicator_t *ind = container_of(addr, struct indicator_t, ind);
if (!addr)
return;
- i = ((unsigned long)addr - (unsigned long)q_indicators) /
- sizeof(struct indicator_t);
- atomic_dec(&q_indicators[i].count);
+ atomic_dec(&ind->count);
}
void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
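
A userspace sketch of the claim-if-free pattern that atomic_cmpxchg() gives get_indicator() above: "store 1 only if the count is still 0" is a single atomic step, so two CPUs can no longer both observe 0 and claim the same indicator. The names and array size here are invented.

	#include <stdatomic.h>
	#include <stdio.h>

	#define NR_IND 4

	static atomic_int count[NR_IND];

	static int claim_indicator(void)
	{
		int i, expected;

		for (i = 0; i < NR_IND; i++) {
			expected = 0;
			/* succeeds (and stores 1) only if count[i] was still 0 */
			if (atomic_compare_exchange_strong(&count[i], &expected, 1))
				return i;
		}
		return -1;	/* all busy: fall back to a shared indicator */
	}

	int main(void)
	{
		printf("claimed %d\n", claim_indicator());
		printf("claimed %d\n", claim_indicator());
		return 0;
	}
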
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index f20b4d66c75f..d9a2fffd034b 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -106,7 +106,10 @@ static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev,
{
int ret = 0;
- if (!len || pa->pa_nr)
+ if (!len)
+ return 0;
+
+ if (pa->pa_nr)
return -EINVAL;
pa->pa_iova = iova;
@@ -330,6 +333,8 @@ static void ccwchain_cda_free(struct ccwchain *chain, int idx)
{
struct ccw1 *ccw = chain->ch_ccw + idx;
+ if (ccw_is_test(ccw) || ccw_is_noop(ccw) || ccw_is_tic(ccw))
+ return;
if (!ccw->count)
return;
@@ -502,6 +507,16 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
ccw = chain->ch_ccw + idx;
+ if (!ccw->count) {
+ /*
+		 * We just want the translation result of any direct ccw
+		 * to be an IDA ccw, so let's add the IDA flag for it,
+		 * even though the flag will be ignored by the firmware.
+ */
+ ccw->flags |= CCW_FLAG_IDA;
+ return 0;
+ }
+
/*
* Pin data page(s) in memory.
* The number of pages actually is the count of the idaws which will be
@@ -542,6 +557,9 @@ static int ccwchain_fetch_idal(struct ccwchain *chain,
ccw = chain->ch_ccw + idx;
+ if (!ccw->count)
+ return 0;
+
/* Calculate size of idaws. */
ret = copy_from_iova(cp->mdev, &idaw_iova, ccw->cda, sizeof(idaw_iova));
if (ret)
@@ -570,10 +588,6 @@ static int ccwchain_fetch_idal(struct ccwchain *chain,
for (i = 0; i < idaw_nr; i++) {
idaw_iova = *(idaws + i);
- if (IS_ERR_VALUE(idaw_iova)) {
- ret = -EFAULT;
- goto out_free_idaws;
- }
ret = pfn_array_alloc_pin(pat->pat_pa + i, cp->mdev,
idaw_iova, 1);
diff --git a/drivers/s390/crypto/ap_asm.h b/drivers/s390/crypto/ap_asm.h
index 6c0474c834d4..16b59ce5e01d 100644
--- a/drivers/s390/crypto/ap_asm.h
+++ b/drivers/s390/crypto/ap_asm.h
@@ -117,6 +117,49 @@ static inline int ap_qci(void *config)
return reg1;
}
+/*
+ * union ap_qact_ap_info - used together with the
+ * ap_qact() function to provide a convenient way
+ * to handle the ap info needed by the qact function.
+ */
+union ap_qact_ap_info {
+ unsigned long val;
+ struct {
+ unsigned int : 3;
+ unsigned int mode : 3;
+ unsigned int : 26;
+ unsigned int cat : 8;
+ unsigned int : 8;
+ unsigned char ver[2];
+ };
+};
+
+/**
+ * ap_qact(): Query AP compatibility type.
+ * @qid: The AP queue number
+ * @apinfo: On input the info about the AP queue. On output the
+ *	    alternate AP queue info provided by the qact function
+ *	    in GR2 is stored here.
+ *
+ * Returns AP queue status. Check response_code field for failures.
+ */
+static inline struct ap_queue_status ap_qact(ap_qid_t qid, int ifbit,
+ union ap_qact_ap_info *apinfo)
+{
+ register unsigned long reg0 asm ("0") = qid | (5UL << 24)
+ | ((ifbit & 0x01) << 22);
+ register unsigned long reg1_in asm ("1") = apinfo->val;
+ register struct ap_queue_status reg1_out asm ("1");
+ register unsigned long reg2 asm ("2") = 0;
+
+ asm volatile(
+ ".long 0xb2af0000" /* PQAP(QACT) */
+ : "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2)
+ : : "cc");
+ apinfo->val = reg2;
+ return reg1_out;
+}
+
/**
* ap_nqap(): Send message to adjunct processor queue.
* @qid: The AP queue number
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 5f0be2040272..8b5658b0bec3 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -176,6 +176,18 @@ static int ap_apft_available(void)
return test_facility(15);
}
+/*
+ * ap_qact_available(): Test if the PQAP(QACT) subfunction is available.
+ *
+ * Returns 1 if the QACT subfunction is available.
+ */
+static inline int ap_qact_available(void)
+{
+ if (ap_configuration)
+ return ap_configuration->qact;
+ return 0;
+}
+
/**
* ap_test_queue(): Test adjunct processor queue.
* @qid: The AP queue number
@@ -988,6 +1000,47 @@ static int ap_select_domain(void)
}
/*
+ * This function checks the type and returns either 0 (type not
+ * supported) or the highest compatible type value, which may be
+ * the input type itself.
+ */
+static int ap_get_compatible_type(ap_qid_t qid, int rawtype, unsigned int func)
+{
+ int comp_type = 0;
+
+ /* < CEX2A is not supported */
+ if (rawtype < AP_DEVICE_TYPE_CEX2A)
+ return 0;
+ /* up to CEX6 known and fully supported */
+ if (rawtype <= AP_DEVICE_TYPE_CEX6)
+ return rawtype;
+ /*
+	 * Unknown new type > CEX6: use the QACT function to check for
+	 * compatibility with the highest known and supported type,
+	 * which is currently CEX6.
+ */
+ if (ap_qact_available()) {
+ struct ap_queue_status status;
+ union ap_qact_ap_info apinfo = {0};
+
+ apinfo.mode = (func >> 26) & 0x07;
+ apinfo.cat = AP_DEVICE_TYPE_CEX6;
+ status = ap_qact(qid, 0, &apinfo);
+ if (status.response_code == AP_RESPONSE_NORMAL
+ && apinfo.cat >= AP_DEVICE_TYPE_CEX2A
+ && apinfo.cat <= AP_DEVICE_TYPE_CEX6)
+ comp_type = apinfo.cat;
+ }
+ if (!comp_type)
+ AP_DBF(DBF_WARN, "queue=%02x.%04x unable to map type %d\n",
+ AP_QID_CARD(qid), AP_QID_QUEUE(qid), rawtype);
+ else if (comp_type != rawtype)
+ AP_DBF(DBF_INFO, "queue=%02x.%04x map type %d to %d\n",
+ AP_QID_CARD(qid), AP_QID_QUEUE(qid), rawtype, comp_type);
+ return comp_type;
+}
+
+/*
* helper function to be used with bus_find_dev
* matches for the card device with the given id
*/
@@ -1014,8 +1067,8 @@ static void ap_scan_bus(struct work_struct *unused)
struct ap_card *ac;
struct device *dev;
ap_qid_t qid;
- int depth = 0, type = 0;
- unsigned int functions = 0;
+ int comp_type, depth = 0, type = 0;
+ unsigned int func = 0;
int rc, id, dom, borked, domains, defdomdevs = 0;
AP_DBF(DBF_DEBUG, "ap_scan_bus running\n");
@@ -1066,12 +1119,12 @@ static void ap_scan_bus(struct work_struct *unused)
}
continue;
}
- rc = ap_query_queue(qid, &depth, &type, &functions);
+ rc = ap_query_queue(qid, &depth, &type, &func);
if (dev) {
spin_lock_bh(&aq->lock);
if (rc == -ENODEV ||
/* adapter reconfiguration */
- (ac && ac->functions != functions))
+ (ac && ac->functions != func))
aq->state = AP_STATE_BORKED;
borked = aq->state == AP_STATE_BORKED;
spin_unlock_bh(&aq->lock);
@@ -1087,11 +1140,14 @@ static void ap_scan_bus(struct work_struct *unused)
}
if (rc)
continue;
- /* new queue device needed */
+ /* a new queue device is needed, check out comp type */
+ comp_type = ap_get_compatible_type(qid, type, func);
+ if (!comp_type)
+ continue;
+ /* maybe a card device needs to be created first */
if (!ac) {
- /* but first create the card device */
- ac = ap_card_create(id, depth,
- type, functions);
+ ac = ap_card_create(id, depth, type,
+ comp_type, func);
if (!ac)
continue;
ac->ap_dev.device.bus = &ap_bus_type;
@@ -1109,7 +1165,7 @@ static void ap_scan_bus(struct work_struct *unused)
get_device(&ac->ap_dev.device);
}
/* now create the new queue device */
- aq = ap_queue_create(qid, type);
+ aq = ap_queue_create(qid, comp_type);
if (!aq)
continue;
aq->card = ac;
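The ap_get_compatible_type() helper added above encodes a simple toleration policy: device types below the supported minimum are rejected, known types are used as-is, and types newer than the newest known one are probed (via PQAP(QACT)) for a compatible known type, otherwise the device is skipped. A hedged, self-contained sketch of that decision flow follows; the numeric bounds and the query callback are illustrative placeholders rather than the real CEX type IDs.

    #include <stdio.h>

    enum { TYPE_MIN = 6, TYPE_MAX = 12 };   /* illustrative bounds only */

    /* query_compat() stands in for the PQAP(QACT) probe; it reports which
     * known type an unknown, newer device claims compatibility with. */
    static int get_compatible_type(int rawtype, int (*query_compat)(int))
    {
            if (rawtype < TYPE_MIN)
                    return 0;               /* too old: not supported */
            if (rawtype <= TYPE_MAX)
                    return rawtype;         /* known and fully supported */
            if (query_compat) {
                    int t = query_compat(rawtype);

                    if (t >= TYPE_MIN && t <= TYPE_MAX)
                            return t;       /* tolerate as the reported type */
            }
            return 0;                       /* cannot map: skip the device */
    }

    static int fake_qact(int rawtype)
    {
            (void)rawtype;
            return TYPE_MAX;                /* pretend "compatible with newest known" */
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   get_compatible_type(4, fake_qact),   /* 0: too old */
                   get_compatible_type(10, fake_qact),  /* 10: known type */
                   get_compatible_type(99, fake_qact)); /* 12: tolerated */
            return 0;
    }

Returning 0 rather than guessing means an unmappable device is skipped by the bus scan instead of being driven with the wrong protocol.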
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 754cf2223cfb..3a0e19d87e7c 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -250,8 +250,8 @@ void ap_queue_remove(struct ap_queue *aq);
void ap_queue_suspend(struct ap_device *ap_dev);
void ap_queue_resume(struct ap_device *ap_dev);
-struct ap_card *ap_card_create(int id, int queue_depth, int device_type,
- unsigned int device_functions);
+struct ap_card *ap_card_create(int id, int queue_depth, int raw_device_type,
+ int comp_device_type, unsigned int functions);
int ap_module_init(void);
void ap_module_exit(void);
diff --git a/drivers/s390/crypto/ap_card.c b/drivers/s390/crypto/ap_card.c
index 8a31c9e95430..97a8cf578116 100644
--- a/drivers/s390/crypto/ap_card.c
+++ b/drivers/s390/crypto/ap_card.c
@@ -171,22 +171,20 @@ static void ap_card_device_release(struct device *dev)
kfree(ac);
}
-struct ap_card *ap_card_create(int id, int queue_depth, int device_type,
- unsigned int functions)
+struct ap_card *ap_card_create(int id, int queue_depth, int raw_type,
+ int comp_type, unsigned int functions)
{
struct ap_card *ac;
ac = kzalloc(sizeof(*ac), GFP_KERNEL);
if (!ac)
return NULL;
+ INIT_LIST_HEAD(&ac->list);
INIT_LIST_HEAD(&ac->queues);
ac->ap_dev.device.release = ap_card_device_release;
ac->ap_dev.device.type = &ap_card_type;
- ac->ap_dev.device_type = device_type;
- /* CEX6 toleration: map to CEX5 */
- if (device_type == AP_DEVICE_TYPE_CEX6)
- ac->ap_dev.device_type = AP_DEVICE_TYPE_CEX5;
- ac->raw_hwtype = device_type;
+ ac->ap_dev.device_type = comp_type;
+ ac->raw_hwtype = raw_type;
ac->queue_depth = queue_depth;
ac->functions = functions;
ac->id = id;
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index 6c8bd8ad6185..a550d40921e7 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -627,13 +627,11 @@ struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
aq->ap_dev.device.release = ap_queue_device_release;
aq->ap_dev.device.type = &ap_queue_type;
aq->ap_dev.device_type = device_type;
- /* CEX6 toleration: map to CEX5 */
- if (device_type == AP_DEVICE_TYPE_CEX6)
- aq->ap_dev.device_type = AP_DEVICE_TYPE_CEX5;
aq->qid = qid;
aq->state = AP_STATE_RESET_START;
aq->interrupt = AP_INTR_DISABLED;
spin_lock_init(&aq->lock);
+ INIT_LIST_HEAD(&aq->list);
INIT_LIST_HEAD(&aq->pendingq);
INIT_LIST_HEAD(&aq->requestq);
setup_timer(&aq->timeout, ap_request_timeout, (unsigned long) aq);
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index f61fa47135a6..8dda5bb34a2f 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -125,10 +125,9 @@ static int alloc_and_prep_cprbmem(size_t paramblen,
* allocate consecutive memory for request CPRB, request param
* block, reply CPRB and reply param block
*/
- cprbmem = kmalloc(2 * cprbplusparamblen, GFP_KERNEL);
+ cprbmem = kzalloc(2 * cprbplusparamblen, GFP_KERNEL);
if (!cprbmem)
return -ENOMEM;
- memset(cprbmem, 0, 2 * cprbplusparamblen);
preqcblk = (struct CPRBX *) cprbmem;
prepcblk = (struct CPRBX *) (cprbmem + cprbplusparamblen);
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index 6c94efd23eac..73541a798db7 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -76,6 +76,7 @@ struct ica_z90_status {
#define ZCRYPT_CEX3A 8
#define ZCRYPT_CEX4 10
#define ZCRYPT_CEX5 11
+#define ZCRYPT_CEX6 12
/**
* Large random numbers are pulled in 4096 byte chunks from the crypto cards
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
index 4e91163d70a6..e2eebc775a37 100644
--- a/drivers/s390/crypto/zcrypt_cex4.c
+++ b/drivers/s390/crypto/zcrypt_cex4.c
@@ -45,6 +45,8 @@ static struct ap_device_id zcrypt_cex4_card_ids[] = {
.match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
{ .dev_type = AP_DEVICE_TYPE_CEX5,
.match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
+ { .dev_type = AP_DEVICE_TYPE_CEX6,
+ .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
{ /* end of list */ },
};
@@ -55,6 +57,8 @@ static struct ap_device_id zcrypt_cex4_queue_ids[] = {
.match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
{ .dev_type = AP_DEVICE_TYPE_CEX5,
.match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+ { .dev_type = AP_DEVICE_TYPE_CEX6,
+ .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
{ /* end of list */ },
};
@@ -72,17 +76,25 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
* MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY
*/
static const int CEX4A_SPEED_IDX[] = {
- 5, 6, 59, 20, 115, 581, 0, 0};
+ 14, 19, 249, 42, 228, 1458, 0, 0};
static const int CEX5A_SPEED_IDX[] = {
- 3, 3, 6, 8, 32, 218, 0, 0};
+ 8, 9, 20, 18, 66, 458, 0, 0};
+ static const int CEX6A_SPEED_IDX[] = {
+ 6, 9, 20, 17, 65, 438, 0, 0};
+
static const int CEX4C_SPEED_IDX[] = {
- 24, 25, 82, 41, 138, 1111, 79, 8};
+ 59, 69, 308, 83, 278, 2204, 209, 40};
static const int CEX5C_SPEED_IDX[] = {
- 10, 14, 23, 17, 45, 242, 63, 4};
+ 24, 31, 50, 37, 90, 479, 27, 10};
+ static const int CEX6C_SPEED_IDX[] = {
+ 16, 20, 32, 27, 77, 455, 23, 9};
+
static const int CEX4P_SPEED_IDX[] = {
- 142, 198, 1852, 203, 331, 1563, 0, 8};
+ 224, 313, 3560, 359, 605, 2827, 0, 50};
static const int CEX5P_SPEED_IDX[] = {
- 49, 67, 131, 52, 85, 287, 0, 4};
+ 63, 84, 156, 83, 142, 533, 0, 10};
+ static const int CEX6P_SPEED_IDX[] = {
+ 55, 70, 121, 73, 129, 522, 0, 9};
struct ap_card *ac = to_ap_card(&ap_dev->device);
struct zcrypt_card *zc;
@@ -99,11 +111,16 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
zc->user_space_type = ZCRYPT_CEX4;
memcpy(zc->speed_rating, CEX4A_SPEED_IDX,
sizeof(CEX4A_SPEED_IDX));
- } else {
+ } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX5) {
zc->type_string = "CEX5A";
zc->user_space_type = ZCRYPT_CEX5;
memcpy(zc->speed_rating, CEX5A_SPEED_IDX,
sizeof(CEX5A_SPEED_IDX));
+ } else {
+ zc->type_string = "CEX6A";
+ zc->user_space_type = ZCRYPT_CEX6;
+ memcpy(zc->speed_rating, CEX6A_SPEED_IDX,
+ sizeof(CEX6A_SPEED_IDX));
}
zc->min_mod_size = CEX4A_MIN_MOD_SIZE;
if (ap_test_bit(&ac->functions, AP_FUNC_MEX4K) &&
@@ -125,7 +142,7 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
zc->user_space_type = ZCRYPT_CEX3C;
memcpy(zc->speed_rating, CEX4C_SPEED_IDX,
sizeof(CEX4C_SPEED_IDX));
- } else {
+ } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX5) {
zc->type_string = "CEX5C";
/* wrong user space type, must be CEX5
* just keep it for cca compatibility
@@ -133,6 +150,14 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
zc->user_space_type = ZCRYPT_CEX3C;
memcpy(zc->speed_rating, CEX5C_SPEED_IDX,
sizeof(CEX5C_SPEED_IDX));
+ } else {
+ zc->type_string = "CEX6C";
+ /* wrong user space type, must be CEX6
+ * just keep it for cca compatibility
+ */
+ zc->user_space_type = ZCRYPT_CEX3C;
+ memcpy(zc->speed_rating, CEX6C_SPEED_IDX,
+ sizeof(CEX6C_SPEED_IDX));
}
zc->min_mod_size = CEX4C_MIN_MOD_SIZE;
zc->max_mod_size = CEX4C_MAX_MOD_SIZE;
@@ -143,11 +168,16 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
zc->user_space_type = ZCRYPT_CEX4;
memcpy(zc->speed_rating, CEX4P_SPEED_IDX,
sizeof(CEX4P_SPEED_IDX));
- } else {
+ } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX5) {
zc->type_string = "CEX5P";
zc->user_space_type = ZCRYPT_CEX5;
memcpy(zc->speed_rating, CEX5P_SPEED_IDX,
sizeof(CEX5P_SPEED_IDX));
+ } else {
+ zc->type_string = "CEX6P";
+ zc->user_space_type = ZCRYPT_CEX6;
+ memcpy(zc->speed_rating, CEX6P_SPEED_IDX,
+ sizeof(CEX6P_SPEED_IDX));
}
zc->min_mod_size = CEX4C_MIN_MOD_SIZE;
zc->max_mod_size = CEX4C_MAX_MOD_SIZE;
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c
index 6dd5d7c58dd0..db5bde47dfb0 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.c
+++ b/drivers/s390/crypto/zcrypt_msgtype50.c
@@ -240,8 +240,7 @@ static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_queue *zq,
mod = meb2->modulus + sizeof(meb2->modulus) - mod_len;
exp = meb2->exponent + sizeof(meb2->exponent) - mod_len;
inp = meb2->message + sizeof(meb2->message) - mod_len;
- } else {
- /* mod_len > 256 = 4096 bit RSA Key */
+ } else if (mod_len <= 512) {
struct type50_meb3_msg *meb3 = ap_msg->message;
memset(meb3, 0, sizeof(*meb3));
ap_msg->length = sizeof(*meb3);
@@ -251,7 +250,8 @@ static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_queue *zq,
mod = meb3->modulus + sizeof(meb3->modulus) - mod_len;
exp = meb3->exponent + sizeof(meb3->exponent) - mod_len;
inp = meb3->message + sizeof(meb3->message) - mod_len;
- }
+ } else
+ return -EINVAL;
if (copy_from_user(mod, mex->n_modulus, mod_len) ||
copy_from_user(exp, mex->b_key, mod_len) ||
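The zcrypt_msgtype50.c hunk above replaces the old assumption that any modulus longer than 256 bytes must be a 4096-bit key with an explicit upper bound, returning -EINVAL for anything that does not fit the largest fixed-size message format. A small sketch of that reject-instead-of-assume dispatch; the format numbers and the 128-byte tier are illustrative, only the 256/512 thresholds appear in the patch.

    #include <errno.h>
    #include <stdio.h>

    /* Pick a fixed-size message format for a modulus length, rejecting
     * anything larger than the biggest format instead of assuming it fits. */
    static int pick_format(unsigned int mod_len)
    {
            if (mod_len <= 128)
                    return 1;       /* e.g. keys up to 1024 bits (illustrative) */
            if (mod_len <= 256)
                    return 2;       /* keys up to 2048 bits */
            if (mod_len <= 512)
                    return 3;       /* keys up to 4096 bits */
            return -EINVAL;         /* larger keys are not representable */
    }

    int main(void)
    {
            printf("%d %d\n", pick_format(256), pick_format(1024));
            return 0;
    }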
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index afd20cee7ea0..785620d30504 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -474,7 +474,8 @@ static int XCRB_msg_to_type6CPRB_msgX(struct ap_message *ap_msg,
*fcode = (msg->hdr.function_code[0] << 8) | msg->hdr.function_code[1];
*dom = (unsigned short *)&msg->cprbx.domain;
- if (memcmp(function_code, "US", 2) == 0)
+ if (memcmp(function_code, "US", 2) == 0
+ || memcmp(function_code, "AU", 2) == 0)
ap_msg->special = 1;
else
ap_msg->special = 0;
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 26363e0816fe..be9f17218531 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -1761,6 +1761,7 @@ static struct ccwgroup_driver ctcm_group_driver = {
.owner = THIS_MODULE,
.name = CTC_DRIVER_NAME,
},
+ .ccw_driver = &ctcm_ccw_driver,
.setup = ctcm_probe_device,
.remove = ctcm_remove_device,
.set_online = ctcm_new_device,
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index d01b5c2a7760..0bf7b7356f10 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -2396,6 +2396,7 @@ static struct ccwgroup_driver lcs_group_driver = {
.owner = THIS_MODULE,
.name = "lcs",
},
+ .ccw_driver = &lcs_ccw_driver,
.setup = lcs_probe_device,
.remove = lcs_remove_device,
.set_online = lcs_new_device,
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index bae7440abc01..61cf3e9c0acb 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -5875,6 +5875,7 @@ static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
.owner = THIS_MODULE,
.name = "qeth",
},
+ .ccw_driver = &qeth_ccw_driver,
.setup = qeth_core_probe_device,
.remove = qeth_core_remove_device,
.set_online = qeth_core_set_online,
diff --git a/drivers/s390/virtio/Makefile b/drivers/s390/virtio/Makefile
index df40692a9011..f68af1f317f1 100644
--- a/drivers/s390/virtio/Makefile
+++ b/drivers/s390/virtio/Makefile
@@ -6,8 +6,4 @@
# it under the terms of the GNU General Public License (version 2 only)
# as published by the Free Software Foundation.
-s390-virtio-objs := virtio_ccw.o
-ifdef CONFIG_S390_GUEST_OLD_TRANSPORT
-s390-virtio-objs += kvm_virtio.o
-endif
-obj-$(CONFIG_S390_GUEST) += $(s390-virtio-objs)
+obj-$(CONFIG_S390_GUEST) += virtio_ccw.o
diff --git a/drivers/s390/virtio/kvm_virtio.c b/drivers/s390/virtio/kvm_virtio.c
deleted file mode 100644
index a99d09a11f05..000000000000
--- a/drivers/s390/virtio/kvm_virtio.c
+++ /dev/null
@@ -1,515 +0,0 @@
-/*
- * virtio for kvm on s390
- *
- * Copyright IBM Corp. 2008
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
- * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
- */
-
-#include <linux/kernel_stat.h>
-#include <linux/init.h>
-#include <linux/bootmem.h>
-#include <linux/err.h>
-#include <linux/virtio.h>
-#include <linux/virtio_config.h>
-#include <linux/slab.h>
-#include <linux/virtio_console.h>
-#include <linux/interrupt.h>
-#include <linux/virtio_ring.h>
-#include <linux/export.h>
-#include <linux/pfn.h>
-#include <asm/io.h>
-#include <asm/kvm_para.h>
-#include <asm/kvm_virtio.h>
-#include <asm/sclp.h>
-#include <asm/setup.h>
-#include <asm/irq.h>
-
-#define VIRTIO_SUBCODE_64 0x0D00
-
-/*
- * The pointer to our (page) of device descriptions.
- */
-static void *kvm_devices;
-static struct work_struct hotplug_work;
-
-struct kvm_device {
- struct virtio_device vdev;
- struct kvm_device_desc *desc;
-};
-
-#define to_kvmdev(vd) container_of(vd, struct kvm_device, vdev)
-
-/*
- * memory layout:
- * - kvm_device_descriptor
- * struct kvm_device_desc
- * - configuration
- * struct kvm_vqconfig
- * - feature bits
- * - config space
- */
-static struct kvm_vqconfig *kvm_vq_config(const struct kvm_device_desc *desc)
-{
- return (struct kvm_vqconfig *)(desc + 1);
-}
-
-static u8 *kvm_vq_features(const struct kvm_device_desc *desc)
-{
- return (u8 *)(kvm_vq_config(desc) + desc->num_vq);
-}
-
-static u8 *kvm_vq_configspace(const struct kvm_device_desc *desc)
-{
- return kvm_vq_features(desc) + desc->feature_len * 2;
-}
-
-/*
- * The total size of the config page used by this device (incl. desc)
- */
-static unsigned desc_size(const struct kvm_device_desc *desc)
-{
- return sizeof(*desc)
- + desc->num_vq * sizeof(struct kvm_vqconfig)
- + desc->feature_len * 2
- + desc->config_len;
-}
-
-/* This gets the device's feature bits. */
-static u64 kvm_get_features(struct virtio_device *vdev)
-{
- unsigned int i;
- u32 features = 0;
- struct kvm_device_desc *desc = to_kvmdev(vdev)->desc;
- u8 *in_features = kvm_vq_features(desc);
-
- for (i = 0; i < min(desc->feature_len * 8, 32); i++)
- if (in_features[i / 8] & (1 << (i % 8)))
- features |= (1 << i);
- return features;
-}
-
-static int kvm_finalize_features(struct virtio_device *vdev)
-{
- unsigned int i, bits;
- struct kvm_device_desc *desc = to_kvmdev(vdev)->desc;
- /* Second half of bitmap is features we accept. */
- u8 *out_features = kvm_vq_features(desc) + desc->feature_len;
-
- /* Give virtio_ring a chance to accept features. */
- vring_transport_features(vdev);
-
- /* Make sure we don't have any features > 32 bits! */
- BUG_ON((u32)vdev->features != vdev->features);
-
- memset(out_features, 0, desc->feature_len);
- bits = min_t(unsigned, desc->feature_len, sizeof(vdev->features)) * 8;
- for (i = 0; i < bits; i++) {
- if (__virtio_test_bit(vdev, i))
- out_features[i / 8] |= (1 << (i % 8));
- }
-
- return 0;
-}
-
-/*
- * Reading and writing elements in config space
- */
-static void kvm_get(struct virtio_device *vdev, unsigned int offset,
- void *buf, unsigned len)
-{
- struct kvm_device_desc *desc = to_kvmdev(vdev)->desc;
-
- BUG_ON(offset + len > desc->config_len);
- memcpy(buf, kvm_vq_configspace(desc) + offset, len);
-}
-
-static void kvm_set(struct virtio_device *vdev, unsigned int offset,
- const void *buf, unsigned len)
-{
- struct kvm_device_desc *desc = to_kvmdev(vdev)->desc;
-
- BUG_ON(offset + len > desc->config_len);
- memcpy(kvm_vq_configspace(desc) + offset, buf, len);
-}
-
-/*
- * The operations to get and set the status word just access
- * the status field of the device descriptor. set_status will also
- * make a hypercall to the host, to tell about status changes
- */
-static u8 kvm_get_status(struct virtio_device *vdev)
-{
- return to_kvmdev(vdev)->desc->status;
-}
-
-static void kvm_set_status(struct virtio_device *vdev, u8 status)
-{
- BUG_ON(!status);
- to_kvmdev(vdev)->desc->status = status;
- kvm_hypercall1(KVM_S390_VIRTIO_SET_STATUS,
- (unsigned long) to_kvmdev(vdev)->desc);
-}
-
-/*
- * To reset the device, we use the KVM_VIRTIO_RESET hypercall, using the
- * descriptor address. The Host will zero the status and all the
- * features.
- */
-static void kvm_reset(struct virtio_device *vdev)
-{
- kvm_hypercall1(KVM_S390_VIRTIO_RESET,
- (unsigned long) to_kvmdev(vdev)->desc);
-}
-
-/*
- * When the virtio_ring code wants to notify the Host, it calls us here and we
- * make a hypercall. We hand the address of the virtqueue so the Host
- * knows which virtqueue we're talking about.
- */
-static bool kvm_notify(struct virtqueue *vq)
-{
- long rc;
- struct kvm_vqconfig *config = vq->priv;
-
- rc = kvm_hypercall1(KVM_S390_VIRTIO_NOTIFY, config->address);
- if (rc < 0)
- return false;
- return true;
-}
-
-/*
- * This routine finds the first virtqueue described in the configuration of
- * this device and sets it up.
- */
-static struct virtqueue *kvm_find_vq(struct virtio_device *vdev,
- unsigned index,
- void (*callback)(struct virtqueue *vq),
- const char *name, bool ctx)
-{
- struct kvm_device *kdev = to_kvmdev(vdev);
- struct kvm_vqconfig *config;
- struct virtqueue *vq;
- int err;
-
- if (index >= kdev->desc->num_vq)
- return ERR_PTR(-ENOENT);
-
- if (!name)
- return NULL;
-
- config = kvm_vq_config(kdev->desc)+index;
-
- err = vmem_add_mapping(config->address,
- vring_size(config->num,
- KVM_S390_VIRTIO_RING_ALIGN));
- if (err)
- goto out;
-
- vq = vring_new_virtqueue(index, config->num, KVM_S390_VIRTIO_RING_ALIGN,
- vdev, true, ctx, (void *) config->address,
- kvm_notify, callback, name);
- if (!vq) {
- err = -ENOMEM;
- goto unmap;
- }
-
- /*
- * register a callback token
- * The host will sent this via the external interrupt parameter
- */
- config->token = (u64) vq;
-
- vq->priv = config;
- return vq;
-unmap:
- vmem_remove_mapping(config->address,
- vring_size(config->num,
- KVM_S390_VIRTIO_RING_ALIGN));
-out:
- return ERR_PTR(err);
-}
-
-static void kvm_del_vq(struct virtqueue *vq)
-{
- struct kvm_vqconfig *config = vq->priv;
-
- vring_del_virtqueue(vq);
- vmem_remove_mapping(config->address,
- vring_size(config->num,
- KVM_S390_VIRTIO_RING_ALIGN));
-}
-
-static void kvm_del_vqs(struct virtio_device *vdev)
-{
- struct virtqueue *vq, *n;
-
- list_for_each_entry_safe(vq, n, &vdev->vqs, list)
- kvm_del_vq(vq);
-}
-
-static int kvm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
- struct virtqueue *vqs[],
- vq_callback_t *callbacks[],
- const char * const names[],
- const bool *ctx,
- struct irq_affinity *desc)
-{
- struct kvm_device *kdev = to_kvmdev(vdev);
- int i;
-
- /* We must have this many virtqueues. */
- if (nvqs > kdev->desc->num_vq)
- return -ENOENT;
-
- for (i = 0; i < nvqs; ++i) {
- vqs[i] = kvm_find_vq(vdev, i, callbacks[i], names[i],
- ctx ? ctx[i] : false);
- if (IS_ERR(vqs[i]))
- goto error;
- }
- return 0;
-
-error:
- kvm_del_vqs(vdev);
- return PTR_ERR(vqs[i]);
-}
-
-static const char *kvm_bus_name(struct virtio_device *vdev)
-{
- return "";
-}
-
-/*
- * The config ops structure as defined by virtio config
- */
-static const struct virtio_config_ops kvm_vq_configspace_ops = {
- .get_features = kvm_get_features,
- .finalize_features = kvm_finalize_features,
- .get = kvm_get,
- .set = kvm_set,
- .get_status = kvm_get_status,
- .set_status = kvm_set_status,
- .reset = kvm_reset,
- .find_vqs = kvm_find_vqs,
- .del_vqs = kvm_del_vqs,
- .bus_name = kvm_bus_name,
-};
-
-/*
- * The root device for the kvm virtio devices.
- * This makes them appear as /sys/devices/kvm_s390/0,1,2 not /sys/devices/0,1,2.
- */
-static struct device *kvm_root;
-
-/*
- * adds a new device and register it with virtio
- * appropriate drivers are loaded by the device model
- */
-static void add_kvm_device(struct kvm_device_desc *d, unsigned int offset)
-{
- struct kvm_device *kdev;
-
- kdev = kzalloc(sizeof(*kdev), GFP_KERNEL);
- if (!kdev) {
- printk(KERN_EMERG "Cannot allocate kvm dev %u type %u\n",
- offset, d->type);
- return;
- }
-
- kdev->vdev.dev.parent = kvm_root;
- kdev->vdev.id.device = d->type;
- kdev->vdev.config = &kvm_vq_configspace_ops;
- kdev->desc = d;
-
- if (register_virtio_device(&kdev->vdev) != 0) {
- printk(KERN_ERR "Failed to register kvm device %u type %u\n",
- offset, d->type);
- kfree(kdev);
- }
-}
-
-/*
- * scan_devices() simply iterates through the device page.
- * The type 0 is reserved to mean "end of devices".
- */
-static void scan_devices(void)
-{
- unsigned int i;
- struct kvm_device_desc *d;
-
- for (i = 0; i < PAGE_SIZE; i += desc_size(d)) {
- d = kvm_devices + i;
-
- if (d->type == 0)
- break;
-
- add_kvm_device(d, i);
- }
-}
-
-/*
- * match for a kvm device with a specific desc pointer
- */
-static int match_desc(struct device *dev, void *data)
-{
- struct virtio_device *vdev = dev_to_virtio(dev);
- struct kvm_device *kdev = to_kvmdev(vdev);
-
- return kdev->desc == data;
-}
-
-/*
- * hotplug_device tries to find changes in the device page.
- */
-static void hotplug_devices(struct work_struct *dummy)
-{
- unsigned int i;
- struct kvm_device_desc *d;
- struct device *dev;
-
- for (i = 0; i < PAGE_SIZE; i += desc_size(d)) {
- d = kvm_devices + i;
-
- /* end of list */
- if (d->type == 0)
- break;
-
- /* device already exists */
- dev = device_find_child(kvm_root, d, match_desc);
- if (dev) {
- /* XXX check for hotplug remove */
- put_device(dev);
- continue;
- }
-
- /* new device */
- printk(KERN_INFO "Adding new virtio device %p\n", d);
- add_kvm_device(d, i);
- }
-}
-
-/*
- * we emulate the request_irq behaviour on top of s390 extints
- */
-static void kvm_extint_handler(struct ext_code ext_code,
- unsigned int param32, unsigned long param64)
-{
- struct virtqueue *vq;
- u32 param;
-
- if ((ext_code.subcode & 0xff00) != VIRTIO_SUBCODE_64)
- return;
- inc_irq_stat(IRQEXT_VRT);
-
- /* The LSB might be overloaded, we have to mask it */
- vq = (struct virtqueue *)(param64 & ~1UL);
-
- /* We use ext_params to decide what this interrupt means */
- param = param32 & VIRTIO_PARAM_MASK;
-
- switch (param) {
- case VIRTIO_PARAM_CONFIG_CHANGED:
- virtio_config_changed(vq->vdev);
- break;
- case VIRTIO_PARAM_DEV_ADD:
- schedule_work(&hotplug_work);
- break;
- case VIRTIO_PARAM_VRING_INTERRUPT:
- default:
- vring_interrupt(0, vq);
- break;
- }
-}
-
-/*
- * For s390-virtio, we expect a page above main storage containing
- * the virtio configuration. Try to actually load from this area
- * in order to figure out if the host provides this page.
- */
-static int __init test_devices_support(unsigned long addr)
-{
- int ret = -EIO;
-
- asm volatile(
- "0: lura 0,%1\n"
- "1: xgr %0,%0\n"
- "2:\n"
- EX_TABLE(0b,2b)
- EX_TABLE(1b,2b)
- : "+d" (ret)
- : "a" (addr)
- : "0", "cc");
- return ret;
-}
-/*
- * Init function for virtio
- * devices are in a single page above top of "normal" + standby mem
- */
-static int __init kvm_devices_init(void)
-{
- int rc;
- unsigned long total_memory_size = sclp.rzm * sclp.rnmax;
-
- if (!MACHINE_IS_KVM)
- return -ENODEV;
-
- if (test_devices_support(total_memory_size) < 0)
- return -ENODEV;
-
- pr_warn("The s390-virtio transport is deprecated. Please switch to a modern host providing virtio-ccw.\n");
-
- rc = vmem_add_mapping(total_memory_size, PAGE_SIZE);
- if (rc)
- return rc;
-
- kvm_devices = (void *) total_memory_size;
-
- kvm_root = root_device_register("kvm_s390");
- if (IS_ERR(kvm_root)) {
- rc = PTR_ERR(kvm_root);
- printk(KERN_ERR "Could not register kvm_s390 root device");
- vmem_remove_mapping(total_memory_size, PAGE_SIZE);
- return rc;
- }
-
- INIT_WORK(&hotplug_work, hotplug_devices);
-
- irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
- register_external_irq(EXT_IRQ_CP_SERVICE, kvm_extint_handler);
-
- scan_devices();
- return 0;
-}
-
-/* code for early console output with virtio_console */
-static int early_put_chars(u32 vtermno, const char *buf, int count)
-{
- char scratch[17];
- unsigned int len = count;
-
- if (len > sizeof(scratch) - 1)
- len = sizeof(scratch) - 1;
- scratch[len] = '\0';
- memcpy(scratch, buf, len);
- kvm_hypercall1(KVM_S390_VIRTIO_NOTIFY, __pa(scratch));
- return len;
-}
-
-static int __init s390_virtio_console_init(void)
-{
- if (sclp.has_vt220 || sclp.has_linemode)
- return -ENODEV;
- return virtio_cons_early_init(early_put_chars);
-}
-console_initcall(s390_virtio_console_init);
-
-
-/*
- * We do this after core stuff, but before the drivers.
- */
-postcore_initcall(kvm_devices_init);
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index f05cfc83c9c8..f946bf889015 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -996,7 +996,7 @@ static void qlt_free_session_done(struct work_struct *work)
if (logout_started) {
bool traced = false;
- while (!ACCESS_ONCE(sess->logout_completed)) {
+ while (!READ_ONCE(sess->logout_completed)) {
if (!traced) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086,
"%s: waiting for sess %p logout\n",
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index a75f2a2cf780..603783976b81 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -1,10 +1,6 @@
#
# SPI driver configuration
#
-# NOTE: the reason this doesn't show SPI slave support is mostly that
-# nobody's needed a slave side API yet. The master-role API is not
-# fully appropriate there, so it'd need some thought to do well.
-#
menuconfig SPI
bool "SPI support"
depends on HAS_IOMEM
@@ -379,7 +375,7 @@ config SPI_FSL_DSPI
tristate "Freescale DSPI controller"
select REGMAP_MMIO
depends on HAS_DMA
- depends on SOC_VF610 || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST
+ depends on SOC_VF610 || SOC_LS1021A || ARCH_LAYERSCAPE || M5441x || COMPILE_TEST
help
This enables support for the Freescale DSPI controller in master
mode. VF610 platform uses the controller.
@@ -626,6 +622,13 @@ config SPI_SIRF
help
SPI driver for CSR SiRFprimaII SoCs
+config SPI_SPRD_ADI
+ tristate "Spreadtrum ADI controller"
+ depends on ARCH_SPRD || COMPILE_TEST
+ depends on HWSPINLOCK || (COMPILE_TEST && !HWSPINLOCK)
+ help
+ ADI driver based on SPI for Spreadtrum SoCs.
+
config SPI_STM32
tristate "STMicroelectronics STM32 SPI controller"
depends on ARCH_STM32 || COMPILE_TEST
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 8e0cda73b324..34c5f2832ddf 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -91,6 +91,7 @@ obj-$(CONFIG_SPI_SH_HSPI) += spi-sh-hspi.o
obj-$(CONFIG_SPI_SH_MSIOF) += spi-sh-msiof.o
obj-$(CONFIG_SPI_SH_SCI) += spi-sh-sci.o
obj-$(CONFIG_SPI_SIRF) += spi-sirf.o
+obj-$(CONFIG_SPI_SPRD_ADI) += spi-sprd-adi.o
obj-$(CONFIG_SPI_STM32) += spi-stm32.o
obj-$(CONFIG_SPI_ST_SSC4) += spi-st-ssc4.o
obj-$(CONFIG_SPI_SUN4I) += spi-sun4i.o
diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c
index 568e1c65aa82..77fe55ce790c 100644
--- a/drivers/spi/spi-armada-3700.c
+++ b/drivers/spi/spi-armada-3700.c
@@ -213,7 +213,7 @@ static void a3700_spi_mode_set(struct a3700_spi *a3700_spi,
}
static void a3700_spi_clock_set(struct a3700_spi *a3700_spi,
- unsigned int speed_hz, u16 mode)
+ unsigned int speed_hz)
{
u32 val;
u32 prescale;
@@ -231,17 +231,6 @@ static void a3700_spi_clock_set(struct a3700_spi *a3700_spi,
val |= A3700_SPI_CLK_CAPT_EDGE;
spireg_write(a3700_spi, A3700_SPI_IF_TIME_REG, val);
}
-
- val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
- val &= ~(A3700_SPI_CLK_POL | A3700_SPI_CLK_PHA);
-
- if (mode & SPI_CPOL)
- val |= A3700_SPI_CLK_POL;
-
- if (mode & SPI_CPHA)
- val |= A3700_SPI_CLK_PHA;
-
- spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
}
static void a3700_spi_bytelen_set(struct a3700_spi *a3700_spi, unsigned int len)
@@ -423,7 +412,7 @@ static void a3700_spi_transfer_setup(struct spi_device *spi,
a3700_spi = spi_master_get_devdata(spi->master);
- a3700_spi_clock_set(a3700_spi, xfer->speed_hz, spi->mode);
+ a3700_spi_clock_set(a3700_spi, xfer->speed_hz);
byte_len = xfer->bits_per_word >> 3;
@@ -584,6 +573,8 @@ static int a3700_spi_prepare_message(struct spi_master *master,
a3700_spi_bytelen_set(a3700_spi, 4);
+ a3700_spi_mode_set(a3700_spi, spi->mode);
+
return 0;
}
diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c
index 6ab4c7700228..68cfc351b47f 100644
--- a/drivers/spi/spi-axi-spi-engine.c
+++ b/drivers/spi/spi-axi-spi-engine.c
@@ -553,7 +553,7 @@ err_put_master:
static int spi_engine_remove(struct platform_device *pdev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
+ struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
struct spi_engine *spi_engine = spi_master_get_devdata(master);
int irq = platform_get_irq(pdev, 0);
@@ -561,6 +561,8 @@ static int spi_engine_remove(struct platform_device *pdev)
free_irq(irq, master);
+ spi_master_put(master);
+
writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
writel_relaxed(0x01, spi_engine->base + SPI_ENGINE_REG_RESET);
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index d89127f4a46d..f652f70cb8db 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -32,6 +32,7 @@
#include <linux/regmap.h>
#include <linux/sched.h>
#include <linux/spi/spi.h>
+#include <linux/spi/spi-fsl-dspi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/time.h>
@@ -151,6 +152,11 @@ static const struct fsl_dspi_devtype_data ls2085a_data = {
.max_clock_factor = 8,
};
+static const struct fsl_dspi_devtype_data coldfire_data = {
+ .trans_mode = DSPI_EOQ_MODE,
+ .max_clock_factor = 8,
+};
+
struct fsl_dspi_dma {
/* Length of transfer in words of DSPI_FIFO_SIZE */
u32 curr_xfer_len;
@@ -741,6 +747,7 @@ static int dspi_setup(struct spi_device *spi)
{
struct chip_data *chip;
struct fsl_dspi *dspi = spi_master_get_devdata(spi->master);
+ struct fsl_dspi_platform_data *pdata;
u32 cs_sck_delay = 0, sck_cs_delay = 0;
unsigned char br = 0, pbr = 0, pcssck = 0, cssck = 0;
unsigned char pasc = 0, asc = 0, fmsz = 0;
@@ -761,11 +768,18 @@ static int dspi_setup(struct spi_device *spi)
return -ENOMEM;
}
- of_property_read_u32(spi->dev.of_node, "fsl,spi-cs-sck-delay",
- &cs_sck_delay);
+ pdata = dev_get_platdata(&dspi->pdev->dev);
- of_property_read_u32(spi->dev.of_node, "fsl,spi-sck-cs-delay",
- &sck_cs_delay);
+ if (!pdata) {
+ of_property_read_u32(spi->dev.of_node, "fsl,spi-cs-sck-delay",
+ &cs_sck_delay);
+
+ of_property_read_u32(spi->dev.of_node, "fsl,spi-sck-cs-delay",
+ &sck_cs_delay);
+ } else {
+ cs_sck_delay = pdata->cs_sck_delay;
+ sck_cs_delay = pdata->sck_cs_delay;
+ }
chip->mcr_val = SPI_MCR_MASTER | SPI_MCR_PCSIS |
SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF;
@@ -949,6 +963,7 @@ static int dspi_probe(struct platform_device *pdev)
struct fsl_dspi *dspi;
struct resource *res;
void __iomem *base;
+ struct fsl_dspi_platform_data *pdata;
int ret = 0, cs_num, bus_num;
master = spi_alloc_master(&pdev->dev, sizeof(struct fsl_dspi));
@@ -969,25 +984,34 @@ static int dspi_probe(struct platform_device *pdev)
master->bits_per_word_mask = SPI_BPW_MASK(4) | SPI_BPW_MASK(8) |
SPI_BPW_MASK(16);
- ret = of_property_read_u32(np, "spi-num-chipselects", &cs_num);
- if (ret < 0) {
- dev_err(&pdev->dev, "can't get spi-num-chipselects\n");
- goto out_master_put;
- }
- master->num_chipselect = cs_num;
+ pdata = dev_get_platdata(&pdev->dev);
+ if (pdata) {
+ master->num_chipselect = pdata->cs_num;
+ master->bus_num = pdata->bus_num;
- ret = of_property_read_u32(np, "bus-num", &bus_num);
- if (ret < 0) {
- dev_err(&pdev->dev, "can't get bus-num\n");
- goto out_master_put;
- }
- master->bus_num = bus_num;
+ dspi->devtype_data = &coldfire_data;
+ } else {
- dspi->devtype_data = of_device_get_match_data(&pdev->dev);
- if (!dspi->devtype_data) {
- dev_err(&pdev->dev, "can't get devtype_data\n");
- ret = -EFAULT;
- goto out_master_put;
+ ret = of_property_read_u32(np, "spi-num-chipselects", &cs_num);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can't get spi-num-chipselects\n");
+ goto out_master_put;
+ }
+ master->num_chipselect = cs_num;
+
+ ret = of_property_read_u32(np, "bus-num", &bus_num);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can't get bus-num\n");
+ goto out_master_put;
+ }
+ master->bus_num = bus_num;
+
+ dspi->devtype_data = of_device_get_match_data(&pdev->dev);
+ if (!dspi->devtype_data) {
+ dev_err(&pdev->dev, "can't get devtype_data\n");
+ ret = -EFAULT;
+ goto out_master_put;
+ }
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index babb15f07995..79ddefe4180d 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -53,10 +53,13 @@
/* generic defines to abstract from the different register layouts */
#define MXC_INT_RR (1 << 0) /* Receive data ready interrupt */
#define MXC_INT_TE (1 << 1) /* Transmit FIFO empty interrupt */
+#define MXC_INT_RDR	BIT(4) /* Receive data threshold interrupt */
/* The maximum bytes that a sdma BD can transfer.*/
#define MAX_SDMA_BD_BYTES (1 << 15)
#define MX51_ECSPI_CTRL_MAX_BURST 512
+/* The maximum bytes that IMX53_ECSPI can transfer in slave mode. */
+#define MX53_MAX_TRANSFER_BYTES 512
enum spi_imx_devtype {
IMX1_CSPI,
@@ -76,7 +79,9 @@ struct spi_imx_devtype_data {
void (*trigger)(struct spi_imx_data *);
int (*rx_available)(struct spi_imx_data *);
void (*reset)(struct spi_imx_data *);
+ void (*disable)(struct spi_imx_data *);
bool has_dmamode;
+ bool has_slavemode;
unsigned int fifo_size;
bool dynamic_burst;
enum spi_imx_devtype devtype;
@@ -108,6 +113,11 @@ struct spi_imx_data {
unsigned int dynamic_burst, read_u32;
unsigned int word_mask;
+ /* Slave mode */
+ bool slave_mode;
+ bool slave_aborted;
+ unsigned int slave_burst;
+
/* DMA */
bool usedma;
u32 wml;
@@ -221,6 +231,9 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
if (!master->dma_rx)
return false;
+ if (spi_imx->slave_mode)
+ return false;
+
bytes_per_word = spi_imx_bytes_per_word(transfer->bits_per_word);
if (bytes_per_word != 1 && bytes_per_word != 2 && bytes_per_word != 4)
@@ -262,6 +275,7 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
#define MX51_ECSPI_INT 0x10
#define MX51_ECSPI_INT_TEEN (1 << 0)
#define MX51_ECSPI_INT_RREN (1 << 3)
+#define MX51_ECSPI_INT_RDREN (1 << 4)
#define MX51_ECSPI_DMA 0x14
#define MX51_ECSPI_DMA_TX_WML(wml) ((wml) & 0x3f)
@@ -378,6 +392,44 @@ static void spi_imx_buf_tx_swap(struct spi_imx_data *spi_imx)
spi_imx_buf_tx_u16(spi_imx);
}
+static void mx53_ecspi_rx_slave(struct spi_imx_data *spi_imx)
+{
+ u32 val = be32_to_cpu(readl(spi_imx->base + MXC_CSPIRXDATA));
+
+ if (spi_imx->rx_buf) {
+ int n_bytes = spi_imx->slave_burst % sizeof(val);
+
+ if (!n_bytes)
+ n_bytes = sizeof(val);
+
+ memcpy(spi_imx->rx_buf,
+ ((u8 *)&val) + sizeof(val) - n_bytes, n_bytes);
+
+ spi_imx->rx_buf += n_bytes;
+ spi_imx->slave_burst -= n_bytes;
+ }
+}
+
+static void mx53_ecspi_tx_slave(struct spi_imx_data *spi_imx)
+{
+ u32 val = 0;
+ int n_bytes = spi_imx->count % sizeof(val);
+
+ if (!n_bytes)
+ n_bytes = sizeof(val);
+
+ if (spi_imx->tx_buf) {
+ memcpy(((u8 *)&val) + sizeof(val) - n_bytes,
+ spi_imx->tx_buf, n_bytes);
+ val = cpu_to_be32(val);
+ spi_imx->tx_buf += n_bytes;
+ }
+
+ spi_imx->count -= n_bytes;
+
+ writel(val, spi_imx->base + MXC_CSPITXDATA);
+}
+
/* MX51 eCSPI */
static unsigned int mx51_ecspi_clkdiv(struct spi_imx_data *spi_imx,
unsigned int fspi, unsigned int *fres)
@@ -427,6 +479,9 @@ static void mx51_ecspi_intctrl(struct spi_imx_data *spi_imx, int enable)
if (enable & MXC_INT_RR)
val |= MX51_ECSPI_INT_RREN;
+ if (enable & MXC_INT_RDR)
+ val |= MX51_ECSPI_INT_RDREN;
+
writel(val, spi_imx->base + MX51_ECSPI_INT);
}
@@ -439,6 +494,15 @@ static void mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
writel(reg, spi_imx->base + MX51_ECSPI_CTRL);
}
+static void mx51_ecspi_disable(struct spi_imx_data *spi_imx)
+{
+ u32 ctrl;
+
+ ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
+ ctrl &= ~MX51_ECSPI_CTRL_ENABLE;
+ writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
+}
+
static int mx51_ecspi_config(struct spi_device *spi)
{
struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
@@ -446,14 +510,11 @@ static int mx51_ecspi_config(struct spi_device *spi)
u32 clk = spi_imx->speed_hz, delay, reg;
u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);
- /*
- * The hardware seems to have a race condition when changing modes. The
- * current assumption is that the selection of the channel arrives
- * earlier in the hardware than the mode bits when they are written at
- * the same time.
- * So set master mode for all channels as we do not support slave mode.
- */
- ctrl |= MX51_ECSPI_CTRL_MODE_MASK;
+ /* set Master or Slave mode */
+ if (spi_imx->slave_mode)
+ ctrl &= ~MX51_ECSPI_CTRL_MODE_MASK;
+ else
+ ctrl |= MX51_ECSPI_CTRL_MODE_MASK;
/*
* Enable SPI_RDY handling (falling edge/level triggered).
@@ -468,9 +529,22 @@ static int mx51_ecspi_config(struct spi_device *spi)
/* set chip select to use */
ctrl |= MX51_ECSPI_CTRL_CS(spi->chip_select);
- ctrl |= (spi_imx->bits_per_word - 1) << MX51_ECSPI_CTRL_BL_OFFSET;
+ if (spi_imx->slave_mode && is_imx53_ecspi(spi_imx))
+ ctrl |= (spi_imx->slave_burst * 8 - 1)
+ << MX51_ECSPI_CTRL_BL_OFFSET;
+ else
+ ctrl |= (spi_imx->bits_per_word - 1)
+ << MX51_ECSPI_CTRL_BL_OFFSET;
- cfg |= MX51_ECSPI_CONFIG_SBBCTRL(spi->chip_select);
+ /*
+ * eCSPI burst completion by Chip Select signal in Slave mode
+	 * is not functional on the i.MX53 SoC, so configure the SPI burst
+	 * to complete when BURST_LENGTH + 1 bits have been received.
+ */
+ if (spi_imx->slave_mode && is_imx53_ecspi(spi_imx))
+ cfg &= ~MX51_ECSPI_CONFIG_SBBCTRL(spi->chip_select);
+ else
+ cfg |= MX51_ECSPI_CONFIG_SBBCTRL(spi->chip_select);
if (spi->mode & SPI_CPHA)
cfg |= MX51_ECSPI_CONFIG_SCLKPHA(spi->chip_select);
@@ -805,6 +879,7 @@ static struct spi_imx_devtype_data imx1_cspi_devtype_data = {
.fifo_size = 8,
.has_dmamode = false,
.dynamic_burst = false,
+ .has_slavemode = false,
.devtype = IMX1_CSPI,
};
@@ -817,6 +892,7 @@ static struct spi_imx_devtype_data imx21_cspi_devtype_data = {
.fifo_size = 8,
.has_dmamode = false,
.dynamic_burst = false,
+ .has_slavemode = false,
.devtype = IMX21_CSPI,
};
@@ -830,6 +906,7 @@ static struct spi_imx_devtype_data imx27_cspi_devtype_data = {
.fifo_size = 8,
.has_dmamode = false,
.dynamic_burst = false,
+ .has_slavemode = false,
.devtype = IMX27_CSPI,
};
@@ -842,6 +919,7 @@ static struct spi_imx_devtype_data imx31_cspi_devtype_data = {
.fifo_size = 8,
.has_dmamode = false,
.dynamic_burst = false,
+ .has_slavemode = false,
.devtype = IMX31_CSPI,
};
@@ -855,6 +933,7 @@ static struct spi_imx_devtype_data imx35_cspi_devtype_data = {
.fifo_size = 8,
.has_dmamode = true,
.dynamic_burst = false,
+ .has_slavemode = false,
.devtype = IMX35_CSPI,
};
@@ -867,6 +946,8 @@ static struct spi_imx_devtype_data imx51_ecspi_devtype_data = {
.fifo_size = 64,
.has_dmamode = true,
.dynamic_burst = true,
+ .has_slavemode = true,
+ .disable = mx51_ecspi_disable,
.devtype = IMX51_ECSPI,
};
@@ -878,6 +959,8 @@ static struct spi_imx_devtype_data imx53_ecspi_devtype_data = {
.reset = mx51_ecspi_reset,
.fifo_size = 64,
.has_dmamode = true,
+ .has_slavemode = true,
+ .disable = mx51_ecspi_disable,
.devtype = IMX53_ECSPI,
};
@@ -945,14 +1028,16 @@ static void spi_imx_push(struct spi_imx_data *spi_imx)
spi_imx->txfifo++;
}
- spi_imx->devtype_data->trigger(spi_imx);
+ if (!spi_imx->slave_mode)
+ spi_imx->devtype_data->trigger(spi_imx);
}
static irqreturn_t spi_imx_isr(int irq, void *dev_id)
{
struct spi_imx_data *spi_imx = dev_id;
- while (spi_imx->devtype_data->rx_available(spi_imx)) {
+ while (spi_imx->txfifo &&
+ spi_imx->devtype_data->rx_available(spi_imx)) {
spi_imx->rx(spi_imx);
spi_imx->txfifo--;
}
@@ -1034,7 +1119,7 @@ static int spi_imx_setupxfer(struct spi_device *spi,
spi_imx->speed_hz = t->speed_hz;
/* Initialize the functions for transfer */
- if (spi_imx->devtype_data->dynamic_burst) {
+ if (spi_imx->devtype_data->dynamic_burst && !spi_imx->slave_mode) {
u32 mask;
spi_imx->dynamic_burst = 0;
@@ -1078,6 +1163,12 @@ static int spi_imx_setupxfer(struct spi_device *spi,
return ret;
}
+ if (is_imx53_ecspi(spi_imx) && spi_imx->slave_mode) {
+ spi_imx->rx = mx53_ecspi_rx_slave;
+ spi_imx->tx = mx53_ecspi_tx_slave;
+ spi_imx->slave_burst = t->len;
+ }
+
spi_imx->devtype_data->config(spi);
return 0;
@@ -1262,11 +1353,61 @@ static int spi_imx_pio_transfer(struct spi_device *spi,
return transfer->len;
}
+static int spi_imx_pio_transfer_slave(struct spi_device *spi,
+ struct spi_transfer *transfer)
+{
+ struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
+ int ret = transfer->len;
+
+ if (is_imx53_ecspi(spi_imx) &&
+ transfer->len > MX53_MAX_TRANSFER_BYTES) {
+ dev_err(&spi->dev, "Transaction too big, max size is %d bytes\n",
+ MX53_MAX_TRANSFER_BYTES);
+ return -EMSGSIZE;
+ }
+
+ spi_imx->tx_buf = transfer->tx_buf;
+ spi_imx->rx_buf = transfer->rx_buf;
+ spi_imx->count = transfer->len;
+ spi_imx->txfifo = 0;
+
+ reinit_completion(&spi_imx->xfer_done);
+ spi_imx->slave_aborted = false;
+
+ spi_imx_push(spi_imx);
+
+ spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE | MXC_INT_RDR);
+
+ if (wait_for_completion_interruptible(&spi_imx->xfer_done) ||
+ spi_imx->slave_aborted) {
+ dev_dbg(&spi->dev, "interrupted\n");
+ ret = -EINTR;
+ }
+
+	/*
+	 * The eCSPI has a HW issue when working in Slave mode: after 64
+	 * words are written to the TXFIFO, ECSPI_TXDATA keeps shifting out
+	 * the last word even once the TXFIFO becomes empty, so we have to
+	 * disable the eCSPI after the transfer completes in slave mode.
+	 */
+ if (spi_imx->devtype_data->disable)
+ spi_imx->devtype_data->disable(spi_imx);
+
+ return ret;
+}
+
static int spi_imx_transfer(struct spi_device *spi,
struct spi_transfer *transfer)
{
struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
+ /* flush rxfifo before transfer */
+ while (spi_imx->devtype_data->rx_available(spi_imx))
+ spi_imx->rx(spi_imx);
+
+ if (spi_imx->slave_mode)
+ return spi_imx_pio_transfer_slave(spi, transfer);
+
if (spi_imx->usedma)
return spi_imx_dma_transfer(spi_imx, transfer);
else
@@ -1323,6 +1464,16 @@ spi_imx_unprepare_message(struct spi_master *master, struct spi_message *msg)
return 0;
}
+static int spi_imx_slave_abort(struct spi_master *master)
+{
+ struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
+
+ spi_imx->slave_aborted = true;
+ complete(&spi_imx->xfer_done);
+
+ return 0;
+}
+
static int spi_imx_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -1334,13 +1485,23 @@ static int spi_imx_probe(struct platform_device *pdev)
struct spi_imx_data *spi_imx;
struct resource *res;
int i, ret, irq, spi_drctl;
+ const struct spi_imx_devtype_data *devtype_data = of_id ? of_id->data :
+ (struct spi_imx_devtype_data *)pdev->id_entry->driver_data;
+ bool slave_mode;
if (!np && !mxc_platform_info) {
dev_err(&pdev->dev, "can't get the platform data\n");
return -EINVAL;
}
- master = spi_alloc_master(&pdev->dev, sizeof(struct spi_imx_data));
+ slave_mode = devtype_data->has_slavemode &&
+ of_property_read_bool(np, "spi-slave");
+ if (slave_mode)
+ master = spi_alloc_slave(&pdev->dev,
+ sizeof(struct spi_imx_data));
+ else
+ master = spi_alloc_master(&pdev->dev,
+ sizeof(struct spi_imx_data));
if (!master)
return -ENOMEM;
@@ -1358,20 +1519,29 @@ static int spi_imx_probe(struct platform_device *pdev)
spi_imx = spi_master_get_devdata(master);
spi_imx->bitbang.master = master;
spi_imx->dev = &pdev->dev;
+ spi_imx->slave_mode = slave_mode;
- spi_imx->devtype_data = of_id ? of_id->data :
- (struct spi_imx_devtype_data *)pdev->id_entry->driver_data;
+ spi_imx->devtype_data = devtype_data;
+	/* Get the number of chip selects, either from platform data or OF */
if (mxc_platform_info) {
master->num_chipselect = mxc_platform_info->num_chipselect;
- master->cs_gpios = devm_kzalloc(&master->dev,
- sizeof(int) * master->num_chipselect, GFP_KERNEL);
- if (!master->cs_gpios)
- return -ENOMEM;
+ if (mxc_platform_info->chipselect) {
+ master->cs_gpios = devm_kzalloc(&master->dev,
+ sizeof(int) * master->num_chipselect, GFP_KERNEL);
+ if (!master->cs_gpios)
+ return -ENOMEM;
+
+ for (i = 0; i < master->num_chipselect; i++)
+ master->cs_gpios[i] = mxc_platform_info->chipselect[i];
+ }
+ } else {
+ u32 num_cs;
- for (i = 0; i < master->num_chipselect; i++)
- master->cs_gpios[i] = mxc_platform_info->chipselect[i];
- }
+ if (!of_property_read_u32(np, "num-cs", &num_cs))
+ master->num_chipselect = num_cs;
+		/* If not present, the default value of 1 is used */
+ }
spi_imx->bitbang.chipselect = spi_imx_chipselect;
spi_imx->bitbang.setup_transfer = spi_imx_setupxfer;
@@ -1380,6 +1550,7 @@ static int spi_imx_probe(struct platform_device *pdev)
spi_imx->bitbang.master->cleanup = spi_imx_cleanup;
spi_imx->bitbang.master->prepare_message = spi_imx_prepare_message;
spi_imx->bitbang.master->unprepare_message = spi_imx_unprepare_message;
+ spi_imx->bitbang.master->slave_abort = spi_imx_slave_abort;
spi_imx->bitbang.master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
| SPI_NO_CS;
if (is_imx35_cspi(spi_imx) || is_imx51_ecspi(spi_imx) ||
@@ -1451,37 +1622,38 @@ static int spi_imx_probe(struct platform_device *pdev)
spi_imx->devtype_data->intctrl(spi_imx, 0);
master->dev.of_node = pdev->dev.of_node;
+
+ /* Request GPIO CS lines, if any */
+ if (!spi_imx->slave_mode && master->cs_gpios) {
+ for (i = 0; i < master->num_chipselect; i++) {
+ if (!gpio_is_valid(master->cs_gpios[i]))
+ continue;
+
+ ret = devm_gpio_request(&pdev->dev,
+ master->cs_gpios[i],
+ DRIVER_NAME);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't get CS GPIO %i\n",
+ master->cs_gpios[i]);
+ goto out_spi_bitbang;
+ }
+ }
+ }
+
ret = spi_bitbang_start(&spi_imx->bitbang);
if (ret) {
dev_err(&pdev->dev, "bitbang start failed with %d\n", ret);
goto out_clk_put;
}
- if (!master->cs_gpios) {
- dev_err(&pdev->dev, "No CS GPIOs available\n");
- ret = -EINVAL;
- goto out_clk_put;
- }
-
- for (i = 0; i < master->num_chipselect; i++) {
- if (!gpio_is_valid(master->cs_gpios[i]))
- continue;
-
- ret = devm_gpio_request(&pdev->dev, master->cs_gpios[i],
- DRIVER_NAME);
- if (ret) {
- dev_err(&pdev->dev, "Can't get CS GPIO %i\n",
- master->cs_gpios[i]);
- goto out_clk_put;
- }
- }
-
dev_info(&pdev->dev, "probed\n");
clk_disable(spi_imx->clk_ipg);
clk_disable(spi_imx->clk_per);
return ret;
+out_spi_bitbang:
+ spi_bitbang_stop(&spi_imx->bitbang);
out_clk_put:
clk_disable_unprepare(spi_imx->clk_ipg);
out_put_per:
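The slave-mode transfer path added to spi-imx above blocks on a completion that is signalled either by the receive interrupt or by spi_imx_slave_abort(), and then inspects the slave_aborted flag to decide between success and -EINTR. The sketch below models that wait/abort shape in user space with POSIX threads standing in for the kernel completion API; everything here is illustrative, the real driver uses reinit_completion(), wait_for_completion_interruptible() and complete().

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t done_cv = PTHREAD_COND_INITIALIZER;
    static bool done, aborted;

    /* Signalled either by the "interrupt" (data finished) or by an abort. */
    static void complete_transfer(bool abort_requested)
    {
            pthread_mutex_lock(&lock);
            aborted = abort_requested;
            done = true;
            pthread_cond_signal(&done_cv);
            pthread_mutex_unlock(&lock);
    }

    static void *aborter(void *unused)
    {
            (void)unused;
            complete_transfer(true);        /* mimics spi_imx_slave_abort() */
            return NULL;
    }

    static int do_slave_transfer(void)
    {
            pthread_t t;

            done = false;
            aborted = false;                /* mirrors slave_aborted = false */
            pthread_create(&t, NULL, aborter, NULL);

            pthread_mutex_lock(&lock);
            while (!done)                   /* wait_for_completion_*() stand-in */
                    pthread_cond_wait(&done_cv, &lock);
            pthread_mutex_unlock(&lock);
            pthread_join(t, NULL);

            return aborted ? -1 : 0;        /* the driver returns -EINTR here */
    }

    int main(void)
    {
            printf("transfer returned %d\n", do_slave_transfer());
            return 0;
    }

Re-checking the done flag under the mutex is what makes an early abort safe: a signal sent before the waiter reaches pthread_cond_wait() is not lost.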
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
index 5b0e9a3e83f6..3d216b950b41 100644
--- a/drivers/spi/spi-mxs.c
+++ b/drivers/spi/spi-mxs.c
@@ -44,6 +44,7 @@
#include <linux/completion.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
#include <linux/module.h>
#include <linux/stmp_device.h>
#include <linux/spi/spi.h>
@@ -442,6 +443,85 @@ static int mxs_spi_transfer_one(struct spi_master *master,
return status;
}
+static int mxs_spi_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct mxs_spi *spi = spi_master_get_devdata(master);
+ struct mxs_ssp *ssp = &spi->ssp;
+ int ret;
+
+ clk_disable_unprepare(ssp->clk);
+
+ ret = pinctrl_pm_select_idle_state(dev);
+ if (ret) {
+ int ret2 = clk_prepare_enable(ssp->clk);
+
+ if (ret2)
+ dev_warn(dev, "Failed to reenable clock after failing pinctrl request (pinctrl: %d, clk: %d)\n",
+ ret, ret2);
+ }
+
+ return ret;
+}
+
+static int mxs_spi_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct mxs_spi *spi = spi_master_get_devdata(master);
+ struct mxs_ssp *ssp = &spi->ssp;
+ int ret;
+
+ ret = pinctrl_pm_select_default_state(dev);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(ssp->clk);
+ if (ret)
+ pinctrl_pm_select_idle_state(dev);
+
+ return ret;
+}
+
+static int __maybe_unused mxs_spi_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ int ret;
+
+ ret = spi_master_suspend(master);
+ if (ret)
+ return ret;
+
+ if (!pm_runtime_suspended(dev))
+ return mxs_spi_runtime_suspend(dev);
+ else
+ return 0;
+}
+
+static int __maybe_unused mxs_spi_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ int ret;
+
+ if (!pm_runtime_suspended(dev))
+ ret = mxs_spi_runtime_resume(dev);
+ else
+ ret = 0;
+ if (ret)
+ return ret;
+
+ ret = spi_master_resume(master);
+ if (ret < 0 && !pm_runtime_suspended(dev))
+ mxs_spi_runtime_suspend(dev);
+
+ return ret;
+}
+
+static const struct dev_pm_ops mxs_spi_pm = {
+ SET_RUNTIME_PM_OPS(mxs_spi_runtime_suspend,
+ mxs_spi_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(mxs_spi_suspend, mxs_spi_resume)
+};
+
static const struct of_device_id mxs_spi_dt_ids[] = {
{ .compatible = "fsl,imx23-spi", .data = (void *) IMX23_SSP, },
{ .compatible = "fsl,imx28-spi", .data = (void *) IMX28_SSP, },
@@ -493,12 +573,15 @@ static int mxs_spi_probe(struct platform_device *pdev)
if (!master)
return -ENOMEM;
+ platform_set_drvdata(pdev, master);
+
master->transfer_one_message = mxs_spi_transfer_one;
master->bits_per_word_mask = SPI_BPW_MASK(8);
master->mode_bits = SPI_CPOL | SPI_CPHA;
master->num_chipselect = 3;
master->dev.of_node = np;
master->flags = SPI_MASTER_HALF_DUPLEX;
+ master->auto_runtime_pm = true;
spi = spi_master_get_devdata(master);
ssp = &spi->ssp;
@@ -521,28 +604,41 @@ static int mxs_spi_probe(struct platform_device *pdev)
goto out_master_free;
}
- ret = clk_prepare_enable(ssp->clk);
- if (ret)
- goto out_dma_release;
+ pm_runtime_enable(ssp->dev);
+ if (!pm_runtime_enabled(ssp->dev)) {
+ ret = mxs_spi_runtime_resume(ssp->dev);
+ if (ret < 0) {
+ dev_err(ssp->dev, "runtime resume failed\n");
+ goto out_dma_release;
+ }
+ }
+
+ ret = pm_runtime_get_sync(ssp->dev);
+ if (ret < 0) {
+ dev_err(ssp->dev, "runtime_get_sync failed\n");
+ goto out_pm_runtime_disable;
+ }
clk_set_rate(ssp->clk, clk_freq);
ret = stmp_reset_block(ssp->base);
if (ret)
- goto out_disable_clk;
-
- platform_set_drvdata(pdev, master);
+ goto out_pm_runtime_put;
ret = devm_spi_register_master(&pdev->dev, master);
if (ret) {
dev_err(&pdev->dev, "Cannot register SPI master, %d\n", ret);
- goto out_disable_clk;
+ goto out_pm_runtime_put;
}
+ pm_runtime_put(ssp->dev);
+
return 0;
-out_disable_clk:
- clk_disable_unprepare(ssp->clk);
+out_pm_runtime_put:
+ pm_runtime_put(ssp->dev);
+out_pm_runtime_disable:
+ pm_runtime_disable(ssp->dev);
out_dma_release:
dma_release_channel(ssp->dmach);
out_master_free:
@@ -560,7 +656,10 @@ static int mxs_spi_remove(struct platform_device *pdev)
spi = spi_master_get_devdata(master);
ssp = &spi->ssp;
- clk_disable_unprepare(ssp->clk);
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ mxs_spi_runtime_suspend(&pdev->dev);
+
dma_release_channel(ssp->dmach);
return 0;
@@ -572,6 +671,7 @@ static struct platform_driver mxs_spi_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = mxs_spi_dt_ids,
+ .pm = &mxs_spi_pm,
},
};
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index 4b6dd73b80da..8974bb340b3a 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -671,7 +671,6 @@ static int orion_spi_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"%pOF has no valid 'reg' property (%d)\n",
np, status);
- status = 0;
continue;
}
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 2a10b3f94ff7..2ce875764ca6 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -1221,7 +1221,6 @@ static int rspi_probe(struct platform_device *pdev)
struct spi_master *master;
struct rspi_data *rspi;
int ret;
- const struct of_device_id *of_id;
const struct rspi_plat_data *rspi_pd;
const struct spi_ops *ops;
@@ -1229,9 +1228,8 @@ static int rspi_probe(struct platform_device *pdev)
if (master == NULL)
return -ENOMEM;
- of_id = of_match_device(rspi_of_match, &pdev->dev);
- if (of_id) {
- ops = of_id->data;
+ ops = of_device_get_match_data(&pdev->dev);
+ if (ops) {
ret = rspi_parse_dt(&pdev->dev, master);
if (ret)
goto error1;
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index b392cca8fa4f..de7df20f8712 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -752,7 +752,6 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
{
struct s3c64xx_spi_csinfo *cs = spi->controller_data;
struct s3c64xx_spi_driver_data *sdd;
- struct s3c64xx_spi_info *sci;
int err;
sdd = spi_master_get_devdata(spi->master);
@@ -788,8 +787,6 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
spi_set_ctldata(spi, cs);
}
- sci = sdd->cntrlr_info;
-
pm_runtime_get_sync(&sdd->pdev->dev);
/* Check if we can provide the requested rate */
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 0eb1e9583485..fcd261f98b9f 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -900,7 +900,7 @@ static int sh_msiof_transfer_one(struct spi_master *master,
break;
copy32 = copy_bswap32;
} else if (bits <= 16) {
- if (l & 1)
+ if (l & 3)
break;
copy32 = copy_wswap32;
} else {
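
The alignment check tightens from 2-byte ("l & 1") to 4-byte ("l & 3") multiples because the word-swapping copy used for 16-bit words operates on whole 32-bit units. A rough sketch of that kind of copy (a hypothetical helper, not the driver's copy_wswap32()):

#include <linux/types.h>

/* Swap the two 16-bit halves of each 32-bit word; only whole words can be
 * handled, which is why the byte count must be a multiple of 4. */
static void example_wswap32_copy(u32 *dst, const u32 *src, unsigned int bytes)
{
	unsigned int words = bytes / 4;

	while (words--) {
		u32 v = *src++;

		*dst++ = (v << 16) | (v >> 16);
	}
}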
@@ -1021,6 +1021,8 @@ static const struct sh_msiof_chipdata rcar_gen3_data = {
static const struct of_device_id sh_msiof_match[] = {
{ .compatible = "renesas,sh-mobile-msiof", .data = &sh_data },
+ { .compatible = "renesas,msiof-r8a7743", .data = &rcar_gen2_data },
+ { .compatible = "renesas,msiof-r8a7745", .data = &rcar_gen2_data },
{ .compatible = "renesas,msiof-r8a7790", .data = &rcar_gen2_data },
{ .compatible = "renesas,msiof-r8a7791", .data = &rcar_gen2_data },
{ .compatible = "renesas,msiof-r8a7792", .data = &rcar_gen2_data },
@@ -1188,12 +1190,10 @@ free_tx_chan:
static void sh_msiof_release_dma(struct sh_msiof_spi_priv *p)
{
struct spi_master *master = p->master;
- struct device *dev;
if (!master->dma_tx)
return;
- dev = &p->pdev->dev;
dma_unmap_single(master->dma_rx->device->dev, p->rx_dma_addr,
PAGE_SIZE, DMA_FROM_DEVICE);
dma_unmap_single(master->dma_tx->device->dev, p->tx_dma_addr,
@@ -1209,15 +1209,13 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
struct resource *r;
struct spi_master *master;
const struct sh_msiof_chipdata *chipdata;
- const struct of_device_id *of_id;
struct sh_msiof_spi_info *info;
struct sh_msiof_spi_priv *p;
int i;
int ret;
- of_id = of_match_device(sh_msiof_match, &pdev->dev);
- if (of_id) {
- chipdata = of_id->data;
+ chipdata = of_device_get_match_data(&pdev->dev);
+ if (chipdata) {
info = sh_msiof_spi_parse_dt(&pdev->dev);
} else {
chipdata = (const void *)pdev->id_entry->driver_data;
diff --git a/drivers/spi/spi-sprd-adi.c b/drivers/spi/spi-sprd-adi.c
new file mode 100644
index 000000000000..5993bdbf79e4
--- /dev/null
+++ b/drivers/spi/spi-sprd-adi.c
@@ -0,0 +1,418 @@
+/*
+ * Copyright (C) 2017 Spreadtrum Communications Inc.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#include <linux/hwspinlock.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/sizes.h>
+
+/* Register definitions for the ADI controller */
+#define REG_ADI_CTRL0 0x4
+#define REG_ADI_CHN_PRIL 0x8
+#define REG_ADI_CHN_PRIH 0xc
+#define REG_ADI_INT_EN 0x10
+#define REG_ADI_INT_RAW 0x14
+#define REG_ADI_INT_MASK 0x18
+#define REG_ADI_INT_CLR 0x1c
+#define REG_ADI_GSSI_CFG0 0x20
+#define REG_ADI_GSSI_CFG1 0x24
+#define REG_ADI_RD_CMD 0x28
+#define REG_ADI_RD_DATA 0x2c
+#define REG_ADI_ARM_FIFO_STS 0x30
+#define REG_ADI_STS 0x34
+#define REG_ADI_EVT_FIFO_STS 0x38
+#define REG_ADI_ARM_CMD_STS 0x3c
+#define REG_ADI_CHN_EN 0x40
+#define REG_ADI_CHN_ADDR(id) (0x44 + (id - 2) * 4)
+#define REG_ADI_CHN_EN1 0x20c
+
+/* Bit definitions for register REG_ADI_GSSI_CFG0 */
+#define BIT_CLK_ALL_ON BIT(30)
+
+/* Bit definitions for register REG_ADI_RD_DATA */
+#define BIT_RD_CMD_BUSY BIT(31)
+#define RD_ADDR_SHIFT 16
+#define RD_VALUE_MASK GENMASK(15, 0)
+#define RD_ADDR_MASK GENMASK(30, 16)
+
+/* Bit definitions for register REG_ADI_ARM_FIFO_STS */
+#define BIT_FIFO_FULL BIT(11)
+#define BIT_FIFO_EMPTY BIT(10)
+
+/*
+ * ADI slave devices include the RTC, ADC, regulator, charger, thermal sensor and so on.
+ * The slave address space always starts at offset 0x8000 and is 4K in size.
+ */
+#define ADI_SLAVE_ADDR_SIZE SZ_4K
+#define ADI_SLAVE_OFFSET 0x8000
+
+/* Timeout (ms) for acquiring the hardware spinlock */
+#define ADI_HWSPINLOCK_TIMEOUT 5000
+/*
+ * The ADI controller has 50 channels: 2 software channels and
+ * 48 hardware channels.
+ */
+#define ADI_HW_CHNS 50
+
+#define ADI_FIFO_DRAIN_TIMEOUT 1000
+#define ADI_READ_TIMEOUT 2000
+#define REG_ADDR_LOW_MASK GENMASK(11, 0)
+
+struct sprd_adi {
+ struct spi_controller *ctlr;
+ struct device *dev;
+ void __iomem *base;
+ struct hwspinlock *hwlock;
+ unsigned long slave_vbase;
+ unsigned long slave_pbase;
+};
+
+static int sprd_adi_check_paddr(struct sprd_adi *sadi, u32 paddr)
+{
+ if (paddr < sadi->slave_pbase || paddr >
+ (sadi->slave_pbase + ADI_SLAVE_ADDR_SIZE)) {
+ dev_err(sadi->dev,
+ "slave physical address is incorrect, addr = 0x%x\n",
+ paddr);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static unsigned long sprd_adi_to_vaddr(struct sprd_adi *sadi, u32 paddr)
+{
+ return (paddr - sadi->slave_pbase + sadi->slave_vbase);
+}
+
+static int sprd_adi_drain_fifo(struct sprd_adi *sadi)
+{
+ u32 timeout = ADI_FIFO_DRAIN_TIMEOUT;
+ u32 sts;
+
+ do {
+ sts = readl_relaxed(sadi->base + REG_ADI_ARM_FIFO_STS);
+ if (sts & BIT_FIFO_EMPTY)
+ break;
+
+ cpu_relax();
+ } while (--timeout);
+
+ if (timeout == 0) {
+ dev_err(sadi->dev, "drain write fifo timeout\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int sprd_adi_fifo_is_full(struct sprd_adi *sadi)
+{
+ return readl_relaxed(sadi->base + REG_ADI_ARM_FIFO_STS) & BIT_FIFO_FULL;
+}
+
+static int sprd_adi_read(struct sprd_adi *sadi, u32 reg_paddr, u32 *read_val)
+{
+ int read_timeout = ADI_READ_TIMEOUT;
+ u32 val, rd_addr;
+
+ /*
+ * Write the physical address of the register to be read into the RD_CMD
+ * register; the ADI controller then starts the transfer automatically.
+ */
+ writel_relaxed(reg_paddr, sadi->base + REG_ADI_RD_CMD);
+
+ /*
+ * Wait for the read operation to complete: BIT_RD_CMD_BUSY is set as soon
+ * as the read command is written to the register and is cleared once the
+ * read operation has finished.
+ */
+ do {
+ val = readl_relaxed(sadi->base + REG_ADI_RD_DATA);
+ if (!(val & BIT_RD_CMD_BUSY))
+ break;
+
+ cpu_relax();
+ } while (--read_timeout);
+
+ if (read_timeout == 0) {
+ dev_err(sadi->dev, "ADI read timeout\n");
+ return -EBUSY;
+ }
+
+ /*
+ * The returned value contains both the data and the address of the register
+ * that was read: bits 0 to 15 hold the data and bits 16 to 30 hold the
+ * register address, which lets us validate the data by checking the
+ * returned address.
+ */
+ rd_addr = (val & RD_ADDR_MASK) >> RD_ADDR_SHIFT;
+
+ if (rd_addr != (reg_paddr & REG_ADDR_LOW_MASK)) {
+ dev_err(sadi->dev, "read error, reg addr = 0x%x, val = 0x%x\n",
+ reg_paddr, val);
+ return -EIO;
+ }
+
+ *read_val = val & RD_VALUE_MASK;
+ return 0;
+}
+
+static int sprd_adi_write(struct sprd_adi *sadi, unsigned long reg, u32 val)
+{
+ u32 timeout = ADI_FIFO_DRAIN_TIMEOUT;
+ int ret;
+
+ ret = sprd_adi_drain_fifo(sadi);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * The FIFO was drained above; now wait until it is not full before writing
+ * the data to the PMIC register.
+ */
+ do {
+ if (!sprd_adi_fifo_is_full(sadi)) {
+ writel_relaxed(val, (void __iomem *)reg);
+ break;
+ }
+
+ cpu_relax();
+ } while (--timeout);
+
+ if (timeout == 0) {
+ dev_err(sadi->dev, "write fifo is full\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int sprd_adi_transfer_one(struct spi_controller *ctlr,
+ struct spi_device *spi_dev,
+ struct spi_transfer *t)
+{
+ struct sprd_adi *sadi = spi_controller_get_devdata(ctlr);
+ unsigned long flags, virt_reg;
+ u32 phy_reg, val;
+ int ret;
+
+ if (t->rx_buf) {
+ phy_reg = *(u32 *)t->rx_buf + sadi->slave_pbase;
+
+ ret = sprd_adi_check_paddr(sadi, phy_reg);
+ if (ret)
+ return ret;
+
+ ret = hwspin_lock_timeout_irqsave(sadi->hwlock,
+ ADI_HWSPINLOCK_TIMEOUT,
+ &flags);
+ if (ret) {
+ dev_err(sadi->dev, "get the hw lock failed\n");
+ return ret;
+ }
+
+ ret = sprd_adi_read(sadi, phy_reg, &val);
+ hwspin_unlock_irqrestore(sadi->hwlock, &flags);
+ if (ret)
+ return ret;
+
+ *(u32 *)t->rx_buf = val;
+ } else if (t->tx_buf) {
+ u32 *p = (u32 *)t->tx_buf;
+
+ /*
+ * Get the physical address of the register to be written and convert it
+ * to a virtual address, since the write path operates on virtual register
+ * addresses.
+ */
+ phy_reg = *p++ + sadi->slave_pbase;
+ ret = sprd_adi_check_paddr(sadi, phy_reg);
+ if (ret)
+ return ret;
+
+ virt_reg = sprd_adi_to_vaddr(sadi, phy_reg);
+ val = *p;
+
+ ret = hwspin_lock_timeout_irqsave(sadi->hwlock,
+ ADI_HWSPINLOCK_TIMEOUT,
+ &flags);
+ if (ret) {
+ dev_err(sadi->dev, "get the hw lock failed\n");
+ return ret;
+ }
+
+ ret = sprd_adi_write(sadi, virt_reg, val);
+ hwspin_unlock_irqrestore(sadi->hwlock, &flags);
+ if (ret)
+ return ret;
+ } else {
+ dev_err(sadi->dev, "no buffer for transfer\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
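
Given the buffer layout sprd_adi_transfer_one() expects (a read passes the register offset in the first u32 of rx_buf and receives the value back in place; a write passes the offset and value as two u32s in tx_buf), a client read/write helper could look roughly like the sketch below. This is an illustration only, with hypothetical names; it is not part of the patch.

#include <linux/spi/spi.h>

static int example_adi_read(struct spi_device *spi, u32 reg, u32 *val)
{
	u32 buf = reg;				/* in: register offset */
	struct spi_transfer t = {
		.rx_buf = &buf,			/* value comes back in place */
		.len = sizeof(buf),
	};
	int ret = spi_sync_transfer(spi, &t, 1);

	if (!ret)
		*val = buf;
	return ret;
}

static int example_adi_write(struct spi_device *spi, u32 reg, u32 val)
{
	u32 buf[2] = { reg, val };		/* out: { offset, value } */
	struct spi_transfer t = {
		.tx_buf = buf,
		.len = sizeof(buf),
	};

	return spi_sync_transfer(spi, &t, 1);
}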
+
+static void sprd_adi_hw_init(struct sprd_adi *sadi)
+{
+ struct device_node *np = sadi->dev->of_node;
+ int i, size, chn_cnt;
+ const __be32 *list;
+ u32 tmp;
+
+ /* Use the default 12-bit register address width */
+ writel_relaxed(0, sadi->base + REG_ADI_CTRL0);
+
+ /* Set all channels to the default priority */
+ writel_relaxed(0, sadi->base + REG_ADI_CHN_PRIL);
+ writel_relaxed(0, sadi->base + REG_ADI_CHN_PRIH);
+
+ /* Enable automatic clock gating */
+ tmp = readl_relaxed(sadi->base + REG_ADI_GSSI_CFG0);
+ tmp &= ~BIT_CLK_ALL_ON;
+ writel_relaxed(tmp, sadi->base + REG_ADI_GSSI_CFG0);
+
+ /* Apply the hardware channel configuration from the device tree */
+ list = of_get_property(np, "sprd,hw-channels", &size);
+ if (!list || !size) {
+ dev_info(sadi->dev, "no hw channels setting in node\n");
+ return;
+ }
+
+ chn_cnt = size / 8;
+ for (i = 0; i < chn_cnt; i++) {
+ u32 value;
+ u32 chn_id = be32_to_cpu(*list++);
+ u32 chn_config = be32_to_cpu(*list++);
+
+ /* Channels 0 and 1 are software channels */
+ if (chn_id < 2)
+ continue;
+
+ writel_relaxed(chn_config, sadi->base +
+ REG_ADI_CHN_ADDR(chn_id));
+
+ if (chn_id < 32) {
+ value = readl_relaxed(sadi->base + REG_ADI_CHN_EN);
+ value |= BIT(chn_id);
+ writel_relaxed(value, sadi->base + REG_ADI_CHN_EN);
+ } else if (chn_id < ADI_HW_CHNS) {
+ value = readl_relaxed(sadi->base + REG_ADI_CHN_EN1);
+ value |= BIT(chn_id - 32);
+ writel_relaxed(value, sadi->base + REG_ADI_CHN_EN1);
+ }
+ }
+}
+
+static int sprd_adi_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct spi_controller *ctlr;
+ struct sprd_adi *sadi;
+ struct resource *res;
+ u32 num_chipselect;
+ int ret;
+
+ if (!np) {
+ dev_err(&pdev->dev, "can not find the adi bus node\n");
+ return -ENODEV;
+ }
+
+ pdev->id = of_alias_get_id(np, "spi");
+ num_chipselect = of_get_child_count(np);
+
+ ctlr = spi_alloc_master(&pdev->dev, sizeof(struct sprd_adi));
+ if (!ctlr)
+ return -ENOMEM;
+
+ dev_set_drvdata(&pdev->dev, ctlr);
+ sadi = spi_controller_get_devdata(ctlr);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ sadi->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(sadi->base)) {
+ ret = PTR_ERR(sadi->base);
+ goto put_ctlr;
+ }
+
+ sadi->slave_vbase = (unsigned long)sadi->base + ADI_SLAVE_OFFSET;
+ sadi->slave_pbase = res->start + ADI_SLAVE_OFFSET;
+ sadi->ctlr = ctlr;
+ sadi->dev = &pdev->dev;
+ ret = of_hwspin_lock_get_id(np, 0);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can not get the hardware spinlock\n");
+ goto put_ctlr;
+ }
+
+ sadi->hwlock = hwspin_lock_request_specific(ret);
+ if (!sadi->hwlock) {
+ ret = -ENXIO;
+ goto put_ctlr;
+ }
+
+ sprd_adi_hw_init(sadi);
+
+ ctlr->dev.of_node = pdev->dev.of_node;
+ ctlr->bus_num = pdev->id;
+ ctlr->num_chipselect = num_chipselect;
+ ctlr->flags = SPI_MASTER_HALF_DUPLEX;
+ ctlr->bits_per_word_mask = 0;
+ ctlr->transfer_one = sprd_adi_transfer_one;
+
+ ret = devm_spi_register_controller(&pdev->dev, ctlr);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register SPI controller\n");
+ goto free_hwlock;
+ }
+
+ return 0;
+
+free_hwlock:
+ hwspin_lock_free(sadi->hwlock);
+put_ctlr:
+ spi_controller_put(ctlr);
+ return ret;
+}
+
+static int sprd_adi_remove(struct platform_device *pdev)
+{
+ struct spi_controller *ctlr = dev_get_drvdata(&pdev->dev);
+ struct sprd_adi *sadi = spi_controller_get_devdata(ctlr);
+
+ hwspin_lock_free(sadi->hwlock);
+ return 0;
+}
+
+static const struct of_device_id sprd_adi_of_match[] = {
+ {
+ .compatible = "sprd,sc9860-adi",
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sprd_adi_of_match);
+
+static struct platform_driver sprd_adi_driver = {
+ .driver = {
+ .name = "sprd-adi",
+ .of_match_table = sprd_adi_of_match,
+ },
+ .probe = sprd_adi_probe,
+ .remove = sprd_adi_remove,
+};
+module_platform_driver(sprd_adi_driver);
+
+MODULE_DESCRIPTION("Spreadtrum ADI Controller Driver");
+MODULE_AUTHOR("Baolin Wang <Baolin.Wang@spreadtrum.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
index 44550182a4a3..a76acedd7e2f 100644
--- a/drivers/spi/spi-tegra114.c
+++ b/drivers/spi/spi-tegra114.c
@@ -50,7 +50,7 @@
#define SPI_IDLE_SDA_PULL_LOW (2 << 18)
#define SPI_IDLE_SDA_PULL_HIGH (3 << 18)
#define SPI_IDLE_SDA_MASK (3 << 18)
-#define SPI_CS_SS_VAL (1 << 20)
+#define SPI_CS_SW_VAL (1 << 20)
#define SPI_CS_SW_HW (1 << 21)
/* SPI_CS_POL_INACTIVE bits are default high */
/* n from 0 to 3 */
@@ -705,9 +705,9 @@ static u32 tegra_spi_setup_transfer_one(struct spi_device *spi,
command1 |= SPI_CS_SW_HW;
if (spi->mode & SPI_CS_HIGH)
- command1 |= SPI_CS_SS_VAL;
+ command1 |= SPI_CS_SW_VAL;
else
- command1 &= ~SPI_CS_SS_VAL;
+ command1 &= ~SPI_CS_SW_VAL;
tegra_spi_writel(tspi, 0, SPI_COMMAND2);
} else {
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index e8b5a5e21b2e..b33a727a0158 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -2200,7 +2200,7 @@ static void devm_spi_unregister(struct device *dev, void *res)
* Context: can sleep
*
* Register a SPI device as with spi_register_controller() which will
- * automatically be unregister
+ * automatically be unregistered and freed.
*
* Return: zero on success, else a negative error code.
*/
@@ -2241,15 +2241,18 @@ static int __unregister(struct device *dev, void *null)
* only ones directly touching chip registers.
*
* This must be called from context that can sleep.
+ *
+ * Note that this function also drops a reference to the controller.
*/
void spi_unregister_controller(struct spi_controller *ctlr)
{
struct spi_controller *found;
+ int id = ctlr->bus_num;
int dummy;
/* First make sure that this controller was ever added */
mutex_lock(&board_lock);
- found = idr_find(&spi_master_idr, ctlr->bus_num);
+ found = idr_find(&spi_master_idr, id);
mutex_unlock(&board_lock);
if (found != ctlr) {
dev_dbg(&ctlr->dev,
@@ -2269,7 +2272,7 @@ void spi_unregister_controller(struct spi_controller *ctlr)
device_unregister(&ctlr->dev);
/* free bus id */
mutex_lock(&board_lock);
- idr_remove(&spi_master_idr, ctlr->bus_num);
+ idr_remove(&spi_master_idr, id);
mutex_unlock(&board_lock);
}
EXPORT_SYMBOL_GPL(spi_unregister_controller);
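
The bus number is latched into a local variable before device_unregister() because, per the kerneldoc note added above, unregistering drops a reference to the controller, so ctlr may already be freed by the time the IDR entry is removed. A generic sketch of the same pattern with a hypothetical foo object and registry:

#include <linux/device.h>
#include <linux/idr.h>
#include <linux/mutex.h>

static DEFINE_IDR(foo_idr);
static DEFINE_MUTEX(foo_lock);

struct foo {
	int id;
	struct device dev;
};

static void foo_unregister(struct foo *f)
{
	int id = f->id;			/* cache while *f is still valid */

	device_unregister(&f->dev);	/* may drop the final reference to f */

	mutex_lock(&foo_lock);
	idr_remove(&foo_idr, id);	/* must not dereference f here */
	mutex_unlock(&foo_lock);
}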
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 942d094269fb..9469695f5871 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -985,7 +985,7 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
mb = udev->mb_addr;
tcmu_flush_dcache_range(mb, sizeof(*mb));
- while (udev->cmdr_last_cleaned != ACCESS_ONCE(mb->cmd_tail)) {
+ while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) {
struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;
struct tcmu_cmd *cmd;
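
This hunk and the ones that follow are a mechanical ACCESS_ONCE() to READ_ONCE() conversion: READ_ONCE() is the current interface for performing a single, non-torn load of a location that another context may update concurrently, and unlike ACCESS_ONCE() it also handles non-scalar types. A trivial sketch with a hypothetical shared structure:

#include <linux/compiler.h>
#include <linux/types.h>

struct example_ring {
	u32 tail;			/* updated concurrently elsewhere */
};

static u32 example_read_tail(struct example_ring *r)
{
	/* Was: ACCESS_ONCE(r->tail) */
	return READ_ONCE(r->tail);	/* exactly one load, never refetched */
}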
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 3e865dbf878c..fbaa2a90d25d 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -483,7 +483,7 @@ static ssize_t wdm_read
if (rv < 0)
return -ERESTARTSYS;
- cntr = ACCESS_ONCE(desc->length);
+ cntr = READ_ONCE(desc->length);
if (cntr == 0) {
desc->read = 0;
retry:
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index e9326f31db8d..4ae667d8c238 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -150,7 +150,7 @@ static int usbfs_increase_memory_usage(u64 amount)
{
u64 lim;
- lim = ACCESS_ONCE(usbfs_memory_mb);
+ lim = READ_ONCE(usbfs_memory_mb);
lim <<= 20;
atomic64_add(amount, &usbfs_memory_usage);
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index d930bfda4010..58d59c5f8592 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -973,7 +973,7 @@ static ssize_t interface_show(struct device *dev, struct device_attribute *attr,
char *string;
intf = to_usb_interface(dev);
- string = ACCESS_ONCE(intf->cur_altsetting->string);
+ string = READ_ONCE(intf->cur_altsetting->string);
if (!string)
return 0;
return sprintf(buf, "%s\n", string);
@@ -989,7 +989,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
intf = to_usb_interface(dev);
udev = interface_to_usbdev(intf);
- alt = ACCESS_ONCE(intf->cur_altsetting);
+ alt = READ_ONCE(intf->cur_altsetting);
return sprintf(buf, "usb:v%04Xp%04Xd%04Xdc%02Xdsc%02Xdp%02X"
"ic%02Xisc%02Xip%02Xin%02X\n",
diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c
index 1f9941145746..0b59fa50aa30 100644
--- a/drivers/usb/gadget/udc/gr_udc.c
+++ b/drivers/usb/gadget/udc/gr_udc.c
@@ -1261,7 +1261,7 @@ static int gr_handle_in_ep(struct gr_ep *ep)
if (!req->last_desc)
return 0;
- if (ACCESS_ONCE(req->last_desc->ctrl) & GR_DESC_IN_CTRL_EN)
+ if (READ_ONCE(req->last_desc->ctrl) & GR_DESC_IN_CTRL_EN)
return 0; /* Not put in hardware buffers yet */
if (gr_read32(&ep->regs->epstat) & (GR_EPSTAT_B1 | GR_EPSTAT_B0))
@@ -1290,7 +1290,7 @@ static int gr_handle_out_ep(struct gr_ep *ep)
if (!req->curr_desc)
return 0;
- ctrl = ACCESS_ONCE(req->curr_desc->ctrl);
+ ctrl = READ_ONCE(req->curr_desc->ctrl);
if (ctrl & GR_DESC_OUT_CTRL_EN)
return 0; /* Not received yet */
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 44924824fa41..c86f89babd57 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -785,7 +785,7 @@ static void io_watchdog_func(unsigned long _ohci)
}
/* find the last TD processed by the controller. */
- head = hc32_to_cpu(ohci, ACCESS_ONCE(ed->hwHeadP)) & TD_MASK;
+ head = hc32_to_cpu(ohci, READ_ONCE(ed->hwHeadP)) & TD_MASK;
td_start = td;
td_next = list_prepare_entry(td, &ed->td_list, td_list);
list_for_each_entry_continue(td_next, &ed->td_list, td_list) {
diff --git a/drivers/usb/host/uhci-hcd.h b/drivers/usb/host/uhci-hcd.h
index d97f0d9b3ce6..f1cc47292a59 100644
--- a/drivers/usb/host/uhci-hcd.h
+++ b/drivers/usb/host/uhci-hcd.h
@@ -187,7 +187,7 @@ struct uhci_qh {
* We need a special accessor for the element pointer because it is
* subject to asynchronous updates by the controller.
*/
-#define qh_element(qh) ACCESS_ONCE((qh)->element)
+#define qh_element(qh) READ_ONCE((qh)->element)
#define LINK_TO_QH(uhci, qh) (UHCI_PTR_QH((uhci)) | \
cpu_to_hc32((uhci), (qh)->dma_handle))
@@ -275,7 +275,7 @@ struct uhci_td {
* subject to asynchronous updates by the controller.
*/
#define td_status(uhci, td) hc32_to_cpu((uhci), \
- ACCESS_ONCE((td)->status))
+ READ_ONCE((td)->status))
#define LINK_TO_TD(uhci, td) (cpu_to_hc32((uhci), (td)->dma_handle))
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index f5a86f651f38..2bc3705a99bd 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -665,7 +665,7 @@ static int vfio_dev_viable(struct device *dev, void *data)
{
struct vfio_group *group = data;
struct vfio_device *device;
- struct device_driver *drv = ACCESS_ONCE(dev->driver);
+ struct device_driver *drv = READ_ONCE(dev->driver);
struct vfio_unbound_dev *unbound;
int ret = -EINVAL;
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 046f6d280af5..35e929f132e8 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -929,7 +929,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
continue;
}
- tpg = ACCESS_ONCE(vs_tpg[*target]);
+ tpg = READ_ONCE(vs_tpg[*target]);
if (unlikely(!tpg)) {
/* Target does not exist, fail the request */
vhost_scsi_send_bad_target(vs, vq, head, out);