Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 43
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 27
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 15
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 29
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h | 1
-rw-r--r--  drivers/gpu/drm/drm_connector.c | 17
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 11
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 6
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c | 3
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c | 5
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-crtc.c | 2
-rw-r--r--  drivers/gpu/drm/imx/parallel-display.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 1
-rw-r--r--  drivers/gpu/drm/tegra/sor.c | 157
-rw-r--r--  drivers/gpu/drm/tilcdc/Kconfig | 11
-rw-r--r--  drivers/gpu/drm/tilcdc/Makefile | 3
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_slave_compat.c | 264
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_slave_compat.dts | 72
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_slave_compat.h | 25
-rw-r--r--  drivers/gpu/ipu-v3/ipu-dc.c | 3
-rw-r--r--  drivers/mtd/Kconfig | 1
-rw-r--r--  drivers/mtd/chips/map_ram.c | 34
-rw-r--r--  drivers/mtd/chips/map_rom.c | 34
-rw-r--r--  drivers/mtd/devices/docg3.c | 7
-rw-r--r--  drivers/mtd/devices/lart.c | 2
-rw-r--r--  drivers/mtd/devices/m25p80.c | 1
-rw-r--r--  drivers/mtd/devices/mtdram.c | 36
-rw-r--r--  drivers/mtd/devices/slram.c | 9
-rw-r--r--  drivers/mtd/maps/cfi_flagadm.c | 2
-rw-r--r--  drivers/mtd/maps/impa7.c | 2
-rw-r--r--  drivers/mtd/maps/netsc520.c | 2
-rw-r--r--  drivers/mtd/maps/nettel.c | 2
-rw-r--r--  drivers/mtd/maps/plat-ram.c | 38
-rw-r--r--  drivers/mtd/maps/sbc_gxx.c | 2
-rw-r--r--  drivers/mtd/maps/ts5500_flash.c | 2
-rw-r--r--  drivers/mtd/maps/uclinux.c | 2
-rw-r--r--  drivers/mtd/mtdconcat.c | 27
-rw-r--r--  drivers/mtd/mtdcore.c | 61
-rw-r--r--  drivers/mtd/mtdpart.c | 14
-rw-r--r--  drivers/mtd/mtdswap.c | 4
-rw-r--r--  drivers/mtd/nand/Kconfig | 5
-rw-r--r--  drivers/mtd/nand/Makefile | 2
-rw-r--r--  drivers/mtd/nand/ams-delta.c | 2
-rw-r--r--  drivers/mtd/nand/atmel/nand-controller.c | 7
-rw-r--r--  drivers/mtd/nand/atmel/pmecc.c | 17
-rw-r--r--  drivers/mtd/nand/atmel/pmecc.h | 1
-rw-r--r--  drivers/mtd/nand/au1550nd.c | 3
-rw-r--r--  drivers/mtd/nand/cmx270_nand.c | 2
-rw-r--r--  drivers/mtd/nand/denali.c | 291
-rw-r--r--  drivers/mtd/nand/denali.h | 44
-rw-r--r--  drivers/mtd/nand/denali_dt.c | 4
-rw-r--r--  drivers/mtd/nand/denali_pci.c | 5
-rw-r--r--  drivers/mtd/nand/diskonchip.c | 3
-rw-r--r--  drivers/mtd/nand/gpio.c | 112
-rw-r--r--  drivers/mtd/nand/hisi504_nand.c | 3
-rw-r--r--  drivers/mtd/nand/mtk_ecc.c | 13
-rw-r--r--  drivers/mtd/nand/mxc_nand.c | 19
-rw-r--r--  drivers/mtd/nand/nand_base.c | 34
-rw-r--r--  drivers/mtd/nand/nandsim.c | 13
-rw-r--r--  drivers/mtd/nand/nuc900_nand.c | 2
-rw-r--r--  drivers/mtd/nand/omap2.c | 377
-rw-r--r--  drivers/mtd/nand/pxa3xx_nand.c | 41
-rw-r--r--  drivers/mtd/nand/qcom_nandc.c | 127
-rw-r--r--  drivers/mtd/nand/sh_flctl.c | 9
-rw-r--r--  drivers/mtd/parsers/Kconfig | 8
-rw-r--r--  drivers/mtd/parsers/Makefile | 1
-rw-r--r--  drivers/mtd/parsers/sharpslpart.c | 398
-rw-r--r--  drivers/mtd/spi-nor/Kconfig | 6
-rw-r--r--  drivers/mtd/spi-nor/cadence-quadspi.c | 55
-rw-r--r--  drivers/mtd/spi-nor/intel-spi-pci.c | 3
-rw-r--r--  drivers/mtd/spi-nor/intel-spi.c | 209
-rw-r--r--  drivers/mtd/spi-nor/mtk-quadspi.c | 70
-rw-r--r--  drivers/mtd/spi-nor/spi-nor.c | 105
-rw-r--r--  drivers/mtd/spi-nor/stm32-quadspi.c | 35
-rw-r--r--  drivers/net/dsa/bcm_sf2.c | 2
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_main.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.c | 6
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_common.c | 10
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 165
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_nvm.c | 8
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_type.h | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 21
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_client.c | 38
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_main.c | 10
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 2
-rw-r--r--  drivers/net/ethernet/intel/igbvf/netdev.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/offload.c | 10
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 38
-rw-r--r--  drivers/net/geneve.c | 16
-rw-r--r--  drivers/net/ipvlan/ipvlan_core.c | 36
-rw-r--r--  drivers/net/phy/cortina.c | 4
-rw-r--r--  drivers/net/tap.c | 2
-rw-r--r--  drivers/net/tun.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath10k/htt_rx.c | 51
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/main.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/cfg/9000.c | 73
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/cfg/a000.c | 10
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/api/scan.h | 59
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/file.h | 1
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-config.h | 5
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 6
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/scan.c | 86
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 132
-rw-r--r--  drivers/platform/x86/dell-laptop.c | 4
-rw-r--r--  drivers/platform/x86/dell-smbios-wmi.c | 13
-rw-r--r--  drivers/platform/x86/dell-wmi-descriptor.c | 26
-rw-r--r--  drivers/platform/x86/dell-wmi-descriptor.h | 1
-rw-r--r--  drivers/pwm/pwm-atmel-tcb.c | 63
-rw-r--r--  drivers/pwm/pwm-img.c | 160
-rw-r--r--  drivers/pwm/pwm-mediatek.c | 53
-rw-r--r--  drivers/pwm/pwm-stm32-lp.c | 3
-rw-r--r--  drivers/pwm/pwm-sun4i.c | 8
-rw-r--r--  drivers/rtc/Kconfig | 42
-rw-r--r--  drivers/rtc/Makefile | 3
-rw-r--r--  drivers/rtc/interface.c | 6
-rw-r--r--  drivers/rtc/rtc-abx80x.c | 12
-rw-r--r--  drivers/rtc/rtc-armada38x.c | 101
-rw-r--r--  drivers/rtc/rtc-at91rm9200.c | 19
-rw-r--r--  drivers/rtc/rtc-ds1305.c | 70
-rw-r--r--  drivers/rtc/rtc-ds1307.c | 57
-rw-r--r--  drivers/rtc/rtc-ds1390.c | 7
-rw-r--r--  drivers/rtc/rtc-ds1511.c | 75
-rw-r--r--  drivers/rtc/rtc-jz4740.c | 6
-rw-r--r--  drivers/rtc/rtc-m41t80.c | 84
-rw-r--r--  drivers/rtc/rtc-m48t86.c | 58
-rw-r--r--  drivers/rtc/rtc-mt7622.c | 422
-rw-r--r--  drivers/rtc/rtc-omap.c | 57
-rw-r--r--  drivers/rtc/rtc-pcf8523.c | 40
-rw-r--r--  drivers/rtc/rtc-pcf85363.c | 220
-rw-r--r--  drivers/rtc/rtc-pcf8563.c | 4
-rw-r--r--  drivers/rtc/rtc-pl031.c | 48
-rw-r--r--  drivers/rtc/rtc-rv3029c2.c | 18
-rw-r--r--  drivers/rtc/rtc-rx8010.c | 7
-rw-r--r--  drivers/rtc/rtc-sc27xx.c | 662
-rw-r--r--  drivers/rtc/rtc-sysfs.c | 25
-rw-r--r--  drivers/rtc/rtc-xgene.c | 47
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_io.c | 40
-rw-r--r--  drivers/scsi/scsi_devinfo.c | 18
-rw-r--r--  drivers/scsi/scsi_priv.h | 15
-rw-r--r--  drivers/scsi/scsi_scan.c | 2
-rw-r--r--  drivers/target/iscsi/cxgbit/cxgbit.h | 2
-rw-r--r--  drivers/target/iscsi/cxgbit/cxgbit_cm.c | 45
-rw-r--r--  drivers/target/iscsi/cxgbit/cxgbit_ddp.c | 8
-rw-r--r--  drivers/target/iscsi/cxgbit/cxgbit_main.c | 1
-rw-r--r--  drivers/target/iscsi/iscsi_target.c | 80
-rw-r--r--  drivers/target/iscsi/iscsi_target_configfs.c | 3
-rw-r--r--  drivers/target/iscsi/iscsi_target_erl1.c | 7
-rw-r--r--  drivers/target/iscsi/iscsi_target_parameters.c | 39
-rw-r--r--  drivers/target/iscsi/iscsi_target_seq_pdu_list.c | 2
-rw-r--r--  drivers/target/iscsi/iscsi_target_tpg.c | 7
-rw-r--r--  drivers/target/iscsi/iscsi_target_util.c | 4
-rw-r--r--  drivers/target/target_core_alua.c | 51
-rw-r--r--  drivers/target/target_core_alua.h | 9
-rw-r--r--  drivers/target/target_core_configfs.c | 14
-rw-r--r--  drivers/target/target_core_fabric_configfs.c | 2
-rw-r--r--  drivers/target/target_core_file.c | 4
-rw-r--r--  drivers/target/target_core_internal.h | 1
-rw-r--r--  drivers/target/target_core_pr.c | 41
-rw-r--r--  drivers/target/target_core_tmr.c | 12
-rw-r--r--  drivers/target/target_core_transport.c | 84
-rw-r--r--  drivers/target/target_core_user.c | 208
181 files changed, 4979 insertions, 2037 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index c21adf60a7f2..057e1ecd83ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -59,12 +59,6 @@ static bool check_atom_bios(uint8_t *bios, size_t size)
return false;
}
- tmp = bios[0x18] | (bios[0x19] << 8);
- if (bios[tmp + 0x14] != 0x0) {
- DRM_INFO("Not an x86 BIOS ROM\n");
- return false;
- }
-
bios_header_start = bios[0x48] | (bios[0x49] << 8);
if (!bios_header_start) {
DRM_INFO("Can't locate bios header\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 6c78623e1386..a57cec737c18 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1495,8 +1495,11 @@ out:
memset(wait, 0, sizeof(*wait));
wait->out.status = (r > 0);
wait->out.first_signaled = first;
- /* set return value 0 to indicate success */
- r = array[first]->error;
+
+ if (first < fence_count && array[first])
+ r = array[first]->error;
+ else
+ r = 0;
err_free_fence_array:
for (i = 0; i < fence_count; i++)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 2d792cdc094c..2c574374d9b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1837,6 +1837,9 @@ static int amdgpu_fini(struct amdgpu_device *adev)
adev->ip_blocks[i].status.hw = false;
}
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
+ amdgpu_ucode_fini_bo(adev);
+
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_blocks[i].status.sw)
continue;
@@ -3261,9 +3264,9 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
pm_pg_lock = (*pos >> 23) & 1;
if (*pos & (1ULL << 62)) {
- se_bank = (*pos >> 24) & 0x3FF;
- sh_bank = (*pos >> 34) & 0x3FF;
- instance_bank = (*pos >> 44) & 0x3FF;
+ se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
+ sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
+ instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
if (se_bank == 0x3FF)
se_bank = 0xFFFFFFFF;
@@ -3337,9 +3340,9 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
pm_pg_lock = (*pos >> 23) & 1;
if (*pos & (1ULL << 62)) {
- se_bank = (*pos >> 24) & 0x3FF;
- sh_bank = (*pos >> 34) & 0x3FF;
- instance_bank = (*pos >> 44) & 0x3FF;
+ se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
+ sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
+ instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
if (se_bank == 0x3FF)
se_bank = 0xFFFFFFFF;
@@ -3687,12 +3690,12 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
return -EINVAL;
/* decode offset */
- offset = (*pos & 0x7F);
- se = ((*pos >> 7) & 0xFF);
- sh = ((*pos >> 15) & 0xFF);
- cu = ((*pos >> 23) & 0xFF);
- wave = ((*pos >> 31) & 0xFF);
- simd = ((*pos >> 37) & 0xFF);
+ offset = (*pos & GENMASK_ULL(6, 0));
+ se = (*pos & GENMASK_ULL(14, 7)) >> 7;
+ sh = (*pos & GENMASK_ULL(22, 15)) >> 15;
+ cu = (*pos & GENMASK_ULL(30, 23)) >> 23;
+ wave = (*pos & GENMASK_ULL(36, 31)) >> 31;
+ simd = (*pos & GENMASK_ULL(44, 37)) >> 37;
/* switch to the specific se/sh/cu */
mutex_lock(&adev->grbm_idx_mutex);
@@ -3737,14 +3740,14 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
return -EINVAL;
/* decode offset */
- offset = (*pos & 0xFFF); /* in dwords */
- se = ((*pos >> 12) & 0xFF);
- sh = ((*pos >> 20) & 0xFF);
- cu = ((*pos >> 28) & 0xFF);
- wave = ((*pos >> 36) & 0xFF);
- simd = ((*pos >> 44) & 0xFF);
- thread = ((*pos >> 52) & 0xFF);
- bank = ((*pos >> 60) & 1);
+ offset = *pos & GENMASK_ULL(11, 0);
+ se = (*pos & GENMASK_ULL(19, 12)) >> 12;
+ sh = (*pos & GENMASK_ULL(27, 20)) >> 20;
+ cu = (*pos & GENMASK_ULL(35, 28)) >> 28;
+ wave = (*pos & GENMASK_ULL(43, 36)) >> 36;
+ simd = (*pos & GENMASK_ULL(51, 44)) >> 44;
+ thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
+ bank = (*pos & GENMASK_ULL(61, 60)) >> 60;
data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
if (!data)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index a418df1b9422..e87eedcc0da9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -63,6 +63,11 @@ retry:
flags, NULL, resv, 0, &bo);
if (r) {
if (r != -ERESTARTSYS) {
+ if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
+ flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ goto retry;
+ }
+
if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
goto retry;
@@ -323,7 +328,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
bo->tbo.ttm->pages);
if (r)
- goto unlock_mmap_sem;
+ goto release_object;
r = amdgpu_bo_reserve(bo, true);
if (r)
@@ -348,9 +353,6 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
free_pages:
release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages);
-unlock_mmap_sem:
- up_read(&current->mm->mmap_sem);
-
release_object:
drm_gem_object_put_unlocked(gobj);
@@ -556,9 +558,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
dev_err(&dev->pdev->dev,
- "va_address 0x%lX is in reserved area 0x%X\n",
- (unsigned long)args->va_address,
- AMDGPU_VA_RESERVED_SIZE);
+ "va_address 0x%LX is in reserved area 0x%LX\n",
+ args->va_address, AMDGPU_VA_RESERVED_SIZE);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 33535d347734..00e0ce10862f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -71,12 +71,6 @@ static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
{
struct amdgpu_gtt_mgr *mgr = man->priv;
- spin_lock(&mgr->lock);
- if (!drm_mm_clean(&mgr->mm)) {
- spin_unlock(&mgr->lock);
- return -EBUSY;
- }
-
drm_mm_takedown(&mgr->mm);
spin_unlock(&mgr->lock);
kfree(mgr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index d6df5728df7f..6c570d4e4516 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -946,6 +946,10 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
struct amdgpu_device *adev = dev_get_drvdata(dev);
umode_t effective_mode = attr->mode;
+ /* no skipping for powerplay */
+ if (adev->powerplay.cgs_device)
+ return effective_mode;
+
/* Skip limit attributes if DPM is not enabled */
if (!adev->pm.dpm_enabled &&
(attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index 5f5aa5fddc16..033fba2def6f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -164,9 +164,6 @@ static int amdgpu_pp_hw_fini(void *handle)
ret = adev->powerplay.ip_funcs->hw_fini(
adev->powerplay.pp_handle);
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
- amdgpu_ucode_fini_bo(adev);
-
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index 90af8e82b16a..ae9c106979d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -169,10 +169,14 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
int flags)
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
+ struct dma_buf *buf;
if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
return ERR_PTR(-EPERM);
- return drm_gem_prime_export(dev, gobj, flags);
+ buf = drm_gem_prime_export(dev, gobj, flags);
+ if (!IS_ERR(buf))
+ buf->file->f_mapping = dev->anon_inode->i_mapping;
+ return buf;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 447d446b5015..7714f4a6c8b0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -442,8 +442,6 @@ static int psp_hw_fini(void *handle)
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
return 0;
- amdgpu_ucode_fini_bo(adev);
-
psp_ring_destroy(psp, PSP_RING_TYPE__KM);
amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index aa914256b4bc..bae77353447b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -94,7 +94,8 @@ struct amdgpu_bo_list_entry;
#define AMDGPU_MMHUB 1
/* hardcode that limit for now */
-#define AMDGPU_VA_RESERVED_SIZE (8 << 20)
+#define AMDGPU_VA_RESERVED_SIZE (8ULL << 20)
+
/* max vmids dedicated for process */
#define AMDGPU_VM_MAX_RESERVED_VMID 1
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 26e900627971..4acca92f6a52 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -68,11 +68,6 @@ static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man)
struct amdgpu_vram_mgr *mgr = man->priv;
spin_lock(&mgr->lock);
- if (!drm_mm_clean(&mgr->mm)) {
- spin_unlock(&mgr->lock);
- return -EBUSY;
- }
-
drm_mm_takedown(&mgr->mm);
spin_unlock(&mgr->lock);
kfree(mgr);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 00868764a0dd..5c8a7a48a4ad 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -4670,6 +4670,14 @@ static int gfx_v7_0_sw_fini(void *handle)
gfx_v7_0_cp_compute_fini(adev);
gfx_v7_0_rlc_fini(adev);
gfx_v7_0_mec_fini(adev);
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
+ &adev->gfx.rlc.clear_state_gpu_addr,
+ (void **)&adev->gfx.rlc.cs_ptr);
+ if (adev->gfx.rlc.cp_table_size) {
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
+ &adev->gfx.rlc.cp_table_gpu_addr,
+ (void **)&adev->gfx.rlc.cp_table_ptr);
+ }
gfx_v7_0_free_microcode(adev);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index b8002ac3e536..9ecdf621a74a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -2118,6 +2118,15 @@ static int gfx_v8_0_sw_fini(void *handle)
gfx_v8_0_mec_fini(adev);
gfx_v8_0_rlc_fini(adev);
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
+ &adev->gfx.rlc.clear_state_gpu_addr,
+ (void **)&adev->gfx.rlc.cs_ptr);
+ if ((adev->asic_type == CHIP_CARRIZO) ||
+ (adev->asic_type == CHIP_STONEY)) {
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
+ &adev->gfx.rlc.cp_table_gpu_addr,
+ (void **)&adev->gfx.rlc.cp_table_ptr);
+ }
gfx_v8_0_free_microcode(adev);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 7f15bb2c5233..da43813d67a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -207,6 +207,12 @@ static const u32 golden_settings_gc_9_1_rv1[] =
SOC15_REG_OFFSET(GC, 0, mmTD_CNTL), 0x01bd9f33, 0x00000800
};
+static const u32 golden_settings_gc_9_x_common[] =
+{
+ SOC15_REG_OFFSET(GC, 0, mmGRBM_CAM_INDEX), 0xffffffff, 0x00000000,
+ SOC15_REG_OFFSET(GC, 0, mmGRBM_CAM_DATA), 0xffffffff, 0x2544c382
+};
+
#define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
#define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042
@@ -242,6 +248,9 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
default:
break;
}
+
+ amdgpu_program_register_sequence(adev, golden_settings_gc_9_x_common,
+ (const u32)ARRAY_SIZE(golden_settings_gc_9_x_common));
}
static void gfx_v9_0_scratch_init(struct amdgpu_device *adev)
@@ -988,12 +997,22 @@ static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}
+static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
+ uint32_t wave, uint32_t thread,
+ uint32_t start, uint32_t size,
+ uint32_t *dst)
+{
+ wave_read_regs(
+ adev, simd, wave, thread,
+ start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
+}
static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
.get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
.select_se_sh = &gfx_v9_0_select_se_sh,
.read_wave_data = &gfx_v9_0_read_wave_data,
.read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
+ .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
};
static void gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
@@ -1449,6 +1468,14 @@ static int gfx_v9_0_sw_fini(void *handle)
gfx_v9_0_mec_fini(adev);
gfx_v9_0_ngg_fini(adev);
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
+ &adev->gfx.rlc.clear_state_gpu_addr,
+ (void **)&adev->gfx.rlc.cs_ptr);
+ if (adev->asic_type == CHIP_RAVEN) {
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
+ &adev->gfx.rlc.cp_table_gpu_addr,
+ (void **)&adev->gfx.rlc.cp_table_ptr);
+ }
gfx_v9_0_free_microcode(adev);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 621699331e09..c8f1aebeac7a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -392,7 +392,16 @@ static int gmc_v9_0_early_init(void *handle)
static int gmc_v9_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 3, 3 };
+ /*
+ * The latest engine allocation on gfx9 is:
+ * Engine 0, 1: idle
+ * Engine 2, 3: firmware
+ * Engine 4~13: amdgpu ring, subject to change when ring number changes
+ * Engine 14~15: idle
+ * Engine 16: kfd tlb invalidation
+ * Engine 17: Gart flushes
+ */
+ unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 };
unsigned i;
for(i = 0; i < adev->num_rings; ++i) {
@@ -405,9 +414,9 @@ static int gmc_v9_0_late_init(void *handle)
ring->funcs->vmhub);
}
- /* Engine 17 is used for GART flushes */
+ /* Engine 16 is used for KFD and 17 for GART flushes */
for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
- BUG_ON(vm_inv_eng[i] > 17);
+ BUG_ON(vm_inv_eng[i] > 16);
return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
index a129bc5b1844..c6febbf0bf69 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
@@ -1486,7 +1486,7 @@ int atomctrl_get_leakage_vddc_base_on_leakage(struct pp_hwmgr *hwmgr,
if (vddci_id_buf[i] == virtual_voltage_id) {
for (j = 0; j < profile->ucLeakageBinNum; j++) {
if (efuse_voltage_id <= leakage_bin[j]) {
- *vddci = vddci_buf[j * profile->ucElbVDDC_Num + i];
+ *vddci = vddci_buf[j * profile->ucElbVDDCI_Num + i];
break;
}
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
index d1af1483c69b..a651ebcf44fd 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
@@ -830,9 +830,9 @@ static int init_over_drive_limits(
const ATOM_Tonga_POWERPLAYTABLE *powerplay_table)
{
hwmgr->platform_descriptor.overdriveLimit.engineClock =
- le16_to_cpu(powerplay_table->ulMaxODEngineClock);
+ le32_to_cpu(powerplay_table->ulMaxODEngineClock);
hwmgr->platform_descriptor.overdriveLimit.memoryClock =
- le16_to_cpu(powerplay_table->ulMaxODMemoryClock);
+ le32_to_cpu(powerplay_table->ulMaxODMemoryClock);
hwmgr->platform_descriptor.minOverdriveVDDC = 0;
hwmgr->platform_descriptor.maxOverdriveVDDC = 0;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 4466469cf8ab..e33ec7fc5d09 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -3778,7 +3778,7 @@ static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
"Trying to Unfreeze MCLK DPM when DPM is disabled",
);
PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_SCLKDPM_UnfreezeLevel),
+ PPSMC_MSG_MCLKDPM_UnfreezeLevel),
"Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
return -EINVAL);
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 4f79c21f27ed..f8d838c2c8ee 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -753,6 +753,7 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
uint32_t config_telemetry = 0;
struct pp_atomfwctrl_voltage_table vol_table;
struct cgs_system_info sys_info = {0};
+ uint32_t reg;
data = kzalloc(sizeof(struct vega10_hwmgr), GFP_KERNEL);
if (data == NULL)
@@ -859,6 +860,16 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
advanceFanControlParameters.usFanPWMMinLimit *
hwmgr->thermal_controller.fanInfo.ulMaxRPM / 100;
+ reg = soc15_get_register_offset(DF_HWID, 0,
+ mmDF_CS_AON0_DramBaseAddress0_BASE_IDX,
+ mmDF_CS_AON0_DramBaseAddress0);
+ data->mem_channels = (cgs_read_register(hwmgr->device, reg) &
+ DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK) >>
+ DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
+ PP_ASSERT_WITH_CODE(data->mem_channels < ARRAY_SIZE(channel_number),
+ "Mem Channel Index Exceeded maximum!",
+ return -EINVAL);
+
return result;
}
@@ -1777,7 +1788,7 @@ static int vega10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
struct vega10_single_dpm_table *dpm_table =
&(data->dpm_table.mem_table);
int result = 0;
- uint32_t i, j, reg, mem_channels;
+ uint32_t i, j;
for (i = 0; i < dpm_table->count; i++) {
result = vega10_populate_single_memory_level(hwmgr,
@@ -1801,20 +1812,10 @@ static int vega10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
i++;
}
- reg = soc15_get_register_offset(DF_HWID, 0,
- mmDF_CS_AON0_DramBaseAddress0_BASE_IDX,
- mmDF_CS_AON0_DramBaseAddress0);
- mem_channels = (cgs_read_register(hwmgr->device, reg) &
- DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK) >>
- DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
- PP_ASSERT_WITH_CODE(mem_channels < ARRAY_SIZE(channel_number),
- "Mem Channel Index Exceeded maximum!",
- return -1);
-
- pp_table->NumMemoryChannels = cpu_to_le16(mem_channels);
+ pp_table->NumMemoryChannels = (uint16_t)(data->mem_channels);
pp_table->MemoryChannelWidth =
- cpu_to_le16(HBM_MEMORY_CHANNEL_WIDTH *
- channel_number[mem_channels]);
+ (uint16_t)(HBM_MEMORY_CHANNEL_WIDTH *
+ channel_number[data->mem_channels]);
pp_table->LowestUclkReservedForUlv =
(uint8_t)(data->lowest_uclk_reserved_for_ulv);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
index b4b461c3b8ee..8f7358cc3327 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
@@ -389,6 +389,7 @@ struct vega10_hwmgr {
uint32_t config_telemetry;
uint32_t smu_version;
uint32_t acg_loop_state;
+ uint32_t mem_channels;
};
#define VEGA10_DPM2_NEAR_TDP_DEC 10
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 704fc8934616..25f4b2e9a44f 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -234,6 +234,10 @@ int drm_connector_init(struct drm_device *dev,
config->link_status_property,
0);
+ drm_object_attach_property(&connector->base,
+ config->non_desktop_property,
+ 0);
+
if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
drm_object_attach_property(&connector->base, config->prop_crtc_id, 0);
}
@@ -763,6 +767,10 @@ DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
* value of link-status is "GOOD". If something fails during or after modeset,
* the kernel driver may set this to "BAD" and issue a hotplug uevent. Drivers
* should update this value using drm_mode_connector_set_link_status_property().
+ * non_desktop:
+ * Indicates the output should be ignored for purposes of displaying a
+ * standard desktop environment or console. This is most likely because
+ * the output device is not rectilinear.
*
* Connectors also have one standardized atomic property:
*
@@ -811,6 +819,11 @@ int drm_connector_create_standard_properties(struct drm_device *dev)
return -ENOMEM;
dev->mode_config.link_status_property = prop;
+ prop = drm_property_create_bool(dev, DRM_MODE_PROP_IMMUTABLE, "non-desktop");
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.non_desktop_property = prop;
+
return 0;
}
@@ -1194,6 +1207,10 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
if (edid)
size = EDID_LENGTH * (1 + edid->extensions);
+ drm_object_property_set_value(&connector->base,
+ dev->mode_config.non_desktop_property,
+ connector->display_info.non_desktop);
+
ret = drm_property_replace_global_blob(dev,
&connector->edid_blob_ptr,
size,
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 00ddabfbf980..2e8fb51282ef 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -82,6 +82,8 @@
#define EDID_QUIRK_FORCE_6BPC (1 << 10)
/* Force 10bpc */
#define EDID_QUIRK_FORCE_10BPC (1 << 11)
+/* Non desktop display (i.e. HMD) */
+#define EDID_QUIRK_NON_DESKTOP (1 << 12)
struct detailed_mode_closure {
struct drm_connector *connector;
@@ -157,6 +159,9 @@ static const struct edid_quirk {
/* Rotel RSX-1058 forwards sink's EDID but only does HDMI 1.1*/
{ "ETR", 13896, EDID_QUIRK_FORCE_8BPC },
+
+ /* HTC Vive VR Headset */
+ { "HVR", 0xaa01, EDID_QUIRK_NON_DESKTOP },
};
/*
@@ -4393,7 +4398,7 @@ static void drm_parse_cea_ext(struct drm_connector *connector,
}
static void drm_add_display_info(struct drm_connector *connector,
- struct edid *edid)
+ struct edid *edid, u32 quirks)
{
struct drm_display_info *info = &connector->display_info;
@@ -4407,6 +4412,8 @@ static void drm_add_display_info(struct drm_connector *connector,
info->max_tmds_clock = 0;
info->dvi_dual = false;
+ info->non_desktop = !!(quirks & EDID_QUIRK_NON_DESKTOP);
+
if (edid->revision < 3)
return;
@@ -4627,7 +4634,7 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
* To avoid multiple parsing of same block, lets parse that map
* from sink info, before parsing CEA modes.
*/
- drm_add_display_info(connector, edid);
+ drm_add_display_info(connector, edid, quirks);
/*
* EDID spec says modes should be preferred in this order:
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 116d1f1337c7..07374008f146 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -2033,6 +2033,9 @@ static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
{
bool enable;
+ if (connector->display_info.non_desktop)
+ return false;
+
if (strict)
enable = connector->status == connector_status_connected;
else
@@ -2052,7 +2055,8 @@ static void drm_enable_connectors(struct drm_fb_helper *fb_helper,
connector = fb_helper->connector_info[i]->connector;
enabled[i] = drm_connector_enabled(connector, true);
DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id,
- enabled[i] ? "yes" : "no");
+ connector->display_info.non_desktop ? "non desktop" : enabled[i] ? "yes" : "no");
+
any_enabled |= enabled[i];
}
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index 58e9e0601a61..faf17b83b910 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -210,7 +210,6 @@ static int fsl_dcu_drm_pm_suspend(struct device *dev)
return PTR_ERR(fsl_dev->state);
}
- clk_disable_unprepare(fsl_dev->pix_clk);
clk_disable_unprepare(fsl_dev->clk);
return 0;
@@ -233,6 +232,7 @@ static int fsl_dcu_drm_pm_resume(struct device *dev)
if (fsl_dev->tcon)
fsl_tcon_bypass_enable(fsl_dev->tcon);
fsl_dcu_drm_init_planes(fsl_dev->drm);
+ enable_irq(fsl_dev->irq);
drm_atomic_helper_resume(fsl_dev->drm, fsl_dev->state);
console_lock();
@@ -240,7 +240,6 @@ static int fsl_dcu_drm_pm_resume(struct device *dev)
console_unlock();
drm_kms_helper_poll_enable(fsl_dev->drm);
- enable_irq(fsl_dev->irq);
return 0;
}
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index edd7d8127d19..c54806d08dd7 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -102,7 +102,6 @@ static int fsl_dcu_attach_panel(struct fsl_dcu_drm_device *fsl_dev,
{
struct drm_encoder *encoder = &fsl_dev->encoder;
struct drm_connector *connector = &fsl_dev->connector.base;
- struct drm_mode_config *mode_config = &fsl_dev->drm->mode_config;
int ret;
fsl_dev->connector.encoder = encoder;
@@ -122,10 +121,6 @@ static int fsl_dcu_attach_panel(struct fsl_dcu_drm_device *fsl_dev,
if (ret < 0)
goto err_sysfs;
- drm_object_property_set_value(&connector->base,
- mode_config->dpms_property,
- DRM_MODE_DPMS_OFF);
-
ret = drm_panel_attach(panel, connector);
if (ret) {
dev_err(fsl_dev->dev, "failed to attach panel\n");
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 53e0b24beda6..9a9961802f5c 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -115,7 +115,7 @@ static void imx_drm_crtc_reset(struct drm_crtc *crtc)
if (crtc->state) {
if (crtc->state->mode_blob)
- drm_property_unreference_blob(crtc->state->mode_blob);
+ drm_property_blob_put(crtc->state->mode_blob);
state = to_imx_crtc_state(crtc->state);
memset(state, 0, sizeof(*state));
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 8def97d75030..aedecda9728a 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -183,7 +183,7 @@ static int imx_pd_register(struct drm_device *drm,
&imx_pd_connector_helper_funcs);
drm_connector_init(drm, &imxpd->connector,
&imx_pd_connector_funcs,
- DRM_MODE_CONNECTOR_VGA);
+ DRM_MODE_CONNECTOR_DPI);
}
if (imxpd->panel)
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 2fcf805d3a16..33b821d6d018 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -245,7 +245,6 @@ static int radeonfb_create(struct drm_fb_helper *helper,
}
info->par = rfbdev;
- info->skip_vt_switch = true;
ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
if (ret) {
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 4bcacd3f4861..b0a1dedac802 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -174,9 +174,9 @@ struct tegra_sor {
struct reset_control *rst;
struct clk *clk_parent;
- struct clk *clk_brick;
struct clk *clk_safe;
- struct clk *clk_src;
+ struct clk *clk_out;
+ struct clk *clk_pad;
struct clk *clk_dp;
struct clk *clk;
@@ -255,7 +255,7 @@ static int tegra_sor_set_parent_clock(struct tegra_sor *sor, struct clk *parent)
clk_disable_unprepare(sor->clk);
- err = clk_set_parent(sor->clk, parent);
+ err = clk_set_parent(sor->clk_out, parent);
if (err < 0)
return err;
@@ -266,24 +266,24 @@ static int tegra_sor_set_parent_clock(struct tegra_sor *sor, struct clk *parent)
return 0;
}
-struct tegra_clk_sor_brick {
+struct tegra_clk_sor_pad {
struct clk_hw hw;
struct tegra_sor *sor;
};
-static inline struct tegra_clk_sor_brick *to_brick(struct clk_hw *hw)
+static inline struct tegra_clk_sor_pad *to_pad(struct clk_hw *hw)
{
- return container_of(hw, struct tegra_clk_sor_brick, hw);
+ return container_of(hw, struct tegra_clk_sor_pad, hw);
}
-static const char * const tegra_clk_sor_brick_parents[] = {
+static const char * const tegra_clk_sor_pad_parents[] = {
"pll_d2_out0", "pll_dp"
};
-static int tegra_clk_sor_brick_set_parent(struct clk_hw *hw, u8 index)
+static int tegra_clk_sor_pad_set_parent(struct clk_hw *hw, u8 index)
{
- struct tegra_clk_sor_brick *brick = to_brick(hw);
- struct tegra_sor *sor = brick->sor;
+ struct tegra_clk_sor_pad *pad = to_pad(hw);
+ struct tegra_sor *sor = pad->sor;
u32 value;
value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
@@ -304,10 +304,10 @@ static int tegra_clk_sor_brick_set_parent(struct clk_hw *hw, u8 index)
return 0;
}
-static u8 tegra_clk_sor_brick_get_parent(struct clk_hw *hw)
+static u8 tegra_clk_sor_pad_get_parent(struct clk_hw *hw)
{
- struct tegra_clk_sor_brick *brick = to_brick(hw);
- struct tegra_sor *sor = brick->sor;
+ struct tegra_clk_sor_pad *pad = to_pad(hw);
+ struct tegra_sor *sor = pad->sor;
u8 parent = U8_MAX;
u32 value;
@@ -328,33 +328,33 @@ static u8 tegra_clk_sor_brick_get_parent(struct clk_hw *hw)
return parent;
}
-static const struct clk_ops tegra_clk_sor_brick_ops = {
- .set_parent = tegra_clk_sor_brick_set_parent,
- .get_parent = tegra_clk_sor_brick_get_parent,
+static const struct clk_ops tegra_clk_sor_pad_ops = {
+ .set_parent = tegra_clk_sor_pad_set_parent,
+ .get_parent = tegra_clk_sor_pad_get_parent,
};
-static struct clk *tegra_clk_sor_brick_register(struct tegra_sor *sor,
- const char *name)
+static struct clk *tegra_clk_sor_pad_register(struct tegra_sor *sor,
+ const char *name)
{
- struct tegra_clk_sor_brick *brick;
+ struct tegra_clk_sor_pad *pad;
struct clk_init_data init;
struct clk *clk;
- brick = devm_kzalloc(sor->dev, sizeof(*brick), GFP_KERNEL);
- if (!brick)
+ pad = devm_kzalloc(sor->dev, sizeof(*pad), GFP_KERNEL);
+ if (!pad)
return ERR_PTR(-ENOMEM);
- brick->sor = sor;
+ pad->sor = sor;
init.name = name;
init.flags = 0;
- init.parent_names = tegra_clk_sor_brick_parents;
- init.num_parents = ARRAY_SIZE(tegra_clk_sor_brick_parents);
- init.ops = &tegra_clk_sor_brick_ops;
+ init.parent_names = tegra_clk_sor_pad_parents;
+ init.num_parents = ARRAY_SIZE(tegra_clk_sor_pad_parents);
+ init.ops = &tegra_clk_sor_pad_ops;
- brick->hw.init = &init;
+ pad->hw.init = &init;
- clk = devm_clk_register(sor->dev, &brick->hw);
+ clk = devm_clk_register(sor->dev, &pad->hw);
return clk;
}
@@ -998,8 +998,10 @@ static int tegra_sor_power_down(struct tegra_sor *sor)
/* switch to safe parent clock */
err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
- if (err < 0)
+ if (err < 0) {
dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
+ return err;
+ }
value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 |
@@ -2007,8 +2009,10 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
/* switch to safe parent clock */
err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
- if (err < 0)
+ if (err < 0) {
dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
+ return;
+ }
div = clk_get_rate(sor->clk) / 1000000 * 4;
@@ -2111,13 +2115,17 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
tegra_sor_writel(sor, value, SOR_XBAR_CTRL);
/* switch to parent clock */
- err = clk_set_parent(sor->clk_src, sor->clk_parent);
- if (err < 0)
- dev_err(sor->dev, "failed to set source clock: %d\n", err);
-
- err = tegra_sor_set_parent_clock(sor, sor->clk_src);
- if (err < 0)
+ err = clk_set_parent(sor->clk, sor->clk_parent);
+ if (err < 0) {
dev_err(sor->dev, "failed to set parent clock: %d\n", err);
+ return;
+ }
+
+ err = tegra_sor_set_parent_clock(sor, sor->clk_pad);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to set pad clock: %d\n", err);
+ return;
+ }
value = SOR_INPUT_CONTROL_HDMI_SRC_SELECT(dc->pipe);
@@ -2628,11 +2636,24 @@ static int tegra_sor_probe(struct platform_device *pdev)
}
if (sor->soc->supports_hdmi || sor->soc->supports_dp) {
- sor->clk_src = devm_clk_get(&pdev->dev, "source");
- if (IS_ERR(sor->clk_src)) {
- err = PTR_ERR(sor->clk_src);
- dev_err(sor->dev, "failed to get source clock: %d\n",
- err);
+ struct device_node *np = pdev->dev.of_node;
+ const char *name;
+
+ /*
+ * For backwards compatibility with Tegra210 device trees,
+ * fall back to the old clock name "source" if the new "out"
+ * clock is not available.
+ */
+ if (of_property_match_string(np, "clock-names", "out") < 0)
+ name = "source";
+ else
+ name = "out";
+
+ sor->clk_out = devm_clk_get(&pdev->dev, name);
+ if (IS_ERR(sor->clk_out)) {
+ err = PTR_ERR(sor->clk_out);
+ dev_err(sor->dev, "failed to get %s clock: %d\n",
+ name, err);
goto remove;
}
}
@@ -2658,16 +2679,60 @@ static int tegra_sor_probe(struct platform_device *pdev)
goto remove;
}
+ /*
+ * Starting with Tegra186, the BPMP provides an implementation for
+ * the pad output clock, so we have to look it up from device tree.
+ */
+ sor->clk_pad = devm_clk_get(&pdev->dev, "pad");
+ if (IS_ERR(sor->clk_pad)) {
+ if (sor->clk_pad != ERR_PTR(-ENOENT)) {
+ err = PTR_ERR(sor->clk_pad);
+ goto remove;
+ }
+
+ /*
+ * If the pad output clock is not available, then we assume
+ * we're on Tegra210 or earlier and have to provide our own
+ * implementation.
+ */
+ sor->clk_pad = NULL;
+ }
+
+ /*
+ * The bootloader may have set up the SOR such that its module clock
+ * is sourced by one of the display PLLs. However, that doesn't work
+ * without properly having set up other bits of the SOR.
+ */
+ err = clk_set_parent(sor->clk_out, sor->clk_safe);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to use safe clock: %d\n", err);
+ goto remove;
+ }
+
platform_set_drvdata(pdev, sor);
pm_runtime_enable(&pdev->dev);
- pm_runtime_get_sync(&pdev->dev);
- sor->clk_brick = tegra_clk_sor_brick_register(sor, "sor1_brick");
- pm_runtime_put(&pdev->dev);
+ /*
+ * On Tegra210 and earlier, provide our own implementation for the
+ * pad output clock.
+ */
+ if (!sor->clk_pad) {
+ err = pm_runtime_get_sync(&pdev->dev);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to get runtime PM: %d\n",
+ err);
+ goto remove;
+ }
+
+ sor->clk_pad = tegra_clk_sor_pad_register(sor,
+ "sor1_pad_clkout");
+ pm_runtime_put(&pdev->dev);
+ }
- if (IS_ERR(sor->clk_brick)) {
- err = PTR_ERR(sor->clk_brick);
- dev_err(&pdev->dev, "failed to register SOR clock: %d\n", err);
+ if (IS_ERR(sor->clk_pad)) {
+ err = PTR_ERR(sor->clk_pad);
+ dev_err(&pdev->dev, "failed to register SOR pad clock: %d\n",
+ err);
goto remove;
}
diff --git a/drivers/gpu/drm/tilcdc/Kconfig b/drivers/gpu/drm/tilcdc/Kconfig
index 28fed7e206d0..81ac82455ce4 100644
--- a/drivers/gpu/drm/tilcdc/Kconfig
+++ b/drivers/gpu/drm/tilcdc/Kconfig
@@ -12,14 +12,3 @@ config DRM_TILCDC
controller, for example AM33xx in beagle-bone, DA8xx, or
OMAP-L1xx. This driver replaces the FB_DA8XX fbdev driver.
-config DRM_TILCDC_SLAVE_COMPAT
- bool "Support device tree blobs using TI LCDC Slave binding"
- depends on DRM_TILCDC
- default y
- select OF_RESOLVE
- select OF_OVERLAY
- help
- Choose this option if you need a kernel that is compatible
- with device tree blobs using the obsolete "ti,tilcdc,slave"
- binding. If you find "ti,tilcdc,slave"-string from your DTB,
- you probably need this. Otherwise you do not.
diff --git a/drivers/gpu/drm/tilcdc/Makefile b/drivers/gpu/drm/tilcdc/Makefile
index b9e1108e5b4e..87f9480e43b0 100644
--- a/drivers/gpu/drm/tilcdc/Makefile
+++ b/drivers/gpu/drm/tilcdc/Makefile
@@ -3,9 +3,6 @@ ifeq (, $(findstring -W,$(EXTRA_CFLAGS)))
ccflags-y += -Werror
endif
-obj-$(CONFIG_DRM_TILCDC_SLAVE_COMPAT) += tilcdc_slave_compat.o \
- tilcdc_slave_compat.dtb.o
-
tilcdc-y := \
tilcdc_plane.o \
tilcdc_crtc.o \
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.c b/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.c
deleted file mode 100644
index d2b9e5f04724..000000000000
--- a/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.c
+++ /dev/null
@@ -1,264 +0,0 @@
-/*
- * Copyright (C) 2015 Texas Instruments
- * Author: Jyri Sarha <jsarha@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- */
-
-/*
- * To support the old "ti,tilcdc,slave" binding the binding has to be
- * transformed to the new external encoder binding.
- */
-
-#include <linux/kernel.h>
-#include <linux/of.h>
-#include <linux/of_graph.h>
-#include <linux/of_fdt.h>
-#include <linux/slab.h>
-#include <linux/list.h>
-
-#include "tilcdc_slave_compat.h"
-
-struct kfree_table {
- int total;
- int num;
- void **table;
-};
-
-static int __init kfree_table_init(struct kfree_table *kft)
-{
- kft->total = 32;
- kft->num = 0;
- kft->table = kmalloc(kft->total * sizeof(*kft->table),
- GFP_KERNEL);
- if (!kft->table)
- return -ENOMEM;
-
- return 0;
-}
-
-static int __init kfree_table_add(struct kfree_table *kft, void *p)
-{
- if (kft->num == kft->total) {
- void **old = kft->table;
-
- kft->total *= 2;
- kft->table = krealloc(old, kft->total * sizeof(*kft->table),
- GFP_KERNEL);
- if (!kft->table) {
- kft->table = old;
- kfree(p);
- return -ENOMEM;
- }
- }
- kft->table[kft->num++] = p;
- return 0;
-}
-
-static void __init kfree_table_free(struct kfree_table *kft)
-{
- int i;
-
- for (i = 0; i < kft->num; i++)
- kfree(kft->table[i]);
-
- kfree(kft->table);
-}
-
-static
-struct property * __init tilcdc_prop_dup(const struct property *prop,
- struct kfree_table *kft)
-{
- struct property *nprop;
-
- nprop = kzalloc(sizeof(*nprop), GFP_KERNEL);
- if (!nprop || kfree_table_add(kft, nprop))
- return NULL;
-
- nprop->name = kstrdup(prop->name, GFP_KERNEL);
- if (!nprop->name || kfree_table_add(kft, nprop->name))
- return NULL;
-
- nprop->value = kmemdup(prop->value, prop->length, GFP_KERNEL);
- if (!nprop->value || kfree_table_add(kft, nprop->value))
- return NULL;
-
- nprop->length = prop->length;
-
- return nprop;
-}
-
-static void __init tilcdc_copy_props(struct device_node *from,
- struct device_node *to,
- const char * const props[],
- struct kfree_table *kft)
-{
- struct property *prop;
- int i;
-
- for (i = 0; props[i]; i++) {
- prop = of_find_property(from, props[i], NULL);
- if (!prop)
- continue;
-
- prop = tilcdc_prop_dup(prop, kft);
- if (!prop)
- continue;
-
- prop->next = to->properties;
- to->properties = prop;
- }
-}
-
-static int __init tilcdc_prop_str_update(struct property *prop,
- const char *str,
- struct kfree_table *kft)
-{
- prop->value = kstrdup(str, GFP_KERNEL);
- if (kfree_table_add(kft, prop->value) || !prop->value)
- return -ENOMEM;
- prop->length = strlen(str)+1;
- return 0;
-}
-
-static void __init tilcdc_node_disable(struct device_node *node)
-{
- struct property *prop;
-
- prop = kzalloc(sizeof(*prop), GFP_KERNEL);
- if (!prop)
- return;
-
- prop->name = "status";
- prop->value = "disabled";
- prop->length = strlen((char *)prop->value)+1;
-
- of_update_property(node, prop);
-}
-
-static struct device_node * __init tilcdc_get_overlay(struct kfree_table *kft)
-{
- const int size = __dtb_tilcdc_slave_compat_end -
- __dtb_tilcdc_slave_compat_begin;
- static void *overlay_data;
- struct device_node *overlay;
-
- if (!size) {
- pr_warn("%s: No overlay data\n", __func__);
- return NULL;
- }
-
- overlay_data = kmemdup(__dtb_tilcdc_slave_compat_begin,
- size, GFP_KERNEL);
- if (!overlay_data || kfree_table_add(kft, overlay_data))
- return NULL;
-
- of_fdt_unflatten_tree(overlay_data, NULL, &overlay);
- if (!overlay) {
- pr_warn("%s: Unfattening overlay tree failed\n", __func__);
- return NULL;
- }
-
- return overlay;
-}
-
-static const struct of_device_id tilcdc_slave_of_match[] __initconst = {
- { .compatible = "ti,tilcdc,slave", },
- {},
-};
-
-static const struct of_device_id tilcdc_of_match[] __initconst = {
- { .compatible = "ti,am33xx-tilcdc", },
- {},
-};
-
-static const struct of_device_id tilcdc_tda998x_of_match[] __initconst = {
- { .compatible = "nxp,tda998x", },
- {},
-};
-
-static const char * const tilcdc_slave_props[] __initconst = {
- "pinctrl-names",
- "pinctrl-0",
- "pinctrl-1",
- NULL
-};
-
-static void __init tilcdc_convert_slave_node(void)
-{
- struct device_node *slave = NULL, *lcdc = NULL;
- struct device_node *i2c = NULL, *fragment = NULL;
- struct device_node *overlay, *encoder;
- struct property *prop;
- /* For all memory needed for the overlay tree. This memory can
- be freed after the overlay has been applied. */
- struct kfree_table kft;
- int ovcs_id, ret;
-
- if (kfree_table_init(&kft))
- return;
-
- lcdc = of_find_matching_node(NULL, tilcdc_of_match);
- slave = of_find_matching_node(NULL, tilcdc_slave_of_match);
-
- if (!slave || !of_device_is_available(lcdc))
- goto out;
-
- i2c = of_parse_phandle(slave, "i2c", 0);
- if (!i2c) {
- pr_err("%s: Can't find i2c node trough phandle\n", __func__);
- goto out;
- }
-
- overlay = tilcdc_get_overlay(&kft);
- if (!overlay)
- goto out;
-
- encoder = of_find_matching_node(overlay, tilcdc_tda998x_of_match);
- if (!encoder) {
- pr_err("%s: Failed to find tda998x node\n", __func__);
- goto out;
- }
-
- tilcdc_copy_props(slave, encoder, tilcdc_slave_props, &kft);
-
- for_each_child_of_node(overlay, fragment) {
- prop = of_find_property(fragment, "target-path", NULL);
- if (!prop)
- continue;
- if (!strncmp("i2c", (char *)prop->value, prop->length))
- if (tilcdc_prop_str_update(prop, i2c->full_name, &kft))
- goto out;
- if (!strncmp("lcdc", (char *)prop->value, prop->length))
- if (tilcdc_prop_str_update(prop, lcdc->full_name, &kft))
- goto out;
- }
-
- tilcdc_node_disable(slave);
-
- ovcs_id = 0;
- ret = of_overlay_apply(overlay, &ovcs_id);
- if (ret)
- pr_err("%s: Applying overlay changeset failed: %d\n",
- __func__, ret);
- else
- pr_info("%s: ti,tilcdc,slave node successfully converted\n",
- __func__);
-out:
- kfree_table_free(&kft);
- of_node_put(i2c);
- of_node_put(slave);
- of_node_put(lcdc);
- of_node_put(fragment);
-}
-
-static int __init tilcdc_slave_compat_init(void)
-{
- tilcdc_convert_slave_node();
- return 0;
-}
-
-subsys_initcall(tilcdc_slave_compat_init);
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.dts b/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.dts
deleted file mode 100644
index 693f8b0aea2d..000000000000
--- a/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.dts
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * DTS overlay for converting ti,tilcdc,slave binding to new binding.
- *
- * Copyright (C) 2015 Texas Instruments Inc.
- * Author: Jyri Sarha <jsarha@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- */
-
-/*
- * target-path property values are simple tags that are replaced with
- * correct values in tildcdc_slave_compat.c. Some properties are also
- * copied over from the ti,tilcdc,slave node.
- */
-
-/dts-v1/;
-/ {
- fragment@0 {
- target-path = "i2c";
- __overlay__ {
- #address-cells = <1>;
- #size-cells = <0>;
- tda19988 {
- compatible = "nxp,tda998x";
- reg = <0x70>;
- status = "okay";
-
- port {
- hdmi_0: endpoint@0 {
- remote-endpoint = <&lcd_0>;
- };
- };
- };
- };
- };
-
- fragment@1 {
- target-path = "lcdc";
- __overlay__ {
- port {
- lcd_0: endpoint@0 {
- remote-endpoint = <&hdmi_0>;
- };
- };
- };
- };
-
- __local_fixups__ {
- fragment@0 {
- __overlay__ {
- tda19988 {
- port {
- endpoint@0 {
- remote-endpoint = <0>;
- };
- };
- };
- };
- };
- fragment@1 {
- __overlay__ {
- port {
- endpoint@0 {
- remote-endpoint = <0>;
- };
- };
- };
- };
- };
-};
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.h b/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.h
deleted file mode 100644
index 403d35d87d0b..000000000000
--- a/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (C) 2015 Texas Instruments
- * Author: Jyri Sarha <jsarha@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-/* This header declares the symbols defined in tilcdc_slave_compat.dts */
-
-#ifndef __TILCDC_SLAVE_COMPAT_H__
-#define __TILCDC_SLAVE_COMPAT_H__
-
-extern uint8_t __dtb_tilcdc_slave_compat_begin[];
-extern uint8_t __dtb_tilcdc_slave_compat_end[];
-
-#endif /* __TILCDC_SLAVE_COMPAT_H__ */
diff --git a/drivers/gpu/ipu-v3/ipu-dc.c b/drivers/gpu/ipu-v3/ipu-dc.c
index 7a4b8362dda8..49bfe6e7d005 100644
--- a/drivers/gpu/ipu-v3/ipu-dc.c
+++ b/drivers/gpu/ipu-v3/ipu-dc.c
@@ -249,11 +249,8 @@ EXPORT_SYMBOL_GPL(ipu_dc_enable);
void ipu_dc_enable_channel(struct ipu_dc *dc)
{
- int di;
u32 reg;
- di = dc->di;
-
reg = readl(dc->base + DC_WR_CH_CONF);
reg |= DC_WR_CH_CONF_PROG_TYPE_NORMAL;
writel(reg, dc->base + DC_WR_CH_CONF);
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 5a2d71729b9a..2a8ac6829d42 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -1,6 +1,5 @@
menuconfig MTD
tristate "Memory Technology Device (MTD) support"
- depends on GENERIC_IO
help
Memory Technology Devices are flash, RAM and similar chips, often
used for solid state file systems on embedded devices. This option
diff --git a/drivers/mtd/chips/map_ram.c b/drivers/mtd/chips/map_ram.c
index afb43d5e1782..1cd0fff0e940 100644
--- a/drivers/mtd/chips/map_ram.c
+++ b/drivers/mtd/chips/map_ram.c
@@ -20,8 +20,9 @@ static int mapram_write (struct mtd_info *, loff_t, size_t, size_t *, const u_ch
static int mapram_erase (struct mtd_info *, struct erase_info *);
static void mapram_nop (struct mtd_info *);
static struct mtd_info *map_ram_probe(struct map_info *map);
-static unsigned long mapram_unmapped_area(struct mtd_info *, unsigned long,
- unsigned long, unsigned long);
+static int mapram_point (struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, void **virt, resource_size_t *phys);
+static int mapram_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
static struct mtd_chip_driver mapram_chipdrv = {
@@ -65,11 +66,12 @@ static struct mtd_info *map_ram_probe(struct map_info *map)
mtd->type = MTD_RAM;
mtd->size = map->size;
mtd->_erase = mapram_erase;
- mtd->_get_unmapped_area = mapram_unmapped_area;
mtd->_read = mapram_read;
mtd->_write = mapram_write;
mtd->_panic_write = mapram_write;
+ mtd->_point = mapram_point;
mtd->_sync = mapram_nop;
+ mtd->_unpoint = mapram_unpoint;
mtd->flags = MTD_CAP_RAM;
mtd->writesize = 1;
@@ -81,19 +83,23 @@ static struct mtd_info *map_ram_probe(struct map_info *map)
return mtd;
}
-
-/*
- * Allow NOMMU mmap() to directly map the device (if not NULL)
- * - return the address to which the offset maps
- * - return -ENOSYS to indicate refusal to do the mapping
- */
-static unsigned long mapram_unmapped_area(struct mtd_info *mtd,
- unsigned long len,
- unsigned long offset,
- unsigned long flags)
+static int mapram_point(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, void **virt, resource_size_t *phys)
{
struct map_info *map = mtd->priv;
- return (unsigned long) map->virt + offset;
+
+ if (!map->virt)
+ return -EINVAL;
+ *virt = map->virt + from;
+ if (phys)
+ *phys = map->phys + from;
+ *retlen = len;
+ return 0;
+}
+
+static int mapram_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+{
+ return 0;
}
static int mapram_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
diff --git a/drivers/mtd/chips/map_rom.c b/drivers/mtd/chips/map_rom.c
index e67f73ab44c9..20e3604b4d71 100644
--- a/drivers/mtd/chips/map_rom.c
+++ b/drivers/mtd/chips/map_rom.c
@@ -20,8 +20,10 @@ static int maprom_write (struct mtd_info *, loff_t, size_t, size_t *, const u_ch
static void maprom_nop (struct mtd_info *);
static struct mtd_info *map_rom_probe(struct map_info *map);
static int maprom_erase (struct mtd_info *mtd, struct erase_info *info);
-static unsigned long maprom_unmapped_area(struct mtd_info *, unsigned long,
- unsigned long, unsigned long);
+static int maprom_point (struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, void **virt, resource_size_t *phys);
+static int maprom_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
+
static struct mtd_chip_driver maprom_chipdrv = {
.probe = map_rom_probe,
@@ -51,7 +53,8 @@ static struct mtd_info *map_rom_probe(struct map_info *map)
mtd->name = map->name;
mtd->type = MTD_ROM;
mtd->size = map->size;
- mtd->_get_unmapped_area = maprom_unmapped_area;
+ mtd->_point = maprom_point;
+ mtd->_unpoint = maprom_unpoint;
mtd->_read = maprom_read;
mtd->_write = maprom_write;
mtd->_sync = maprom_nop;
@@ -66,18 +69,23 @@ static struct mtd_info *map_rom_probe(struct map_info *map)
}
-/*
- * Allow NOMMU mmap() to directly map the device (if not NULL)
- * - return the address to which the offset maps
- * - return -ENOSYS to indicate refusal to do the mapping
- */
-static unsigned long maprom_unmapped_area(struct mtd_info *mtd,
- unsigned long len,
- unsigned long offset,
- unsigned long flags)
+static int maprom_point(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, void **virt, resource_size_t *phys)
{
struct map_info *map = mtd->priv;
- return (unsigned long) map->virt + offset;
+
+ if (!map->virt)
+ return -EINVAL;
+ *virt = map->virt + from;
+ if (phys)
+ *phys = map->phys + from;
+ *retlen = len;
+ return 0;
+}
+
+static int maprom_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+{
+ return 0;
}
static int maprom_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index 84b16133554b..0806f72102c0 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -1814,8 +1814,13 @@ static void __init doc_dbg_register(struct mtd_info *floor)
struct dentry *root = floor->dbg.dfs_dir;
struct docg3 *docg3 = floor->priv;
- if (IS_ERR_OR_NULL(root))
+ if (IS_ERR_OR_NULL(root)) {
+ if (IS_ENABLED(CONFIG_DEBUG_FS) &&
+ !IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
+ dev_warn(floor->dev.parent,
+ "CONFIG_MTD_PARTITIONED_MASTER must be enabled to expose debugfs stuff\n");
return;
+ }
debugfs_create_file("docg3_flashcontrol", S_IRUSR, root, docg3,
&flashcontrol_fops);
diff --git a/drivers/mtd/devices/lart.c b/drivers/mtd/devices/lart.c
index 268aae45b514..555b94406e0b 100644
--- a/drivers/mtd/devices/lart.c
+++ b/drivers/mtd/devices/lart.c
@@ -583,7 +583,7 @@ static struct mtd_erase_region_info erase_regions[] = {
}
};
-static struct mtd_partition lart_partitions[] = {
+static const struct mtd_partition lart_partitions[] = {
/* blob */
{
.name = "blob",
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 00eea6fd379c..dbe6a1de2bb8 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -359,6 +359,7 @@ static const struct spi_device_id m25p_ids[] = {
{"m25p32-nonjedec"}, {"m25p64-nonjedec"}, {"m25p128-nonjedec"},
/* Everspin MRAMs (non-JEDEC) */
+ { "mr25h128" }, /* 128 Kib, 40 MHz */
{ "mr25h256" }, /* 256 Kib, 40 MHz */
{ "mr25h10" }, /* 1 Mib, 40 MHz */
{ "mr25h40" }, /* 4 Mib, 40 MHz */
diff --git a/drivers/mtd/devices/mtdram.c b/drivers/mtd/devices/mtdram.c
index cbd8547d7aad..0bf4aeaf0cb8 100644
--- a/drivers/mtd/devices/mtdram.c
+++ b/drivers/mtd/devices/mtdram.c
@@ -13,6 +13,7 @@
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/vmalloc.h>
+#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/mtdram.h>
@@ -69,6 +70,27 @@ static int ram_point(struct mtd_info *mtd, loff_t from, size_t len,
{
*virt = mtd->priv + from;
*retlen = len;
+
+ if (phys) {
+ /* limit retlen to the number of contiguous physical pages */
+ unsigned long page_ofs = offset_in_page(*virt);
+ void *addr = *virt - page_ofs;
+ unsigned long pfn1, pfn0 = vmalloc_to_pfn(addr);
+
+ *phys = __pfn_to_phys(pfn0) + page_ofs;
+ len += page_ofs;
+ while (len > PAGE_SIZE) {
+ len -= PAGE_SIZE;
+ addr += PAGE_SIZE;
+ pfn0++;
+ pfn1 = vmalloc_to_pfn(addr);
+ if (pfn1 != pfn0) {
+ *retlen = addr - *virt;
+ break;
+ }
+ }
+ }
+
return 0;
}
@@ -77,19 +99,6 @@ static int ram_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
return 0;
}
-/*
- * Allow NOMMU mmap() to directly map the device (if not NULL)
- * - return the address to which the offset maps
- * - return -ENOSYS to indicate refusal to do the mapping
- */
-static unsigned long ram_get_unmapped_area(struct mtd_info *mtd,
- unsigned long len,
- unsigned long offset,
- unsigned long flags)
-{
- return (unsigned long) mtd->priv + offset;
-}
-
static int ram_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
@@ -134,7 +143,6 @@ int mtdram_init_device(struct mtd_info *mtd, void *mapped_address,
mtd->_erase = ram_erase;
mtd->_point = ram_point;
mtd->_unpoint = ram_unpoint;
- mtd->_get_unmapped_area = ram_get_unmapped_area;
mtd->_read = ram_read;
mtd->_write = ram_write;
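Because ram_point() may now stop at a physical page discontinuity whenever the caller asks for a physical address, retlen can come back smaller than len. A caller-side sketch (illustrative only, not part of the patch) that walks the device in physically contiguous chunks:

	loff_t pos = 0;

	while (pos < mtd->size) {
		size_t retlen;
		void *virt;
		resource_size_t phys;

		if (mtd_point(mtd, pos, mtd->size - pos, &retlen, &virt, &phys))
			break;
		/* [phys, phys + retlen) is physically contiguous here */
		mtd_unpoint(mtd, pos, retlen);
		if (!retlen)
			break;
		pos += retlen;
	}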
diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c
index 8087c36dc693..0ec85f316d24 100644
--- a/drivers/mtd/devices/slram.c
+++ b/drivers/mtd/devices/slram.c
@@ -163,8 +163,9 @@ static int register_device(char *name, unsigned long start, unsigned long length
}
if (!(((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start =
- ioremap(start, length))) {
- E("slram: ioremap failed\n");
+ memremap(start, length,
+ MEMREMAP_WB | MEMREMAP_WT | MEMREMAP_WC))) {
+ E("slram: memremap failed\n");
return -EIO;
}
((slram_priv_t *)(*curmtd)->mtdinfo->priv)->end =
@@ -186,7 +187,7 @@ static int register_device(char *name, unsigned long start, unsigned long length
if (mtd_device_register((*curmtd)->mtdinfo, NULL, 0)) {
E("slram: Failed to register new device\n");
- iounmap(((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start);
+ memunmap(((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start);
kfree((*curmtd)->mtdinfo->priv);
kfree((*curmtd)->mtdinfo);
return(-EAGAIN);
@@ -206,7 +207,7 @@ static void unregister_devices(void)
while (slram_mtdlist) {
nextitem = slram_mtdlist->next;
mtd_device_unregister(slram_mtdlist->mtdinfo);
- iounmap(((slram_priv_t *)slram_mtdlist->mtdinfo->priv)->start);
+ memunmap(((slram_priv_t *)slram_mtdlist->mtdinfo->priv)->start);
kfree(slram_mtdlist->mtdinfo->priv);
kfree(slram_mtdlist->mtdinfo);
kfree(slram_mtdlist);
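slram's switch from ioremap() to memremap() deserves a small sketch: memremap() tries the requested caching modes in preference order (WB, then WT, then WC here) and hands back a normal kernel pointer, NULL on failure, so plain memcpy()/memset() access is legal in a way it never was for an __iomem mapping. Illustrative only, not part of the patch:

	static void *example_map(phys_addr_t start, size_t length)
	{
		void *base = memremap(start, length,
				      MEMREMAP_WB | MEMREMAP_WT | MEMREMAP_WC);

		if (!base)
			return NULL;	/* note: NULL, not an ERR_PTR() */
		return base;		/* release later with memunmap(base) */
	}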
diff --git a/drivers/mtd/maps/cfi_flagadm.c b/drivers/mtd/maps/cfi_flagadm.c
index d504b3d1791d..70f488628464 100644
--- a/drivers/mtd/maps/cfi_flagadm.c
+++ b/drivers/mtd/maps/cfi_flagadm.c
@@ -61,7 +61,7 @@ static struct map_info flagadm_map = {
.bankwidth = 2,
};
-static struct mtd_partition flagadm_parts[] = {
+static const struct mtd_partition flagadm_parts[] = {
{
.name = "Bootloader",
.offset = FLASH_PARTITION0_ADDR,
diff --git a/drivers/mtd/maps/impa7.c b/drivers/mtd/maps/impa7.c
index 15bbda03be65..a0b8fa7849a9 100644
--- a/drivers/mtd/maps/impa7.c
+++ b/drivers/mtd/maps/impa7.c
@@ -47,7 +47,7 @@ static struct map_info impa7_map[NUM_FLASHBANKS] = {
/*
* MTD partitioning stuff
*/
-static struct mtd_partition partitions[] =
+static const struct mtd_partition partitions[] =
{
{
.name = "FileSystem",
diff --git a/drivers/mtd/maps/netsc520.c b/drivers/mtd/maps/netsc520.c
index 81dc2598bc0a..3528497f96c7 100644
--- a/drivers/mtd/maps/netsc520.c
+++ b/drivers/mtd/maps/netsc520.c
@@ -52,7 +52,7 @@
/* partition_info gives details on the logical partitions that the split the
* single flash device into. If the size if zero we use up to the end of the
* device. */
-static struct mtd_partition partition_info[]={
+static const struct mtd_partition partition_info[] = {
{
.name = "NetSc520 boot kernel",
.offset = 0,
diff --git a/drivers/mtd/maps/nettel.c b/drivers/mtd/maps/nettel.c
index a577ef8553d0..729579fb654f 100644
--- a/drivers/mtd/maps/nettel.c
+++ b/drivers/mtd/maps/nettel.c
@@ -107,7 +107,7 @@ static struct map_info nettel_amd_map = {
.bankwidth = AMD_BUSWIDTH,
};
-static struct mtd_partition nettel_amd_partitions[] = {
+static const struct mtd_partition nettel_amd_partitions[] = {
{
.name = "SnapGear BIOS config",
.offset = 0x000e0000,
diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c
index 51572895c02c..6d9a4d6f9839 100644
--- a/drivers/mtd/maps/plat-ram.c
+++ b/drivers/mtd/maps/plat-ram.c
@@ -43,7 +43,6 @@ struct platram_info {
struct device *dev;
struct mtd_info *mtd;
struct map_info map;
- struct resource *area;
struct platdata_mtd_ram *pdata;
};
@@ -97,16 +96,6 @@ static int platram_remove(struct platform_device *pdev)
platram_setrw(info, PLATRAM_RO);
- /* release resources */
-
- if (info->area) {
- release_resource(info->area);
- kfree(info->area);
- }
-
- if (info->map.virt != NULL)
- iounmap(info->map.virt);
-
kfree(info);
return 0;
@@ -147,12 +136,11 @@ static int platram_probe(struct platform_device *pdev)
info->pdata = pdata;
/* get the resource for the memory mapping */
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- if (res == NULL) {
- dev_err(&pdev->dev, "no memory resource specified\n");
- err = -ENOENT;
+ info->map.virt = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(info->map.virt)) {
+ err = PTR_ERR(info->map.virt);
+ dev_err(&pdev->dev, "failed to ioremap() region\n");
goto exit_free;
}
@@ -167,26 +155,8 @@ static int platram_probe(struct platform_device *pdev)
(char *)pdata->mapname : (char *)pdev->name;
info->map.bankwidth = pdata->bankwidth;
- /* register our usage of the memory area */
-
- info->area = request_mem_region(res->start, info->map.size, pdev->name);
- if (info->area == NULL) {
- dev_err(&pdev->dev, "failed to request memory region\n");
- err = -EIO;
- goto exit_free;
- }
-
- /* remap the memory area */
-
- info->map.virt = ioremap(res->start, info->map.size);
dev_dbg(&pdev->dev, "virt %p, %lu bytes\n", info->map.virt, info->map.size);
- if (info->map.virt == NULL) {
- dev_err(&pdev->dev, "failed to ioremap() region\n");
- err = -EIO;
- goto exit_free;
- }
-
simple_map_init(&info->map);
dev_dbg(&pdev->dev, "initialised map, probing for mtd\n");
diff --git a/drivers/mtd/maps/sbc_gxx.c b/drivers/mtd/maps/sbc_gxx.c
index 556a2dfe94c5..4337d279ad83 100644
--- a/drivers/mtd/maps/sbc_gxx.c
+++ b/drivers/mtd/maps/sbc_gxx.c
@@ -87,7 +87,7 @@ static DEFINE_SPINLOCK(sbc_gxx_spin);
/* partition_info gives details on the logical partitions that the split the
* single flash device into. If the size if zero we use up to the end of the
* device. */
-static struct mtd_partition partition_info[]={
+static const struct mtd_partition partition_info[] = {
{ .name = "SBC-GXx flash boot partition",
.offset = 0,
.size = BOOT_PARTITION_SIZE_KiB*1024 },
diff --git a/drivers/mtd/maps/ts5500_flash.c b/drivers/mtd/maps/ts5500_flash.c
index 9969fedb1f13..8f177e0acb8c 100644
--- a/drivers/mtd/maps/ts5500_flash.c
+++ b/drivers/mtd/maps/ts5500_flash.c
@@ -43,7 +43,7 @@ static struct map_info ts5500_map = {
.phys = WINDOW_ADDR
};
-static struct mtd_partition ts5500_partitions[] = {
+static const struct mtd_partition ts5500_partitions[] = {
{
.name = "Drive A",
.offset = 0,
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c
index 00a8190797ec..aef030ca8601 100644
--- a/drivers/mtd/maps/uclinux.c
+++ b/drivers/mtd/maps/uclinux.c
@@ -49,7 +49,7 @@ static struct mtd_info *uclinux_ram_mtdinfo;
/****************************************************************************/
-static struct mtd_partition uclinux_romfs[] = {
+static const struct mtd_partition uclinux_romfs[] = {
{ .name = "ROMfs" }
};
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index d573606b91c2..60bf53df5454 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -644,32 +644,6 @@ static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
}
/*
- * try to support NOMMU mmaps on concatenated devices
- * - we don't support subdev spanning as we can't guarantee it'll work
- */
-static unsigned long concat_get_unmapped_area(struct mtd_info *mtd,
- unsigned long len,
- unsigned long offset,
- unsigned long flags)
-{
- struct mtd_concat *concat = CONCAT(mtd);
- int i;
-
- for (i = 0; i < concat->num_subdev; i++) {
- struct mtd_info *subdev = concat->subdev[i];
-
- if (offset >= subdev->size) {
- offset -= subdev->size;
- continue;
- }
-
- return mtd_get_unmapped_area(subdev, len, offset, flags);
- }
-
- return (unsigned long) -ENOSYS;
-}
-
-/*
* This function constructs a virtual MTD device by concatenating
* num_devs MTD devices. A pointer to the new device object is
* stored to *new_dev upon success. This function does _not_
@@ -790,7 +764,6 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
concat->mtd._unlock = concat_unlock;
concat->mtd._suspend = concat_suspend;
concat->mtd._resume = concat_resume;
- concat->mtd._get_unmapped_area = concat_get_unmapped_area;
/*
* Combine the erase block size info of the subdevices:
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index e7ea842ba3db..f80e911b8843 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -1022,11 +1022,18 @@ EXPORT_SYMBOL_GPL(mtd_unpoint);
unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
unsigned long offset, unsigned long flags)
{
- if (!mtd->_get_unmapped_area)
- return -EOPNOTSUPP;
- if (offset >= mtd->size || len > mtd->size - offset)
- return -EINVAL;
- return mtd->_get_unmapped_area(mtd, len, offset, flags);
+ size_t retlen;
+ void *virt;
+ int ret;
+
+ ret = mtd_point(mtd, offset, len, &retlen, &virt, NULL);
+ if (ret)
+ return ret;
+ if (retlen != len) {
+ mtd_unpoint(mtd, offset, retlen);
+ return -ENOSYS;
+ }
+ return (unsigned long)virt;
}
EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);
@@ -1093,6 +1100,39 @@ int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
}
EXPORT_SYMBOL_GPL(mtd_panic_write);
+static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs,
+ struct mtd_oob_ops *ops)
+{
+ /*
+ * Some users are setting ->datbuf or ->oobbuf to NULL, but are leaving
+ * ->len or ->ooblen uninitialized. Force ->len and ->ooblen to 0 in
+ * this case.
+ */
+ if (!ops->datbuf)
+ ops->len = 0;
+
+ if (!ops->oobbuf)
+ ops->ooblen = 0;
+
+	if (offs < 0 || offs + ops->len > mtd->size)
+ return -EINVAL;
+
+ if (ops->ooblen) {
+ u64 maxooblen;
+
+ if (ops->ooboffs >= mtd_oobavail(mtd, ops))
+ return -EINVAL;
+
+ maxooblen = ((mtd_div_by_ws(mtd->size, mtd) -
+ mtd_div_by_ws(offs, mtd)) *
+ mtd_oobavail(mtd, ops)) - ops->ooboffs;
+ if (ops->ooblen > maxooblen)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
int ret_code;
@@ -1100,6 +1140,10 @@ int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
if (!mtd->_read_oob)
return -EOPNOTSUPP;
+ ret_code = mtd_check_oob_ops(mtd, from, ops);
+ if (ret_code)
+ return ret_code;
+
ledtrig_mtd_activity();
/*
* In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
@@ -1119,11 +1163,18 @@ EXPORT_SYMBOL_GPL(mtd_read_oob);
int mtd_write_oob(struct mtd_info *mtd, loff_t to,
struct mtd_oob_ops *ops)
{
+ int ret;
+
ops->retlen = ops->oobretlen = 0;
if (!mtd->_write_oob)
return -EOPNOTSUPP;
if (!(mtd->flags & MTD_WRITEABLE))
return -EROFS;
+
+ ret = mtd_check_oob_ops(mtd, to, ops);
+ if (ret)
+ return ret;
+
ledtrig_mtd_activity();
return mtd->_write_oob(mtd, to, ops);
}
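The maxooblen bound in mtd_check_oob_ops() is easier to see with numbers; the figures below are for a hypothetical chip, not one named in the patch:

	Worked example: a 256MiB NAND with 2KiB pages and 40 bytes of
	available OOB per page has mtd_div_by_ws(mtd->size, mtd) = 131072
	pages. For an access starting at offs = 0 with ops->ooboffs = 8:

	    maxooblen = (131072 - 0) * 40 - 8 = 5242872 bytes

	Any ops->ooblen above that is rejected with -EINVAL before the
	driver's _read_oob/_write_oob hook is ever called.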
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index a308e707392d..be088bccd593 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -101,18 +101,6 @@ static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
return part->parent->_unpoint(part->parent, from + part->offset, len);
}
-static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
- unsigned long len,
- unsigned long offset,
- unsigned long flags)
-{
- struct mtd_part *part = mtd_to_part(mtd);
-
- offset += part->offset;
- return part->parent->_get_unmapped_area(part->parent, len, offset,
- flags);
-}
-
static int part_read_oob(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
@@ -458,8 +446,6 @@ static struct mtd_part *allocate_partition(struct mtd_info *parent,
slave->mtd._unpoint = part_unpoint;
}
- if (parent->_get_unmapped_area)
- slave->mtd._get_unmapped_area = part_get_unmapped_area;
if (parent->_read_oob)
slave->mtd._read_oob = part_read_oob;
if (parent->_write_oob)
diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c
index 7d9080e33865..f07492c6f4b2 100644
--- a/drivers/mtd/mtdswap.c
+++ b/drivers/mtd/mtdswap.c
@@ -50,7 +50,7 @@
* Number of free eraseblocks below which GC can also collect low frag
* blocks.
*/
-#define LOW_FRAG_GC_TRESHOLD 5
+#define LOW_FRAG_GC_THRESHOLD 5
/*
* Wear level cost amortization. We want to do wear leveling on the background
@@ -805,7 +805,7 @@ static int __mtdswap_choose_gc_tree(struct mtdswap_dev *d)
{
int idx, stopat;
- if (TREE_COUNT(d, CLEAN) < LOW_FRAG_GC_TRESHOLD)
+ if (TREE_COUNT(d, CLEAN) < LOW_FRAG_GC_THRESHOLD)
stopat = MTDSWAP_LOWFRAG;
else
stopat = MTDSWAP_HIFRAG;
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 3f2036f31da4..bb48aafed9a2 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -317,8 +317,11 @@ config MTD_NAND_PXA3xx
tristate "NAND support on PXA3xx and Armada 370/XP"
depends on PXA3xx || ARCH_MMP || PLAT_ORION || ARCH_MVEBU
help
+
This enables the driver for the NAND flash device found on
- PXA3xx processors (NFCv1) and also on Armada 370/XP (NFCv2).
+ PXA3xx processors (NFCv1) and also on 32-bit Armada
+ platforms (XP, 370, 375, 38x, 39x) and 64-bit Armada
+ platforms (7K, 8K) (NFCv2).
config MTD_NAND_SLC_LPC32XX
tristate "NXP LPC32xx SLC Controller"
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 6e2db700d923..118a1349aad3 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -59,7 +59,7 @@ obj-$(CONFIG_MTD_NAND_SUNXI) += sunxi_nand.o
obj-$(CONFIG_MTD_NAND_HISI504) += hisi504_nand.o
obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand/
obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o
-obj-$(CONFIG_MTD_NAND_MTK) += mtk_nand.o mtk_ecc.o
+obj-$(CONFIG_MTD_NAND_MTK) += mtk_ecc.o mtk_nand.o
nand-objs := nand_base.o nand_bbt.o nand_timings.o nand_ids.o
nand-objs += nand_amd.o
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c
index dcec9cf4983f..d60ada45c549 100644
--- a/drivers/mtd/nand/ams-delta.c
+++ b/drivers/mtd/nand/ams-delta.c
@@ -41,7 +41,7 @@ static struct mtd_info *ams_delta_mtd = NULL;
* Define partitions for flash devices
*/
-static struct mtd_partition partition_info[] = {
+static const struct mtd_partition partition_info[] = {
{ .name = "Kernel",
.offset = 0,
.size = 3 * SZ_1M + SZ_512K },
diff --git a/drivers/mtd/nand/atmel/nand-controller.c b/drivers/mtd/nand/atmel/nand-controller.c
index f25eca79f4e5..90a71a56bc23 100644
--- a/drivers/mtd/nand/atmel/nand-controller.c
+++ b/drivers/mtd/nand/atmel/nand-controller.c
@@ -718,8 +718,7 @@ static void atmel_nfc_set_op_addr(struct nand_chip *chip, int page, int column)
nc->op.addrs[nc->op.naddrs++] = page;
nc->op.addrs[nc->op.naddrs++] = page >> 8;
- if ((mtd->writesize > 512 && chip->chipsize > SZ_128M) ||
- (mtd->writesize <= 512 && chip->chipsize > SZ_32M))
+ if (chip->options & NAND_ROW_ADDR_3)
nc->op.addrs[nc->op.naddrs++] = page >> 16;
}
}
@@ -2530,6 +2529,9 @@ static __maybe_unused int atmel_nand_controller_resume(struct device *dev)
struct atmel_nand_controller *nc = dev_get_drvdata(dev);
struct atmel_nand *nand;
+ if (nc->pmecc)
+ atmel_pmecc_reset(nc->pmecc);
+
list_for_each_entry(nand, &nc->chips, node) {
int i;
@@ -2547,6 +2549,7 @@ static struct platform_driver atmel_nand_controller_driver = {
.driver = {
.name = "atmel-nand-controller",
.of_match_table = of_match_ptr(atmel_nand_controller_of_ids),
+ .pm = &atmel_nand_controller_pm_ops,
},
.probe = atmel_nand_controller_probe,
.remove = atmel_nand_controller_remove,
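Several drivers in this series (au1550nd, diskonchip, hisi504, mxc_nand below) drop the same per-driver chip-size heuristic in favour of the new NAND_ROW_ADDR_3 flag. A sketch of the centralised check the flag stands for, mirroring the condition removed above; illustrative only, not necessarily the verbatim nand_base.c code:

	/* large-page chips above 128MiB and small-page chips above 32MiB
	 * need a third row-address cycle, so the core can set the flag
	 * once instead of every driver re-deriving it from chipsize */
	if ((mtd->writesize > 512 && chip->chipsize > SZ_128M) ||
	    (mtd->writesize <= 512 && chip->chipsize > SZ_32M))
		chip->options |= NAND_ROW_ADDR_3;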
diff --git a/drivers/mtd/nand/atmel/pmecc.c b/drivers/mtd/nand/atmel/pmecc.c
index 8268636675ef..fcbe4fd6e684 100644
--- a/drivers/mtd/nand/atmel/pmecc.c
+++ b/drivers/mtd/nand/atmel/pmecc.c
@@ -765,6 +765,13 @@ void atmel_pmecc_get_generated_eccbytes(struct atmel_pmecc_user *user,
}
EXPORT_SYMBOL_GPL(atmel_pmecc_get_generated_eccbytes);
+void atmel_pmecc_reset(struct atmel_pmecc *pmecc)
+{
+ writel(PMECC_CTRL_RST, pmecc->regs.base + ATMEL_PMECC_CTRL);
+ writel(PMECC_CTRL_DISABLE, pmecc->regs.base + ATMEL_PMECC_CTRL);
+}
+EXPORT_SYMBOL_GPL(atmel_pmecc_reset);
+
int atmel_pmecc_enable(struct atmel_pmecc_user *user, int op)
{
struct atmel_pmecc *pmecc = user->pmecc;
@@ -797,10 +804,7 @@ EXPORT_SYMBOL_GPL(atmel_pmecc_enable);
void atmel_pmecc_disable(struct atmel_pmecc_user *user)
{
- struct atmel_pmecc *pmecc = user->pmecc;
-
- writel(PMECC_CTRL_RST, pmecc->regs.base + ATMEL_PMECC_CTRL);
- writel(PMECC_CTRL_DISABLE, pmecc->regs.base + ATMEL_PMECC_CTRL);
+ atmel_pmecc_reset(user->pmecc);
mutex_unlock(&user->pmecc->lock);
}
EXPORT_SYMBOL_GPL(atmel_pmecc_disable);
@@ -855,10 +859,7 @@ static struct atmel_pmecc *atmel_pmecc_create(struct platform_device *pdev,
/* Disable all interrupts before registering the PMECC handler. */
writel(0xffffffff, pmecc->regs.base + ATMEL_PMECC_IDR);
-
- /* Reset the ECC engine */
- writel(PMECC_CTRL_RST, pmecc->regs.base + ATMEL_PMECC_CTRL);
- writel(PMECC_CTRL_DISABLE, pmecc->regs.base + ATMEL_PMECC_CTRL);
+ atmel_pmecc_reset(pmecc);
return pmecc;
}
diff --git a/drivers/mtd/nand/atmel/pmecc.h b/drivers/mtd/nand/atmel/pmecc.h
index a8ddbfca2ea5..817e0dd9fd15 100644
--- a/drivers/mtd/nand/atmel/pmecc.h
+++ b/drivers/mtd/nand/atmel/pmecc.h
@@ -61,6 +61,7 @@ atmel_pmecc_create_user(struct atmel_pmecc *pmecc,
struct atmel_pmecc_user_req *req);
void atmel_pmecc_destroy_user(struct atmel_pmecc_user *user);
+void atmel_pmecc_reset(struct atmel_pmecc *pmecc);
int atmel_pmecc_enable(struct atmel_pmecc_user *user, int op);
void atmel_pmecc_disable(struct atmel_pmecc_user *user);
int atmel_pmecc_wait_rdy(struct atmel_pmecc_user *user);
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index 9d4a28fa6b73..8ab827edf94e 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -331,8 +331,7 @@ static void au1550_command(struct mtd_info *mtd, unsigned command, int column, i
ctx->write_byte(mtd, (u8)(page_addr >> 8));
- /* One more address cycle for devices > 32MiB */
- if (this->chipsize > (32 << 20))
+ if (this->options & NAND_ROW_ADDR_3)
ctx->write_byte(mtd,
((page_addr >> 16) & 0x0f));
}
diff --git a/drivers/mtd/nand/cmx270_nand.c b/drivers/mtd/nand/cmx270_nand.c
index 1fc435f994e1..b01c9804590e 100644
--- a/drivers/mtd/nand/cmx270_nand.c
+++ b/drivers/mtd/nand/cmx270_nand.c
@@ -42,7 +42,7 @@ static void __iomem *cmx270_nand_io;
/*
* Define static partitions for flash device
*/
-static struct mtd_partition partition_info[] = {
+static const struct mtd_partition partition_info[] = {
[0] = {
.name = "cmx270-0",
.offset = 0,
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index 3087b0ba7b7f..5124f8ae8c04 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -10,20 +10,18 @@
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
*/
-#include <linux/interrupt.h>
-#include <linux/delay.h>
+
+#include <linux/bitfield.h>
+#include <linux/completion.h>
#include <linux/dma-mapping.h>
-#include <linux/wait.h>
-#include <linux/mutex.h>
-#include <linux/mtd/mtd.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#include "denali.h"
@@ -31,9 +29,9 @@ MODULE_LICENSE("GPL");
#define DENALI_NAND_NAME "denali-nand"
-/* Host Data/Command Interface */
-#define DENALI_HOST_ADDR 0x00
-#define DENALI_HOST_DATA 0x10
+/* for Indexed Addressing */
+#define DENALI_INDEXED_CTRL 0x00
+#define DENALI_INDEXED_DATA 0x10
#define DENALI_MAP00 (0 << 26) /* direct access to buffer */
#define DENALI_MAP01 (1 << 26) /* read/write pages in PIO */
@@ -61,31 +59,55 @@ MODULE_LICENSE("GPL");
*/
#define DENALI_CLK_X_MULT 6
-/*
- * this macro allows us to convert from an MTD structure to our own
- * device context (denali) structure.
- */
static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd)
{
return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand);
}
-static void denali_host_write(struct denali_nand_info *denali,
- uint32_t addr, uint32_t data)
+/*
+ * Direct Addressing - the slave address forms the control information (command
+ * type, bank, block, and page address). The slave data is the actual data to
+ * be transferred. This mode requires 28 bits of address region allocated.
+ */
+static u32 denali_direct_read(struct denali_nand_info *denali, u32 addr)
+{
+ return ioread32(denali->host + addr);
+}
+
+static void denali_direct_write(struct denali_nand_info *denali, u32 addr,
+ u32 data)
{
- iowrite32(addr, denali->host + DENALI_HOST_ADDR);
- iowrite32(data, denali->host + DENALI_HOST_DATA);
+ iowrite32(data, denali->host + addr);
+}
+
+/*
+ * Indexed Addressing - address translation module intervenes in passing the
+ * control information. This mode reduces the required address range. The
+ * control information and transferred data are latched by the registers in
+ * the translation module.
+ */
+static u32 denali_indexed_read(struct denali_nand_info *denali, u32 addr)
+{
+ iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
+ return ioread32(denali->host + DENALI_INDEXED_DATA);
+}
+
+static void denali_indexed_write(struct denali_nand_info *denali, u32 addr,
+ u32 data)
+{
+ iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
+ iowrite32(data, denali->host + DENALI_INDEXED_DATA);
}
/*
* Use the configuration feature register to determine the maximum number of
* banks that the hardware supports.
*/
-static void detect_max_banks(struct denali_nand_info *denali)
+static void denali_detect_max_banks(struct denali_nand_info *denali)
{
uint32_t features = ioread32(denali->reg + FEATURES);
- denali->max_banks = 1 << (features & FEATURES__N_BANKS);
+ denali->max_banks = 1 << FIELD_GET(FEATURES__N_BANKS, features);
/* the encoding changed from rev 5.0 to 5.1 */
if (denali->revision < 0x0501)
@@ -189,7 +211,7 @@ static uint32_t denali_wait_for_irq(struct denali_nand_info *denali,
msecs_to_jiffies(1000));
if (!time_left) {
dev_err(denali->dev, "timeout while waiting for irq 0x%x\n",
- denali->irq_mask);
+ irq_mask);
return 0;
}
@@ -208,73 +230,47 @@ static uint32_t denali_check_irq(struct denali_nand_info *denali)
return irq_status;
}
-/*
- * This helper function setups the registers for ECC and whether or not
- * the spare area will be transferred.
- */
-static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en,
- bool transfer_spare)
-{
- int ecc_en_flag, transfer_spare_flag;
-
- /* set ECC, transfer spare bits if needed */
- ecc_en_flag = ecc_en ? ECC_ENABLE__FLAG : 0;
- transfer_spare_flag = transfer_spare ? TRANSFER_SPARE_REG__FLAG : 0;
-
- /* Enable spare area/ECC per user's request. */
- iowrite32(ecc_en_flag, denali->reg + ECC_ENABLE);
- iowrite32(transfer_spare_flag, denali->reg + TRANSFER_SPARE_REG);
-}
-
static void denali_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
+ u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
int i;
- iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali),
- denali->host + DENALI_HOST_ADDR);
-
for (i = 0; i < len; i++)
- buf[i] = ioread32(denali->host + DENALI_HOST_DATA);
+ buf[i] = denali->host_read(denali, addr);
}
static void denali_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
+ u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
int i;
- iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali),
- denali->host + DENALI_HOST_ADDR);
-
for (i = 0; i < len; i++)
- iowrite32(buf[i], denali->host + DENALI_HOST_DATA);
+ denali->host_write(denali, addr, buf[i]);
}
static void denali_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
+ u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
uint16_t *buf16 = (uint16_t *)buf;
int i;
- iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali),
- denali->host + DENALI_HOST_ADDR);
-
for (i = 0; i < len / 2; i++)
- buf16[i] = ioread32(denali->host + DENALI_HOST_DATA);
+ buf16[i] = denali->host_read(denali, addr);
}
static void denali_write_buf16(struct mtd_info *mtd, const uint8_t *buf,
int len)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
+ u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
const uint16_t *buf16 = (const uint16_t *)buf;
int i;
- iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali),
- denali->host + DENALI_HOST_ADDR);
-
for (i = 0; i < len / 2; i++)
- iowrite32(buf16[i], denali->host + DENALI_HOST_DATA);
+ denali->host_write(denali, addr, buf16[i]);
}
static uint8_t denali_read_byte(struct mtd_info *mtd)
@@ -319,7 +315,7 @@ static void denali_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
if (ctrl & NAND_CTRL_CHANGE)
denali_reset_irq(denali);
- denali_host_write(denali, DENALI_BANK(denali) | type, dat);
+ denali->host_write(denali, DENALI_BANK(denali) | type, dat);
}
static int denali_dev_ready(struct mtd_info *mtd)
@@ -389,7 +385,7 @@ static int denali_hw_ecc_fixup(struct mtd_info *mtd,
return 0;
}
- max_bitflips = ecc_cor & ECC_COR_INFO__MAX_ERRORS;
+ max_bitflips = FIELD_GET(ECC_COR_INFO__MAX_ERRORS, ecc_cor);
/*
* The register holds the maximum of per-sector corrected bitflips.
@@ -402,13 +398,6 @@ static int denali_hw_ecc_fixup(struct mtd_info *mtd,
return max_bitflips;
}
-#define ECC_SECTOR(x) (((x) & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12)
-#define ECC_BYTE(x) (((x) & ECC_ERROR_ADDRESS__OFFSET))
-#define ECC_CORRECTION_VALUE(x) ((x) & ERR_CORRECTION_INFO__BYTEMASK)
-#define ECC_ERROR_UNCORRECTABLE(x) ((x) & ERR_CORRECTION_INFO__ERROR_TYPE)
-#define ECC_ERR_DEVICE(x) (((x) & ERR_CORRECTION_INFO__DEVICE_NR) >> 8)
-#define ECC_LAST_ERR(x) ((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO)
-
static int denali_sw_ecc_fixup(struct mtd_info *mtd,
struct denali_nand_info *denali,
unsigned long *uncor_ecc_flags, uint8_t *buf)
@@ -426,18 +415,20 @@ static int denali_sw_ecc_fixup(struct mtd_info *mtd,
do {
err_addr = ioread32(denali->reg + ECC_ERROR_ADDRESS);
- err_sector = ECC_SECTOR(err_addr);
- err_byte = ECC_BYTE(err_addr);
+ err_sector = FIELD_GET(ECC_ERROR_ADDRESS__SECTOR, err_addr);
+ err_byte = FIELD_GET(ECC_ERROR_ADDRESS__OFFSET, err_addr);
err_cor_info = ioread32(denali->reg + ERR_CORRECTION_INFO);
- err_cor_value = ECC_CORRECTION_VALUE(err_cor_info);
- err_device = ECC_ERR_DEVICE(err_cor_info);
+ err_cor_value = FIELD_GET(ERR_CORRECTION_INFO__BYTE,
+ err_cor_info);
+ err_device = FIELD_GET(ERR_CORRECTION_INFO__DEVICE,
+ err_cor_info);
/* reset the bitflip counter when crossing ECC sector */
if (err_sector != prev_sector)
bitflips = 0;
- if (ECC_ERROR_UNCORRECTABLE(err_cor_info)) {
+ if (err_cor_info & ERR_CORRECTION_INFO__UNCOR) {
/*
* Check later if this is a real ECC error, or
* an erased sector.
@@ -467,12 +458,11 @@ static int denali_sw_ecc_fixup(struct mtd_info *mtd,
}
prev_sector = err_sector;
- } while (!ECC_LAST_ERR(err_cor_info));
+ } while (!(err_cor_info & ERR_CORRECTION_INFO__LAST_ERR));
/*
- * Once handle all ecc errors, controller will trigger a
- * ECC_TRANSACTION_DONE interrupt, so here just wait for
- * a while for this interrupt
+	 * Once all ECC errors are handled, the controller will trigger an
+	 * ECC_TRANSACTION_DONE interrupt.
*/
irq_status = denali_wait_for_irq(denali, INTR__ECC_TRANSACTION_DONE);
if (!(irq_status & INTR__ECC_TRANSACTION_DONE))
@@ -481,13 +471,6 @@ static int denali_sw_ecc_fixup(struct mtd_info *mtd,
return max_bitflips;
}
-/* programs the controller to either enable/disable DMA transfers */
-static void denali_enable_dma(struct denali_nand_info *denali, bool en)
-{
- iowrite32(en ? DMA_ENABLE__FLAG : 0, denali->reg + DMA_ENABLE);
- ioread32(denali->reg + DMA_ENABLE);
-}
-
static void denali_setup_dma64(struct denali_nand_info *denali,
dma_addr_t dma_addr, int page, int write)
{
@@ -502,14 +485,14 @@ static void denali_setup_dma64(struct denali_nand_info *denali,
* 1. setup transfer type, interrupt when complete,
* burst len = 64 bytes, the number of pages
*/
- denali_host_write(denali, mode,
- 0x01002000 | (64 << 16) | (write << 8) | page_count);
+ denali->host_write(denali, mode,
+ 0x01002000 | (64 << 16) | (write << 8) | page_count);
/* 2. set memory low address */
- denali_host_write(denali, mode, dma_addr);
+ denali->host_write(denali, mode, lower_32_bits(dma_addr));
/* 3. set memory high address */
- denali_host_write(denali, mode, (uint64_t)dma_addr >> 32);
+ denali->host_write(denali, mode, upper_32_bits(dma_addr));
}
static void denali_setup_dma32(struct denali_nand_info *denali,
@@ -523,32 +506,23 @@ static void denali_setup_dma32(struct denali_nand_info *denali,
/* DMA is a four step process */
/* 1. setup transfer type and # of pages */
- denali_host_write(denali, mode | page,
- 0x2000 | (write << 8) | page_count);
+ denali->host_write(denali, mode | page,
+ 0x2000 | (write << 8) | page_count);
/* 2. set memory high address bits 23:8 */
- denali_host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200);
+ denali->host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200);
/* 3. set memory low address bits 23:8 */
- denali_host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300);
+ denali->host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300);
/* 4. interrupt when complete, burst len = 64 bytes */
- denali_host_write(denali, mode | 0x14000, 0x2400);
-}
-
-static void denali_setup_dma(struct denali_nand_info *denali,
- dma_addr_t dma_addr, int page, int write)
-{
- if (denali->caps & DENALI_CAP_DMA_64BIT)
- denali_setup_dma64(denali, dma_addr, page, write);
- else
- denali_setup_dma32(denali, dma_addr, page, write);
+ denali->host_write(denali, mode | 0x14000, 0x2400);
}
static int denali_pio_read(struct denali_nand_info *denali, void *buf,
size_t size, int page, int raw)
{
- uint32_t addr = DENALI_BANK(denali) | page;
+ u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
uint32_t *buf32 = (uint32_t *)buf;
uint32_t irq_status, ecc_err_mask;
int i;
@@ -560,9 +534,8 @@ static int denali_pio_read(struct denali_nand_info *denali, void *buf,
denali_reset_irq(denali);
- iowrite32(DENALI_MAP01 | addr, denali->host + DENALI_HOST_ADDR);
for (i = 0; i < size / 4; i++)
- *buf32++ = ioread32(denali->host + DENALI_HOST_DATA);
+ *buf32++ = denali->host_read(denali, addr);
irq_status = denali_wait_for_irq(denali, INTR__PAGE_XFER_INC);
if (!(irq_status & INTR__PAGE_XFER_INC))
@@ -577,16 +550,15 @@ static int denali_pio_read(struct denali_nand_info *denali, void *buf,
static int denali_pio_write(struct denali_nand_info *denali,
const void *buf, size_t size, int page, int raw)
{
- uint32_t addr = DENALI_BANK(denali) | page;
+ u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
const uint32_t *buf32 = (uint32_t *)buf;
uint32_t irq_status;
int i;
denali_reset_irq(denali);
- iowrite32(DENALI_MAP01 | addr, denali->host + DENALI_HOST_ADDR);
for (i = 0; i < size / 4; i++)
- iowrite32(*buf32++, denali->host + DENALI_HOST_DATA);
+ denali->host_write(denali, addr, *buf32++);
irq_status = denali_wait_for_irq(denali,
INTR__PROGRAM_COMP | INTR__PROGRAM_FAIL);
@@ -635,19 +607,19 @@ static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
ecc_err_mask = INTR__ECC_ERR;
}
- denali_enable_dma(denali, true);
+ iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE);
denali_reset_irq(denali);
- denali_setup_dma(denali, dma_addr, page, write);
+ denali->setup_dma(denali, dma_addr, page, write);
- /* wait for operation to complete */
irq_status = denali_wait_for_irq(denali, irq_mask);
if (!(irq_status & INTR__DMA_CMD_COMP))
ret = -EIO;
else if (irq_status & ecc_err_mask)
ret = -EBADMSG;
- denali_enable_dma(denali, false);
+ iowrite32(0, denali->reg + DMA_ENABLE);
+
dma_unmap_single(denali->dev, dma_addr, size, dir);
if (irq_status & INTR__ERASED_PAGE)
@@ -659,7 +631,9 @@ static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
static int denali_data_xfer(struct denali_nand_info *denali, void *buf,
size_t size, int page, int raw, int write)
{
- setup_ecc_for_xfer(denali, !raw, raw);
+ iowrite32(raw ? 0 : ECC_ENABLE__FLAG, denali->reg + ECC_ENABLE);
+ iowrite32(raw ? TRANSFER_SPARE_REG__FLAG : 0,
+ denali->reg + TRANSFER_SPARE_REG);
if (denali->dma_avail)
return denali_dma_xfer(denali, buf, size, page, raw, write);
@@ -970,8 +944,8 @@ static int denali_erase(struct mtd_info *mtd, int page)
denali_reset_irq(denali);
- denali_host_write(denali, DENALI_MAP10 | DENALI_BANK(denali) | page,
- DENALI_ERASE);
+ denali->host_write(denali, DENALI_MAP10 | DENALI_BANK(denali) | page,
+ DENALI_ERASE);
/* wait for erase to complete or failure to occur */
irq_status = denali_wait_for_irq(denali,
@@ -1009,7 +983,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
tmp = ioread32(denali->reg + ACC_CLKS);
tmp &= ~ACC_CLKS__VALUE;
- tmp |= acc_clks;
+ tmp |= FIELD_PREP(ACC_CLKS__VALUE, acc_clks);
iowrite32(tmp, denali->reg + ACC_CLKS);
/* tRWH -> RE_2_WE */
@@ -1018,7 +992,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
tmp = ioread32(denali->reg + RE_2_WE);
tmp &= ~RE_2_WE__VALUE;
- tmp |= re_2_we;
+ tmp |= FIELD_PREP(RE_2_WE__VALUE, re_2_we);
iowrite32(tmp, denali->reg + RE_2_WE);
/* tRHZ -> RE_2_RE */
@@ -1027,16 +1001,22 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
tmp = ioread32(denali->reg + RE_2_RE);
tmp &= ~RE_2_RE__VALUE;
- tmp |= re_2_re;
+ tmp |= FIELD_PREP(RE_2_RE__VALUE, re_2_re);
iowrite32(tmp, denali->reg + RE_2_RE);
- /* tWHR -> WE_2_RE */
- we_2_re = DIV_ROUND_UP(timings->tWHR_min, t_clk);
+ /*
+ * tCCS, tWHR -> WE_2_RE
+ *
+ * With WE_2_RE properly set, the Denali controller automatically takes
+ * care of the delay; the driver need not set NAND_WAIT_TCCS.
+ */
+ we_2_re = DIV_ROUND_UP(max(timings->tCCS_min, timings->tWHR_min),
+ t_clk);
we_2_re = min_t(int, we_2_re, TWHR2_AND_WE_2_RE__WE_2_RE);
tmp = ioread32(denali->reg + TWHR2_AND_WE_2_RE);
tmp &= ~TWHR2_AND_WE_2_RE__WE_2_RE;
- tmp |= we_2_re;
+ tmp |= FIELD_PREP(TWHR2_AND_WE_2_RE__WE_2_RE, we_2_re);
iowrite32(tmp, denali->reg + TWHR2_AND_WE_2_RE);
/* tADL -> ADDR_2_DATA */
@@ -1050,8 +1030,8 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
addr_2_data = min_t(int, addr_2_data, addr_2_data_mask);
tmp = ioread32(denali->reg + TCWAW_AND_ADDR_2_DATA);
- tmp &= ~addr_2_data_mask;
- tmp |= addr_2_data;
+ tmp &= ~TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
+ tmp |= FIELD_PREP(TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA, addr_2_data);
iowrite32(tmp, denali->reg + TCWAW_AND_ADDR_2_DATA);
/* tREH, tWH -> RDWR_EN_HI_CNT */
@@ -1061,7 +1041,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
tmp = ioread32(denali->reg + RDWR_EN_HI_CNT);
tmp &= ~RDWR_EN_HI_CNT__VALUE;
- tmp |= rdwr_en_hi;
+ tmp |= FIELD_PREP(RDWR_EN_HI_CNT__VALUE, rdwr_en_hi);
iowrite32(tmp, denali->reg + RDWR_EN_HI_CNT);
/* tRP, tWP -> RDWR_EN_LO_CNT */
@@ -1075,7 +1055,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
tmp = ioread32(denali->reg + RDWR_EN_LO_CNT);
tmp &= ~RDWR_EN_LO_CNT__VALUE;
- tmp |= rdwr_en_lo;
+ tmp |= FIELD_PREP(RDWR_EN_LO_CNT__VALUE, rdwr_en_lo);
iowrite32(tmp, denali->reg + RDWR_EN_LO_CNT);
/* tCS, tCEA -> CS_SETUP_CNT */
@@ -1086,7 +1066,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
tmp = ioread32(denali->reg + CS_SETUP_CNT);
tmp &= ~CS_SETUP_CNT__VALUE;
- tmp |= cs_setup;
+ tmp |= FIELD_PREP(CS_SETUP_CNT__VALUE, cs_setup);
iowrite32(tmp, denali->reg + CS_SETUP_CNT);
return 0;
@@ -1131,15 +1111,11 @@ static void denali_hw_init(struct denali_nand_info *denali)
* if this value is 0, just let it be.
*/
denali->oob_skip_bytes = ioread32(denali->reg + SPARE_AREA_SKIP_BYTES);
- detect_max_banks(denali);
+ denali_detect_max_banks(denali);
iowrite32(0x0F, denali->reg + RB_PIN_ENABLED);
iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE);
iowrite32(0xffff, denali->reg + SPARE_AREA_MARKER);
-
- /* Should set value for these registers when init */
- iowrite32(0, denali->reg + TWO_ROW_ADDR_CYCLES);
- iowrite32(1, denali->reg + ECC_ENABLE);
}
int denali_calc_ecc_bytes(int step_size, int strength)
@@ -1211,22 +1187,6 @@ static const struct mtd_ooblayout_ops denali_ooblayout_ops = {
.free = denali_ooblayout_free,
};
-/* initialize driver data structures */
-static void denali_drv_init(struct denali_nand_info *denali)
-{
- /*
- * the completion object will be used to notify
- * the callee that the interrupt is done
- */
- init_completion(&denali->complete);
-
- /*
- * the spinlock will be used to synchronize the ISR with any
- * element that might be access shared data (interrupt status)
- */
- spin_lock_init(&denali->irq_lock);
-}
-
static int denali_multidev_fixup(struct denali_nand_info *denali)
{
struct nand_chip *chip = &denali->nand;
@@ -1282,15 +1242,17 @@ int denali_init(struct denali_nand_info *denali)
{
struct nand_chip *chip = &denali->nand;
struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 features = ioread32(denali->reg + FEATURES);
int ret;
mtd->dev.parent = denali->dev;
denali_hw_init(denali);
- denali_drv_init(denali);
+
+ init_completion(&denali->complete);
+ spin_lock_init(&denali->irq_lock);
denali_clear_irq_all(denali);
- /* Request IRQ after all the hardware initialization is finished */
ret = devm_request_irq(denali->dev, denali->irq, denali_isr,
IRQF_SHARED, DENALI_NAND_NAME, denali);
if (ret) {
@@ -1308,7 +1270,6 @@ int denali_init(struct denali_nand_info *denali)
if (!mtd->name)
mtd->name = "denali-nand";
- /* register the driver with the NAND core subsystem */
chip->select_chip = denali_select_chip;
chip->read_byte = denali_read_byte;
chip->write_byte = denali_write_byte;
@@ -1317,15 +1278,18 @@ int denali_init(struct denali_nand_info *denali)
chip->dev_ready = denali_dev_ready;
chip->waitfunc = denali_waitfunc;
+ if (features & FEATURES__INDEX_ADDR) {
+ denali->host_read = denali_indexed_read;
+ denali->host_write = denali_indexed_write;
+ } else {
+ denali->host_read = denali_direct_read;
+ denali->host_write = denali_direct_write;
+ }
+
/* clk rate info is needed for setup_data_interface */
if (denali->clk_x_rate)
chip->setup_data_interface = denali_setup_data_interface;
- /*
- * scan for NAND devices attached to the controller
- * this is the first stage in a two step process to register
- * with the nand subsystem
- */
ret = nand_scan_ident(mtd, denali->max_banks, NULL);
if (ret)
goto disable_irq;
@@ -1347,20 +1311,15 @@ int denali_init(struct denali_nand_info *denali)
if (denali->dma_avail) {
chip->options |= NAND_USE_BOUNCE_BUFFER;
chip->buf_align = 16;
+ if (denali->caps & DENALI_CAP_DMA_64BIT)
+ denali->setup_dma = denali_setup_dma64;
+ else
+ denali->setup_dma = denali_setup_dma32;
}
- /*
- * second stage of the NAND scan
- * this stage requires information regarding ECC and
- * bad block management.
- */
-
chip->bbt_options |= NAND_BBT_USE_FLASH;
chip->bbt_options |= NAND_BBT_NO_OOB;
-
chip->ecc.mode = NAND_ECC_HW_SYNDROME;
-
- /* no subpage writes on denali */
chip->options |= NAND_NO_SUBPAGE_WRITE;
ret = denali_ecc_setup(mtd, chip, denali);
@@ -1373,12 +1332,15 @@ int denali_init(struct denali_nand_info *denali)
"chosen ECC settings: step=%d, strength=%d, bytes=%d\n",
chip->ecc.size, chip->ecc.strength, chip->ecc.bytes);
- iowrite32(MAKE_ECC_CORRECTION(chip->ecc.strength, 1),
+ iowrite32(FIELD_PREP(ECC_CORRECTION__ERASE_THRESHOLD, 1) |
+ FIELD_PREP(ECC_CORRECTION__VALUE, chip->ecc.strength),
denali->reg + ECC_CORRECTION);
iowrite32(mtd->erasesize / mtd->writesize,
denali->reg + PAGES_PER_BLOCK);
iowrite32(chip->options & NAND_BUSWIDTH_16 ? 1 : 0,
denali->reg + DEVICE_WIDTH);
+ iowrite32(chip->options & NAND_ROW_ADDR_3 ? 0 : TWO_ROW_ADDR_CYCLES__FLAG,
+ denali->reg + TWO_ROW_ADDR_CYCLES);
iowrite32(mtd->writesize, denali->reg + DEVICE_MAIN_AREA_SIZE);
iowrite32(mtd->oobsize, denali->reg + DEVICE_SPARE_AREA_SIZE);
@@ -1441,7 +1403,6 @@ disable_irq:
}
EXPORT_SYMBOL(denali_init);
-/* driver exit point */
void denali_remove(struct denali_nand_info *denali)
{
struct mtd_info *mtd = nand_to_mtd(&denali->nand);
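A compact illustration of the <linux/bitfield.h> helpers that replace the hand-rolled ECC_SECTOR()/MAKE_ECC_CORRECTION()-style macros throughout this file; the register name and offset are hypothetical, base is an assumed void __iomem * mapping:

	#define MYREG__FIELD	GENMASK(11, 8)		/* hypothetical 4-bit field */

	u32 reg = ioread32(base + 0x10);		/* hypothetical offset */
	unsigned int v = FIELD_GET(MYREG__FIELD, reg);	/* extract bits 11:8 */

	reg &= ~MYREG__FIELD;
	reg |= FIELD_PREP(MYREG__FIELD, v + 1);		/* shift new value into place */
	iowrite32(reg, base + 0x10);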
diff --git a/drivers/mtd/nand/denali.h b/drivers/mtd/nand/denali.h
index 9239e6793e6e..2911066dacac 100644
--- a/drivers/mtd/nand/denali.h
+++ b/drivers/mtd/nand/denali.h
@@ -10,18 +10,16 @@
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
*/
#ifndef __DENALI_H__
#define __DENALI_H__
#include <linux/bitops.h>
+#include <linux/completion.h>
#include <linux/mtd/rawnand.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
#define DEVICE_RESET 0x0
#define DEVICE_RESET__BANK(bank) BIT(bank)
@@ -111,9 +109,6 @@
#define ECC_CORRECTION 0x1b0
#define ECC_CORRECTION__VALUE GENMASK(4, 0)
#define ECC_CORRECTION__ERASE_THRESHOLD GENMASK(31, 16)
-#define MAKE_ECC_CORRECTION(val, thresh) \
- (((val) & (ECC_CORRECTION__VALUE)) | \
- (((thresh) << 16) & (ECC_CORRECTION__ERASE_THRESHOLD)))
#define READ_MODE 0x1c0
#define READ_MODE__VALUE GENMASK(3, 0)
@@ -255,13 +250,13 @@
#define ECC_ERROR_ADDRESS 0x630
#define ECC_ERROR_ADDRESS__OFFSET GENMASK(11, 0)
-#define ECC_ERROR_ADDRESS__SECTOR_NR GENMASK(15, 12)
+#define ECC_ERROR_ADDRESS__SECTOR GENMASK(15, 12)
#define ERR_CORRECTION_INFO 0x640
-#define ERR_CORRECTION_INFO__BYTEMASK GENMASK(7, 0)
-#define ERR_CORRECTION_INFO__DEVICE_NR GENMASK(11, 8)
-#define ERR_CORRECTION_INFO__ERROR_TYPE BIT(14)
-#define ERR_CORRECTION_INFO__LAST_ERR_INFO BIT(15)
+#define ERR_CORRECTION_INFO__BYTE GENMASK(7, 0)
+#define ERR_CORRECTION_INFO__DEVICE GENMASK(11, 8)
+#define ERR_CORRECTION_INFO__UNCOR BIT(14)
+#define ERR_CORRECTION_INFO__LAST_ERR BIT(15)
#define ECC_COR_INFO(bank) (0x650 + (bank) / 2 * 0x10)
#define ECC_COR_INFO__SHIFT(bank) ((bank) % 2 * 8)
@@ -310,23 +305,24 @@ struct denali_nand_info {
struct device *dev;
void __iomem *reg; /* Register Interface */
void __iomem *host; /* Host Data/Command Interface */
-
- /* elements used by ISR */
struct completion complete;
- spinlock_t irq_lock;
- uint32_t irq_mask;
- uint32_t irq_status;
+ spinlock_t irq_lock; /* protect irq_mask and irq_status */
+ u32 irq_mask; /* interrupts we are waiting for */
+ u32 irq_status; /* interrupts that have happened */
int irq;
-
- void *buf;
+ void *buf; /* for syndrome layout conversion */
dma_addr_t dma_addr;
- int dma_avail;
+ int dma_avail; /* can support DMA? */
int devs_per_cs; /* devices connected in parallel */
- int oob_skip_bytes;
+ int oob_skip_bytes; /* number of bytes reserved for BBM */
int max_banks;
- unsigned int revision;
- unsigned int caps;
+ unsigned int revision; /* IP revision */
+ unsigned int caps; /* IP capability (or quirk) */
const struct nand_ecc_caps *ecc_caps;
+ u32 (*host_read)(struct denali_nand_info *denali, u32 addr);
+ void (*host_write)(struct denali_nand_info *denali, u32 addr, u32 data);
+ void (*setup_dma)(struct denali_nand_info *denali, dma_addr_t dma_addr,
+ int page, int write);
};
#define DENALI_CAP_HW_ECC_FIXUP BIT(0)
diff --git a/drivers/mtd/nand/denali_dt.c b/drivers/mtd/nand/denali_dt.c
index 56e2e177644d..cfd33e6ca77f 100644
--- a/drivers/mtd/nand/denali_dt.c
+++ b/drivers/mtd/nand/denali_dt.c
@@ -12,15 +12,16 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
+
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include "denali.h"
@@ -155,7 +156,6 @@ static struct platform_driver denali_dt_driver = {
.of_match_table = denali_nand_dt_ids,
},
};
-
module_platform_driver(denali_dt_driver);
MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/denali_pci.c b/drivers/mtd/nand/denali_pci.c
index 81370c79aa48..57fb7ae31412 100644
--- a/drivers/mtd/nand/denali_pci.c
+++ b/drivers/mtd/nand/denali_pci.c
@@ -11,6 +11,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
+
+#include <linux/errno.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -106,7 +109,6 @@ failed_remap_reg:
return ret;
}
-/* driver exit point */
static void denali_pci_remove(struct pci_dev *dev)
{
struct denali_nand_info *denali = pci_get_drvdata(dev);
@@ -122,5 +124,4 @@ static struct pci_driver denali_pci_driver = {
.probe = denali_pci_probe,
.remove = denali_pci_remove,
};
-
module_pci_driver(denali_pci_driver);
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index c3aa53caab5c..72671dc52e2e 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -705,8 +705,7 @@ static void doc2001plus_command(struct mtd_info *mtd, unsigned command, int colu
if (page_addr != -1) {
WriteDOC((unsigned char)(page_addr & 0xff), docptr, Mplus_FlashAddress);
WriteDOC((unsigned char)((page_addr >> 8) & 0xff), docptr, Mplus_FlashAddress);
- /* One more address cycle for higher density devices */
- if (this->chipsize & 0x0c000000) {
+ if (this->options & NAND_ROW_ADDR_3) {
WriteDOC((unsigned char)((page_addr >> 16) & 0x0f), docptr, Mplus_FlashAddress);
printk("high density\n");
}
diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c
index fd3648952b5a..484f7fbc3f7d 100644
--- a/drivers/mtd/nand/gpio.c
+++ b/drivers/mtd/nand/gpio.c
@@ -23,7 +23,7 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
@@ -31,12 +31,16 @@
#include <linux/mtd/nand-gpio.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_gpio.h>
struct gpiomtd {
void __iomem *io_sync;
struct nand_chip nand_chip;
struct gpio_nand_platdata plat;
+ struct gpio_desc *nce; /* Optional chip enable */
+ struct gpio_desc *cle;
+ struct gpio_desc *ale;
+ struct gpio_desc *rdy;
+ struct gpio_desc *nwp; /* Optional write protection */
};
static inline struct gpiomtd *gpio_nand_getpriv(struct mtd_info *mtd)
@@ -78,11 +82,10 @@ static void gpio_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
gpio_nand_dosync(gpiomtd);
if (ctrl & NAND_CTRL_CHANGE) {
- if (gpio_is_valid(gpiomtd->plat.gpio_nce))
- gpio_set_value(gpiomtd->plat.gpio_nce,
- !(ctrl & NAND_NCE));
- gpio_set_value(gpiomtd->plat.gpio_cle, !!(ctrl & NAND_CLE));
- gpio_set_value(gpiomtd->plat.gpio_ale, !!(ctrl & NAND_ALE));
+ if (gpiomtd->nce)
+ gpiod_set_value(gpiomtd->nce, !(ctrl & NAND_NCE));
+ gpiod_set_value(gpiomtd->cle, !!(ctrl & NAND_CLE));
+ gpiod_set_value(gpiomtd->ale, !!(ctrl & NAND_ALE));
gpio_nand_dosync(gpiomtd);
}
if (cmd == NAND_CMD_NONE)
@@ -96,7 +99,7 @@ static int gpio_nand_devready(struct mtd_info *mtd)
{
struct gpiomtd *gpiomtd = gpio_nand_getpriv(mtd);
- return gpio_get_value(gpiomtd->plat.gpio_rdy);
+ return gpiod_get_value(gpiomtd->rdy);
}
#ifdef CONFIG_OF
@@ -123,12 +126,6 @@ static int gpio_nand_get_config_of(const struct device *dev,
}
}
- plat->gpio_rdy = of_get_gpio(dev->of_node, 0);
- plat->gpio_nce = of_get_gpio(dev->of_node, 1);
- plat->gpio_ale = of_get_gpio(dev->of_node, 2);
- plat->gpio_cle = of_get_gpio(dev->of_node, 3);
- plat->gpio_nwp = of_get_gpio(dev->of_node, 4);
-
if (!of_property_read_u32(dev->of_node, "chip-delay", &val))
plat->chip_delay = val;
@@ -201,10 +198,11 @@ static int gpio_nand_remove(struct platform_device *pdev)
nand_release(nand_to_mtd(&gpiomtd->nand_chip));
- if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
- gpio_set_value(gpiomtd->plat.gpio_nwp, 0);
- if (gpio_is_valid(gpiomtd->plat.gpio_nce))
- gpio_set_value(gpiomtd->plat.gpio_nce, 1);
+ /* Enable write protection and disable the chip */
+ if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp))
+ gpiod_set_value(gpiomtd->nwp, 0);
+ if (gpiomtd->nce && !IS_ERR(gpiomtd->nce))
+ gpiod_set_value(gpiomtd->nce, 0);
return 0;
}
@@ -215,66 +213,66 @@ static int gpio_nand_probe(struct platform_device *pdev)
struct nand_chip *chip;
struct mtd_info *mtd;
struct resource *res;
+ struct device *dev = &pdev->dev;
int ret = 0;
- if (!pdev->dev.of_node && !dev_get_platdata(&pdev->dev))
+ if (!dev->of_node && !dev_get_platdata(dev))
return -EINVAL;
- gpiomtd = devm_kzalloc(&pdev->dev, sizeof(*gpiomtd), GFP_KERNEL);
+ gpiomtd = devm_kzalloc(dev, sizeof(*gpiomtd), GFP_KERNEL);
if (!gpiomtd)
return -ENOMEM;
chip = &gpiomtd->nand_chip;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- chip->IO_ADDR_R = devm_ioremap_resource(&pdev->dev, res);
+ chip->IO_ADDR_R = devm_ioremap_resource(dev, res);
if (IS_ERR(chip->IO_ADDR_R))
return PTR_ERR(chip->IO_ADDR_R);
res = gpio_nand_get_io_sync(pdev);
if (res) {
- gpiomtd->io_sync = devm_ioremap_resource(&pdev->dev, res);
+ gpiomtd->io_sync = devm_ioremap_resource(dev, res);
if (IS_ERR(gpiomtd->io_sync))
return PTR_ERR(gpiomtd->io_sync);
}
- ret = gpio_nand_get_config(&pdev->dev, &gpiomtd->plat);
+ ret = gpio_nand_get_config(dev, &gpiomtd->plat);
if (ret)
return ret;
- if (gpio_is_valid(gpiomtd->plat.gpio_nce)) {
- ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_nce,
- "NAND NCE");
- if (ret)
- return ret;
- gpio_direction_output(gpiomtd->plat.gpio_nce, 1);
+ /* Just enable the chip */
+ gpiomtd->nce = devm_gpiod_get_optional(dev, "nce", GPIOD_OUT_HIGH);
+ if (IS_ERR(gpiomtd->nce))
+ return PTR_ERR(gpiomtd->nce);
+
+ /* We disable write protection once we know probe() will succeed */
+ gpiomtd->nwp = devm_gpiod_get_optional(dev, "nwp", GPIOD_OUT_LOW);
+ if (IS_ERR(gpiomtd->nwp)) {
+ ret = PTR_ERR(gpiomtd->nwp);
+ goto out_ce;
}
- if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) {
- ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_nwp,
- "NAND NWP");
- if (ret)
- return ret;
+	gpiomtd->ale = devm_gpiod_get(dev, "ale", GPIOD_OUT_LOW);
+	if (IS_ERR(gpiomtd->ale)) {
+		ret = PTR_ERR(gpiomtd->ale);
+ goto out_ce;
}
- ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_ale, "NAND ALE");
- if (ret)
- return ret;
- gpio_direction_output(gpiomtd->plat.gpio_ale, 0);
+ gpiomtd->cle = devm_gpiod_get(dev, "cle", GPIOD_OUT_LOW);
+ if (IS_ERR(gpiomtd->cle)) {
+ ret = PTR_ERR(gpiomtd->cle);
+ goto out_ce;
+ }
- ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_cle, "NAND CLE");
- if (ret)
- return ret;
- gpio_direction_output(gpiomtd->plat.gpio_cle, 0);
-
- if (gpio_is_valid(gpiomtd->plat.gpio_rdy)) {
- ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_rdy,
- "NAND RDY");
- if (ret)
- return ret;
- gpio_direction_input(gpiomtd->plat.gpio_rdy);
- chip->dev_ready = gpio_nand_devready;
+ gpiomtd->rdy = devm_gpiod_get_optional(dev, "rdy", GPIOD_IN);
+ if (IS_ERR(gpiomtd->rdy)) {
+ ret = PTR_ERR(gpiomtd->rdy);
+ goto out_ce;
}
+ /* Using RDY pin */
+ if (gpiomtd->rdy)
+ chip->dev_ready = gpio_nand_devready;
nand_set_flash_node(chip, pdev->dev.of_node);
chip->IO_ADDR_W = chip->IO_ADDR_R;
@@ -285,12 +283,13 @@ static int gpio_nand_probe(struct platform_device *pdev)
chip->cmd_ctrl = gpio_nand_cmd_ctrl;
mtd = nand_to_mtd(chip);
- mtd->dev.parent = &pdev->dev;
+ mtd->dev.parent = dev;
platform_set_drvdata(pdev, gpiomtd);
- if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
- gpio_direction_output(gpiomtd->plat.gpio_nwp, 1);
+ /* Disable write protection, if wired up */
+ if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp))
+ gpiod_direction_output(gpiomtd->nwp, 1);
ret = nand_scan(mtd, 1);
if (ret)
@@ -305,8 +304,11 @@ static int gpio_nand_probe(struct platform_device *pdev)
return 0;
err_wp:
- if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
- gpio_set_value(gpiomtd->plat.gpio_nwp, 0);
+ if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp))
+ gpiod_set_value(gpiomtd->nwp, 0);
+out_ce:
+ if (gpiomtd->nce && !IS_ERR(gpiomtd->nce))
+ gpiod_set_value(gpiomtd->nce, 0);
return ret;
}
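The gpio.c conversion above is a template for the legacy-to-descriptor GPIO migration: the descriptor API requests the line, sets its initial direction and level, and applies the active-low polarity described in the device tree, while the *_optional variant returns NULL (not an error) when the line simply is not wired up. A minimal probe-time sketch, illustrative only, using the "nwp" connection ID from the patch:

	struct gpio_desc *nwp;

	nwp = devm_gpiod_get_optional(dev, "nwp", GPIOD_OUT_LOW);
	if (IS_ERR(nwp))
		return PTR_ERR(nwp);
	if (nwp)	/* NULL means the board has no write-protect line */
		gpiod_set_value(nwp, 1);	/* logical 1: de-assert write protect */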
diff --git a/drivers/mtd/nand/hisi504_nand.c b/drivers/mtd/nand/hisi504_nand.c
index d9ee1a7e6956..0897261c3e17 100644
--- a/drivers/mtd/nand/hisi504_nand.c
+++ b/drivers/mtd/nand/hisi504_nand.c
@@ -432,8 +432,7 @@ static void set_addr(struct mtd_info *mtd, int column, int page_addr)
host->addr_value[0] |= (page_addr & 0xffff)
<< (host->addr_cycle * 8);
host->addr_cycle += 2;
- /* One more address cycle for devices > 128MiB */
- if (chip->chipsize > (128 << 20)) {
+ if (chip->options & NAND_ROW_ADDR_3) {
host->addr_cycle += 1;
if (host->command == NAND_CMD_ERASE1)
host->addr_value[0] |= ((page_addr >> 16) & 0xff) << 16;
diff --git a/drivers/mtd/nand/mtk_ecc.c b/drivers/mtd/nand/mtk_ecc.c
index 7f3b065b6b8f..c51d214d169e 100644
--- a/drivers/mtd/nand/mtk_ecc.c
+++ b/drivers/mtd/nand/mtk_ecc.c
@@ -115,6 +115,11 @@ static irqreturn_t mtk_ecc_irq(int irq, void *id)
op = ECC_DECODE;
dec = readw(ecc->regs + ECC_DECDONE);
if (dec & ecc->sectors) {
+ /*
+ * Clear decode IRQ status once again to ensure that
+ * there will be no extra IRQ.
+ */
+ readw(ecc->regs + ECC_DECIRQ_STA);
ecc->sectors = 0;
complete(&ecc->done);
} else {
@@ -130,8 +135,6 @@ static irqreturn_t mtk_ecc_irq(int irq, void *id)
}
}
- writel(0, ecc->regs + ECC_IRQ_REG(op));
-
return IRQ_HANDLED;
}
@@ -307,6 +310,12 @@ void mtk_ecc_disable(struct mtk_ecc *ecc)
/* disable it */
mtk_ecc_wait_idle(ecc, op);
+ if (op == ECC_DECODE)
+ /*
+ * Clear the decode IRQ status in case the wait for the
+ * decode IRQ timed out.
+ */
+ readw(ecc->regs + ECC_DECIRQ_STA);
writew(0, ecc->regs + ECC_IRQ_REG(op));
writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op));
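
Both mtk_ecc hunks rely on ECC_DECIRQ_STA being a read-to-clear status register: reading it acknowledges the pending decode interrupt, so the extra readw() in the interrupt handler and in the timeout path of mtk_ecc_disable() keeps a stale decode IRQ from firing after the completion has already been signalled. A rough sketch of the resulting shutdown order (register macros reused from the hunk, the helper name is illustrative):

static void demo_ecc_quiesce(void __iomem *regs, u32 op)
{
	/* Ack a possibly pending decode IRQ by reading the status */
	if (op == ECC_DECODE)
		readw(regs + ECC_DECIRQ_STA);

	/* Then mask the interrupt and stop the engine */
	writew(0, regs + ECC_IRQ_REG(op));
	writew(ECC_OP_DISABLE, regs + ECC_CTL_REG(op));
}
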
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 53e5e0337c3e..f3be0b2a8869 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -415,7 +415,7 @@ static void send_cmd_v3(struct mxc_nand_host *host, uint16_t cmd, int useirq)
* waits for completion. */
static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq)
{
- pr_debug("send_cmd(host, 0x%x, %d)\n", cmd, useirq);
+ dev_dbg(host->dev, "send_cmd(host, 0x%x, %d)\n", cmd, useirq);
writew(cmd, NFC_V1_V2_FLASH_CMD);
writew(NFC_CMD, NFC_V1_V2_CONFIG2);
@@ -431,7 +431,7 @@ static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq)
udelay(1);
}
if (max_retries < 0)
- pr_debug("%s: RESET failed\n", __func__);
+ dev_dbg(host->dev, "%s: RESET failed\n", __func__);
} else {
/* Wait for operation to complete */
wait_op_done(host, useirq);
@@ -454,7 +454,7 @@ static void send_addr_v3(struct mxc_nand_host *host, uint16_t addr, int islast)
* a NAND command. */
static void send_addr_v1_v2(struct mxc_nand_host *host, uint16_t addr, int islast)
{
- pr_debug("send_addr(host, 0x%x %d)\n", addr, islast);
+ dev_dbg(host->dev, "send_addr(host, 0x%x %d)\n", addr, islast);
writew(addr, NFC_V1_V2_FLASH_ADDR);
writew(NFC_ADDR, NFC_V1_V2_CONFIG2);
@@ -607,7 +607,7 @@ static int mxc_nand_correct_data_v1(struct mtd_info *mtd, u_char *dat,
uint16_t ecc_status = get_ecc_status_v1(host);
if (((ecc_status & 0x3) == 2) || ((ecc_status >> 2) == 2)) {
- pr_debug("MXC_NAND: HWECC uncorrectable 2-bit ECC error\n");
+ dev_dbg(host->dev, "HWECC uncorrectable 2-bit ECC error\n");
return -EBADMSG;
}
@@ -634,7 +634,7 @@ static int mxc_nand_correct_data_v2_v3(struct mtd_info *mtd, u_char *dat,
do {
err = ecc_stat & ecc_bit_mask;
if (err > err_limit) {
- printk(KERN_WARNING "UnCorrectable RS-ECC Error\n");
+ dev_dbg(host->dev, "UnCorrectable RS-ECC Error\n");
return -EBADMSG;
} else {
ret += err;
@@ -642,7 +642,7 @@ static int mxc_nand_correct_data_v2_v3(struct mtd_info *mtd, u_char *dat,
ecc_stat >>= 4;
} while (--no_subpages);
- pr_debug("%d Symbol Correctable RS-ECC Error\n", ret);
+ dev_dbg(host->dev, "%d Symbol Correctable RS-ECC Error\n", ret);
return ret;
}
@@ -673,7 +673,7 @@ static u_char mxc_nand_read_byte(struct mtd_info *mtd)
host->buf_start++;
}
- pr_debug("%s: ret=0x%hhx (start=%u)\n", __func__, ret, host->buf_start);
+ dev_dbg(host->dev, "%s: ret=0x%hhx (start=%u)\n", __func__, ret, host->buf_start);
return ret;
}
@@ -859,8 +859,7 @@ static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr)
host->devtype_data->send_addr(host,
(page_addr >> 8) & 0xff, true);
} else {
- /* One more address cycle for higher density devices */
- if (mtd->size >= 0x4000000) {
+ if (nand_chip->options & NAND_ROW_ADDR_3) {
/* paddr_8 - paddr_15 */
host->devtype_data->send_addr(host,
(page_addr >> 8) & 0xff,
@@ -1212,7 +1211,7 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
struct nand_chip *nand_chip = mtd_to_nand(mtd);
struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
- pr_debug("mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n",
+ dev_dbg(host->dev, "mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n",
command, column, page_addr);
/* Reset command state information */
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 12edaae17d81..6135d007a068 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -115,7 +115,7 @@ static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
struct nand_chip *chip = mtd_to_nand(mtd);
struct nand_ecc_ctrl *ecc = &chip->ecc;
- if (section)
+ if (section || !ecc->total)
return -ERANGE;
oobregion->length = ecc->total;
@@ -727,8 +727,7 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
chip->cmd_ctrl(mtd, page_addr, ctrl);
ctrl &= ~NAND_CTRL_CHANGE;
chip->cmd_ctrl(mtd, page_addr >> 8, ctrl);
- /* One more address cycle for devices > 32MiB */
- if (chip->chipsize > (32 << 20))
+ if (chip->options & NAND_ROW_ADDR_3)
chip->cmd_ctrl(mtd, page_addr >> 16, ctrl);
}
chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
@@ -854,8 +853,7 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
chip->cmd_ctrl(mtd, page_addr, ctrl);
chip->cmd_ctrl(mtd, page_addr >> 8,
NAND_NCE | NAND_ALE);
- /* One more address cycle for devices > 128MiB */
- if (chip->chipsize > (128 << 20))
+ if (chip->options & NAND_ROW_ADDR_3)
chip->cmd_ctrl(mtd, page_addr >> 16,
NAND_NCE | NAND_ALE);
}
@@ -1246,6 +1244,7 @@ int nand_reset(struct nand_chip *chip, int chipnr)
return 0;
}
+EXPORT_SYMBOL_GPL(nand_reset);
/**
* nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
@@ -2799,15 +2798,18 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const uint8_t *buf)
{
struct nand_chip *chip = mtd_to_nand(mtd);
+ int chipnr = (int)(to >> chip->chip_shift);
struct mtd_oob_ops ops;
int ret;
- /* Wait for the device to get ready */
- panic_nand_wait(mtd, chip, 400);
-
/* Grab the device */
panic_nand_get_device(chip, mtd, FL_WRITING);
+ chip->select_chip(mtd, chipnr);
+
+ /* Wait for the device to get ready */
+ panic_nand_wait(mtd, chip, 400);
+
memset(&ops, 0, sizeof(ops));
ops.len = len;
ops.datbuf = (uint8_t *)buf;
@@ -3999,6 +4001,9 @@ ident_done:
chip->chip_shift += 32 - 1;
}
+ if (chip->chip_shift - chip->page_shift > 16)
+ chip->options |= NAND_ROW_ADDR_3;
+
chip->badblockbits = 8;
chip->erase = single_erase;
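
The NAND_ROW_ADDR_3 option set just above is derived from the page count rather than from an absolute chip size: chip_shift - page_shift is the number of row-address bits, so anything above 16 bits (more than 65536 pages) needs a third row cycle. A quick check against the thresholds the old per-driver tests used, assuming common but hypothetical geometries:

/*
 * 128 MiB chip, 2 KiB pages: chip_shift = 27, page_shift = 11
 *   -> 27 - 11 = 16 row bits -> 65536 pages -> 2 row cycles
 * 256 MiB chip, 2 KiB pages: chip_shift = 28, page_shift = 11
 *   -> 28 - 11 = 17 row bits -> 3 row cycles (NAND_ROW_ADDR_3 set)
 *
 * This matches the old "chipsize > (128 << 20)" test for 2 KiB large-page
 * devices, and the "chipsize > (32 << 20)" test for 512-byte small-page
 * devices (32 MiB, 512 B pages: 25 - 9 = 16 -> 2 cycles), from a single
 * expression.
 */
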
@@ -4700,6 +4705,19 @@ int nand_scan_tail(struct mtd_info *mtd)
mtd_set_ooblayout(mtd, &nand_ooblayout_lp_hamming_ops);
break;
default:
+ /*
+ * Expose the whole OOB area to users if ECC_NONE
+ * is passed. We could do that for all kinds of
+ * ->oobsize, but we must keep the old large/small
+ * page ECC layouts when ->oobsize <= 128 for
+ * compatibility reasons.
+ */
+ if (ecc->mode == NAND_ECC_NONE) {
+ mtd_set_ooblayout(mtd,
+ &nand_ooblayout_lp_ops);
+ break;
+ }
+
WARN(1, "No oob scheme defined for oobsize %d\n",
mtd->oobsize);
ret = -EINVAL;
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 246b4393118e..44322a363ba5 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -520,11 +520,16 @@ static int nandsim_debugfs_create(struct nandsim *dev)
struct dentry *root = nsmtd->dbg.dfs_dir;
struct dentry *dent;
- if (!IS_ENABLED(CONFIG_DEBUG_FS))
+ /*
+ * Just skip debugfs initialization when the debugfs directory is
+ * missing.
+ */
+ if (IS_ERR_OR_NULL(root)) {
+ if (IS_ENABLED(CONFIG_DEBUG_FS) &&
+ !IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
+ NS_WARN("CONFIG_MTD_PARTITIONED_MASTER must be enabled to expose debugfs stuff\n");
return 0;
-
- if (IS_ERR_OR_NULL(root))
- return -1;
+ }
dent = debugfs_create_file("nandsim_wear_report", S_IRUSR,
root, dev, &dfs_fops);
diff --git a/drivers/mtd/nand/nuc900_nand.c b/drivers/mtd/nand/nuc900_nand.c
index 7bb4d2ea9342..af5b32c9a791 100644
--- a/drivers/mtd/nand/nuc900_nand.c
+++ b/drivers/mtd/nand/nuc900_nand.c
@@ -154,7 +154,7 @@ static void nuc900_nand_command_lp(struct mtd_info *mtd, unsigned int command,
if (page_addr != -1) {
write_addr_reg(nand, page_addr);
- if (chip->chipsize > (128 << 20)) {
+ if (chip->options & NAND_ROW_ADDR_3) {
write_addr_reg(nand, page_addr >> 8);
write_addr_reg(nand, page_addr >> 16 | ENDADDR);
} else {
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 54540c8fa1a2..dad438c4906a 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -1133,129 +1133,172 @@ static u8 bch8_polynomial[] = {0xef, 0x51, 0x2e, 0x09, 0xed, 0x93, 0x9a, 0xc2,
0x97, 0x79, 0xe5, 0x24, 0xb5};
/**
- * omap_calculate_ecc_bch - Generate bytes of ECC bytes
+ * _omap_calculate_ecc_bch - Generate ECC bytes for one sector
* @mtd: MTD device structure
* @dat: The pointer to data on which ecc is computed
* @ecc_code: The ecc_code buffer
+ * @i: The sector number (for a multi sector page)
*
- * Support calculating of BCH4/8 ecc vectors for the page
+ * Supports calculation of BCH4/8/16 ECC vectors for one sector
+ * within a page. The sector number is passed in @i.
*/
-static int __maybe_unused omap_calculate_ecc_bch(struct mtd_info *mtd,
- const u_char *dat, u_char *ecc_calc)
+static int _omap_calculate_ecc_bch(struct mtd_info *mtd,
+ const u_char *dat, u_char *ecc_calc, int i)
{
struct omap_nand_info *info = mtd_to_omap(mtd);
int eccbytes = info->nand.ecc.bytes;
struct gpmc_nand_regs *gpmc_regs = &info->reg;
u8 *ecc_code;
- unsigned long nsectors, bch_val1, bch_val2, bch_val3, bch_val4;
+ unsigned long bch_val1, bch_val2, bch_val3, bch_val4;
u32 val;
- int i, j;
+ int j;
+
+ ecc_code = ecc_calc;
+ switch (info->ecc_opt) {
+ case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
+ case OMAP_ECC_BCH8_CODE_HW:
+ bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
+ bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
+ bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]);
+ bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]);
+ *ecc_code++ = (bch_val4 & 0xFF);
+ *ecc_code++ = ((bch_val3 >> 24) & 0xFF);
+ *ecc_code++ = ((bch_val3 >> 16) & 0xFF);
+ *ecc_code++ = ((bch_val3 >> 8) & 0xFF);
+ *ecc_code++ = (bch_val3 & 0xFF);
+ *ecc_code++ = ((bch_val2 >> 24) & 0xFF);
+ *ecc_code++ = ((bch_val2 >> 16) & 0xFF);
+ *ecc_code++ = ((bch_val2 >> 8) & 0xFF);
+ *ecc_code++ = (bch_val2 & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 24) & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 16) & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 8) & 0xFF);
+ *ecc_code++ = (bch_val1 & 0xFF);
+ break;
+ case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
+ case OMAP_ECC_BCH4_CODE_HW:
+ bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
+ bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
+ *ecc_code++ = ((bch_val2 >> 12) & 0xFF);
+ *ecc_code++ = ((bch_val2 >> 4) & 0xFF);
+ *ecc_code++ = ((bch_val2 & 0xF) << 4) |
+ ((bch_val1 >> 28) & 0xF);
+ *ecc_code++ = ((bch_val1 >> 20) & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 12) & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 4) & 0xFF);
+ *ecc_code++ = ((bch_val1 & 0xF) << 4);
+ break;
+ case OMAP_ECC_BCH16_CODE_HW:
+ val = readl(gpmc_regs->gpmc_bch_result6[i]);
+ ecc_code[0] = ((val >> 8) & 0xFF);
+ ecc_code[1] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result5[i]);
+ ecc_code[2] = ((val >> 24) & 0xFF);
+ ecc_code[3] = ((val >> 16) & 0xFF);
+ ecc_code[4] = ((val >> 8) & 0xFF);
+ ecc_code[5] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result4[i]);
+ ecc_code[6] = ((val >> 24) & 0xFF);
+ ecc_code[7] = ((val >> 16) & 0xFF);
+ ecc_code[8] = ((val >> 8) & 0xFF);
+ ecc_code[9] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result3[i]);
+ ecc_code[10] = ((val >> 24) & 0xFF);
+ ecc_code[11] = ((val >> 16) & 0xFF);
+ ecc_code[12] = ((val >> 8) & 0xFF);
+ ecc_code[13] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result2[i]);
+ ecc_code[14] = ((val >> 24) & 0xFF);
+ ecc_code[15] = ((val >> 16) & 0xFF);
+ ecc_code[16] = ((val >> 8) & 0xFF);
+ ecc_code[17] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result1[i]);
+ ecc_code[18] = ((val >> 24) & 0xFF);
+ ecc_code[19] = ((val >> 16) & 0xFF);
+ ecc_code[20] = ((val >> 8) & 0xFF);
+ ecc_code[21] = ((val >> 0) & 0xFF);
+ val = readl(gpmc_regs->gpmc_bch_result0[i]);
+ ecc_code[22] = ((val >> 24) & 0xFF);
+ ecc_code[23] = ((val >> 16) & 0xFF);
+ ecc_code[24] = ((val >> 8) & 0xFF);
+ ecc_code[25] = ((val >> 0) & 0xFF);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* ECC scheme specific syndrome customizations */
+ switch (info->ecc_opt) {
+ case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
+ /* Add constant polynomial to remainder, so that
+ * ECC of blank pages results in 0x0 on reading back
+ */
+ for (j = 0; j < eccbytes; j++)
+ ecc_calc[j] ^= bch4_polynomial[j];
+ break;
+ case OMAP_ECC_BCH4_CODE_HW:
+ /* Set 8th ECC byte as 0x0 for ROM compatibility */
+ ecc_calc[eccbytes - 1] = 0x0;
+ break;
+ case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
+ /* Add constant polynomial to remainder, so that
+ * ECC of blank pages results in 0x0 on reading back
+ */
+ for (j = 0; j < eccbytes; j++)
+ ecc_calc[j] ^= bch8_polynomial[j];
+ break;
+ case OMAP_ECC_BCH8_CODE_HW:
+ /* Set 14th ECC byte as 0x0 for ROM compatibility */
+ ecc_calc[eccbytes - 1] = 0x0;
+ break;
+ case OMAP_ECC_BCH16_CODE_HW:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * omap_calculate_ecc_bch_sw - ECC generator for one sector, for SW-based correction
+ * @mtd: MTD device structure
+ * @dat: The pointer to data on which ecc is computed
+ * @ecc_code: The ecc_code buffer
+ *
+ * Supports calculation of BCH4/8/16 ECC vectors for one sector. This is used
+ * when software-based correction is in place, since the ECC is then needed
+ * one sector at a time.
+ */
+static int omap_calculate_ecc_bch_sw(struct mtd_info *mtd,
+ const u_char *dat, u_char *ecc_calc)
+{
+ return _omap_calculate_ecc_bch(mtd, dat, ecc_calc, 0);
+}
+
+/**
+ * omap_calculate_ecc_bch_multi - Generate ECC for multiple sectors
+ * @mtd: MTD device structure
+ * @dat: The pointer to data on which ecc is computed
+ * @ecc_code: The ecc_code buffer
+ *
+ * Supports calculation of BCH4/8/16 ECC vectors for the entire page in one go.
+ */
+static int omap_calculate_ecc_bch_multi(struct mtd_info *mtd,
+ const u_char *dat, u_char *ecc_calc)
+{
+ struct omap_nand_info *info = mtd_to_omap(mtd);
+ int eccbytes = info->nand.ecc.bytes;
+ unsigned long nsectors;
+ int i, ret;
nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1;
for (i = 0; i < nsectors; i++) {
- ecc_code = ecc_calc;
- switch (info->ecc_opt) {
- case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
- case OMAP_ECC_BCH8_CODE_HW:
- bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
- bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
- bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]);
- bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]);
- *ecc_code++ = (bch_val4 & 0xFF);
- *ecc_code++ = ((bch_val3 >> 24) & 0xFF);
- *ecc_code++ = ((bch_val3 >> 16) & 0xFF);
- *ecc_code++ = ((bch_val3 >> 8) & 0xFF);
- *ecc_code++ = (bch_val3 & 0xFF);
- *ecc_code++ = ((bch_val2 >> 24) & 0xFF);
- *ecc_code++ = ((bch_val2 >> 16) & 0xFF);
- *ecc_code++ = ((bch_val2 >> 8) & 0xFF);
- *ecc_code++ = (bch_val2 & 0xFF);
- *ecc_code++ = ((bch_val1 >> 24) & 0xFF);
- *ecc_code++ = ((bch_val1 >> 16) & 0xFF);
- *ecc_code++ = ((bch_val1 >> 8) & 0xFF);
- *ecc_code++ = (bch_val1 & 0xFF);
- break;
- case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
- case OMAP_ECC_BCH4_CODE_HW:
- bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
- bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
- *ecc_code++ = ((bch_val2 >> 12) & 0xFF);
- *ecc_code++ = ((bch_val2 >> 4) & 0xFF);
- *ecc_code++ = ((bch_val2 & 0xF) << 4) |
- ((bch_val1 >> 28) & 0xF);
- *ecc_code++ = ((bch_val1 >> 20) & 0xFF);
- *ecc_code++ = ((bch_val1 >> 12) & 0xFF);
- *ecc_code++ = ((bch_val1 >> 4) & 0xFF);
- *ecc_code++ = ((bch_val1 & 0xF) << 4);
- break;
- case OMAP_ECC_BCH16_CODE_HW:
- val = readl(gpmc_regs->gpmc_bch_result6[i]);
- ecc_code[0] = ((val >> 8) & 0xFF);
- ecc_code[1] = ((val >> 0) & 0xFF);
- val = readl(gpmc_regs->gpmc_bch_result5[i]);
- ecc_code[2] = ((val >> 24) & 0xFF);
- ecc_code[3] = ((val >> 16) & 0xFF);
- ecc_code[4] = ((val >> 8) & 0xFF);
- ecc_code[5] = ((val >> 0) & 0xFF);
- val = readl(gpmc_regs->gpmc_bch_result4[i]);
- ecc_code[6] = ((val >> 24) & 0xFF);
- ecc_code[7] = ((val >> 16) & 0xFF);
- ecc_code[8] = ((val >> 8) & 0xFF);
- ecc_code[9] = ((val >> 0) & 0xFF);
- val = readl(gpmc_regs->gpmc_bch_result3[i]);
- ecc_code[10] = ((val >> 24) & 0xFF);
- ecc_code[11] = ((val >> 16) & 0xFF);
- ecc_code[12] = ((val >> 8) & 0xFF);
- ecc_code[13] = ((val >> 0) & 0xFF);
- val = readl(gpmc_regs->gpmc_bch_result2[i]);
- ecc_code[14] = ((val >> 24) & 0xFF);
- ecc_code[15] = ((val >> 16) & 0xFF);
- ecc_code[16] = ((val >> 8) & 0xFF);
- ecc_code[17] = ((val >> 0) & 0xFF);
- val = readl(gpmc_regs->gpmc_bch_result1[i]);
- ecc_code[18] = ((val >> 24) & 0xFF);
- ecc_code[19] = ((val >> 16) & 0xFF);
- ecc_code[20] = ((val >> 8) & 0xFF);
- ecc_code[21] = ((val >> 0) & 0xFF);
- val = readl(gpmc_regs->gpmc_bch_result0[i]);
- ecc_code[22] = ((val >> 24) & 0xFF);
- ecc_code[23] = ((val >> 16) & 0xFF);
- ecc_code[24] = ((val >> 8) & 0xFF);
- ecc_code[25] = ((val >> 0) & 0xFF);
- break;
- default:
- return -EINVAL;
- }
-
- /* ECC scheme specific syndrome customizations */
- switch (info->ecc_opt) {
- case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
- /* Add constant polynomial to remainder, so that
- * ECC of blank pages results in 0x0 on reading back */
- for (j = 0; j < eccbytes; j++)
- ecc_calc[j] ^= bch4_polynomial[j];
- break;
- case OMAP_ECC_BCH4_CODE_HW:
- /* Set 8th ECC byte as 0x0 for ROM compatibility */
- ecc_calc[eccbytes - 1] = 0x0;
- break;
- case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
- /* Add constant polynomial to remainder, so that
- * ECC of blank pages results in 0x0 on reading back */
- for (j = 0; j < eccbytes; j++)
- ecc_calc[j] ^= bch8_polynomial[j];
- break;
- case OMAP_ECC_BCH8_CODE_HW:
- /* Set 14th ECC byte as 0x0 for ROM compatibility */
- ecc_calc[eccbytes - 1] = 0x0;
- break;
- case OMAP_ECC_BCH16_CODE_HW:
- break;
- default:
- return -EINVAL;
- }
+ ret = _omap_calculate_ecc_bch(mtd, dat, ecc_calc, i);
+ if (ret)
+ return ret;
- ecc_calc += eccbytes;
+ ecc_calc += eccbytes;
}
return 0;
@@ -1496,7 +1539,7 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
chip->write_buf(mtd, buf, mtd->writesize);
/* Update ecc vector from GPMC result registers */
- chip->ecc.calculate(mtd, buf, &ecc_calc[0]);
+ omap_calculate_ecc_bch_multi(mtd, buf, &ecc_calc[0]);
ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
chip->ecc.total);
@@ -1509,6 +1552,72 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
}
/**
+ * omap_write_subpage_bch - BCH hardware ECC based subpage write
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @offset: column address of subpage within the page
+ * @data_len: data length
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ *
+ * OMAP optimized subpage write method.
+ */
+static int omap_write_subpage_bch(struct mtd_info *mtd,
+ struct nand_chip *chip, u32 offset,
+ u32 data_len, const u8 *buf,
+ int oob_required, int page)
+{
+ u8 *ecc_calc = chip->buffers->ecccalc;
+ int ecc_size = chip->ecc.size;
+ int ecc_bytes = chip->ecc.bytes;
+ int ecc_steps = chip->ecc.steps;
+ u32 start_step = offset / ecc_size;
+ u32 end_step = (offset + data_len - 1) / ecc_size;
+ int step, ret = 0;
+
+ /*
+ * Write the entire page in one go, which is optimal since the
+ * ECC is calculated by hardware.
+ * ECC is computed for all subpages, but only the ECC of the
+ * subpages actually written is kept.
+ */
+
+ /* Enable GPMC ECC engine */
+ chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
+
+ /* Write data */
+ chip->write_buf(mtd, buf, mtd->writesize);
+
+ for (step = 0; step < ecc_steps; step++) {
+ /* mask the ECC of untouched subpages by padding with 0xFF */
+ if (step < start_step || step > end_step)
+ memset(ecc_calc, 0xff, ecc_bytes);
+ else
+ ret = _omap_calculate_ecc_bch(mtd, buf, ecc_calc, step);
+
+ if (ret)
+ return ret;
+
+ buf += ecc_size;
+ ecc_calc += ecc_bytes;
+ }
+
+ /*
+ * Copy the calculated ECC for the whole page to chip->oob_poi; this
+ * includes the masked value (0xFF) for the unwritten subpages.
+ */
+ ecc_calc = chip->buffers->ecccalc;
+ ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
+ chip->ecc.total);
+ if (ret)
+ return ret;
+
+ /* write OOB buffer to NAND device */
+ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
+}
+
+/**
* omap_read_page_bch - BCH ecc based page read function for entire page
* @mtd: mtd info structure
* @chip: nand chip info structure
@@ -1544,7 +1653,7 @@ static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
chip->ecc.total);
/* Calculate ecc bytes */
- chip->ecc.calculate(mtd, buf, ecc_calc);
+ omap_calculate_ecc_bch_multi(mtd, buf, ecc_calc);
ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
chip->ecc.total);
@@ -1588,8 +1697,7 @@ static bool is_elm_present(struct omap_nand_info *info,
return true;
}
-static bool omap2_nand_ecc_check(struct omap_nand_info *info,
- struct omap_nand_platform_data *pdata)
+static bool omap2_nand_ecc_check(struct omap_nand_info *info)
{
bool ecc_needs_bch, ecc_needs_omap_bch, ecc_needs_elm;
@@ -1804,7 +1912,6 @@ static const struct mtd_ooblayout_ops omap_sw_ooblayout_ops = {
static int omap_nand_probe(struct platform_device *pdev)
{
struct omap_nand_info *info;
- struct omap_nand_platform_data *pdata = NULL;
struct mtd_info *mtd;
struct nand_chip *nand_chip;
int err;
@@ -1821,29 +1928,10 @@ static int omap_nand_probe(struct platform_device *pdev)
info->pdev = pdev;
- if (dev->of_node) {
- if (omap_get_dt_info(dev, info))
- return -EINVAL;
- } else {
- pdata = dev_get_platdata(&pdev->dev);
- if (!pdata) {
- dev_err(&pdev->dev, "platform data missing\n");
- return -EINVAL;
- }
-
- info->gpmc_cs = pdata->cs;
- info->reg = pdata->reg;
- info->ecc_opt = pdata->ecc_opt;
- if (pdata->dev_ready)
- dev_info(&pdev->dev, "pdata->dev_ready is deprecated\n");
-
- info->xfer_type = pdata->xfer_type;
- info->devsize = pdata->devsize;
- info->elm_of_node = pdata->elm_of_node;
- info->flash_bbt = pdata->flash_bbt;
- }
+ err = omap_get_dt_info(dev, info);
+ if (err)
+ return err;
- platform_set_drvdata(pdev, info);
info->ops = gpmc_omap_get_nand_ops(&info->reg, info->gpmc_cs);
if (!info->ops) {
dev_err(&pdev->dev, "Failed to get GPMC->NAND interface\n");
@@ -2002,7 +2090,7 @@ static int omap_nand_probe(struct platform_device *pdev)
goto return_error;
}
- if (!omap2_nand_ecc_check(info, pdata)) {
+ if (!omap2_nand_ecc_check(info)) {
err = -EINVAL;
goto return_error;
}
@@ -2044,7 +2132,7 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip->ecc.strength = 4;
nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
nand_chip->ecc.correct = nand_bch_correct_data;
- nand_chip->ecc.calculate = omap_calculate_ecc_bch;
+ nand_chip->ecc.calculate = omap_calculate_ecc_bch_sw;
mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
/* Reserve one byte for the OMAP marker */
oobbytes_per_step = nand_chip->ecc.bytes + 1;
@@ -2066,9 +2154,9 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip->ecc.strength = 4;
nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
nand_chip->ecc.correct = omap_elm_correct_data;
- nand_chip->ecc.calculate = omap_calculate_ecc_bch;
nand_chip->ecc.read_page = omap_read_page_bch;
nand_chip->ecc.write_page = omap_write_page_bch;
+ nand_chip->ecc.write_subpage = omap_write_subpage_bch;
mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
oobbytes_per_step = nand_chip->ecc.bytes;
@@ -2087,7 +2175,7 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip->ecc.strength = 8;
nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
nand_chip->ecc.correct = nand_bch_correct_data;
- nand_chip->ecc.calculate = omap_calculate_ecc_bch;
+ nand_chip->ecc.calculate = omap_calculate_ecc_bch_sw;
mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
/* Reserve one byte for the OMAP marker */
oobbytes_per_step = nand_chip->ecc.bytes + 1;
@@ -2109,9 +2197,9 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip->ecc.strength = 8;
nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
nand_chip->ecc.correct = omap_elm_correct_data;
- nand_chip->ecc.calculate = omap_calculate_ecc_bch;
nand_chip->ecc.read_page = omap_read_page_bch;
nand_chip->ecc.write_page = omap_write_page_bch;
+ nand_chip->ecc.write_subpage = omap_write_subpage_bch;
mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
oobbytes_per_step = nand_chip->ecc.bytes;
@@ -2131,9 +2219,9 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip->ecc.strength = 16;
nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
nand_chip->ecc.correct = omap_elm_correct_data;
- nand_chip->ecc.calculate = omap_calculate_ecc_bch;
nand_chip->ecc.read_page = omap_read_page_bch;
nand_chip->ecc.write_page = omap_write_page_bch;
+ nand_chip->ecc.write_subpage = omap_write_subpage_bch;
mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
oobbytes_per_step = nand_chip->ecc.bytes;
@@ -2167,10 +2255,9 @@ scan_tail:
if (err)
goto return_error;
- if (dev->of_node)
- mtd_device_register(mtd, NULL, 0);
- else
- mtd_device_register(mtd, pdata->parts, pdata->nr_parts);
+ err = mtd_device_register(mtd, NULL, 0);
+ if (err)
+ goto return_error;
platform_set_drvdata(pdev, mtd);
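
omap_write_subpage_bch() above does not save any ECC computation: the GPMC engine produces ECC for every sector of the page regardless. The subpage optimization is purely in what ends up in the OOB, since sectors outside the requested range get 0xFF ECC bytes and their OOB area therefore stays erased and remains writable later. A worked example of the step masking, using a hypothetical geometry:

/*
 * Hypothetical geometry: 2 KiB page, ecc.size = 512, so ecc.steps = 4.
 * A subpage write of 512 bytes at column 1024 gives
 *
 *   start_step = 1024 / 512             = 2
 *   end_step   = (1024 + 512 - 1) / 512 = 2
 *
 * so only sector 2 keeps its hardware ECC; sectors 0, 1 and 3 are
 * padded with 0xFF and their OOB bytes remain in the erased state.
 */
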
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 85cff68643e0..90b9a9ccbe60 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -30,6 +30,8 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_data/mtd-nand-pxa3xx.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#define CHIP_DELAY_TIMEOUT msecs_to_jiffies(200)
#define NAND_STOP_DELAY msecs_to_jiffies(40)
@@ -45,6 +47,10 @@
*/
#define INIT_BUFFER_SIZE 2048
+/* System control register and bit to enable NAND on some SoCs */
+#define GENCONF_SOC_DEVICE_MUX 0x208
+#define GENCONF_SOC_DEVICE_MUX_NFC_EN BIT(0)
+
/* registers and bit definitions */
#define NDCR (0x00) /* Control register */
#define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
@@ -174,6 +180,7 @@ enum {
enum pxa3xx_nand_variant {
PXA3XX_NAND_VARIANT_PXA,
PXA3XX_NAND_VARIANT_ARMADA370,
+ PXA3XX_NAND_VARIANT_ARMADA_8K,
};
struct pxa3xx_nand_host {
@@ -425,6 +432,10 @@ static const struct of_device_id pxa3xx_nand_dt_ids[] = {
.compatible = "marvell,armada370-nand",
.data = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
},
+ {
+ .compatible = "marvell,armada-8k-nand",
+ .data = (void *)PXA3XX_NAND_VARIANT_ARMADA_8K,
+ },
{}
};
MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
@@ -825,7 +836,8 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
info->retcode = ERR_UNCORERR;
if (status & NDSR_CORERR) {
info->retcode = ERR_CORERR;
- if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
+ if ((info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
+ info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K) &&
info->ecc_bch)
info->ecc_err_cnt = NDSR_ERR_CNT(status);
else
@@ -888,7 +900,8 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
nand_writel(info, NDCB0, info->ndcb2);
/* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
- if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
+ if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
+ info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K)
nand_writel(info, NDCB0, info->ndcb3);
}
@@ -1671,7 +1684,8 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
chip->options |= NAND_BUSWIDTH_16;
/* Device detection must be done with ECC disabled */
- if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
+ if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
+ info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K)
nand_writel(info, NDECCCTRL, 0x0);
if (pdata->flash_bbt)
@@ -1709,7 +1723,8 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
* (aka splitted) command handling,
*/
if (mtd->writesize > PAGE_CHUNK_SIZE) {
- if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
+ if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
+ info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K) {
chip->cmdfunc = nand_cmdfunc_extended;
} else {
dev_err(&info->pdev->dev,
@@ -1928,6 +1943,24 @@ static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
if (!of_id)
return 0;
+ /*
+ * Some SoCs, such as the A7k/A8k, need the NAND controller to be
+ * enabled manually so the driver does not depend on the bootloader
+ * having done it. This is done through a single bit in the System
+ * Functions registers.
+ */
+ if (pxa3xx_nand_get_variant(pdev) == PXA3XX_NAND_VARIANT_ARMADA_8K) {
+ struct regmap *sysctrl_base = syscon_regmap_lookup_by_phandle(
+ pdev->dev.of_node, "marvell,system-controller");
+ u32 reg;
+
+ if (IS_ERR(sysctrl_base))
+ return PTR_ERR(sysctrl_base);
+
+ regmap_read(sysctrl_base, GENCONF_SOC_DEVICE_MUX, &reg);
+ reg |= GENCONF_SOC_DEVICE_MUX_NFC_EN;
+ regmap_write(sysctrl_base, GENCONF_SOC_DEVICE_MUX, reg);
+ }
+
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
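
The A7k/A8k enable sequence above is a plain read-modify-write through a syscon regmap. The same operation can also be expressed with regmap_update_bits(), which folds the read-modify-write into one call; a minimal sketch under the same assumptions (same phandle, register and bit as in the hunk, helper name illustrative):

#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>

static int demo_enable_nand_mux(struct device_node *np)
{
	struct regmap *sysctrl;

	sysctrl = syscon_regmap_lookup_by_phandle(np,
						  "marvell,system-controller");
	if (IS_ERR(sysctrl))
		return PTR_ERR(sysctrl);

	/* Set the NFC enable bit, leaving the other mux bits untouched */
	return regmap_update_bits(sysctrl, GENCONF_SOC_DEVICE_MUX,
				  GENCONF_SOC_DEVICE_MUX_NFC_EN,
				  GENCONF_SOC_DEVICE_MUX_NFC_EN);
}
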
diff --git a/drivers/mtd/nand/qcom_nandc.c b/drivers/mtd/nand/qcom_nandc.c
index 3baddfc997d1..2656c1ac5646 100644
--- a/drivers/mtd/nand/qcom_nandc.c
+++ b/drivers/mtd/nand/qcom_nandc.c
@@ -22,6 +22,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/delay.h>
+#include <linux/dma/qcom_bam_dma.h>
/* NANDc reg offsets */
#define NAND_FLASH_CMD 0x00
@@ -199,6 +200,15 @@ nandc_set_reg(nandc, NAND_READ_LOCATION_##reg, \
*/
#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))
+/* Returns the NAND register physical address */
+#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))
+
+/* Returns the dma address for reg read buffer */
+#define reg_buf_dma_addr(chip, vaddr) \
+ ((chip)->reg_read_dma + \
+ ((uint8_t *)(vaddr) - (uint8_t *)(chip)->reg_read_buf))
+
+#define QPIC_PER_CW_CMD_ELEMENTS 32
#define QPIC_PER_CW_CMD_SGL 32
#define QPIC_PER_CW_DATA_SGL 8
@@ -221,8 +231,13 @@ nandc_set_reg(nandc, NAND_READ_LOCATION_##reg, \
/*
* This data type corresponds to the BAM transaction which will be used for all
* NAND transfers.
+ * @bam_ce - the array of BAM command elements
* @cmd_sgl - sgl for NAND BAM command pipe
* @data_sgl - sgl for NAND BAM consumer/producer pipe
+ * @bam_ce_pos - the index in bam_ce which is available for the next sgl
+ * @bam_ce_start - the index in bam_ce marking the first command element of
+ * the current sgl; used for the size calculation of the
+ * current sgl
* @cmd_sgl_pos - current index in command sgl.
* @cmd_sgl_start - start index in command sgl.
* @tx_sgl_pos - current index in data sgl for tx.
@@ -231,8 +246,11 @@ nandc_set_reg(nandc, NAND_READ_LOCATION_##reg, \
* @rx_sgl_start - start index in data sgl for rx.
*/
struct bam_transaction {
+ struct bam_cmd_element *bam_ce;
struct scatterlist *cmd_sgl;
struct scatterlist *data_sgl;
+ u32 bam_ce_pos;
+ u32 bam_ce_start;
u32 cmd_sgl_pos;
u32 cmd_sgl_start;
u32 tx_sgl_pos;
@@ -307,7 +325,8 @@ struct nandc_regs {
* controller
* @dev: parent device
* @base: MMIO base
- * @base_dma: physical base address of controller registers
+ * @base_phys: physical base address of controller registers
+ * @base_dma: dma base address of controller registers
* @core_clk: controller clock
* @aon_clk: another controller clock
*
@@ -340,6 +359,7 @@ struct qcom_nand_controller {
struct device *dev;
void __iomem *base;
+ phys_addr_t base_phys;
dma_addr_t base_dma;
struct clk *core_clk;
@@ -462,7 +482,8 @@ alloc_bam_transaction(struct qcom_nand_controller *nandc)
bam_txn_size =
sizeof(*bam_txn) + num_cw *
- ((sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
+ ((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
+ (sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
(sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));
bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL);
@@ -472,6 +493,10 @@ alloc_bam_transaction(struct qcom_nand_controller *nandc)
bam_txn = bam_txn_buf;
bam_txn_buf += sizeof(*bam_txn);
+ bam_txn->bam_ce = bam_txn_buf;
+ bam_txn_buf +=
+ sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;
+
bam_txn->cmd_sgl = bam_txn_buf;
bam_txn_buf +=
sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;
@@ -489,6 +514,8 @@ static void clear_bam_transaction(struct qcom_nand_controller *nandc)
if (!nandc->props->is_bam)
return;
+ bam_txn->bam_ce_pos = 0;
+ bam_txn->bam_ce_start = 0;
bam_txn->cmd_sgl_pos = 0;
bam_txn->cmd_sgl_start = 0;
bam_txn->tx_sgl_pos = 0;
@@ -734,6 +761,66 @@ static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
}
/*
+ * Prepares the command descriptor for BAM DMA which will be used for NAND
+ * register reads and writes. The command descriptor requires the command
+ * to be expressed as command elements, so this function takes command
+ * elements from the bam transaction ce array and fills them with the
+ * required data. A single SGL can contain multiple command elements, so
+ * NAND_BAM_NEXT_SGL is used to start a separate SGL after the current
+ * command element.
+ */
+static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
+ int reg_off, const void *vaddr,
+ int size, unsigned int flags)
+{
+ int bam_ce_size;
+ int i, ret;
+ struct bam_cmd_element *bam_ce_buffer;
+ struct bam_transaction *bam_txn = nandc->bam_txn;
+
+ bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];
+
+ /* fill the command desc */
+ for (i = 0; i < size; i++) {
+ if (read)
+ bam_prep_ce(&bam_ce_buffer[i],
+ nandc_reg_phys(nandc, reg_off + 4 * i),
+ BAM_READ_COMMAND,
+ reg_buf_dma_addr(nandc,
+ (__le32 *)vaddr + i));
+ else
+ bam_prep_ce_le32(&bam_ce_buffer[i],
+ nandc_reg_phys(nandc, reg_off + 4 * i),
+ BAM_WRITE_COMMAND,
+ *((__le32 *)vaddr + i));
+ }
+
+ bam_txn->bam_ce_pos += size;
+
+ /* use the separate sgl after this command */
+ if (flags & NAND_BAM_NEXT_SGL) {
+ bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
+ bam_ce_size = (bam_txn->bam_ce_pos -
+ bam_txn->bam_ce_start) *
+ sizeof(struct bam_cmd_element);
+ sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
+ bam_ce_buffer, bam_ce_size);
+ bam_txn->cmd_sgl_pos++;
+ bam_txn->bam_ce_start = bam_txn->bam_ce_pos;
+
+ if (flags & NAND_BAM_NWD) {
+ ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
+ DMA_PREP_FENCE |
+ DMA_PREP_CMD);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/*
* Prepares the data descriptor for BAM DMA which will be used for NAND
* data reads and writes.
*/
@@ -851,19 +938,22 @@ static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
{
bool flow_control = false;
void *vaddr;
- int size;
- if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
- flow_control = true;
+ vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
+ nandc->reg_read_pos += num_regs;
if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
first = dev_cmd_reg_addr(nandc, first);
- size = num_regs * sizeof(u32);
- vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
- nandc->reg_read_pos += num_regs;
+ if (nandc->props->is_bam)
+ return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
+ num_regs, flags);
+
+ if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
+ flow_control = true;
- return prep_adm_dma_desc(nandc, true, first, vaddr, size, flow_control);
+ return prep_adm_dma_desc(nandc, true, first, vaddr,
+ num_regs * sizeof(u32), flow_control);
}
/*
@@ -880,13 +970,9 @@ static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
bool flow_control = false;
struct nandc_regs *regs = nandc->regs;
void *vaddr;
- int size;
vaddr = offset_to_nandc_reg(regs, first);
- if (first == NAND_FLASH_CMD)
- flow_control = true;
-
if (first == NAND_ERASED_CW_DETECT_CFG) {
if (flags & NAND_ERASED_CW_SET)
vaddr = &regs->erased_cw_detect_cfg_set;
@@ -903,10 +989,15 @@ static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
- size = num_regs * sizeof(u32);
+ if (nandc->props->is_bam)
+ return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
+ num_regs, flags);
+
+ if (first == NAND_FLASH_CMD)
+ flow_control = true;
- return prep_adm_dma_desc(nandc, false, first, vaddr, size,
- flow_control);
+ return prep_adm_dma_desc(nandc, false, first, vaddr,
+ num_regs * sizeof(u32), flow_control);
}
/*
@@ -1170,7 +1261,8 @@ static int submit_descs(struct qcom_nand_controller *nandc)
}
if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
- r = prepare_bam_async_desc(nandc, nandc->cmd_chan, 0);
+ r = prepare_bam_async_desc(nandc, nandc->cmd_chan,
+ DMA_PREP_CMD);
if (r)
return r;
}
@@ -2705,6 +2797,7 @@ static int qcom_nandc_probe(struct platform_device *pdev)
if (IS_ERR(nandc->base))
return PTR_ERR(nandc->base);
+ nandc->base_phys = res->start;
nandc->base_dma = phys_to_dma(dev, (phys_addr_t)res->start);
nandc->core_clk = devm_clk_get(dev, "core");
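
The extra bam_ce bookkeeping makes the size of the shared bam_txn allocation easy to reason about: each codeword reserves QPIC_PER_CW_CMD_ELEMENTS command elements on top of its command and data scatterlists, and the single devm_kzalloc() in alloc_bam_transaction() is carved up in that order. A worked example with a hypothetical 4-codeword page:

/*
 * command elements: 4 * QPIC_PER_CW_CMD_ELEMENTS = 4 * 32 = 128
 * command sgl:      4 * QPIC_PER_CW_CMD_SGL      = 4 * 32 = 128
 * data sgl:         4 * QPIC_PER_CW_DATA_SGL     = 4 *  8 =  32
 *
 * all allocated back to back in one buffer: header, then bam_ce,
 * then cmd_sgl, then data_sgl.
 */
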
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index e7f3c98487e6..3c5008a4f5f3 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -1094,14 +1094,11 @@ MODULE_DEVICE_TABLE(of, of_flctl_match);
static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)
{
- const struct of_device_id *match;
- struct flctl_soc_config *config;
+ const struct flctl_soc_config *config;
struct sh_flctl_platform_data *pdata;
- match = of_match_device(of_flctl_match, dev);
- if (match)
- config = (struct flctl_soc_config *)match->data;
- else {
+ config = of_device_get_match_data(dev);
+ if (!config) {
dev_err(dev, "%s: no OF configuration attached\n", __func__);
return NULL;
}
diff --git a/drivers/mtd/parsers/Kconfig b/drivers/mtd/parsers/Kconfig
index d206b3c533bc..ee5ab994132f 100644
--- a/drivers/mtd/parsers/Kconfig
+++ b/drivers/mtd/parsers/Kconfig
@@ -6,3 +6,11 @@ config MTD_PARSER_TRX
may contain up to 3/4 partitions (depending on the version).
This driver will parse TRX header and report at least two partitions:
kernel and rootfs.
+
+config MTD_SHARPSL_PARTS
+ tristate "Sharp SL Series NAND flash partition parser"
+ depends on MTD_NAND_SHARPSL || MTD_NAND_TMIO || COMPILE_TEST
+ help
+ This provides the read-only FTL logic necessary to read the partition
+ table from the NAND flash of the Sharp SL Series (Zaurus) handhelds,
+ and the MTD partition parser that uses it.
diff --git a/drivers/mtd/parsers/Makefile b/drivers/mtd/parsers/Makefile
index 4d9024e0be3b..5b1bcc3d90d9 100644
--- a/drivers/mtd/parsers/Makefile
+++ b/drivers/mtd/parsers/Makefile
@@ -1 +1,2 @@
obj-$(CONFIG_MTD_PARSER_TRX) += parser_trx.o
+obj-$(CONFIG_MTD_SHARPSL_PARTS) += sharpslpart.o
diff --git a/drivers/mtd/parsers/sharpslpart.c b/drivers/mtd/parsers/sharpslpart.c
new file mode 100644
index 000000000000..5fe0079ea5ed
--- /dev/null
+++ b/drivers/mtd/parsers/sharpslpart.c
@@ -0,0 +1,398 @@
+/*
+ * sharpslpart.c - MTD partition parser for NAND flash using the SHARP FTL
+ * for logical addressing, as used on the PXA models of the SHARP SL Series.
+ *
+ * Copyright (C) 2017 Andrea Adami <andrea.adami@gmail.com>
+ *
+ * Based on SHARP GPL 2.4 sources:
+ * http://support.ezaurus.com/developer/source/source_dl.asp
+ * drivers/mtd/nand/sharp_sl_logical.c
+ * linux/include/asm-arm/sharp_nand_logical.h
+ *
+ * Copyright (C) 2002 SHARP
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/sizes.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+
+/* oob structure */
+#define NAND_NOOB_LOGADDR_00 8
+#define NAND_NOOB_LOGADDR_01 9
+#define NAND_NOOB_LOGADDR_10 10
+#define NAND_NOOB_LOGADDR_11 11
+#define NAND_NOOB_LOGADDR_20 12
+#define NAND_NOOB_LOGADDR_21 13
+
+#define BLOCK_IS_RESERVED 0xffff
+#define BLOCK_UNMASK_COMPLEMENT 1
+
+/* factory defaults */
+#define SHARPSL_NAND_PARTS 3
+#define SHARPSL_FTL_PART_SIZE (7 * SZ_1M)
+#define SHARPSL_PARTINFO1_LADDR 0x00060000
+#define SHARPSL_PARTINFO2_LADDR 0x00064000
+
+#define BOOT_MAGIC 0x424f4f54
+#define FSRO_MAGIC 0x4653524f
+#define FSRW_MAGIC 0x46535257
+
+/**
+ * struct sharpsl_ftl - Sharp FTL Logical Table
+ * @logmax: number of logical blocks
+ * @log2phy: the logical-to-physical table
+ *
+ * Structure containing the logical-to-physical translation table
+ * used by the SHARP SL FTL.
+ */
+struct sharpsl_ftl {
+ unsigned int logmax;
+ unsigned int *log2phy;
+};
+
+/* verify that the OOB bytes 8 to 15 are free and available for the FTL */
+static int sharpsl_nand_check_ooblayout(struct mtd_info *mtd)
+{
+ u8 freebytes = 0;
+ int section = 0;
+
+ while (true) {
+ struct mtd_oob_region oobfree = { };
+ int ret, i;
+
+ ret = mtd_ooblayout_free(mtd, section++, &oobfree);
+ if (ret)
+ break;
+
+ if (!oobfree.length || oobfree.offset > 15 ||
+ (oobfree.offset + oobfree.length) < 8)
+ continue;
+
+ i = oobfree.offset >= 8 ? oobfree.offset : 8;
+ for (; i < oobfree.offset + oobfree.length && i < 16; i++)
+ freebytes |= BIT(i - 8);
+
+ if (freebytes == 0xff)
+ return 0;
+ }
+
+ return -ENOTSUPP;
+}
+
+static int sharpsl_nand_read_oob(struct mtd_info *mtd, loff_t offs, u8 *buf)
+{
+ struct mtd_oob_ops ops = { };
+ int ret;
+
+ ops.mode = MTD_OPS_PLACE_OOB;
+ ops.ooblen = mtd->oobsize;
+ ops.oobbuf = buf;
+
+ ret = mtd_read_oob(mtd, offs, &ops);
+ if (ret != 0 || mtd->oobsize != ops.oobretlen)
+ return -1;
+
+ return 0;
+}
+
+/*
+ * The logical block number assigned to a physical block is stored in the OOB
+ * of the first page, in 3 16-bit copies with the following layout:
+ *
+ * 01234567 89abcdef
+ * -------- --------
+ * ECC BB xyxyxy
+ *
+ * When reading we check that the first two copies agree.
+ * In case of error, matching is tried using the following pairs.
+ * Reserved values 0xffff mean the block is kept for wear leveling.
+ *
+ * 01234567 89abcdef
+ * -------- --------
+ * ECC BB xyxy oob[8]==oob[10] && oob[9]==oob[11] -> byte0=8 byte1=9
+ * ECC BB xyxy oob[10]==oob[12] && oob[11]==oob[13] -> byte0=10 byte1=11
+ * ECC BB xy xy oob[12]==oob[8] && oob[13]==oob[9] -> byte0=12 byte1=13
+ */
+static int sharpsl_nand_get_logical_num(u8 *oob)
+{
+ u16 us;
+ int good0, good1;
+
+ if (oob[NAND_NOOB_LOGADDR_00] == oob[NAND_NOOB_LOGADDR_10] &&
+ oob[NAND_NOOB_LOGADDR_01] == oob[NAND_NOOB_LOGADDR_11]) {
+ good0 = NAND_NOOB_LOGADDR_00;
+ good1 = NAND_NOOB_LOGADDR_01;
+ } else if (oob[NAND_NOOB_LOGADDR_10] == oob[NAND_NOOB_LOGADDR_20] &&
+ oob[NAND_NOOB_LOGADDR_11] == oob[NAND_NOOB_LOGADDR_21]) {
+ good0 = NAND_NOOB_LOGADDR_10;
+ good1 = NAND_NOOB_LOGADDR_11;
+ } else if (oob[NAND_NOOB_LOGADDR_20] == oob[NAND_NOOB_LOGADDR_00] &&
+ oob[NAND_NOOB_LOGADDR_21] == oob[NAND_NOOB_LOGADDR_01]) {
+ good0 = NAND_NOOB_LOGADDR_20;
+ good1 = NAND_NOOB_LOGADDR_21;
+ } else {
+ return -EINVAL;
+ }
+
+ us = oob[good0] | oob[good1] << 8;
+
+ /* parity check */
+ if (hweight16(us) & BLOCK_UNMASK_COMPLEMENT)
+ return -EINVAL;
+
+ /* reserved */
+ if (us == BLOCK_IS_RESERVED)
+ return BLOCK_IS_RESERVED;
+
+ return (us >> 1) & GENMASK(9, 0);
+}
+
+static int sharpsl_nand_init_ftl(struct mtd_info *mtd, struct sharpsl_ftl *ftl)
+{
+ unsigned int block_num, log_num, phymax;
+ loff_t block_adr;
+ u8 *oob;
+ int i, ret;
+
+ oob = kzalloc(mtd->oobsize, GFP_KERNEL);
+ if (!oob)
+ return -ENOMEM;
+
+ phymax = mtd_div_by_eb(SHARPSL_FTL_PART_SIZE, mtd);
+
+ /* FTL reserves 5% of the blocks + 1 spare */
+ ftl->logmax = ((phymax * 95) / 100) - 1;
+
+ ftl->log2phy = kmalloc_array(ftl->logmax, sizeof(*ftl->log2phy),
+ GFP_KERNEL);
+ if (!ftl->log2phy) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ /* initialize ftl->log2phy */
+ for (i = 0; i < ftl->logmax; i++)
+ ftl->log2phy[i] = UINT_MAX;
+
+ /* create physical-logical table */
+ for (block_num = 0; block_num < phymax; block_num++) {
+ block_adr = block_num * mtd->erasesize;
+
+ if (mtd_block_isbad(mtd, block_adr))
+ continue;
+
+ if (sharpsl_nand_read_oob(mtd, block_adr, oob))
+ continue;
+
+ /* get logical block */
+ log_num = sharpsl_nand_get_logical_num(oob);
+
+ /* skip error/reserved codes and out-of-range values */
+ if (log_num > 0 && log_num < ftl->logmax) {
+ if (ftl->log2phy[log_num] == UINT_MAX)
+ ftl->log2phy[log_num] = block_num;
+ }
+ }
+
+ pr_info("Sharp SL FTL: %d blocks used (%d logical, %d reserved)\n",
+ phymax, ftl->logmax, phymax - ftl->logmax);
+
+ ret = 0;
+exit:
+ kfree(oob);
+ return ret;
+}
+
+static void sharpsl_nand_cleanup_ftl(struct sharpsl_ftl *ftl)
+{
+ kfree(ftl->log2phy);
+}
+
+static int sharpsl_nand_read_laddr(struct mtd_info *mtd,
+ loff_t from,
+ size_t len,
+ void *buf,
+ struct sharpsl_ftl *ftl)
+{
+ unsigned int log_num, final_log_num;
+ unsigned int block_num;
+ loff_t block_adr;
+ loff_t block_ofs;
+ size_t retlen;
+ int err;
+
+ log_num = mtd_div_by_eb((u32)from, mtd);
+ final_log_num = mtd_div_by_eb(((u32)from + len - 1), mtd);
+
+ if (len <= 0 || log_num >= ftl->logmax || final_log_num > log_num)
+ return -EINVAL;
+
+ block_num = ftl->log2phy[log_num];
+ block_adr = block_num * mtd->erasesize;
+ block_ofs = mtd_mod_by_eb((u32)from, mtd);
+
+ err = mtd_read(mtd, block_adr + block_ofs, len, &retlen, buf);
+ /* Ignore corrected ECC errors */
+ if (mtd_is_bitflip(err))
+ err = 0;
+
+ if (!err && retlen != len)
+ err = -EIO;
+
+ if (err)
+ pr_err("sharpslpart: error, read failed at %#llx\n",
+ block_adr + block_ofs);
+
+ return err;
+}
+
+/*
+ * MTD Partition Parser
+ *
+ * Sample values read from SL-C860
+ *
+ * # cat /proc/mtd
+ * dev: size erasesize name
+ * mtd0: 006d0000 00020000 "Filesystem"
+ * mtd1: 00700000 00004000 "smf"
+ * mtd2: 03500000 00004000 "root"
+ * mtd3: 04400000 00004000 "home"
+ *
+ * PARTITIONINFO1
+ * 0x00060000: 00 00 00 00 00 00 70 00 42 4f 4f 54 00 00 00 00 ......p.BOOT....
+ * 0x00060010: 00 00 70 00 00 00 c0 03 46 53 52 4f 00 00 00 00 ..p.....FSRO....
+ * 0x00060020: 00 00 c0 03 00 00 00 04 46 53 52 57 00 00 00 00 ........FSRW....
+ */
+struct sharpsl_nand_partinfo {
+ __le32 start;
+ __le32 end;
+ __be32 magic;
+ u32 reserved;
+};
+
+static int sharpsl_nand_read_partinfo(struct mtd_info *master,
+ loff_t from,
+ size_t len,
+ struct sharpsl_nand_partinfo *buf,
+ struct sharpsl_ftl *ftl)
+{
+ int ret;
+
+ ret = sharpsl_nand_read_laddr(master, from, len, buf, ftl);
+ if (ret)
+ return ret;
+
+ /* check for magics */
+ if (be32_to_cpu(buf[0].magic) != BOOT_MAGIC ||
+ be32_to_cpu(buf[1].magic) != FSRO_MAGIC ||
+ be32_to_cpu(buf[2].magic) != FSRW_MAGIC) {
+ pr_err("sharpslpart: magic values mismatch\n");
+ return -EINVAL;
+ }
+
+ /* fixup for hardcoded value 64 MiB (for older models) */
+ buf[2].end = cpu_to_le32(master->size);
+
+ /* extra sanity check */
+ if (le32_to_cpu(buf[0].end) <= le32_to_cpu(buf[0].start) ||
+ le32_to_cpu(buf[1].start) < le32_to_cpu(buf[0].end) ||
+ le32_to_cpu(buf[1].end) <= le32_to_cpu(buf[1].start) ||
+ le32_to_cpu(buf[2].start) < le32_to_cpu(buf[1].end) ||
+ le32_to_cpu(buf[2].end) <= le32_to_cpu(buf[2].start)) {
+ pr_err("sharpslpart: partition sizes mismatch\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int sharpsl_parse_mtd_partitions(struct mtd_info *master,
+ const struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ struct sharpsl_ftl ftl;
+ struct sharpsl_nand_partinfo buf[SHARPSL_NAND_PARTS];
+ struct mtd_partition *sharpsl_nand_parts;
+ int err;
+
+ /* check that OOB bytes 8 to 15 used by the FTL are actually free */
+ err = sharpsl_nand_check_ooblayout(master);
+ if (err)
+ return err;
+
+ /* init logical mgmt (FTL) */
+ err = sharpsl_nand_init_ftl(master, &ftl);
+ if (err)
+ return err;
+
+ /* read and validate first partition table */
+ pr_info("sharpslpart: try reading first partition table\n");
+ err = sharpsl_nand_read_partinfo(master,
+ SHARPSL_PARTINFO1_LADDR,
+ sizeof(buf), buf, &ftl);
+ if (err) {
+ /* fallback: read second partition table */
+ pr_warn("sharpslpart: first partition table is invalid, retry using the second\n");
+ err = sharpsl_nand_read_partinfo(master,
+ SHARPSL_PARTINFO2_LADDR,
+ sizeof(buf), buf, &ftl);
+ }
+
+ /* cleanup logical mgmt (FTL) */
+ sharpsl_nand_cleanup_ftl(&ftl);
+
+ if (err) {
+ pr_err("sharpslpart: both partition tables are invalid\n");
+ return err;
+ }
+
+ sharpsl_nand_parts = kzalloc(sizeof(*sharpsl_nand_parts) *
+ SHARPSL_NAND_PARTS, GFP_KERNEL);
+ if (!sharpsl_nand_parts)
+ return -ENOMEM;
+
+ /* original names */
+ sharpsl_nand_parts[0].name = "smf";
+ sharpsl_nand_parts[0].offset = le32_to_cpu(buf[0].start);
+ sharpsl_nand_parts[0].size = le32_to_cpu(buf[0].end) -
+ le32_to_cpu(buf[0].start);
+
+ sharpsl_nand_parts[1].name = "root";
+ sharpsl_nand_parts[1].offset = le32_to_cpu(buf[1].start);
+ sharpsl_nand_parts[1].size = le32_to_cpu(buf[1].end) -
+ le32_to_cpu(buf[1].start);
+
+ sharpsl_nand_parts[2].name = "home";
+ sharpsl_nand_parts[2].offset = le32_to_cpu(buf[2].start);
+ sharpsl_nand_parts[2].size = le32_to_cpu(buf[2].end) -
+ le32_to_cpu(buf[2].start);
+
+ *pparts = sharpsl_nand_parts;
+ return SHARPSL_NAND_PARTS;
+}
+
+static struct mtd_part_parser sharpsl_mtd_parser = {
+ .parse_fn = sharpsl_parse_mtd_partitions,
+ .name = "sharpslpart",
+};
+module_mtd_part_parser(sharpsl_mtd_parser);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Andrea Adami <andrea.adami@gmail.com>");
+MODULE_DESCRIPTION("MTD partitioning for NAND flash on Sharp SL Series");
diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig
index 69c638dd0484..89da88e59121 100644
--- a/drivers/mtd/spi-nor/Kconfig
+++ b/drivers/mtd/spi-nor/Kconfig
@@ -50,7 +50,7 @@ config SPI_ATMEL_QUADSPI
config SPI_CADENCE_QUADSPI
tristate "Cadence Quad SPI controller"
- depends on OF && (ARM || COMPILE_TEST)
+ depends on OF && (ARM || ARM64 || COMPILE_TEST)
help
Enable support for the Cadence Quad SPI Flash controller.
@@ -90,7 +90,7 @@ config SPI_INTEL_SPI
tristate
config SPI_INTEL_SPI_PCI
- tristate "Intel PCH/PCU SPI flash PCI driver" if EXPERT
+ tristate "Intel PCH/PCU SPI flash PCI driver"
depends on X86 && PCI
select SPI_INTEL_SPI
help
@@ -106,7 +106,7 @@ config SPI_INTEL_SPI_PCI
will be called intel-spi-pci.
config SPI_INTEL_SPI_PLATFORM
- tristate "Intel PCH/PCU SPI flash platform driver" if EXPERT
+ tristate "Intel PCH/PCU SPI flash platform driver"
depends on X86
select SPI_INTEL_SPI
help
diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
index 53c7d8e0327a..75a2bc447a99 100644
--- a/drivers/mtd/spi-nor/cadence-quadspi.c
+++ b/drivers/mtd/spi-nor/cadence-quadspi.c
@@ -31,6 +31,7 @@
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/spi/spi.h>
#include <linux/timer.h>
@@ -38,6 +39,9 @@
#define CQSPI_NAME "cadence-qspi"
#define CQSPI_MAX_CHIPSELECT 16
+/* Quirks */
+#define CQSPI_NEEDS_WR_DELAY BIT(0)
+
struct cqspi_st;
struct cqspi_flash_pdata {
@@ -75,7 +79,9 @@ struct cqspi_st {
bool is_decoded_cs;
u32 fifo_depth;
u32 fifo_width;
+ bool rclk_en;
u32 trigger_address;
+ u32 wr_delay;
struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT];
};
@@ -608,6 +614,15 @@ static int cqspi_indirect_write_execute(struct spi_nor *nor,
reinit_completion(&cqspi->transfer_complete);
writel(CQSPI_REG_INDIRECTWR_START_MASK,
reg_base + CQSPI_REG_INDIRECTWR);
+ /*
+ * As per 66AK2G02 TRM SPRUHY8F section 11.15.5.3 Indirect Access
+ * Controller programming sequence, a couple of QSPI_REF_CLK cycles
+ * of delay are required for the above bit to be internally
+ * synchronized by the QSPI module. Provide 5 cycles of delay.
+ */
+ if (cqspi->wr_delay)
+ ndelay(cqspi->wr_delay);
while (remaining > 0) {
write_bytes = remaining > page_size ? page_size : remaining;
@@ -775,7 +790,7 @@ static void cqspi_config_baudrate_div(struct cqspi_st *cqspi)
}
static void cqspi_readdata_capture(struct cqspi_st *cqspi,
- const unsigned int bypass,
+ const bool bypass,
const unsigned int delay)
{
void __iomem *reg_base = cqspi->iobase;
@@ -839,7 +854,8 @@ static void cqspi_configure(struct spi_nor *nor)
cqspi->sclk = sclk;
cqspi_config_baudrate_div(cqspi);
cqspi_delay(nor);
- cqspi_readdata_capture(cqspi, 1, f_pdata->read_delay);
+ cqspi_readdata_capture(cqspi, !cqspi->rclk_en,
+ f_pdata->read_delay);
}
if (switch_cs || switch_ck)
@@ -1036,6 +1052,8 @@ static int cqspi_of_get_pdata(struct platform_device *pdev)
return -ENXIO;
}
+ cqspi->rclk_en = of_property_read_bool(np, "cdns,rclk-en");
+
return 0;
}
@@ -1156,6 +1174,7 @@ static int cqspi_probe(struct platform_device *pdev)
struct cqspi_st *cqspi;
struct resource *res;
struct resource *res_ahb;
+ unsigned long data;
int ret;
int irq;
@@ -1206,13 +1225,24 @@ static int cqspi_probe(struct platform_device *pdev)
return -ENXIO;
}
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(dev);
+ return ret;
+ }
+
ret = clk_prepare_enable(cqspi->clk);
if (ret) {
dev_err(dev, "Cannot enable QSPI clock.\n");
- return ret;
+ goto probe_clk_failed;
}
cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk);
+ data = (unsigned long)of_device_get_match_data(dev);
+ if (data & CQSPI_NEEDS_WR_DELAY)
+ cqspi->wr_delay = 5 * DIV_ROUND_UP(NSEC_PER_SEC,
+ cqspi->master_ref_clk_hz);
ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0,
pdev->name, cqspi);
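
The write delay computed above is kept in nanoseconds: DIV_ROUND_UP(NSEC_PER_SEC, master_ref_clk_hz) is the length of one QSPI_REF_CLK cycle rounded up, multiplied by 5 to provide the synchronization margin the TRM asks for, and later applied with ndelay(cqspi->wr_delay) right after the indirect-write trigger bit is set. A worked example with an assumed reference clock:

/*
 * Assumed 384 MHz QSPI reference clock:
 *
 *   one ref-clk cycle = DIV_ROUND_UP(1000000000, 384000000) = 3 ns
 *   wr_delay          = 5 * 3 ns = 15 ns
 */
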
@@ -1233,10 +1263,13 @@ static int cqspi_probe(struct platform_device *pdev)
}
return ret;
-probe_irq_failed:
- cqspi_controller_enable(cqspi, 0);
probe_setup_failed:
+ cqspi_controller_enable(cqspi, 0);
+probe_irq_failed:
clk_disable_unprepare(cqspi->clk);
+probe_clk_failed:
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
return ret;
}
@@ -1253,6 +1286,9 @@ static int cqspi_remove(struct platform_device *pdev)
clk_disable_unprepare(cqspi->clk);
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
return 0;
}
@@ -1284,7 +1320,14 @@ static const struct dev_pm_ops cqspi__dev_pm_ops = {
#endif
static const struct of_device_id cqspi_dt_ids[] = {
- {.compatible = "cdns,qspi-nor",},
+ {
+ .compatible = "cdns,qspi-nor",
+ .data = (void *)0,
+ },
+ {
+ .compatible = "ti,k2g-qspi",
+ .data = (void *)CQSPI_NEEDS_WR_DELAY,
+ },
{ /* end of table */ }
};
diff --git a/drivers/mtd/spi-nor/intel-spi-pci.c b/drivers/mtd/spi-nor/intel-spi-pci.c
index e82652335ede..c0976f2e3dd1 100644
--- a/drivers/mtd/spi-nor/intel-spi-pci.c
+++ b/drivers/mtd/spi-nor/intel-spi-pci.c
@@ -63,7 +63,10 @@ static void intel_spi_pci_remove(struct pci_dev *pdev)
}
static const struct pci_device_id intel_spi_pci_ids[] = {
+ { PCI_VDEVICE(INTEL, 0x18e0), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x19e0), (unsigned long)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0xa1a4), (unsigned long)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0xa224), (unsigned long)&bxt_info },
{ },
};
MODULE_DEVICE_TABLE(pci, intel_spi_pci_ids);
diff --git a/drivers/mtd/spi-nor/intel-spi.c b/drivers/mtd/spi-nor/intel-spi.c
index 8a596bfeddff..ef034d898a23 100644
--- a/drivers/mtd/spi-nor/intel-spi.c
+++ b/drivers/mtd/spi-nor/intel-spi.c
@@ -67,8 +67,6 @@
#define PR_LIMIT_MASK (0x3fff << PR_LIMIT_SHIFT)
#define PR_RPE BIT(15)
#define PR_BASE_MASK 0x3fff
-/* Last PR is GPR0 */
-#define PR_NUM (5 + 1)
/* Offsets are from @ispi->sregs */
#define SSFSTS_CTL 0x00
@@ -90,20 +88,35 @@
#define OPMENU0 0x08
#define OPMENU1 0x0c
+#define OPTYPE_READ_NO_ADDR 0
+#define OPTYPE_WRITE_NO_ADDR 1
+#define OPTYPE_READ_WITH_ADDR 2
+#define OPTYPE_WRITE_WITH_ADDR 3
+
/* CPU specifics */
#define BYT_PR 0x74
#define BYT_SSFSTS_CTL 0x90
#define BYT_BCR 0xfc
#define BYT_BCR_WPD BIT(0)
#define BYT_FREG_NUM 5
+#define BYT_PR_NUM 5
#define LPT_PR 0x74
#define LPT_SSFSTS_CTL 0x90
#define LPT_FREG_NUM 5
+#define LPT_PR_NUM 5
#define BXT_PR 0x84
#define BXT_SSFSTS_CTL 0xa0
#define BXT_FREG_NUM 12
+#define BXT_PR_NUM 6
+
+#define LVSCC 0xc4
+#define UVSCC 0xc8
+#define ERASE_OPCODE_SHIFT 8
+#define ERASE_OPCODE_MASK (0xff << ERASE_OPCODE_SHIFT)
+#define ERASE_64K_OPCODE_SHIFT 16
+#define ERASE_64K_OPCODE_MASK (0xff << ERASE_64K_OPCODE_SHIFT)
#define INTEL_SPI_TIMEOUT 5000 /* ms */
#define INTEL_SPI_FIFO_SZ 64
@@ -117,8 +130,11 @@
* @pregs: Start of protection registers
* @sregs: Start of software sequencer registers
* @nregions: Maximum number of regions
+ * @pr_num: Maximum number of protected range registers
* @writeable: Is the chip writeable
- * @swseq: Use SW sequencer in register reads/writes
+ * @locked: Is SPI setting locked
+ * @swseq_reg: Use SW sequencer in register reads/writes
+ * @swseq_erase: Use SW sequencer in erase operation
* @erase_64k: 64k erase supported
* @opcodes: Opcodes which are supported. These are programmed by BIOS
* before it locks down the controller.
@@ -132,8 +148,11 @@ struct intel_spi {
void __iomem *pregs;
void __iomem *sregs;
size_t nregions;
+ size_t pr_num;
bool writeable;
- bool swseq;
+ bool locked;
+ bool swseq_reg;
+ bool swseq_erase;
bool erase_64k;
u8 opcodes[8];
u8 preopcodes[2];
@@ -167,7 +186,7 @@ static void intel_spi_dump_regs(struct intel_spi *ispi)
for (i = 0; i < ispi->nregions; i++)
dev_dbg(ispi->dev, "FREG(%d)=0x%08x\n", i,
readl(ispi->base + FREG(i)));
- for (i = 0; i < PR_NUM; i++)
+ for (i = 0; i < ispi->pr_num; i++)
dev_dbg(ispi->dev, "PR(%d)=0x%08x\n", i,
readl(ispi->pregs + PR(i)));
@@ -181,8 +200,11 @@ static void intel_spi_dump_regs(struct intel_spi *ispi)
if (ispi->info->type == INTEL_SPI_BYT)
dev_dbg(ispi->dev, "BCR=0x%08x\n", readl(ispi->base + BYT_BCR));
+ dev_dbg(ispi->dev, "LVSCC=0x%08x\n", readl(ispi->base + LVSCC));
+ dev_dbg(ispi->dev, "UVSCC=0x%08x\n", readl(ispi->base + UVSCC));
+
dev_dbg(ispi->dev, "Protected regions:\n");
- for (i = 0; i < PR_NUM; i++) {
+ for (i = 0; i < ispi->pr_num; i++) {
u32 base, limit;
value = readl(ispi->pregs + PR(i));
@@ -214,7 +236,9 @@ static void intel_spi_dump_regs(struct intel_spi *ispi)
}
dev_dbg(ispi->dev, "Using %cW sequencer for register access\n",
- ispi->swseq ? 'S' : 'H');
+ ispi->swseq_reg ? 'S' : 'H');
+ dev_dbg(ispi->dev, "Using %cW sequencer for erase operation\n",
+ ispi->swseq_erase ? 'S' : 'H');
}
/* Reads max INTEL_SPI_FIFO_SZ bytes from the device fifo */
@@ -278,7 +302,7 @@ static int intel_spi_wait_sw_busy(struct intel_spi *ispi)
static int intel_spi_init(struct intel_spi *ispi)
{
- u32 opmenu0, opmenu1, val;
+ u32 opmenu0, opmenu1, lvscc, uvscc, val;
int i;
switch (ispi->info->type) {
@@ -286,6 +310,8 @@ static int intel_spi_init(struct intel_spi *ispi)
ispi->sregs = ispi->base + BYT_SSFSTS_CTL;
ispi->pregs = ispi->base + BYT_PR;
ispi->nregions = BYT_FREG_NUM;
+ ispi->pr_num = BYT_PR_NUM;
+ ispi->swseq_reg = true;
if (writeable) {
/* Disable write protection */
@@ -305,12 +331,15 @@ static int intel_spi_init(struct intel_spi *ispi)
ispi->sregs = ispi->base + LPT_SSFSTS_CTL;
ispi->pregs = ispi->base + LPT_PR;
ispi->nregions = LPT_FREG_NUM;
+ ispi->pr_num = LPT_PR_NUM;
+ ispi->swseq_reg = true;
break;
case INTEL_SPI_BXT:
ispi->sregs = ispi->base + BXT_SSFSTS_CTL;
ispi->pregs = ispi->base + BXT_PR;
ispi->nregions = BXT_FREG_NUM;
+ ispi->pr_num = BXT_PR_NUM;
ispi->erase_64k = true;
break;
@@ -318,42 +347,64 @@ static int intel_spi_init(struct intel_spi *ispi)
return -EINVAL;
}
- /* Disable #SMI generation */
+ /* Disable #SMI generation from HW sequencer */
val = readl(ispi->base + HSFSTS_CTL);
val &= ~HSFSTS_CTL_FSMIE;
writel(val, ispi->base + HSFSTS_CTL);
/*
- * BIOS programs allowed opcodes and then locks down the register.
- * So read back what opcodes it decided to support. That's the set
- * we are going to support as well.
+ * Determine whether erase operation should use HW or SW sequencer.
+ *
+ * The HW sequencer has a predefined list of opcodes, with only the
+ * erase opcode being programmable in LVSCC and UVSCC registers.
+ * If these registers don't contain a valid erase opcode, erase
+ * cannot be done using HW sequencer.
*/
- opmenu0 = readl(ispi->sregs + OPMENU0);
- opmenu1 = readl(ispi->sregs + OPMENU1);
+ lvscc = readl(ispi->base + LVSCC);
+ uvscc = readl(ispi->base + UVSCC);
+ if (!(lvscc & ERASE_OPCODE_MASK) || !(uvscc & ERASE_OPCODE_MASK))
+ ispi->swseq_erase = true;
+ /* SPI controller on Intel BXT supports 64K erase opcode */
+ if (ispi->info->type == INTEL_SPI_BXT && !ispi->swseq_erase)
+ if (!(lvscc & ERASE_64K_OPCODE_MASK) ||
+ !(uvscc & ERASE_64K_OPCODE_MASK))
+ ispi->erase_64k = false;
/*
* Some controllers can only do basic operations using hardware
* sequencer. All other operations are supposed to be carried out
- * using software sequencer. If we find that BIOS has programmed
- * opcodes for the software sequencer we use that over the hardware
- * sequencer.
+ * using software sequencer.
*/
- if (opmenu0 && opmenu1) {
- for (i = 0; i < ARRAY_SIZE(ispi->opcodes) / 2; i++) {
- ispi->opcodes[i] = opmenu0 >> i * 8;
- ispi->opcodes[i + 4] = opmenu1 >> i * 8;
- }
-
- val = readl(ispi->sregs + PREOP_OPTYPE);
- ispi->preopcodes[0] = val;
- ispi->preopcodes[1] = val >> 8;
-
+ if (ispi->swseq_reg) {
/* Disable #SMI generation from SW sequencer */
val = readl(ispi->sregs + SSFSTS_CTL);
val &= ~SSFSTS_CTL_FSMIE;
writel(val, ispi->sregs + SSFSTS_CTL);
+ }
+
+ /* Check controller's lock status */
+ val = readl(ispi->base + HSFSTS_CTL);
+ ispi->locked = !!(val & HSFSTS_CTL_FLOCKDN);
+
+ if (ispi->locked) {
+ /*
+ * BIOS programs allowed opcodes and then locks down the
+ * register. So read back what opcodes it decided to support.
+ * That's the set we are going to support as well.
+ */
+ opmenu0 = readl(ispi->sregs + OPMENU0);
+ opmenu1 = readl(ispi->sregs + OPMENU1);
- ispi->swseq = true;
+ if (opmenu0 && opmenu1) {
+ for (i = 0; i < ARRAY_SIZE(ispi->opcodes) / 2; i++) {
+ ispi->opcodes[i] = opmenu0 >> i * 8;
+ ispi->opcodes[i + 4] = opmenu1 >> i * 8;
+ }
+
+ val = readl(ispi->sregs + PREOP_OPTYPE);
+ ispi->preopcodes[0] = val;
+ ispi->preopcodes[1] = val >> 8;
+ }
}
intel_spi_dump_regs(ispi);
@@ -361,18 +412,28 @@ static int intel_spi_init(struct intel_spi *ispi)
return 0;
}
-static int intel_spi_opcode_index(struct intel_spi *ispi, u8 opcode)
+static int intel_spi_opcode_index(struct intel_spi *ispi, u8 opcode, int optype)
{
int i;
+ int preop;
- for (i = 0; i < ARRAY_SIZE(ispi->opcodes); i++)
- if (ispi->opcodes[i] == opcode)
- return i;
- return -EINVAL;
+ if (ispi->locked) {
+ for (i = 0; i < ARRAY_SIZE(ispi->opcodes); i++)
+ if (ispi->opcodes[i] == opcode)
+ return i;
+
+ return -EINVAL;
+ }
+
+ /* The lock is off, so just use index 0 */
+ writel(opcode, ispi->sregs + OPMENU0);
+ preop = readw(ispi->sregs + PREOP_OPTYPE);
+ writel(optype << 16 | preop, ispi->sregs + PREOP_OPTYPE);
+
+ return 0;
}
-static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, u8 *buf,
- int len)
+static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, int len)
{
u32 val, status;
int ret;
@@ -394,6 +455,9 @@ static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, u8 *buf,
return -EINVAL;
}
+ if (len > INTEL_SPI_FIFO_SZ)
+ return -EINVAL;
+
val |= (len - 1) << HSFSTS_CTL_FDBC_SHIFT;
val |= HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE;
val |= HSFSTS_CTL_FGO;
@@ -412,27 +476,39 @@ static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, u8 *buf,
return 0;
}
-static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, u8 *buf,
- int len)
+static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, int len,
+ int optype)
{
- u32 val, status;
+ u32 val = 0, status;
+ u16 preop;
int ret;
- ret = intel_spi_opcode_index(ispi, opcode);
+ ret = intel_spi_opcode_index(ispi, opcode, optype);
if (ret < 0)
return ret;
- val = (len << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS;
+ if (len > INTEL_SPI_FIFO_SZ)
+ return -EINVAL;
+
+ /* Only mark 'Data Cycle' bit when there is data to be transferred */
+ if (len > 0)
+ val = ((len - 1) << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS;
val |= ret << SSFSTS_CTL_COP_SHIFT;
val |= SSFSTS_CTL_FCERR | SSFSTS_CTL_FDONE;
val |= SSFSTS_CTL_SCGO;
+ preop = readw(ispi->sregs + PREOP_OPTYPE);
+ if (preop) {
+ val |= SSFSTS_CTL_ACS;
+ if (preop >> 8)
+ val |= SSFSTS_CTL_SPOP;
+ }
writel(val, ispi->sregs + SSFSTS_CTL);
ret = intel_spi_wait_sw_busy(ispi);
if (ret)
return ret;
- status = readl(ispi->base + SSFSTS_CTL);
+ status = readl(ispi->sregs + SSFSTS_CTL);
if (status & SSFSTS_CTL_FCERR)
return -EIO;
else if (status & SSFSTS_CTL_AEL)
@@ -449,10 +525,11 @@ static int intel_spi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
/* Address of the first chip */
writel(0, ispi->base + FADDR);
- if (ispi->swseq)
- ret = intel_spi_sw_cycle(ispi, opcode, buf, len);
+ if (ispi->swseq_reg)
+ ret = intel_spi_sw_cycle(ispi, opcode, len,
+ OPTYPE_READ_NO_ADDR);
else
- ret = intel_spi_hw_cycle(ispi, opcode, buf, len);
+ ret = intel_spi_hw_cycle(ispi, opcode, len);
if (ret)
return ret;
@@ -467,10 +544,15 @@ static int intel_spi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
/*
* This is handled with atomic operation and preop code in Intel
- * controller so skip it here now.
+ * controller so skip it here now. If the controller is not locked,
+ * program the opcode to the PREOP register for later use.
*/
- if (opcode == SPINOR_OP_WREN)
+ if (opcode == SPINOR_OP_WREN) {
+ if (!ispi->locked)
+ writel(opcode, ispi->sregs + PREOP_OPTYPE);
+
return 0;
+ }
writel(0, ispi->base + FADDR);
@@ -479,9 +561,10 @@ static int intel_spi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
if (ret)
return ret;
- if (ispi->swseq)
- return intel_spi_sw_cycle(ispi, opcode, buf, len);
- return intel_spi_hw_cycle(ispi, opcode, buf, len);
+ if (ispi->swseq_reg)
+ return intel_spi_sw_cycle(ispi, opcode, len,
+ OPTYPE_WRITE_NO_ADDR);
+ return intel_spi_hw_cycle(ispi, opcode, len);
}
static ssize_t intel_spi_read(struct spi_nor *nor, loff_t from, size_t len,
@@ -561,12 +644,6 @@ static ssize_t intel_spi_write(struct spi_nor *nor, loff_t to, size_t len,
val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT;
val |= HSFSTS_CTL_FCYCLE_WRITE;
- /* Write enable */
- if (ispi->preopcodes[1] == SPINOR_OP_WREN)
- val |= SSFSTS_CTL_SPOP;
- val |= SSFSTS_CTL_ACS;
- writel(val, ispi->base + HSFSTS_CTL);
-
ret = intel_spi_write_block(ispi, write_buf, block_size);
if (ret) {
dev_err(ispi->dev, "failed to write block\n");
@@ -574,8 +651,8 @@ static ssize_t intel_spi_write(struct spi_nor *nor, loff_t to, size_t len,
}
/* Start the write now */
- val = readl(ispi->base + HSFSTS_CTL);
- writel(val | HSFSTS_CTL_FGO, ispi->base + HSFSTS_CTL);
+ val |= HSFSTS_CTL_FGO;
+ writel(val, ispi->base + HSFSTS_CTL);
ret = intel_spi_wait_hw_busy(ispi);
if (ret) {
@@ -620,6 +697,22 @@ static int intel_spi_erase(struct spi_nor *nor, loff_t offs)
erase_size = SZ_4K;
}
+ if (ispi->swseq_erase) {
+ while (len > 0) {
+ writel(offs, ispi->base + FADDR);
+
+ ret = intel_spi_sw_cycle(ispi, nor->erase_opcode,
+ 0, OPTYPE_WRITE_WITH_ADDR);
+ if (ret)
+ return ret;
+
+ offs += erase_size;
+ len -= erase_size;
+ }
+
+ return 0;
+ }
+
while (len > 0) {
writel(offs, ispi->base + FADDR);
@@ -652,7 +745,7 @@ static bool intel_spi_is_protected(const struct intel_spi *ispi,
{
int i;
- for (i = 0; i < PR_NUM; i++) {
+ for (i = 0; i < ispi->pr_num; i++) {
u32 pr_base, pr_limit, pr_value;
pr_value = readl(ispi->pregs + PR(i));
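To keep the new erase-path logic above easy to follow, here is a hedged restatement as a standalone predicate; intel_spi_needs_sw_erase() is a hypothetical name, not a function in the driver:

/* Hypothetical helper: mirrors the check added to intel_spi_init() above.
 * The HW sequencer can only erase when BIOS left a valid erase opcode in
 * both VSCC registers; otherwise the SW sequencer has to be used.
 */
static bool intel_spi_needs_sw_erase(u32 lvscc, u32 uvscc)
{
	return !(lvscc & ERASE_OPCODE_MASK) || !(uvscc & ERASE_OPCODE_MASK);
}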
diff --git a/drivers/mtd/spi-nor/mtk-quadspi.c b/drivers/mtd/spi-nor/mtk-quadspi.c
index c258c7adf1c5..abe455ccd68b 100644
--- a/drivers/mtd/spi-nor/mtk-quadspi.c
+++ b/drivers/mtd/spi-nor/mtk-quadspi.c
@@ -404,6 +404,29 @@ static int mt8173_nor_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
return ret;
}
+static void mt8173_nor_disable_clk(struct mt8173_nor *mt8173_nor)
+{
+ clk_disable_unprepare(mt8173_nor->spi_clk);
+ clk_disable_unprepare(mt8173_nor->nor_clk);
+}
+
+static int mt8173_nor_enable_clk(struct mt8173_nor *mt8173_nor)
+{
+ int ret;
+
+ ret = clk_prepare_enable(mt8173_nor->spi_clk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(mt8173_nor->nor_clk);
+ if (ret) {
+ clk_disable_unprepare(mt8173_nor->spi_clk);
+ return ret;
+ }
+
+ return 0;
+}
+
static int mtk_nor_init(struct mt8173_nor *mt8173_nor,
struct device_node *flash_node)
{
@@ -468,15 +491,11 @@ static int mtk_nor_drv_probe(struct platform_device *pdev)
return PTR_ERR(mt8173_nor->nor_clk);
mt8173_nor->dev = &pdev->dev;
- ret = clk_prepare_enable(mt8173_nor->spi_clk);
+
+ ret = mt8173_nor_enable_clk(mt8173_nor);
if (ret)
return ret;
- ret = clk_prepare_enable(mt8173_nor->nor_clk);
- if (ret) {
- clk_disable_unprepare(mt8173_nor->spi_clk);
- return ret;
- }
/* only support one attached flash */
flash_np = of_get_next_available_child(pdev->dev.of_node, NULL);
if (!flash_np) {
@@ -487,10 +506,9 @@ static int mtk_nor_drv_probe(struct platform_device *pdev)
ret = mtk_nor_init(mt8173_nor, flash_np);
nor_free:
- if (ret) {
- clk_disable_unprepare(mt8173_nor->spi_clk);
- clk_disable_unprepare(mt8173_nor->nor_clk);
- }
+ if (ret)
+ mt8173_nor_disable_clk(mt8173_nor);
+
return ret;
}
@@ -498,11 +516,38 @@ static int mtk_nor_drv_remove(struct platform_device *pdev)
{
struct mt8173_nor *mt8173_nor = platform_get_drvdata(pdev);
- clk_disable_unprepare(mt8173_nor->spi_clk);
- clk_disable_unprepare(mt8173_nor->nor_clk);
+ mt8173_nor_disable_clk(mt8173_nor);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mtk_nor_suspend(struct device *dev)
+{
+ struct mt8173_nor *mt8173_nor = dev_get_drvdata(dev);
+
+ mt8173_nor_disable_clk(mt8173_nor);
+
return 0;
}
+static int mtk_nor_resume(struct device *dev)
+{
+ struct mt8173_nor *mt8173_nor = dev_get_drvdata(dev);
+
+ return mt8173_nor_enable_clk(mt8173_nor);
+}
+
+static const struct dev_pm_ops mtk_nor_dev_pm_ops = {
+ .suspend = mtk_nor_suspend,
+ .resume = mtk_nor_resume,
+};
+
+#define MTK_NOR_DEV_PM_OPS (&mtk_nor_dev_pm_ops)
+#else
+#define MTK_NOR_DEV_PM_OPS NULL
+#endif
+
static const struct of_device_id mtk_nor_of_ids[] = {
{ .compatible = "mediatek,mt8173-nor"},
{ /* sentinel */ }
@@ -514,6 +559,7 @@ static struct platform_driver mtk_nor_driver = {
.remove = mtk_nor_drv_remove,
.driver = {
.name = "mtk-nor",
+ .pm = MTK_NOR_DEV_PM_OPS,
.of_match_table = mtk_nor_of_ids,
},
};
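The #ifdef CONFIG_PM_SLEEP / MTK_NOR_DEV_PM_OPS arrangement above is the long-hand form; as a hedged aside, the same wiring is often written with the SIMPLE_DEV_PM_OPS() helper, which compiles the table away when sleep support is off (the callbacks then want __maybe_unused):

static int __maybe_unused mtk_nor_suspend(struct device *dev)
{
	mt8173_nor_disable_clk(dev_get_drvdata(dev));
	return 0;
}

static int __maybe_unused mtk_nor_resume(struct device *dev)
{
	return mt8173_nor_enable_clk(dev_get_drvdata(dev));
}

static SIMPLE_DEV_PM_OPS(mtk_nor_dev_pm_ops, mtk_nor_suspend, mtk_nor_resume);

/* .driver.pm would then simply point at &mtk_nor_dev_pm_ops */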
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index 19c000722cbc..bc266f70a15b 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -89,6 +89,8 @@ struct flash_info {
#define NO_CHIP_ERASE BIT(12) /* Chip does not support chip erase */
#define SPI_NOR_SKIP_SFDP BIT(13) /* Skip parsing of SFDP tables */
#define USE_CLSR BIT(14) /* use CLSR command */
+
+ int (*quad_enable)(struct spi_nor *nor);
};
#define JEDEC_MFR(info) ((info)->id[0])
@@ -870,6 +872,8 @@ static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
return ret;
}
+static int macronix_quad_enable(struct spi_nor *nor);
+
/* Used when the "_ext_id" is two bytes at most */
#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
.id = { \
@@ -964,6 +968,7 @@ static const struct flash_info spi_nor_ids[] = {
{ "f25l64qa", INFO(0x8c4117, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_HAS_LOCK) },
/* Everspin */
+ { "mr25h128", CAT25_INFO( 16 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
{ "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
{ "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
{ "mr25h40", CAT25_INFO(512 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
@@ -983,6 +988,11 @@ static const struct flash_info spi_nor_ids[] = {
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
},
{
+ "gd25lq32", INFO(0xc86016, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
+ {
"gd25q64", INFO(0xc84017, 0, 64 * 1024, 128,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
@@ -997,6 +1007,12 @@ static const struct flash_info spi_nor_ids[] = {
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
},
+ {
+ "gd25q256", INFO(0xc84019, 0, 64 * 1024, 512,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_4B_OPCODES | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ .quad_enable = macronix_quad_enable,
+ },
/* Intel/Numonyx -- xxxs33b */
{ "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) },
@@ -1024,7 +1040,7 @@ static const struct flash_info spi_nor_ids[] = {
{ "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_4B_OPCODES) },
{ "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
- { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
{ "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
{ "mx66l1g45g", INFO(0xc2201b, 0, 64 * 1024, 2048, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) },
@@ -1137,6 +1153,11 @@ static const struct flash_info spi_nor_ids[] = {
{ "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) },
{ "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) },
{ "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
+ {
+ "w25q16dw", INFO(0xef6015, 0, 64 * 1024, 32,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
{ "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
{ "w25q20cl", INFO(0xef4012, 0, 64 * 1024, 4, SECT_4K) },
{ "w25q20bw", INFO(0xef5012, 0, 64 * 1024, 4, SECT_4K) },
@@ -2288,8 +2309,7 @@ static int spi_nor_parse_sfdp(struct spi_nor *nor,
/* Check the SFDP header version. */
if (le32_to_cpu(header.signature) != SFDP_SIGNATURE ||
- header.major != SFDP_JESD216_MAJOR ||
- header.minor < SFDP_JESD216_MINOR)
+ header.major != SFDP_JESD216_MAJOR)
return -EINVAL;
/*
@@ -2427,6 +2447,15 @@ static int spi_nor_init_params(struct spi_nor *nor,
params->quad_enable = spansion_quad_enable;
break;
}
+
+ /*
+ * Some manufacturers like GigaDevice may use different
+ * bits to set the QE on different memories, so the MFR
+ * can't indicate the quad_enable method for this case;
+ * we need to set it in the flash info list.
+ */
+ if (info->quad_enable)
+ params->quad_enable = info->quad_enable;
}
/* Override the parameters with data read from SFDP tables. */
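The gd25q256 entry above points .quad_enable at macronix_quad_enable(), whose body lies outside this hunk. As a rough sketch (helper names as used elsewhere in spi-nor.c; treat the exact sequence as an assumption), a Macronix-style Quad Enable sets the QE bit in the status register and verifies it stuck:

static int macronix_quad_enable_sketch(struct spi_nor *nor)
{
	int val = read_sr(nor);			/* current status register */

	if (val < 0)
		return val;
	if (val & SR_QUAD_EN_MX)		/* QE already set, nothing to do */
		return 0;

	write_enable(nor);
	write_sr(nor, val | SR_QUAD_EN_MX);

	if (spi_nor_wait_till_ready(nor))
		return -EIO;

	val = read_sr(nor);			/* read back and verify */
	if (val < 0 || !(val & SR_QUAD_EN_MX)) {
		dev_err(nor->dev, "Macronix Quad bit not set\n");
		return -EINVAL;
	}
	return 0;
}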
@@ -2630,17 +2659,60 @@ static int spi_nor_setup(struct spi_nor *nor, const struct flash_info *info,
/* Enable Quad I/O if needed. */
enable_quad_io = (spi_nor_get_protocol_width(nor->read_proto) == 4 ||
spi_nor_get_protocol_width(nor->write_proto) == 4);
- if (enable_quad_io && params->quad_enable) {
- err = params->quad_enable(nor);
+ if (enable_quad_io && params->quad_enable)
+ nor->quad_enable = params->quad_enable;
+ else
+ nor->quad_enable = NULL;
+
+ return 0;
+}
+
+static int spi_nor_init(struct spi_nor *nor)
+{
+ int err;
+
+ /*
+ * Atmel, SST, Intel/Numonyx, and other serial NOR flashes tend to power
+ * up with the software protection bits set.
+ */
+ if (JEDEC_MFR(nor->info) == SNOR_MFR_ATMEL ||
+ JEDEC_MFR(nor->info) == SNOR_MFR_INTEL ||
+ JEDEC_MFR(nor->info) == SNOR_MFR_SST ||
+ nor->info->flags & SPI_NOR_HAS_LOCK) {
+ write_enable(nor);
+ write_sr(nor, 0);
+ spi_nor_wait_till_ready(nor);
+ }
+
+ if (nor->quad_enable) {
+ err = nor->quad_enable(nor);
if (err) {
dev_err(nor->dev, "quad mode not supported\n");
return err;
}
}
+ if ((nor->addr_width == 4) &&
+ (JEDEC_MFR(nor->info) != SNOR_MFR_SPANSION) &&
+ !(nor->info->flags & SPI_NOR_4B_OPCODES))
+ set_4byte(nor, nor->info, 1);
+
return 0;
}
+/* mtd resume handler */
+static void spi_nor_resume(struct mtd_info *mtd)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ struct device *dev = nor->dev;
+ int ret;
+
+ /* re-initialize the nor chip */
+ ret = spi_nor_init(nor);
+ if (ret)
+ dev_err(dev, "resume() failed\n");
+}
+
int spi_nor_scan(struct spi_nor *nor, const char *name,
const struct spi_nor_hwcaps *hwcaps)
{
@@ -2708,20 +2780,6 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
if (ret)
return ret;
- /*
- * Atmel, SST, Intel/Numonyx, and others serial NOR tend to power up
- * with the software protection bits set
- */
-
- if (JEDEC_MFR(info) == SNOR_MFR_ATMEL ||
- JEDEC_MFR(info) == SNOR_MFR_INTEL ||
- JEDEC_MFR(info) == SNOR_MFR_SST ||
- info->flags & SPI_NOR_HAS_LOCK) {
- write_enable(nor);
- write_sr(nor, 0);
- spi_nor_wait_till_ready(nor);
- }
-
if (!mtd->name)
mtd->name = dev_name(dev);
mtd->priv = nor;
@@ -2731,6 +2789,7 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
mtd->size = params.size;
mtd->_erase = spi_nor_erase;
mtd->_read = spi_nor_read;
+ mtd->_resume = spi_nor_resume;
/* NOR protection support for STmicro/Micron chips and similar */
if (JEDEC_MFR(info) == SNOR_MFR_MICRON ||
@@ -2804,8 +2863,6 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
if (JEDEC_MFR(info) == SNOR_MFR_SPANSION ||
info->flags & SPI_NOR_4B_OPCODES)
spi_nor_set_4byte_opcodes(nor, info);
- else
- set_4byte(nor, info, 1);
} else {
nor->addr_width = 3;
}
@@ -2822,6 +2879,12 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
return ret;
}
+ /* Send all the required SPI flash commands to initialize device */
+ nor->info = info;
+ ret = spi_nor_init(nor);
+ if (ret)
+ return ret;
+
dev_info(dev, "%s (%lld Kbytes)\n", info->name,
(long long)mtd->size >> 10);
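The spi_nor_scan()/spi_nor_init() split above only pays off because the MTD core invokes the new ->_resume hook; roughly, quoted from memory of include/linux/mtd/mtd.h in this era rather than from the patch itself, the dispatch is just:

/* Not part of this patch: the MTD core resume dispatch that ends up
 * re-running quad-enable and 4-byte addressing via spi_nor_init().
 */
static inline void mtd_resume(struct mtd_info *mtd)
{
	if (mtd->_resume)
		mtd->_resume(mtd);
}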
diff --git a/drivers/mtd/spi-nor/stm32-quadspi.c b/drivers/mtd/spi-nor/stm32-quadspi.c
index 86c0931543c5..b3c7f6addba7 100644
--- a/drivers/mtd/spi-nor/stm32-quadspi.c
+++ b/drivers/mtd/spi-nor/stm32-quadspi.c
@@ -1,9 +1,22 @@
/*
- * stm32_quadspi.c
+ * Driver for stm32 quadspi controller
*
- * Copyright (C) 2017, Ludovic Barre
+ * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+ * Author(s): Ludovic Barre <ludovic.barre@st.com>.
*
- * License terms: GNU General Public License (GPL), version 2
+ * License terms: GPL V2.0.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/clk.h>
#include <linux/errno.h>
@@ -113,6 +126,7 @@
#define STM32_MAX_MMAP_SZ SZ_256M
#define STM32_MAX_NORCHIP 2
+#define STM32_QSPI_FIFO_SZ 32
#define STM32_QSPI_FIFO_TIMEOUT_US 30000
#define STM32_QSPI_BUSY_TIMEOUT_US 100000
@@ -124,6 +138,7 @@ struct stm32_qspi_flash {
u32 presc;
u32 read_mode;
bool registered;
+ u32 prefetch_limit;
};
struct stm32_qspi {
@@ -240,12 +255,12 @@ static int stm32_qspi_tx_poll(struct stm32_qspi *qspi,
STM32_QSPI_FIFO_TIMEOUT_US);
if (ret) {
dev_err(qspi->dev, "fifo timeout (stat:%#x)\n", sr);
- break;
+ return ret;
}
tx_fifo(buf++, qspi->io_base + QUADSPI_DR);
}
- return ret;
+ return 0;
}
static int stm32_qspi_tx_mm(struct stm32_qspi *qspi,
@@ -272,6 +287,7 @@ static int stm32_qspi_send(struct stm32_qspi_flash *flash,
{
struct stm32_qspi *qspi = flash->qspi;
u32 ccr, dcr, cr;
+ u32 last_byte;
int err;
err = stm32_qspi_wait_nobusy(qspi);
@@ -314,6 +330,10 @@ static int stm32_qspi_send(struct stm32_qspi_flash *flash,
if (err)
goto abort;
writel_relaxed(FCR_CTCF, qspi->io_base + QUADSPI_FCR);
+ } else {
+ last_byte = cmd->addr + cmd->len;
+ if (last_byte > flash->prefetch_limit)
+ goto abort;
}
return err;
@@ -322,7 +342,9 @@ abort:
cr = readl_relaxed(qspi->io_base + QUADSPI_CR) | CR_ABORT;
writel_relaxed(cr, qspi->io_base + QUADSPI_CR);
- dev_err(qspi->dev, "%s abort err:%d\n", __func__, err);
+ if (err)
+ dev_err(qspi->dev, "%s abort err:%d\n", __func__, err);
+
return err;
}
@@ -550,6 +572,7 @@ static int stm32_qspi_flash_setup(struct stm32_qspi *qspi,
}
flash->fsize = FSIZE_VAL(mtd->size);
+ flash->prefetch_limit = mtd->size - STM32_QSPI_FIFO_SZ;
flash->read_mode = CCR_FMODE_MM;
if (mtd->size > qspi->mm_size)
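The prefetch_limit guard above appears intended to stop memory-mapped reads whose tail would let the controller prefetch past the end of the flash; read as a standalone check, with names and the exact rationale being assumptions rather than text from the patch:

/* Sketch of the bound enforced above: flag a memory-mapped access whose
 * last byte lands within one FIFO (STM32_QSPI_FIFO_SZ) of the top of the
 * flash, since the controller may prefetch beyond the requested range.
 */
static bool stm32_qspi_mm_out_of_bounds(u32 addr, size_t len, u64 flash_size)
{
	u64 prefetch_limit = flash_size - STM32_QSPI_FIFO_SZ;

	return (u64)addr + len > prefetch_limit;
}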
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 93faa1fed6f2..ea01f24f15e7 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -95,7 +95,7 @@ static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)
reg = reg_readl(priv, REG_SPHY_CNTRL);
if (enable) {
reg |= PHY_RESET;
- reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | CK25_DIS);
+ reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | IDDQ_GLOBAL_PWR | CK25_DIS);
reg_writel(priv, reg, REG_SPHY_CNTRL);
udelay(21);
reg = reg_readl(priv, REG_SPHY_CNTRL);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index dbd69310f263..538b42d5c187 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -1231,7 +1231,7 @@ static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector,
break;
/* prevent any other reads prior to eop_desc */
- read_barrier_depends();
+ smp_rmb();
/* if DD is not set pending work has not been completed */
if (!(eop_desc->flags & FM10K_TXD_FLAG_DONE))
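This is the first of several identical read_barrier_depends() to smp_rmb() conversions in the Intel Tx clean loops below; the ordering they all enforce looks like this (a sketch with illustrative names, not code from any one driver):

/* Tx clean loop sketch: the completion (DD/DONE) flag must be observed
 * before any other field of the descriptor or its buffer is read, so a
 * read barrier sits between the check and the subsequent loads.
 */
if (!(eop_desc->flags & DONE_BIT))
	break;				/* descriptor not completed yet */

smp_rmb();				/* order the flag read before the reads below */

total_bytes += tx_buf->bytecount;	/* now safe to read the rest */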
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 5829715fa342..e019baa905c5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -90,7 +90,6 @@
#define I40E_AQ_LEN 256
#define I40E_AQ_WORK_LIMIT 66 /* max number of VFs + a little */
#define I40E_MAX_USER_PRIORITY 8
-#define I40E_MAX_QUEUES_PER_CH 64
#define I40E_DEFAULT_TRAFFIC_CLASS BIT(0)
#define I40E_DEFAULT_MSG_ENABLE 4
#define I40E_QUEUE_WAIT_RETRY_LIMIT 10
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 9dcb2a961197..9af74253c3f7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -613,6 +613,12 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
}
+ /* Newer versions of firmware require a lock when reading the NVM */
+ if (hw->aq.api_maj_ver > 1 ||
+ (hw->aq.api_maj_ver == 1 &&
+ hw->aq.api_min_ver >= 5))
+ hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
+
/* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */
if (hw->aq.api_maj_ver > 1 ||
(hw->aq.api_maj_ver == 1 &&
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 0203665cb53c..095965f268bd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -948,7 +948,8 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw)
hw->pf_id = (u8)(func_rid & 0x7);
if (hw->mac.type == I40E_MAC_X722)
- hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE;
+ hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE |
+ I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
status = i40e_init_nvm(hw);
return status;
@@ -1268,6 +1269,7 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
* we don't need to do the PF Reset
*/
if (!cnt) {
+ u32 reg2 = 0;
if (hw->revision_id == 0)
cnt = I40E_PF_RESET_WAIT_COUNT_A0;
else
@@ -1279,6 +1281,12 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
reg = rd32(hw, I40E_PFGEN_CTRL);
if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
break;
+ reg2 = rd32(hw, I40E_GLGEN_RSTAT);
+ if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
+ hw_dbg(hw, "Core reset upcoming. Skipping PF reset request.\n");
+ hw_dbg(hw, "I40E_GLGEN_RSTAT = 0x%x\n", reg2);
+ return I40E_ERR_NOT_READY;
+ }
usleep_range(1000, 2000);
}
if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 4a964d6e4a9e..4c08cc86463e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -2167,6 +2167,73 @@ i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
}
/**
+ * i40e_set_promiscuous - set promiscuous mode
+ * @pf: board private structure
+ * @promisc: promisc on or off
+ *
+ * There are different ways of setting promiscuous mode on a PF depending on
+ * what state/environment we're in. This identifies and sets it appropriately.
+ * Returns 0 on success.
+ **/
+static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
+{
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_hw *hw = &pf->hw;
+ i40e_status aq_ret;
+
+ if (vsi->type == I40E_VSI_MAIN &&
+ pf->lan_veb != I40E_NO_VEB &&
+ !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
+ /* Set defport ON for the Main VSI instead of true promisc;
+ * this way we get all unicast/multicast and VLAN promisc
+ * behavior but do not get VF or VMDq traffic replicated
+ * on the Main VSI.
+ */
+ if (promisc)
+ aq_ret = i40e_aq_set_default_vsi(hw,
+ vsi->seid,
+ NULL);
+ else
+ aq_ret = i40e_aq_clear_default_vsi(hw,
+ vsi->seid,
+ NULL);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "Set default VSI failed, err %s, aq_err %s\n",
+ i40e_stat_str(hw, aq_ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ }
+ } else {
+ aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
+ hw,
+ vsi->seid,
+ promisc, NULL,
+ true);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "set unicast promisc failed, err %s, aq_err %s\n",
+ i40e_stat_str(hw, aq_ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ }
+ aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
+ hw,
+ vsi->seid,
+ promisc, NULL);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "set multicast promisc failed, err %s, aq_err %s\n",
+ i40e_stat_str(hw, aq_ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ }
+ }
+
+ if (!aq_ret)
+ pf->cur_promisc = promisc;
+
+ return aq_ret;
+}
+
+/**
* i40e_sync_vsi_filters - Update the VSI filter list to the HW
* @vsi: ptr to the VSI
*
@@ -2467,81 +2534,16 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
test_bit(__I40E_VSI_OVERFLOW_PROMISC,
vsi->state));
- if ((vsi->type == I40E_VSI_MAIN) &&
- (pf->lan_veb != I40E_NO_VEB) &&
- !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
- /* set defport ON for Main VSI instead of true promisc
- * this way we will get all unicast/multicast and VLAN
- * promisc behavior but will not get VF or VMDq traffic
- * replicated on the Main VSI.
- */
- if (pf->cur_promisc != cur_promisc) {
- pf->cur_promisc = cur_promisc;
- if (cur_promisc)
- aq_ret =
- i40e_aq_set_default_vsi(hw,
- vsi->seid,
- NULL);
- else
- aq_ret =
- i40e_aq_clear_default_vsi(hw,
- vsi->seid,
- NULL);
- if (aq_ret) {
- retval = i40e_aq_rc_to_posix(aq_ret,
- hw->aq.asq_last_status);
- dev_info(&pf->pdev->dev,
- "Set default VSI failed on %s, err %s, aq_err %s\n",
- vsi_name,
- i40e_stat_str(hw, aq_ret),
- i40e_aq_str(hw,
- hw->aq.asq_last_status));
- }
- }
- } else {
- aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
- hw,
- vsi->seid,
- cur_promisc, NULL,
- true);
- if (aq_ret) {
- retval =
- i40e_aq_rc_to_posix(aq_ret,
- hw->aq.asq_last_status);
- dev_info(&pf->pdev->dev,
- "set unicast promisc failed on %s, err %s, aq_err %s\n",
- vsi_name,
- i40e_stat_str(hw, aq_ret),
- i40e_aq_str(hw,
- hw->aq.asq_last_status));
- }
- aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
- hw,
- vsi->seid,
- cur_promisc, NULL);
- if (aq_ret) {
- retval =
- i40e_aq_rc_to_posix(aq_ret,
- hw->aq.asq_last_status);
- dev_info(&pf->pdev->dev,
- "set multicast promisc failed on %s, err %s, aq_err %s\n",
- vsi_name,
- i40e_stat_str(hw, aq_ret),
- i40e_aq_str(hw,
- hw->aq.asq_last_status));
- }
- }
- aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
- vsi->seid,
- cur_promisc, NULL);
+ aq_ret = i40e_set_promiscuous(pf, cur_promisc);
if (aq_ret) {
retval = i40e_aq_rc_to_posix(aq_ret,
- pf->hw.aq.asq_last_status);
+ hw->aq.asq_last_status);
dev_info(&pf->pdev->dev,
- "set brdcast promisc failed, err %s, aq_err %s\n",
- i40e_stat_str(hw, aq_ret),
- i40e_aq_str(hw,
- hw->aq.asq_last_status));
+ "Setting promiscuous %s failed on %s, err %s aq_err %s\n",
+ cur_promisc ? "on" : "off",
+ vsi_name,
+ i40e_stat_str(hw, aq_ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
}
}
out:
@@ -3964,7 +3966,7 @@ static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
break;
/* prevent any other reads prior to eop_desc */
- read_barrier_depends();
+ smp_rmb();
/* if the descriptor isn't done, no work yet to do */
if (!(eop_desc->cmd_type_offset_bsz &
@@ -5629,14 +5631,6 @@ static int i40e_validate_num_queues(struct i40e_pf *pf, int num_queues,
return -EINVAL;
*reconfig_rss = false;
-
- if (num_queues > I40E_MAX_QUEUES_PER_CH) {
- dev_err(&pf->pdev->dev,
- "Failed to create VMDq VSI. User requested num_queues (%d) > I40E_MAX_QUEUES_PER_VSI (%u)\n",
- num_queues, I40E_MAX_QUEUES_PER_CH);
- return -EINVAL;
- }
-
if (vsi->current_rss_size) {
if (num_queues > vsi->current_rss_size) {
dev_dbg(&pf->pdev->dev,
@@ -9429,6 +9423,15 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
if (!lock_acquired)
rtnl_unlock();
+ /* Restore promiscuous settings */
+ ret = i40e_set_promiscuous(pf, pf->cur_promisc);
+ if (ret)
+ dev_warn(&pf->pdev->dev,
+ "Failed to restore promiscuous setting: %s, err %s aq_err %s\n",
+ pf->cur_promisc ? "on" : "off",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+
i40e_reset_all_vfs(pf, true);
/* tell the firmware that we're starting */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 0ccab0a5d717..7689c2ee0d46 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -328,15 +328,17 @@ static i40e_status __i40e_read_nvm_word(struct i40e_hw *hw,
i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
u16 *data)
{
- i40e_status ret_code;
+ i40e_status ret_code = 0;
- ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (ret_code)
return ret_code;
ret_code = __i40e_read_nvm_word(hw, offset, data);
- i40e_release_nvm(hw);
+ if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
+ i40e_release_nvm(hw);
return ret_code;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index d6d352a6e6ea..4566d66ffc7c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -759,7 +759,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
break;
/* prevent any other reads prior to eop_desc */
- read_barrier_depends();
+ smp_rmb();
i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
/* we have caught up to head, no work left to do */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 00d4833e9925..0e8568719b4e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -629,6 +629,7 @@ struct i40e_hw {
#define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0)
#define I40E_HW_FLAG_802_1AD_CAPABLE BIT_ULL(1)
#define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2)
+#define I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3)
u64 flags;
/* Used in set switch config AQ command */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index f8a794b72462..a3dc9b932946 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -2218,18 +2218,19 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
struct i40e_mac_filter *f;
f = i40e_find_mac(vsi, al->list[i].addr);
- if (!f)
+ if (!f) {
f = i40e_add_mac_filter(vsi, al->list[i].addr);
- if (!f) {
- dev_err(&pf->pdev->dev,
- "Unable to add MAC filter %pM for VF %d\n",
- al->list[i].addr, vf->vf_id);
- ret = I40E_ERR_PARAM;
- spin_unlock_bh(&vsi->mac_filter_hash_lock);
- goto error_param;
- } else {
- vf->num_mac++;
+ if (!f) {
+ dev_err(&pf->pdev->dev,
+ "Unable to add MAC filter %pM for VF %d\n",
+ al->list[i].addr, vf->vf_id);
+ ret = I40E_ERR_PARAM;
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+ goto error_param;
+ } else {
+ vf->num_mac++;
+ }
}
}
spin_unlock_bh(&vsi->mac_filter_hash_lock);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index fe817e2b6fef..50864f99446d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -179,7 +179,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
break;
/* prevent any other reads prior to eop_desc */
- read_barrier_depends();
+ smp_rmb();
i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
/* if the descriptor isn't done, no work yet to do */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_client.c b/drivers/net/ethernet/intel/i40evf/i40evf_client.c
index d8131139565e..da60ce12b33d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_client.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_client.c
@@ -26,6 +26,26 @@ static struct i40e_ops i40evf_lan_ops = {
};
/**
+ * i40evf_client_get_params - retrieve relevant client parameters
+ * @vsi: VSI with parameters
+ * @params: client param struct
+ **/
+static
+void i40evf_client_get_params(struct i40e_vsi *vsi, struct i40e_params *params)
+{
+ int i;
+
+ memset(params, 0, sizeof(struct i40e_params));
+ params->mtu = vsi->netdev->mtu;
+ params->link_up = vsi->back->link_up;
+
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ params->qos.prio_qos[i].tc = 0;
+ params->qos.prio_qos[i].qs_handle = vsi->qs_handle;
+ }
+}
+
+/**
* i40evf_notify_client_message - call the client message receive callback
* @vsi: the VSI associated with this client
* @msg: message buffer
@@ -66,10 +86,6 @@ void i40evf_notify_client_l2_params(struct i40e_vsi *vsi)
return;
cinst = vsi->back->cinst;
- memset(&params, 0, sizeof(params));
- params.mtu = vsi->netdev->mtu;
- params.link_up = vsi->back->link_up;
- params.qos.prio_qos[0].qs_handle = vsi->qs_handle;
if (!cinst || !cinst->client || !cinst->client->ops ||
!cinst->client->ops->l2_param_change) {
@@ -77,6 +93,8 @@ void i40evf_notify_client_l2_params(struct i40e_vsi *vsi)
"Cannot locate client instance l2_param_change function\n");
return;
}
+ i40evf_client_get_params(vsi, &params);
+ cinst->lan_info.params = params;
cinst->client->ops->l2_param_change(&cinst->lan_info, cinst->client,
&params);
}
@@ -166,9 +184,9 @@ static struct i40e_client_instance *
i40evf_client_add_instance(struct i40evf_adapter *adapter)
{
struct i40e_client_instance *cinst = NULL;
- struct netdev_hw_addr *mac = NULL;
struct i40e_vsi *vsi = &adapter->vsi;
- int i;
+ struct netdev_hw_addr *mac = NULL;
+ struct i40e_params params;
if (!vf_registered_client)
goto out;
@@ -192,18 +210,14 @@ i40evf_client_add_instance(struct i40evf_adapter *adapter)
cinst->lan_info.version.major = I40EVF_CLIENT_VERSION_MAJOR;
cinst->lan_info.version.minor = I40EVF_CLIENT_VERSION_MINOR;
cinst->lan_info.version.build = I40EVF_CLIENT_VERSION_BUILD;
+ i40evf_client_get_params(vsi, &params);
+ cinst->lan_info.params = params;
set_bit(__I40E_CLIENT_INSTANCE_NONE, &cinst->state);
cinst->lan_info.msix_count = adapter->num_iwarp_msix;
cinst->lan_info.msix_entries =
&adapter->msix_entries[adapter->iwarp_base_vector];
- for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
- cinst->lan_info.params.qos.prio_qos[i].tc = 0;
- cinst->lan_info.params.qos.prio_qos[i].qs_handle =
- vsi->qs_handle;
- }
-
mac = list_first_entry(&cinst->lan_info.netdev->dev_addrs.list,
struct netdev_hw_addr, list);
if (mac)
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index ca2ebdbd24d7..7b2a4eba92e2 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -2110,6 +2110,11 @@ static void i40evf_client_task(struct work_struct *work)
adapter->flags &= ~I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
goto out;
}
+ if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
+ i40evf_notify_client_l2_params(&adapter->vsi);
+ adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
+ goto out;
+ }
if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_CLOSE) {
i40evf_notify_client_close(&adapter->vsi, false);
adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_CLOSE;
@@ -2118,11 +2123,6 @@ static void i40evf_client_task(struct work_struct *work)
if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_OPEN) {
i40evf_notify_client_open(&adapter->vsi);
adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_OPEN;
- goto out;
- }
- if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
- i40evf_notify_client_l2_params(&adapter->vsi);
- adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
}
out:
clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index e94d3c256667..c208753ff5b7 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -7317,7 +7317,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
break;
/* prevent any other reads prior to eop_desc */
- read_barrier_depends();
+ smp_rmb();
/* if DD is not set pending work has not been completed */
if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 713e8df23744..4214c1519a87 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -810,7 +810,7 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
break;
/* prevent any other reads prior to eop_desc */
- read_barrier_depends();
+ smp_rmb();
/* if DD is not set pending work has not been completed */
if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index ca06c3cc2ca8..62a18914f00f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1192,7 +1192,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
break;
/* prevent any other reads prior to eop_desc */
- read_barrier_depends();
+ smp_rmb();
/* if DD is not set pending work has not been completed */
if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index feed11bc9ddf..1f4a69134ade 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -326,7 +326,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
break;
/* prevent any other reads prior to eop_desc */
- read_barrier_depends();
+ smp_rmb();
/* if DD is not set pending work has not been completed */
if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
index b6cee71f49d3..bc879aeb62d4 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -214,8 +214,14 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
{
int err;
- if (prog && !prog->aux->offload)
- return -EINVAL;
+ if (prog) {
+ struct bpf_dev_offload *offload = prog->aux->offload;
+
+ if (!offload)
+ return -EINVAL;
+ if (offload->netdev != nn->dp.netdev)
+ return -EINVAL;
+ }
if (prog && old_prog) {
u8 cap;
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 2cb3622c4acc..fc0d5fa65ad4 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -2030,21 +2030,6 @@ out:
return ret;
}
-static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
- int ret;
-
- del_timer_sync(&tp->timer);
-
- rtl_lock_work(tp);
- ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd),
- cmd->duplex, cmd->advertising);
- rtl_unlock_work(tp);
-
- return ret;
-}
-
static netdev_features_t rtl8169_fix_features(struct net_device *dev,
netdev_features_t features)
{
@@ -2171,6 +2156,27 @@ static int rtl8169_get_link_ksettings(struct net_device *dev,
return rc;
}
+static int rtl8169_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ int rc;
+ u32 advertising;
+
+ if (!ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising))
+ return -EINVAL;
+
+ del_timer_sync(&tp->timer);
+
+ rtl_lock_work(tp);
+ rc = rtl8169_set_speed(dev, cmd->base.autoneg, cmd->base.speed,
+ cmd->base.duplex, advertising);
+ rtl_unlock_work(tp);
+
+ return rc;
+}
+
static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
void *p)
{
@@ -2591,7 +2597,6 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_coalesce = rtl_get_coalesce,
.set_coalesce = rtl_set_coalesce,
- .set_settings = rtl8169_set_settings,
.get_msglevel = rtl8169_get_msglevel,
.set_msglevel = rtl8169_set_msglevel,
.get_regs = rtl8169_get_regs,
@@ -2603,6 +2608,7 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
.get_ts_info = ethtool_op_get_ts_info,
.nway_reset = rtl8169_nway_reset,
.get_link_ksettings = rtl8169_get_link_ksettings,
+ .set_link_ksettings = rtl8169_set_link_ksettings,
};
static void rtl8169_get_mac_version(struct rtl8169_private *tp,
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 4e16d839c311..b718a02a6bb6 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -1337,21 +1337,33 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[],
}
if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX]) {
+#if IS_ENABLED(CONFIG_IPV6)
if (changelink) {
attrtype = IFLA_GENEVE_UDP_ZERO_CSUM6_TX;
goto change_notsup;
}
if (nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX]))
info->key.tun_flags &= ~TUNNEL_CSUM;
+#else
+ NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX],
+ "IPv6 support not enabled in the kernel");
+ return -EPFNOSUPPORT;
+#endif
}
if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX]) {
+#if IS_ENABLED(CONFIG_IPV6)
if (changelink) {
attrtype = IFLA_GENEVE_UDP_ZERO_CSUM6_RX;
goto change_notsup;
}
if (nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX]))
*use_udp6_rx_checksums = false;
+#else
+ NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX],
+ "IPv6 support not enabled in the kernel");
+ return -EPFNOSUPPORT;
+#endif
}
return 0;
@@ -1527,11 +1539,13 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
goto nla_put_failure;
if (metadata && nla_put_flag(skb, IFLA_GENEVE_COLLECT_METADATA))
- goto nla_put_failure;
+ goto nla_put_failure;
+#if IS_ENABLED(CONFIG_IPV6)
if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_RX,
!geneve->use_udp6_rx_checksums))
goto nla_put_failure;
+#endif
return 0;
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index f2a7e929316e..11c1e7950fe5 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -116,7 +116,7 @@ bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6)
return false;
}
-static void *ipvlan_get_L3_hdr(struct sk_buff *skb, int *type)
+static void *ipvlan_get_L3_hdr(struct ipvl_port *port, struct sk_buff *skb, int *type)
{
void *lyr3h = NULL;
@@ -124,7 +124,7 @@ static void *ipvlan_get_L3_hdr(struct sk_buff *skb, int *type)
case htons(ETH_P_ARP): {
struct arphdr *arph;
- if (unlikely(!pskb_may_pull(skb, sizeof(*arph))))
+ if (unlikely(!pskb_may_pull(skb, arp_hdr_len(port->dev))))
return NULL;
arph = arp_hdr(skb);
@@ -165,8 +165,26 @@ static void *ipvlan_get_L3_hdr(struct sk_buff *skb, int *type)
/* Only Neighbour Solicitation pkts need different treatment */
if (ipv6_addr_any(&ip6h->saddr) &&
ip6h->nexthdr == NEXTHDR_ICMP) {
+ struct icmp6hdr *icmph;
+
+ if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h) + sizeof(*icmph))))
+ return NULL;
+
+ ip6h = ipv6_hdr(skb);
+ icmph = (struct icmp6hdr *)(ip6h + 1);
+
+ if (icmph->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
+ /* Need to access the ipv6 address in body */
+ if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h) + sizeof(*icmph)
+ + sizeof(struct in6_addr))))
+ return NULL;
+
+ ip6h = ipv6_hdr(skb);
+ icmph = (struct icmp6hdr *)(ip6h + 1);
+ }
+
*type = IPVL_ICMPV6;
- lyr3h = ip6h + 1;
+ lyr3h = icmph;
}
break;
}
@@ -510,7 +528,7 @@ static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev)
struct ipvl_addr *addr;
int addr_type;
- lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
+ lyr3h = ipvlan_get_L3_hdr(ipvlan->port, skb, &addr_type);
if (!lyr3h)
goto out;
@@ -539,7 +557,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
if (!ipvlan_is_vepa(ipvlan->port) &&
ether_addr_equal(eth->h_dest, eth->h_source)) {
- lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
+ lyr3h = ipvlan_get_L3_hdr(ipvlan->port, skb, &addr_type);
if (lyr3h) {
addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
if (addr) {
@@ -606,7 +624,7 @@ static bool ipvlan_external_frame(struct sk_buff *skb, struct ipvl_port *port)
int addr_type;
if (ether_addr_equal(eth->h_source, skb->dev->dev_addr)) {
- lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
+ lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
if (!lyr3h)
return true;
@@ -627,7 +645,7 @@ static rx_handler_result_t ipvlan_handle_mode_l3(struct sk_buff **pskb,
struct sk_buff *skb = *pskb;
rx_handler_result_t ret = RX_HANDLER_PASS;
- lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
+ lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
if (!lyr3h)
goto out;
@@ -666,7 +684,7 @@ static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
} else {
struct ipvl_addr *addr;
- lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
+ lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
if (!lyr3h)
return ret;
@@ -717,7 +735,7 @@ static struct ipvl_addr *ipvlan_skb_to_addr(struct sk_buff *skb,
if (!port || port->mode != IPVLAN_MODE_L3S)
goto out;
- lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
+ lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
if (!lyr3h)
goto out;
diff --git a/drivers/net/phy/cortina.c b/drivers/net/phy/cortina.c
index 72f4228a63bb..9442db221834 100644
--- a/drivers/net/phy/cortina.c
+++ b/drivers/net/phy/cortina.c
@@ -116,3 +116,7 @@ static struct mdio_device_id __maybe_unused cortina_tbl[] = {
};
MODULE_DEVICE_TABLE(mdio, cortina_tbl);
+
+MODULE_DESCRIPTION("Cortina EDC CDR 10G Ethernet PHY driver");
+MODULE_AUTHOR("NXP");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index b13890953ebb..e9489b88407c 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -1077,7 +1077,7 @@ static long tap_ioctl(struct file *file, unsigned int cmd,
case TUNSETOFFLOAD:
/* let the user check for future flags */
if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
- TUN_F_TSO_ECN))
+ TUN_F_TSO_ECN | TUN_F_UFO))
return -EINVAL;
rtnl_lock();
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index c3af08f24679..95749006d687 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -2372,6 +2372,8 @@ static int set_offload(struct tun_struct *tun, unsigned long arg)
features |= NETIF_F_TSO6;
arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
}
+
+ arg &= ~TUN_F_UFO;
}
/* This gives the user a way to test for new features in future by
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index e31438541ee1..7d295ee71534 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -566,18 +566,16 @@ static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
#define MICHAEL_MIC_LEN 8
-static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
- enum htt_rx_mpdu_encrypt_type type)
+static int ath10k_htt_rx_crypto_mic_len(struct ath10k *ar,
+ enum htt_rx_mpdu_encrypt_type type)
{
switch (type) {
case HTT_RX_MPDU_ENCRYPT_NONE:
- return 0;
case HTT_RX_MPDU_ENCRYPT_WEP40:
case HTT_RX_MPDU_ENCRYPT_WEP104:
- return IEEE80211_WEP_ICV_LEN;
case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
- return IEEE80211_TKIP_ICV_LEN;
+ return 0;
case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
return IEEE80211_CCMP_MIC_LEN;
case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
@@ -594,6 +592,31 @@ static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
return 0;
}
+static int ath10k_htt_rx_crypto_icv_len(struct ath10k *ar,
+ enum htt_rx_mpdu_encrypt_type type)
+{
+ switch (type) {
+ case HTT_RX_MPDU_ENCRYPT_NONE:
+ case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
+ case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
+ case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
+ case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
+ return 0;
+ case HTT_RX_MPDU_ENCRYPT_WEP40:
+ case HTT_RX_MPDU_ENCRYPT_WEP104:
+ return IEEE80211_WEP_ICV_LEN;
+ case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
+ case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
+ return IEEE80211_TKIP_ICV_LEN;
+ case HTT_RX_MPDU_ENCRYPT_WEP128:
+ case HTT_RX_MPDU_ENCRYPT_WAPI:
+ break;
+ }
+
+ ath10k_warn(ar, "unsupported encryption type %d\n", type);
+ return 0;
+}
+
struct amsdu_subframe_hdr {
u8 dst[ETH_ALEN];
u8 src[ETH_ALEN];
@@ -1063,25 +1086,27 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
/* Tail */
if (status->flag & RX_FLAG_IV_STRIPPED) {
skb_trim(msdu, msdu->len -
- ath10k_htt_rx_crypto_tail_len(ar, enctype));
+ ath10k_htt_rx_crypto_mic_len(ar, enctype));
+
+ skb_trim(msdu, msdu->len -
+ ath10k_htt_rx_crypto_icv_len(ar, enctype));
} else {
/* MIC */
- if ((status->flag & RX_FLAG_MIC_STRIPPED) &&
- enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2)
- skb_trim(msdu, msdu->len - 8);
+ if (status->flag & RX_FLAG_MIC_STRIPPED)
+ skb_trim(msdu, msdu->len -
+ ath10k_htt_rx_crypto_mic_len(ar, enctype));
/* ICV */
- if (status->flag & RX_FLAG_ICV_STRIPPED &&
- enctype != HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2)
+ if (status->flag & RX_FLAG_ICV_STRIPPED)
skb_trim(msdu, msdu->len -
- ath10k_htt_rx_crypto_tail_len(ar, enctype));
+ ath10k_htt_rx_crypto_icv_len(ar, enctype));
}
/* MMIC */
if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
!ieee80211_has_morefrags(hdr->frame_control) &&
enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
- skb_trim(msdu, msdu->len - 8);
+ skb_trim(msdu, msdu->len - MICHAEL_MIC_LEN);
/* Head */
if (status->flag & RX_FLAG_IV_STRIPPED) {
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index 71812a2dd513..f7d228b5ba93 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -1233,7 +1233,7 @@ static int wcn36xx_platform_get_resources(struct wcn36xx *wcn,
}
/* External RF module */
- iris_node = of_find_node_by_name(mmio_node, "iris");
+ iris_node = of_get_child_by_name(mmio_node, "iris");
if (iris_node) {
if (of_device_is_compatible(iris_node, "qcom,wcn3620"))
wcn->rf_id = RF_IRIS_WCN3620;
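The of_find_node_by_name() to of_get_child_by_name() switch above is not just cosmetic: the former searches the whole tree starting after the node it is given and drops a reference on that node, while the latter looks only at direct children and returns the child with its refcount raised. A hedged usage sketch, with the of_node_put() balancing assumed to live elsewhere in the driver:

/* Sketch: look up the "iris" child of the MMIO node and release the
 * reference once the compatible string has been inspected.
 */
struct device_node *iris_node = of_get_child_by_name(mmio_node, "iris");

if (iris_node) {
	if (of_device_is_compatible(iris_node, "qcom,wcn3620"))
		wcn->rf_id = RF_IRIS_WCN3620;
	of_node_put(iris_node);		/* of_get_child_by_name() took a reference */
}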
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
index af7c4f36b66f..e7e75b458005 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
@@ -72,18 +72,21 @@
#define IWL9000_SMEM_OFFSET 0x400000
#define IWL9000_SMEM_LEN 0x68000
-#define IWL9000_FW_PRE "iwlwifi-9000-pu-a0-jf-a0-"
+#define IWL9000A_FW_PRE "iwlwifi-9000-pu-a0-jf-a0-"
+#define IWL9000B_FW_PRE "iwlwifi-9000-pu-b0-jf-b0-"
#define IWL9000RFB_FW_PRE "iwlwifi-9000-pu-a0-jf-b0-"
#define IWL9260A_FW_PRE "iwlwifi-9260-th-a0-jf-a0-"
#define IWL9260B_FW_PRE "iwlwifi-9260-th-b0-jf-b0-"
-#define IWL9000_MODULE_FIRMWARE(api) \
- IWL9000_FW_PRE "-" __stringify(api) ".ucode"
+#define IWL9000A_MODULE_FIRMWARE(api) \
+ IWL9000A_FW_PRE __stringify(api) ".ucode"
+#define IWL9000B_MODULE_FIRMWARE(api) \
+ IWL9000B_FW_PRE __stringify(api) ".ucode"
#define IWL9000RFB_MODULE_FIRMWARE(api) \
- IWL9000RFB_FW_PRE "-" __stringify(api) ".ucode"
+ IWL9000RFB_FW_PRE __stringify(api) ".ucode"
#define IWL9260A_MODULE_FIRMWARE(api) \
- IWL9260A_FW_PRE "-" __stringify(api) ".ucode"
+ IWL9260A_FW_PRE __stringify(api) ".ucode"
#define IWL9260B_MODULE_FIRMWARE(api) \
- IWL9260B_FW_PRE "-" __stringify(api) ".ucode"
+ IWL9260B_FW_PRE __stringify(api) ".ucode"
#define NVM_HW_SECTION_NUM_FAMILY_9000 10
@@ -194,7 +197,48 @@ const struct iwl_cfg iwl9460_2ac_cfg = {
.nvm_ver = IWL9000_NVM_VERSION,
.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
+const struct iwl_cfg iwl9460_2ac_cfg_soc = {
+ .name = "Intel(R) Dual Band Wireless AC 9460",
+ .fw_name_pre = IWL9000A_FW_PRE,
+ .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+ .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ IWL_DEVICE_9000,
+ .ht_params = &iwl9000_ht_params,
+ .nvm_ver = IWL9000_NVM_VERSION,
+ .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
.integrated = true,
+ .soc_latency = 5000,
+};
+
+const struct iwl_cfg iwl9461_2ac_cfg_soc = {
+ .name = "Intel(R) Dual Band Wireless AC 9461",
+ .fw_name_pre = IWL9000A_FW_PRE,
+ .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+ .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ IWL_DEVICE_9000,
+ .ht_params = &iwl9000_ht_params,
+ .nvm_ver = IWL9000_NVM_VERSION,
+ .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .integrated = true,
+ .soc_latency = 5000,
+};
+
+const struct iwl_cfg iwl9462_2ac_cfg_soc = {
+ .name = "Intel(R) Dual Band Wireless AC 9462",
+ .fw_name_pre = IWL9000A_FW_PRE,
+ .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+ .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ IWL_DEVICE_9000,
+ .ht_params = &iwl9000_ht_params,
+ .nvm_ver = IWL9000_NVM_VERSION,
+ .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .integrated = true,
+ .soc_latency = 5000,
};
const struct iwl_cfg iwl9560_2ac_cfg = {
@@ -206,10 +250,23 @@ const struct iwl_cfg iwl9560_2ac_cfg = {
.nvm_ver = IWL9000_NVM_VERSION,
.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
- .integrated = true,
};
-MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
+const struct iwl_cfg iwl9560_2ac_cfg_soc = {
+ .name = "Intel(R) Dual Band Wireless AC 9560",
+ .fw_name_pre = IWL9000A_FW_PRE,
+ .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+ .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ IWL_DEVICE_9000,
+ .ht_params = &iwl9000_ht_params,
+ .nvm_ver = IWL9000_NVM_VERSION,
+ .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .integrated = true,
+ .soc_latency = 5000,
+};
+MODULE_FIRMWARE(IWL9000A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL9000B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL9000RFB_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL9260A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL9260B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/a000.c b/drivers/net/wireless/intel/iwlwifi/cfg/a000.c
index ea8206515171..705f83b02e13 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/a000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/a000.c
@@ -80,15 +80,15 @@
#define IWL_A000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-"
#define IWL_A000_HR_MODULE_FIRMWARE(api) \
- IWL_A000_HR_FW_PRE "-" __stringify(api) ".ucode"
+ IWL_A000_HR_FW_PRE __stringify(api) ".ucode"
#define IWL_A000_JF_MODULE_FIRMWARE(api) \
- IWL_A000_JF_FW_PRE "-" __stringify(api) ".ucode"
+ IWL_A000_JF_FW_PRE __stringify(api) ".ucode"
#define IWL_A000_HR_F0_QNJ_MODULE_FIRMWARE(api) \
- IWL_A000_HR_F0_FW_PRE "-" __stringify(api) ".ucode"
+ IWL_A000_HR_F0_FW_PRE __stringify(api) ".ucode"
#define IWL_A000_JF_B0_QNJ_MODULE_FIRMWARE(api) \
- IWL_A000_JF_B0_FW_PRE "-" __stringify(api) ".ucode"
+ IWL_A000_JF_B0_FW_PRE __stringify(api) ".ucode"
#define IWL_A000_HR_A0_QNJ_MODULE_FIRMWARE(api) \
- IWL_A000_HR_A0_FW_PRE "-" __stringify(api) ".ucode"
+ IWL_A000_HR_A0_FW_PRE __stringify(api) ".ucode"
#define NVM_HW_SECTION_NUM_FAMILY_A000 10
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
index 5a40092febfb..3bfc657f6b42 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
@@ -531,6 +531,8 @@ struct iwl_scan_config_v1 {
} __packed; /* SCAN_CONFIG_DB_CMD_API_S */
#define SCAN_TWO_LMACS 2
+#define SCAN_LB_LMAC_IDX 0
+#define SCAN_HB_LMAC_IDX 1
struct iwl_scan_config {
__le32 flags;
@@ -578,6 +580,7 @@ enum iwl_umac_scan_general_flags {
IWL_UMAC_SCAN_GEN_FLAGS_MATCH = BIT(9),
IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL = BIT(10),
IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED = BIT(11),
+ IWL_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL = BIT(13),
};
/**
@@ -631,12 +634,17 @@ struct iwl_scan_req_umac_tail {
* @uid: scan id, &enum iwl_umac_scan_uid_offsets
* @ooc_priority: out of channel priority - &enum iwl_scan_priority
* @general_flags: &enum iwl_umac_scan_general_flags
- * @reserved2: for future use and alignment
* @scan_start_mac_id: report the scan start TSF time according to this mac TSF
* @extended_dwell: dwell time for channels 1, 6 and 11
* @active_dwell: dwell time for active scan
* @passive_dwell: dwell time for passive scan
* @fragmented_dwell: dwell time for fragmented passive scan
+ * @adwell_default_n_aps: for adaptive dwell the default number of APs
+ * per channel
+ * @adwell_default_n_aps_social: for adaptive dwell the default
+ * number of APs per social (1,6,11) channel
+ * @adwell_max_budget: for adaptive dwell the maximal budget of TU to be added
+ * to total scan time
* @max_out_time: max out of serving channel time, per LMAC - for CDB there
* are 2 LMACs
* @suspend_time: max suspend time, per LMAC - for CDB there are 2 LMACs
@@ -644,6 +652,8 @@ struct iwl_scan_req_umac_tail {
* @channel_flags: &enum iwl_scan_channel_flags
* @n_channels: num of channels in scan request
* @reserved: for future use and alignment
+ * @reserved2: for future use and alignment
+ * @reserved3: for future use and alignment
* @data: &struct iwl_scan_channel_cfg_umac and
* &struct iwl_scan_req_umac_tail
*/
@@ -651,41 +661,64 @@ struct iwl_scan_req_umac {
__le32 flags;
__le32 uid;
__le32 ooc_priority;
- /* SCAN_GENERAL_PARAMS_API_S_VER_4 */
__le16 general_flags;
- u8 reserved2;
+ u8 reserved;
u8 scan_start_mac_id;
- u8 extended_dwell;
- u8 active_dwell;
- u8 passive_dwell;
- u8 fragmented_dwell;
union {
struct {
+ u8 extended_dwell;
+ u8 active_dwell;
+ u8 passive_dwell;
+ u8 fragmented_dwell;
__le32 max_out_time;
__le32 suspend_time;
__le32 scan_priority;
- /* SCAN_CHANNEL_PARAMS_API_S_VER_4 */
+ /* SCAN_CHANNEL_PARAMS_API_S_VER_1 */
u8 channel_flags;
u8 n_channels;
- __le16 reserved;
+ __le16 reserved2;
u8 data[];
} v1; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */
struct {
+ u8 extended_dwell;
+ u8 active_dwell;
+ u8 passive_dwell;
+ u8 fragmented_dwell;
__le32 max_out_time[SCAN_TWO_LMACS];
__le32 suspend_time[SCAN_TWO_LMACS];
__le32 scan_priority;
- /* SCAN_CHANNEL_PARAMS_API_S_VER_4 */
+ /* SCAN_CHANNEL_PARAMS_API_S_VER_1 */
u8 channel_flags;
u8 n_channels;
- __le16 reserved;
+ __le16 reserved2;
u8 data[];
} v6; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_6 */
+ struct {
+ u8 active_dwell;
+ u8 passive_dwell;
+ u8 fragmented_dwell;
+ u8 adwell_default_n_aps;
+ u8 adwell_default_n_aps_social;
+ u8 reserved3;
+ __le16 adwell_max_budget;
+ __le32 max_out_time[SCAN_TWO_LMACS];
+ __le32 suspend_time[SCAN_TWO_LMACS];
+ __le32 scan_priority;
+ /* SCAN_CHANNEL_PARAMS_API_S_VER_1 */
+ u8 channel_flags;
+ u8 n_channels;
+ __le16 reserved2;
+ u8 data[];
+ } v7; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_7 */
};
} __packed;
-#define IWL_SCAN_REQ_UMAC_SIZE sizeof(struct iwl_scan_req_umac)
+#define IWL_SCAN_REQ_UMAC_SIZE_V7 sizeof(struct iwl_scan_req_umac)
+#define IWL_SCAN_REQ_UMAC_SIZE_V6 (sizeof(struct iwl_scan_req_umac) - \
+ 2 * sizeof(u8) - sizeof(__le16))
#define IWL_SCAN_REQ_UMAC_SIZE_V1 (sizeof(struct iwl_scan_req_umac) - \
- 2 * sizeof(__le32))
+ 2 * sizeof(__le32) - 2 * sizeof(u8) - \
+ sizeof(__le16))
/**
* struct iwl_umac_scan_abort
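An illustrative aside on the IWL_SCAN_REQ_UMAC_SIZE_V{1,6,7} macros above, not part of the patch: the full structure's size is set by its largest union member (v7), and the V6 macro subtracts exactly the amount by which the v7 layout outgrows v6. The stand-in structs below are invented for this sketch (v6_like, v7_like) and only mirror the union members shown above; compile as C11 for static_assert.

#include <assert.h>
#include <stdint.h>

/* Simplified stand-ins for the v6 and v7 union members shown above. */
struct v6_like {
        uint8_t  extended_dwell, active_dwell, passive_dwell, fragmented_dwell;
        uint32_t max_out_time[2], suspend_time[2], scan_priority;
        uint8_t  channel_flags, n_channels;
        uint16_t reserved2;
};

struct v7_like {
        uint8_t  active_dwell, passive_dwell, fragmented_dwell;
        uint8_t  adwell_default_n_aps, adwell_default_n_aps_social, reserved3;
        uint16_t adwell_max_budget;
        uint32_t max_out_time[2], suspend_time[2], scan_priority;
        uint8_t  channel_flags, n_channels;
        uint16_t reserved2;
};

/*
 * v7 drops extended_dwell but gains two u8 fields, a reserved byte and a
 * 16-bit budget, i.e. it is 2 * sizeof(u8) + sizeof(__le16) larger than v6,
 * which is exactly what IWL_SCAN_REQ_UMAC_SIZE_V6 subtracts from the full
 * structure size.
 */
static_assert(sizeof(struct v7_like) - sizeof(struct v6_like) ==
              2 * sizeof(uint8_t) + sizeof(uint16_t),
              "size bookkeeping behind the V6 macro");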
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index 740d97093d1c..37a5c5b4eda6 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -264,6 +264,7 @@ enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_STA_TYPE = (__force iwl_ucode_tlv_api_t)30,
IWL_UCODE_TLV_API_NAN2_VER2 = (__force iwl_ucode_tlv_api_t)31,
/* API Set 1 */
+ IWL_UCODE_TLV_API_ADAPTIVE_DWELL = (__force iwl_ucode_tlv_api_t)32,
IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE = (__force iwl_ucode_tlv_api_t)34,
IWL_UCODE_TLV_API_NEW_RX_STATS = (__force iwl_ucode_tlv_api_t)35,
IWL_UCODE_TLV_API_COEX_ATS_EXTERNAL = (__force iwl_ucode_tlv_api_t)37,
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index d1263a554420..e21e46cf6f9a 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -366,6 +366,7 @@ struct iwl_cfg {
u32 dccm2_len;
u32 smem_offset;
u32 smem_len;
+ u32 soc_latency;
u16 nvm_ver;
u16 nvm_calib_ver;
u16 rx_with_siso_diversity:1,
@@ -472,6 +473,10 @@ extern const struct iwl_cfg iwl9260_2ac_cfg;
extern const struct iwl_cfg iwl9270_2ac_cfg;
extern const struct iwl_cfg iwl9460_2ac_cfg;
extern const struct iwl_cfg iwl9560_2ac_cfg;
+extern const struct iwl_cfg iwl9460_2ac_cfg_soc;
+extern const struct iwl_cfg iwl9461_2ac_cfg_soc;
+extern const struct iwl_cfg iwl9462_2ac_cfg_soc;
+extern const struct iwl_cfg iwl9560_2ac_cfg_soc;
extern const struct iwl_cfg iwla000_2ac_cfg_hr;
extern const struct iwl_cfg iwla000_2ac_cfg_hr_cdb;
extern const struct iwl_cfg iwla000_2ac_cfg_jf;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 0e18c5066f04..4575595ab022 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -1142,6 +1142,12 @@ static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm)
IWL_UCODE_TLV_CAPA_D0I3_SUPPORT);
}
+static inline bool iwl_mvm_is_adaptive_dwell_supported(struct iwl_mvm *mvm)
+{
+ return fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_ADAPTIVE_DWELL);
+}
+
static inline bool iwl_mvm_enter_d0i3_on_suspend(struct iwl_mvm *mvm)
{
/* For now we only use this mode to differentiate between
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 774122fed454..e4fd476e9ccb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -130,6 +130,19 @@ struct iwl_mvm_scan_params {
u32 measurement_dwell;
};
+static inline void *iwl_mvm_get_scan_req_umac_data(struct iwl_mvm *mvm)
+{
+ struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
+
+ if (iwl_mvm_is_adaptive_dwell_supported(mvm))
+ return (void *)&cmd->v7.data;
+
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return (void *)&cmd->v6.data;
+
+ return (void *)&cmd->v1.data;
+}
+
static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm)
{
if (mvm->scan_rx_ant != ANT_NONE)
@@ -1075,25 +1088,57 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
{
struct iwl_mvm_scan_timing_params *timing = &scan_timing[params->type];
+ if (iwl_mvm_is_regular_scan(params))
+ cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
+ else
+ cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_2);
+
+ if (iwl_mvm_is_adaptive_dwell_supported(mvm)) {
+ if (params->measurement_dwell) {
+ cmd->v7.active_dwell = params->measurement_dwell;
+ cmd->v7.passive_dwell = params->measurement_dwell;
+ } else {
+ cmd->v7.active_dwell = IWL_SCAN_DWELL_ACTIVE;
+ cmd->v7.passive_dwell = IWL_SCAN_DWELL_PASSIVE;
+ }
+ cmd->v7.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
+
+ cmd->v7.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
+ cmd->v7.max_out_time[SCAN_LB_LMAC_IDX] =
+ cpu_to_le32(timing->max_out_time);
+ cmd->v7.suspend_time[SCAN_LB_LMAC_IDX] =
+ cpu_to_le32(timing->suspend_time);
+ if (iwl_mvm_is_cdb_supported(mvm)) {
+ cmd->v7.max_out_time[SCAN_HB_LMAC_IDX] =
+ cpu_to_le32(timing->max_out_time);
+ cmd->v7.suspend_time[SCAN_HB_LMAC_IDX] =
+ cpu_to_le32(timing->suspend_time);
+ }
+
+ return;
+ }
+
if (params->measurement_dwell) {
- cmd->active_dwell = params->measurement_dwell;
- cmd->passive_dwell = params->measurement_dwell;
- cmd->extended_dwell = params->measurement_dwell;
+ cmd->v1.active_dwell = params->measurement_dwell;
+ cmd->v1.passive_dwell = params->measurement_dwell;
+ cmd->v1.extended_dwell = params->measurement_dwell;
} else {
- cmd->active_dwell = IWL_SCAN_DWELL_ACTIVE;
- cmd->passive_dwell = IWL_SCAN_DWELL_PASSIVE;
- cmd->extended_dwell = IWL_SCAN_DWELL_EXTENDED;
+ cmd->v1.active_dwell = IWL_SCAN_DWELL_ACTIVE;
+ cmd->v1.passive_dwell = IWL_SCAN_DWELL_PASSIVE;
+ cmd->v1.extended_dwell = IWL_SCAN_DWELL_EXTENDED;
}
- cmd->fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
+ cmd->v1.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
if (iwl_mvm_has_new_tx_api(mvm)) {
cmd->v6.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
- cmd->v6.max_out_time[0] = cpu_to_le32(timing->max_out_time);
- cmd->v6.suspend_time[0] = cpu_to_le32(timing->suspend_time);
+ cmd->v6.max_out_time[SCAN_LB_LMAC_IDX] =
+ cpu_to_le32(timing->max_out_time);
+ cmd->v6.suspend_time[SCAN_LB_LMAC_IDX] =
+ cpu_to_le32(timing->suspend_time);
if (iwl_mvm_is_cdb_supported(mvm)) {
- cmd->v6.max_out_time[1] =
+ cmd->v6.max_out_time[SCAN_HB_LMAC_IDX] =
cpu_to_le32(timing->max_out_time);
- cmd->v6.suspend_time[1] =
+ cmd->v6.suspend_time[SCAN_HB_LMAC_IDX] =
cpu_to_le32(timing->suspend_time);
}
} else {
@@ -1102,11 +1147,6 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
cmd->v1.scan_priority =
cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
}
-
- if (iwl_mvm_is_regular_scan(params))
- cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
- else
- cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_2);
}
static void
@@ -1178,8 +1218,7 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
int type)
{
struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
- void *cmd_data = iwl_mvm_has_new_tx_api(mvm) ?
- (void *)&cmd->v6.data : (void *)&cmd->v1.data;
+ void *cmd_data = iwl_mvm_get_scan_req_umac_data(mvm);
struct iwl_scan_req_umac_tail *sec_part = cmd_data +
sizeof(struct iwl_scan_channel_cfg_umac) *
mvm->fw->ucode_capa.n_scan_channels;
@@ -1216,7 +1255,10 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
- if (iwl_mvm_has_new_tx_api(mvm)) {
+ if (iwl_mvm_is_adaptive_dwell_supported(mvm)) {
+ cmd->v7.channel_flags = channel_flags;
+ cmd->v7.n_channels = params->n_channels;
+ } else if (iwl_mvm_has_new_tx_api(mvm)) {
cmd->v6.channel_flags = channel_flags;
cmd->v6.n_channels = params->n_channels;
} else {
@@ -1661,8 +1703,10 @@ int iwl_mvm_scan_size(struct iwl_mvm *mvm)
{
int base_size = IWL_SCAN_REQ_UMAC_SIZE_V1;
- if (iwl_mvm_has_new_tx_api(mvm))
- base_size = IWL_SCAN_REQ_UMAC_SIZE;
+ if (iwl_mvm_is_adaptive_dwell_supported(mvm))
+ base_size = IWL_SCAN_REQ_UMAC_SIZE_V7;
+ else if (iwl_mvm_has_new_tx_api(mvm))
+ base_size = IWL_SCAN_REQ_UMAC_SIZE_V6;
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
return base_size +
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 4a21c12276d7..f21fe59faccf 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -535,47 +535,121 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2526, 0x0238, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x023C, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0260, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0264, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x02A0, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x02A4, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x1010, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x1030, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x1210, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
{IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)},
{IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)},
- {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x31DC, 0x003C, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0034, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0038, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x003C, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0xA370, 0x0034, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0xA370, 0x0038, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0xA370, 0x003C, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x271B, 0x0214, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x0060, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x0064, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x00A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x00A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x0230, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x0234, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x0238, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x023C, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x0260, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x0264, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x02A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x02A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x40A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x0064, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x00A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x00A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x0260, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x0264, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x02A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x02A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x003C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0064, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x00A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x00A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0230, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0234, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0238, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x023C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0260, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0264, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x02A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x02A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x4030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x4034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x40A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x34F0, 0x02A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0038, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x003C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0064, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x00A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x00A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0230, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0234, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0238, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x023C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0260, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0264, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x02A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x02A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x4030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x4034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x40A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0038, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x003C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0064, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x00A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x00A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0230, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0234, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0238, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x023C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0260, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0264, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x02A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x02A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x4030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x4034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x40A4, iwl9462_2ac_cfg_soc)},
/* a000 Series */
{IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg_hr_cdb)},
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index 2d704361f672..bf897b1832b1 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -2074,8 +2074,10 @@ static int __init dell_init(void)
goto fail_platform_device2;
buffer = kzalloc(sizeof(struct calling_interface_buffer), GFP_KERNEL);
- if (!buffer)
+ if (!buffer) {
+ ret = -ENOMEM;
goto fail_buffer;
+ }
ret = dell_setup_rfkill();
diff --git a/drivers/platform/x86/dell-smbios-wmi.c b/drivers/platform/x86/dell-smbios-wmi.c
index 0cab1f9c35af..609557aa5868 100644
--- a/drivers/platform/x86/dell-smbios-wmi.c
+++ b/drivers/platform/x86/dell-smbios-wmi.c
@@ -147,7 +147,10 @@ fail_smbios_cmd:
static int dell_smbios_wmi_probe(struct wmi_device *wdev)
{
+ struct wmi_driver *wdriver =
+ container_of(wdev->dev.driver, struct wmi_driver, driver);
struct wmi_smbios_priv *priv;
+ u32 hotfix;
int count;
int ret;
@@ -164,6 +167,16 @@ static int dell_smbios_wmi_probe(struct wmi_device *wdev)
if (!dell_wmi_get_size(&priv->req_buf_size))
return -EPROBE_DEFER;
+ /* some SMBIOS calls fail unless BIOS contains hotfix */
+ if (!dell_wmi_get_hotfix(&hotfix))
+ return -EPROBE_DEFER;
+ if (!hotfix) {
+ dev_warn(&wdev->dev,
+ "WMI SMBIOS userspace interface not supported(%u), try upgrading to a newer BIOS\n",
+ hotfix);
+ wdriver->filter_callback = NULL;
+ }
+
/* add in the length object we will use internally with ioctl */
priv->req_buf_size += sizeof(u64);
ret = set_required_buffer_size(wdev, priv->req_buf_size);
diff --git a/drivers/platform/x86/dell-wmi-descriptor.c b/drivers/platform/x86/dell-wmi-descriptor.c
index 4dfef1f53481..072821aa47fc 100644
--- a/drivers/platform/x86/dell-wmi-descriptor.c
+++ b/drivers/platform/x86/dell-wmi-descriptor.c
@@ -27,6 +27,7 @@ struct descriptor_priv {
struct list_head list;
u32 interface_version;
u32 size;
+ u32 hotfix;
};
static int descriptor_valid = -EPROBE_DEFER;
static LIST_HEAD(wmi_list);
@@ -77,6 +78,24 @@ bool dell_wmi_get_size(u32 *size)
}
EXPORT_SYMBOL_GPL(dell_wmi_get_size);
+bool dell_wmi_get_hotfix(u32 *hotfix)
+{
+ struct descriptor_priv *priv;
+ bool ret = false;
+
+ mutex_lock(&list_mutex);
+ priv = list_first_entry_or_null(&wmi_list,
+ struct descriptor_priv,
+ list);
+ if (priv) {
+ *hotfix = priv->hotfix;
+ ret = true;
+ }
+ mutex_unlock(&list_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dell_wmi_get_hotfix);
+
/*
* Descriptor buffer is 128 byte long and contains:
*
@@ -85,6 +104,7 @@ EXPORT_SYMBOL_GPL(dell_wmi_get_size);
* Object Signature 4 4 " WMI"
* WMI Interface Version 8 4 <version>
* WMI buffer length 12 4 <length>
+ * WMI hotfix number 16 4 <hotfix>
*/
static int dell_wmi_descriptor_probe(struct wmi_device *wdev)
{
@@ -144,15 +164,17 @@ static int dell_wmi_descriptor_probe(struct wmi_device *wdev)
priv->interface_version = buffer[2];
priv->size = buffer[3];
+ priv->hotfix = buffer[4];
ret = 0;
dev_set_drvdata(&wdev->dev, priv);
mutex_lock(&list_mutex);
list_add_tail(&priv->list, &wmi_list);
mutex_unlock(&list_mutex);
- dev_dbg(&wdev->dev, "Detected Dell WMI interface version %lu and buffer size %lu\n",
+ dev_dbg(&wdev->dev, "Detected Dell WMI interface version %lu, buffer size %lu, hotfix %lu\n",
(unsigned long) priv->interface_version,
- (unsigned long) priv->size);
+ (unsigned long) priv->size,
+ (unsigned long) priv->hotfix);
out:
kfree(obj);
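A hypothetical overlay of the descriptor, not part of the driver, only to visualize how the buffer[2]/buffer[3]/buffer[4] reads in the probe above line up with the byte offsets documented in the comment; the name dell_wmi_descriptor_view is invented for this sketch.

#include <linux/types.h>

struct dell_wmi_descriptor_view {
        u32 signature[2];         /* bytes  0..7; the second word is " WMI"   */
        u32 interface_version;    /* bytes  8..11; read as buffer[2]          */
        u32 size;                 /* bytes 12..15; read as buffer[3]          */
        u32 hotfix;               /* bytes 16..19; read as buffer[4] (new)    */
        u32 reserved[27];         /* pads the view out to the 128-byte buffer */
};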
diff --git a/drivers/platform/x86/dell-wmi-descriptor.h b/drivers/platform/x86/dell-wmi-descriptor.h
index 1e8cb96ffd78..a6123a4d06a7 100644
--- a/drivers/platform/x86/dell-wmi-descriptor.h
+++ b/drivers/platform/x86/dell-wmi-descriptor.h
@@ -23,5 +23,6 @@ int dell_wmi_get_descriptor_valid(void);
bool dell_wmi_get_interface_version(u32 *version);
bool dell_wmi_get_size(u32 *size);
+bool dell_wmi_get_hotfix(u32 *hotfix);
#endif
diff --git a/drivers/pwm/pwm-atmel-tcb.c b/drivers/pwm/pwm-atmel-tcb.c
index 75db585a2a94..acd3ce8ecf3f 100644
--- a/drivers/pwm/pwm-atmel-tcb.c
+++ b/drivers/pwm/pwm-atmel-tcb.c
@@ -37,11 +37,20 @@ struct atmel_tcb_pwm_device {
unsigned period; /* PWM period expressed in clk cycles */
};
+struct atmel_tcb_channel {
+ u32 enabled;
+ u32 cmr;
+ u32 ra;
+ u32 rb;
+ u32 rc;
+};
+
struct atmel_tcb_pwm_chip {
struct pwm_chip chip;
spinlock_t lock;
struct atmel_tc *tc;
struct atmel_tcb_pwm_device *pwms[NPWM];
+ struct atmel_tcb_channel bkup[NPWM / 2];
};
static inline struct atmel_tcb_pwm_chip *to_tcb_chip(struct pwm_chip *chip)
@@ -175,12 +184,15 @@ static void atmel_tcb_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
* Use software trigger to apply the new setting.
* If both PWM devices in this group are disabled we stop the clock.
*/
- if (!(cmr & (ATMEL_TC_ACPC | ATMEL_TC_BCPC)))
+ if (!(cmr & (ATMEL_TC_ACPC | ATMEL_TC_BCPC))) {
__raw_writel(ATMEL_TC_SWTRG | ATMEL_TC_CLKDIS,
regs + ATMEL_TC_REG(group, CCR));
- else
+ tcbpwmc->bkup[group].enabled = 1;
+ } else {
__raw_writel(ATMEL_TC_SWTRG, regs +
ATMEL_TC_REG(group, CCR));
+ tcbpwmc->bkup[group].enabled = 0;
+ }
spin_unlock(&tcbpwmc->lock);
}
@@ -263,6 +275,7 @@ static int atmel_tcb_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
/* Use software trigger to apply the new setting */
__raw_writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG,
regs + ATMEL_TC_REG(group, CCR));
+ tcbpwmc->bkup[group].enabled = 1;
spin_unlock(&tcbpwmc->lock);
return 0;
}
@@ -445,10 +458,56 @@ static const struct of_device_id atmel_tcb_pwm_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, atmel_tcb_pwm_dt_ids);
+#ifdef CONFIG_PM_SLEEP
+static int atmel_tcb_pwm_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct atmel_tcb_pwm_chip *tcbpwm = platform_get_drvdata(pdev);
+ void __iomem *base = tcbpwm->tc->regs;
+ int i;
+
+ for (i = 0; i < (NPWM / 2); i++) {
+ struct atmel_tcb_channel *chan = &tcbpwm->bkup[i];
+
+ chan->cmr = readl(base + ATMEL_TC_REG(i, CMR));
+ chan->ra = readl(base + ATMEL_TC_REG(i, RA));
+ chan->rb = readl(base + ATMEL_TC_REG(i, RB));
+ chan->rc = readl(base + ATMEL_TC_REG(i, RC));
+ }
+ return 0;
+}
+
+static int atmel_tcb_pwm_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct atmel_tcb_pwm_chip *tcbpwm = platform_get_drvdata(pdev);
+ void __iomem *base = tcbpwm->tc->regs;
+ int i;
+
+ for (i = 0; i < (NPWM / 2); i++) {
+ struct atmel_tcb_channel *chan = &tcbpwm->bkup[i];
+
+ writel(chan->cmr, base + ATMEL_TC_REG(i, CMR));
+ writel(chan->ra, base + ATMEL_TC_REG(i, RA));
+ writel(chan->rb, base + ATMEL_TC_REG(i, RB));
+ writel(chan->rc, base + ATMEL_TC_REG(i, RC));
+ if (chan->enabled) {
+ writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG,
+ base + ATMEL_TC_REG(i, CCR));
+ }
+ }
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(atmel_tcb_pwm_pm_ops, atmel_tcb_pwm_suspend,
+ atmel_tcb_pwm_resume);
+
static struct platform_driver atmel_tcb_pwm_driver = {
.driver = {
.name = "atmel-tcb-pwm",
.of_match_table = atmel_tcb_pwm_dt_ids,
+ .pm = &atmel_tcb_pwm_pm_ops,
},
.probe = atmel_tcb_pwm_probe,
.remove = atmel_tcb_pwm_remove,
diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c
index 2fb30deee345..815f5333bb8f 100644
--- a/drivers/pwm/pwm-img.c
+++ b/drivers/pwm/pwm-img.c
@@ -18,6 +18,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/pwm.h>
#include <linux/regmap.h>
#include <linux/slab.h>
@@ -39,6 +40,8 @@
#define PERIP_PWM_PDM_CONTROL_CH_MASK 0x1
#define PERIP_PWM_PDM_CONTROL_CH_SHIFT(ch) ((ch) * 4)
+#define IMG_PWM_PM_TIMEOUT 1000 /* ms */
+
/*
* PWM period is specified with a timebase register,
* in number of step periods. The PWM duty cycle is also
@@ -52,6 +55,8 @@
*/
#define MIN_TMBASE_STEPS 16
+#define IMG_PWM_NPWM 4
+
struct img_pwm_soc_data {
u32 max_timebase;
};
@@ -66,6 +71,8 @@ struct img_pwm_chip {
int max_period_ns;
int min_period_ns;
const struct img_pwm_soc_data *data;
+ u32 suspend_ctrl_cfg;
+ u32 suspend_ch_cfg[IMG_PWM_NPWM];
};
static inline struct img_pwm_chip *to_img_pwm_chip(struct pwm_chip *chip)
@@ -92,6 +99,7 @@ static int img_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
unsigned long mul, output_clk_hz, input_clk_hz;
struct img_pwm_chip *pwm_chip = to_img_pwm_chip(chip);
unsigned int max_timebase = pwm_chip->data->max_timebase;
+ int ret;
if (period_ns < pwm_chip->min_period_ns ||
period_ns > pwm_chip->max_period_ns) {
@@ -123,6 +131,10 @@ static int img_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
duty = DIV_ROUND_UP(timebase * duty_ns, period_ns);
+ ret = pm_runtime_get_sync(chip->dev);
+ if (ret < 0)
+ return ret;
+
val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG);
val &= ~(PWM_CTRL_CFG_DIV_MASK << PWM_CTRL_CFG_DIV_SHIFT(pwm->hwpwm));
val |= (div & PWM_CTRL_CFG_DIV_MASK) <<
@@ -133,6 +145,9 @@ static int img_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
(timebase << PWM_CH_CFG_TMBASE_SHIFT);
img_pwm_writel(pwm_chip, PWM_CH_CFG(pwm->hwpwm), val);
+ pm_runtime_mark_last_busy(chip->dev);
+ pm_runtime_put_autosuspend(chip->dev);
+
return 0;
}
@@ -140,6 +155,11 @@ static int img_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
{
u32 val;
struct img_pwm_chip *pwm_chip = to_img_pwm_chip(chip);
+ int ret;
+
+ ret = pm_runtime_get_sync(chip->dev);
+ if (ret < 0)
+ return ret;
val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG);
val |= BIT(pwm->hwpwm);
@@ -160,6 +180,9 @@ static void img_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG);
val &= ~BIT(pwm->hwpwm);
img_pwm_writel(pwm_chip, PWM_CTRL_CFG, val);
+
+ pm_runtime_mark_last_busy(chip->dev);
+ pm_runtime_put_autosuspend(chip->dev);
}
static const struct pwm_ops img_pwm_ops = {
@@ -182,6 +205,37 @@ static const struct of_device_id img_pwm_of_match[] = {
};
MODULE_DEVICE_TABLE(of, img_pwm_of_match);
+static int img_pwm_runtime_suspend(struct device *dev)
+{
+ struct img_pwm_chip *pwm_chip = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(pwm_chip->pwm_clk);
+ clk_disable_unprepare(pwm_chip->sys_clk);
+
+ return 0;
+}
+
+static int img_pwm_runtime_resume(struct device *dev)
+{
+ struct img_pwm_chip *pwm_chip = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(pwm_chip->sys_clk);
+ if (ret < 0) {
+ dev_err(dev, "could not prepare or enable sys clock\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(pwm_chip->pwm_clk);
+ if (ret < 0) {
+ dev_err(dev, "could not prepare or enable pwm clock\n");
+ clk_disable_unprepare(pwm_chip->sys_clk);
+ return ret;
+ }
+
+ return 0;
+}
+
static int img_pwm_probe(struct platform_device *pdev)
{
int ret;
@@ -224,23 +278,20 @@ static int img_pwm_probe(struct platform_device *pdev)
return PTR_ERR(pwm->pwm_clk);
}
- ret = clk_prepare_enable(pwm->sys_clk);
- if (ret < 0) {
- dev_err(&pdev->dev, "could not prepare or enable sys clock\n");
- return ret;
- }
-
- ret = clk_prepare_enable(pwm->pwm_clk);
- if (ret < 0) {
- dev_err(&pdev->dev, "could not prepare or enable pwm clock\n");
- goto disable_sysclk;
+ pm_runtime_set_autosuspend_delay(&pdev->dev, IMG_PWM_PM_TIMEOUT);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ if (!pm_runtime_enabled(&pdev->dev)) {
+ ret = img_pwm_runtime_resume(&pdev->dev);
+ if (ret)
+ goto err_pm_disable;
}
clk_rate = clk_get_rate(pwm->pwm_clk);
if (!clk_rate) {
dev_err(&pdev->dev, "pwm clock has no frequency\n");
ret = -EINVAL;
- goto disable_pwmclk;
+ goto err_suspend;
}
/* The maximum input clock divider is 512 */
@@ -255,21 +306,23 @@ static int img_pwm_probe(struct platform_device *pdev)
pwm->chip.dev = &pdev->dev;
pwm->chip.ops = &img_pwm_ops;
pwm->chip.base = -1;
- pwm->chip.npwm = 4;
+ pwm->chip.npwm = IMG_PWM_NPWM;
ret = pwmchip_add(&pwm->chip);
if (ret < 0) {
dev_err(&pdev->dev, "pwmchip_add failed: %d\n", ret);
- goto disable_pwmclk;
+ goto err_suspend;
}
platform_set_drvdata(pdev, pwm);
return 0;
-disable_pwmclk:
- clk_disable_unprepare(pwm->pwm_clk);
-disable_sysclk:
- clk_disable_unprepare(pwm->sys_clk);
+err_suspend:
+ if (!pm_runtime_enabled(&pdev->dev))
+ img_pwm_runtime_suspend(&pdev->dev);
+err_pm_disable:
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
return ret;
}
@@ -278,6 +331,11 @@ static int img_pwm_remove(struct platform_device *pdev)
struct img_pwm_chip *pwm_chip = platform_get_drvdata(pdev);
u32 val;
unsigned int i;
+ int ret;
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0)
+ return ret;
for (i = 0; i < pwm_chip->chip.npwm; i++) {
val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG);
@@ -285,15 +343,79 @@ static int img_pwm_remove(struct platform_device *pdev)
img_pwm_writel(pwm_chip, PWM_CTRL_CFG, val);
}
- clk_disable_unprepare(pwm_chip->pwm_clk);
- clk_disable_unprepare(pwm_chip->sys_clk);
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ img_pwm_runtime_suspend(&pdev->dev);
return pwmchip_remove(&pwm_chip->chip);
}
+#ifdef CONFIG_PM_SLEEP
+static int img_pwm_suspend(struct device *dev)
+{
+ struct img_pwm_chip *pwm_chip = dev_get_drvdata(dev);
+ int i, ret;
+
+ if (pm_runtime_status_suspended(dev)) {
+ ret = img_pwm_runtime_resume(dev);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < pwm_chip->chip.npwm; i++)
+ pwm_chip->suspend_ch_cfg[i] = img_pwm_readl(pwm_chip,
+ PWM_CH_CFG(i));
+
+ pwm_chip->suspend_ctrl_cfg = img_pwm_readl(pwm_chip, PWM_CTRL_CFG);
+
+ img_pwm_runtime_suspend(dev);
+
+ return 0;
+}
+
+static int img_pwm_resume(struct device *dev)
+{
+ struct img_pwm_chip *pwm_chip = dev_get_drvdata(dev);
+ int ret;
+ int i;
+
+ ret = img_pwm_runtime_resume(dev);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < pwm_chip->chip.npwm; i++)
+ img_pwm_writel(pwm_chip, PWM_CH_CFG(i),
+ pwm_chip->suspend_ch_cfg[i]);
+
+ img_pwm_writel(pwm_chip, PWM_CTRL_CFG, pwm_chip->suspend_ctrl_cfg);
+
+ for (i = 0; i < pwm_chip->chip.npwm; i++)
+ if (pwm_chip->suspend_ctrl_cfg & BIT(i))
+ regmap_update_bits(pwm_chip->periph_regs,
+ PERIP_PWM_PDM_CONTROL,
+ PERIP_PWM_PDM_CONTROL_CH_MASK <<
+ PERIP_PWM_PDM_CONTROL_CH_SHIFT(i),
+ 0);
+
+ if (pm_runtime_status_suspended(dev))
+ img_pwm_runtime_suspend(dev);
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops img_pwm_pm_ops = {
+ SET_RUNTIME_PM_OPS(img_pwm_runtime_suspend,
+ img_pwm_runtime_resume,
+ NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(img_pwm_suspend, img_pwm_resume)
+};
+
static struct platform_driver img_pwm_driver = {
.driver = {
.name = "img-pwm",
+ .pm = &img_pwm_pm_ops,
.of_match_table = img_pwm_of_match,
},
.probe = img_pwm_probe,
diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c
index b52f3afb2ba1..f5d97e0ad52b 100644
--- a/drivers/pwm/pwm-mediatek.c
+++ b/drivers/pwm/pwm-mediatek.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/slab.h>
@@ -40,11 +41,19 @@ enum {
MTK_CLK_PWM3,
MTK_CLK_PWM4,
MTK_CLK_PWM5,
+ MTK_CLK_PWM6,
+ MTK_CLK_PWM7,
+ MTK_CLK_PWM8,
MTK_CLK_MAX,
};
-static const char * const mtk_pwm_clk_name[] = {
- "main", "top", "pwm1", "pwm2", "pwm3", "pwm4", "pwm5"
+static const char * const mtk_pwm_clk_name[MTK_CLK_MAX] = {
+ "main", "top", "pwm1", "pwm2", "pwm3", "pwm4", "pwm5", "pwm6", "pwm7",
+ "pwm8"
+};
+
+struct mtk_pwm_platform_data {
+ unsigned int num_pwms;
};
/**
@@ -59,6 +68,10 @@ struct mtk_pwm_chip {
struct clk *clks[MTK_CLK_MAX];
};
+static const unsigned int mtk_pwm_reg_offset[] = {
+ 0x0010, 0x0050, 0x0090, 0x00d0, 0x0110, 0x0150, 0x0190, 0x0220
+};
+
static inline struct mtk_pwm_chip *to_mtk_pwm_chip(struct pwm_chip *chip)
{
return container_of(chip, struct mtk_pwm_chip, chip);
@@ -103,14 +116,14 @@ static void mtk_pwm_clk_disable(struct pwm_chip *chip, struct pwm_device *pwm)
static inline u32 mtk_pwm_readl(struct mtk_pwm_chip *chip, unsigned int num,
unsigned int offset)
{
- return readl(chip->regs + 0x10 + (num * 0x40) + offset);
+ return readl(chip->regs + mtk_pwm_reg_offset[num] + offset);
}
static inline void mtk_pwm_writel(struct mtk_pwm_chip *chip,
unsigned int num, unsigned int offset,
u32 value)
{
- writel(value, chip->regs + 0x10 + (num * 0x40) + offset);
+ writel(value, chip->regs + mtk_pwm_reg_offset[num] + offset);
}
static int mtk_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -185,6 +198,7 @@ static const struct pwm_ops mtk_pwm_ops = {
static int mtk_pwm_probe(struct platform_device *pdev)
{
+ const struct mtk_pwm_platform_data *data;
struct mtk_pwm_chip *pc;
struct resource *res;
unsigned int i;
@@ -194,15 +208,22 @@ static int mtk_pwm_probe(struct platform_device *pdev)
if (!pc)
return -ENOMEM;
+ data = of_device_get_match_data(&pdev->dev);
+ if (data == NULL)
+ return -EINVAL;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pc->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(pc->regs))
return PTR_ERR(pc->regs);
- for (i = 0; i < MTK_CLK_MAX; i++) {
+ for (i = 0; i < data->num_pwms + 2; i++) {
pc->clks[i] = devm_clk_get(&pdev->dev, mtk_pwm_clk_name[i]);
- if (IS_ERR(pc->clks[i]))
+ if (IS_ERR(pc->clks[i])) {
+ dev_err(&pdev->dev, "clock: %s fail: %ld\n",
+ mtk_pwm_clk_name[i], PTR_ERR(pc->clks[i]));
return PTR_ERR(pc->clks[i]);
+ }
}
platform_set_drvdata(pdev, pc);
@@ -210,7 +231,7 @@ static int mtk_pwm_probe(struct platform_device *pdev)
pc->chip.dev = &pdev->dev;
pc->chip.ops = &mtk_pwm_ops;
pc->chip.base = -1;
- pc->chip.npwm = 5;
+ pc->chip.npwm = data->num_pwms;
ret = pwmchip_add(&pc->chip);
if (ret < 0) {
@@ -228,9 +249,23 @@ static int mtk_pwm_remove(struct platform_device *pdev)
return pwmchip_remove(&pc->chip);
}
+static const struct mtk_pwm_platform_data mt2712_pwm_data = {
+ .num_pwms = 8,
+};
+
+static const struct mtk_pwm_platform_data mt7622_pwm_data = {
+ .num_pwms = 6,
+};
+
+static const struct mtk_pwm_platform_data mt7623_pwm_data = {
+ .num_pwms = 5,
+};
+
static const struct of_device_id mtk_pwm_of_match[] = {
- { .compatible = "mediatek,mt7623-pwm" },
- { }
+ { .compatible = "mediatek,mt2712-pwm", .data = &mt2712_pwm_data },
+ { .compatible = "mediatek,mt7622-pwm", .data = &mt7622_pwm_data },
+ { .compatible = "mediatek,mt7623-pwm", .data = &mt7623_pwm_data },
+ { },
};
MODULE_DEVICE_TABLE(of, mtk_pwm_of_match);
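A small standalone check, not part of the patch, showing why the driver above switches from the old readl(chip->regs + 0x10 + (num * 0x40) + offset) computation to a lookup table: the linear formula still matches the table for the first seven channels, but the eighth channel's register block sits at 0x220 rather than the 0x1d0 the formula would predict.

#include <assert.h>

static const unsigned int mtk_pwm_reg_offset[] = {
        0x0010, 0x0050, 0x0090, 0x00d0, 0x0110, 0x0150, 0x0190, 0x0220
};

int main(void)
{
        int num;

        /* channels 1..7 still follow the old linear address formula */
        for (num = 0; num < 7; num++)
                assert(mtk_pwm_reg_offset[num] == 0x10 + num * 0x40);

        /* PWM8 breaks the pattern: 0x220 instead of 0x10 + 7 * 0x40 = 0x1d0 */
        assert(mtk_pwm_reg_offset[7] != 0x10 + 7 * 0x40);

        return 0;
}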
diff --git a/drivers/pwm/pwm-stm32-lp.c b/drivers/pwm/pwm-stm32-lp.c
index 9793b296108f..1ac9e4384142 100644
--- a/drivers/pwm/pwm-stm32-lp.c
+++ b/drivers/pwm/pwm-stm32-lp.c
@@ -219,8 +219,7 @@ static int stm32_pwm_lp_remove(struct platform_device *pdev)
unsigned int i;
for (i = 0; i < priv->chip.npwm; i++)
- if (pwm_is_enabled(&priv->chip.pwms[i]))
- pwm_disable(&priv->chip.pwms[i]);
+ pwm_disable(&priv->chip.pwms[i]);
return pwmchip_remove(&priv->chip);
}
diff --git a/drivers/pwm/pwm-sun4i.c b/drivers/pwm/pwm-sun4i.c
index 6d23f1d1c9b7..334199c58f1d 100644
--- a/drivers/pwm/pwm-sun4i.c
+++ b/drivers/pwm/pwm-sun4i.c
@@ -368,14 +368,15 @@ static int sun4i_pwm_probe(struct platform_device *pdev)
struct sun4i_pwm_chip *pwm;
struct resource *res;
int ret;
- const struct of_device_id *match;
-
- match = of_match_device(sun4i_pwm_dt_ids, &pdev->dev);
pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL);
if (!pwm)
return -ENOMEM;
+ pwm->data = of_device_get_match_data(&pdev->dev);
+ if (!pwm->data)
+ return -ENODEV;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pwm->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(pwm->base))
@@ -385,7 +386,6 @@ static int sun4i_pwm_probe(struct platform_device *pdev)
if (IS_ERR(pwm->clk))
return PTR_ERR(pwm->clk);
- pwm->data = match->data;
pwm->chip.dev = &pdev->dev;
pwm->chip.ops = &sun4i_pwm_ops;
pwm->chip.base = -1;
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index e0e58f3b1420..b59a31b079a5 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -433,6 +433,19 @@ config RTC_DRV_PCF85063
This driver can also be built as a module. If so, the module
will be called rtc-pcf85063.
+config RTC_DRV_PCF85363
+ tristate "NXP PCF85363"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ If you say yes here you get support for the PCF85363 RTC chip.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-pcf85363.
+
+ The nvmem interface will be named pcf85363-#, where # is the
+ zero-based instance number.
+
config RTC_DRV_PCF8563
tristate "Philips PCF8563/Epson RTC8564"
help
@@ -1174,6 +1187,17 @@ config RTC_DRV_WM8350
This driver can also be built as a module. If so, the module
will be called "rtc-wm8350".
+config RTC_DRV_SC27XX
+ tristate "Spreadtrum SC27xx RTC"
+ depends on MFD_SC27XX_PMIC || COMPILE_TEST
+ help
+ If you say Y here you will get support for the RTC subsystem
+ of the Spreadtrum SC27xx series PMICs. The SC27xx series PMICs
+ includes the SC2720, SC2721, SC2723, SC2730 and SC2731 chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-sc27xx.
+
config RTC_DRV_SPEAR
tristate "SPEAR ST RTC"
depends on PLAT_SPEAR || COMPILE_TEST
@@ -1706,14 +1730,24 @@ config RTC_DRV_MOXART
will be called rtc-moxart
config RTC_DRV_MT6397
- tristate "Mediatek Real Time Clock driver"
+ tristate "MediaTek PMIC based RTC"
depends on MFD_MT6397 || (COMPILE_TEST && IRQ_DOMAIN)
help
- This selects the Mediatek(R) RTC driver. RTC is part of Mediatek
+ This selects the MediaTek(R) RTC driver. RTC is part of MediaTek
MT6397 PMIC. You should enable MT6397 PMIC MFD before select
- Mediatek(R) RTC driver.
+ MediaTek(R) RTC driver.
+
+ If you want to use MediaTek(R) RTC interface, select Y or M here.
- If you want to use Mediatek(R) RTC interface, select Y or M here.
+config RTC_DRV_MT7622
+ tristate "MediaTek SoC based RTC"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ help
+ This enables support for the real time clock built into the MediaTek
+ SoCs.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-mt7622.
config RTC_DRV_XGENE
tristate "APM X-Gene RTC"
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 0bf1fc02b82c..f2f50c11dc38 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -103,6 +103,7 @@ obj-$(CONFIG_RTC_DRV_MPC5121) += rtc-mpc5121.o
obj-$(CONFIG_RTC_DRV_VRTC) += rtc-mrst.o
obj-$(CONFIG_RTC_DRV_MSM6242) += rtc-msm6242.o
obj-$(CONFIG_RTC_DRV_MT6397) += rtc-mt6397.o
+obj-$(CONFIG_RTC_DRV_MT7622) += rtc-mt7622.o
obj-$(CONFIG_RTC_DRV_MV) += rtc-mv.o
obj-$(CONFIG_RTC_DRV_MXC) += rtc-mxc.o
obj-$(CONFIG_RTC_DRV_NUC900) += rtc-nuc900.o
@@ -114,6 +115,7 @@ obj-$(CONFIG_RTC_DRV_PCF2123) += rtc-pcf2123.o
obj-$(CONFIG_RTC_DRV_PCF2127) += rtc-pcf2127.o
obj-$(CONFIG_RTC_DRV_PCF50633) += rtc-pcf50633.o
obj-$(CONFIG_RTC_DRV_PCF85063) += rtc-pcf85063.o
+obj-$(CONFIG_RTC_DRV_PCF85363) += rtc-pcf85363.o
obj-$(CONFIG_RTC_DRV_PCF8523) += rtc-pcf8523.o
obj-$(CONFIG_RTC_DRV_PCF8563) += rtc-pcf8563.o
obj-$(CONFIG_RTC_DRV_PCF8583) += rtc-pcf8583.o
@@ -144,6 +146,7 @@ obj-$(CONFIG_RTC_DRV_S35390A) += rtc-s35390a.o
obj-$(CONFIG_RTC_DRV_S3C) += rtc-s3c.o
obj-$(CONFIG_RTC_DRV_S5M) += rtc-s5m.o
obj-$(CONFIG_RTC_DRV_SA1100) += rtc-sa1100.o
+obj-$(CONFIG_RTC_DRV_SC27XX) += rtc-sc27xx.o
obj-$(CONFIG_RTC_DRV_SH) += rtc-sh.o
obj-$(CONFIG_RTC_DRV_SIRFSOC) += rtc-sirfsoc.o
obj-$(CONFIG_RTC_DRV_SNVS) += rtc-snvs.o
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 8cec9a02c0b8..672b192f8153 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -779,7 +779,7 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
}
timerqueue_add(&rtc->timerqueue, &timer->node);
- if (!next) {
+ if (!next || ktime_before(timer->node.expires, next->expires)) {
struct rtc_wkalrm alarm;
int err;
alarm.time = rtc_ktime_to_tm(timer->node.expires);
@@ -1004,6 +1004,10 @@ int rtc_read_offset(struct rtc_device *rtc, long *offset)
* to compensate for differences in the actual clock rate due to temperature,
* the crystal, capacitor, etc.
*
+ * The adjustment applied is as follows:
+ * t = t0 * (1 + offset * 1e-9)
+ * where t0 is the measured length of 1 RTC second with offset = 0
+ *
* Kernel interface to adjust an rtc clock offset.
* Return 0 on success, or a negative number on error.
 * If the rtc offset is not settable (or not implemented), return -EINVAL
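A worked example of the t = t0 * (1 + offset * 1e-9) relation documented above — plain standalone arithmetic, not kernel code, and offset_for_drift is a name invented for this sketch. An RTC that gains about 2 seconds per day needs each of its seconds stretched, which by that formula corresponds to a positive offset of roughly (2 / 86400) * 1e9 ≈ 23148 parts per billion.

#include <stdio.h>

/*
 * offset = (t_ideal / t_measured - 1) * 1e9, where a clock gaining "gained"
 * seconds per day has t_measured = 86400 / (86400 + gained) ideal seconds.
 */
static long offset_for_drift(double gained_seconds_per_day)
{
        return (long)((gained_seconds_per_day / 86400.0) * 1e9);
}

int main(void)
{
        printf("offset for a clock gaining 2 s/day: %ld ppb\n",
               offset_for_drift(2.0));  /* prints 23148 */
        return 0;
}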
diff --git a/drivers/rtc/rtc-abx80x.c b/drivers/rtc/rtc-abx80x.c
index fea9a60b06cf..b033bc556f5d 100644
--- a/drivers/rtc/rtc-abx80x.c
+++ b/drivers/rtc/rtc-abx80x.c
@@ -614,12 +614,12 @@ static int abx80x_probe(struct i2c_client *client,
if (err)
return err;
- rtc = devm_rtc_device_register(&client->dev, "abx8xx",
- &abx80x_rtc_ops, THIS_MODULE);
-
+ rtc = devm_rtc_allocate_device(&client->dev);
if (IS_ERR(rtc))
return PTR_ERR(rtc);
+ rtc->ops = &abx80x_rtc_ops;
+
i2c_set_clientdata(client, rtc);
if (client->irq > 0) {
@@ -646,10 +646,14 @@ static int abx80x_probe(struct i2c_client *client,
err = devm_add_action_or_reset(&client->dev,
rtc_calib_remove_sysfs_group,
&client->dev);
- if (err)
+ if (err) {
dev_err(&client->dev,
"Failed to add sysfs cleanup action: %d\n",
err);
+ return err;
+ }
+
+ err = rtc_register_device(rtc);
return err;
}
diff --git a/drivers/rtc/rtc-armada38x.c b/drivers/rtc/rtc-armada38x.c
index 21f355c37eab..1e4978c96ffd 100644
--- a/drivers/rtc/rtc-armada38x.c
+++ b/drivers/rtc/rtc-armada38x.c
@@ -28,6 +28,8 @@
#define RTC_IRQ_AL_EN BIT(0)
#define RTC_IRQ_FREQ_EN BIT(1)
#define RTC_IRQ_FREQ_1HZ BIT(2)
+#define RTC_CCR 0x18
+#define RTC_CCR_MODE BIT(15)
#define RTC_TIME 0xC
#define RTC_ALARM1 0x10
@@ -343,18 +345,117 @@ static irqreturn_t armada38x_rtc_alarm_irq(int irq, void *data)
return IRQ_HANDLED;
}
+/*
+ * The information given in the Armada 388 functional spec is complex.
+ * They give two different formulas for calculating the offset value,
+ * but when considering "Offset" as an 8-bit signed integer, they both
+ * reduce down to (we shall rename "Offset" as "val" here):
+ *
+ * val = (f_ideal / f_measured - 1) / resolution where f_ideal = 32768
+ *
+ * Converting to time, f = 1/t:
+ * val = (t_measured / t_ideal - 1) / resolution where t_ideal = 1/32768
+ *
+ * => t_measured / t_ideal = val * resolution + 1
+ *
+ * "offset" in the RTC interface is defined as:
+ * t = t0 * (1 + offset * 1e-9)
+ * where t is the desired period, t0 is the measured period with a zero
+ * offset, which is t_measured above. With t0 = t_measured and t = t_ideal,
+ * offset = (t_ideal / t_measured - 1) / 1e-9
+ *
+ * => t_ideal / t_measured = offset * 1e-9 + 1
+ *
+ * so:
+ *
+ * offset * 1e-9 + 1 = 1 / (val * resolution + 1)
+ *
+ * We want "resolution" to be an integer, so resolution = R * 1e-9, giving
+ * offset = 1e18 / (val * R + 1e9) - 1e9
+ * val = (1e18 / (offset + 1e9) - 1e9) / R
+ * with a common transformation:
+ * f(x) = 1e18 / (x + 1e9) - 1e9
+ * offset = f(val * R)
+ * val = f(offset) / R
+ *
+ * Armada 38x supports two modes, fine mode (954ppb) and coarse mode (3815ppb).
+ */
+static long armada38x_ppb_convert(long ppb)
+{
+ long div = ppb + 1000000000L;
+
+ return div_s64(1000000000000000000LL + div / 2, div) - 1000000000L;
+}
+
+static int armada38x_rtc_read_offset(struct device *dev, long *offset)
+{
+ struct armada38x_rtc *rtc = dev_get_drvdata(dev);
+ unsigned long ccr, flags;
+ long ppb_cor;
+
+ spin_lock_irqsave(&rtc->lock, flags);
+ ccr = rtc->data->read_rtc_reg(rtc, RTC_CCR);
+ spin_unlock_irqrestore(&rtc->lock, flags);
+
+ ppb_cor = (ccr & RTC_CCR_MODE ? 3815 : 954) * (s8)ccr;
+ /* ppb_cor + 1000000000L can never be zero */
+ *offset = armada38x_ppb_convert(ppb_cor);
+
+ return 0;
+}
+
+static int armada38x_rtc_set_offset(struct device *dev, long offset)
+{
+ struct armada38x_rtc *rtc = dev_get_drvdata(dev);
+ unsigned long ccr = 0;
+ long ppb_cor, off;
+
+ /*
+ * The maximum ppb_cor is -128 * 3815 .. 127 * 3815, but we
+ * need to clamp the input. This equates to -484270 .. 488558.
+ * Not only is this to stop out of range "off" but also to
+ * avoid the division by zero in armada38x_ppb_convert().
+ */
+ offset = clamp(offset, -484270L, 488558L);
+
+ ppb_cor = armada38x_ppb_convert(offset);
+
+ /*
+ * Use low update mode where possible, which gives a better
+ * resolution of correction.
+ */
+ off = DIV_ROUND_CLOSEST(ppb_cor, 954);
+ if (off > 127 || off < -128) {
+ ccr = RTC_CCR_MODE;
+ off = DIV_ROUND_CLOSEST(ppb_cor, 3815);
+ }
+
+ /*
+ * Armada 388 requires a bit pattern in bits 14..8 depending on
+ * the sign bit: { 0, ~S, S, S, S, S, S }
+ */
+ ccr |= (off & 0x3fff) ^ 0x2000;
+ rtc_delayed_write(ccr, rtc, RTC_CCR);
+
+ return 0;
+}
+
static const struct rtc_class_ops armada38x_rtc_ops = {
.read_time = armada38x_rtc_read_time,
.set_time = armada38x_rtc_set_time,
.read_alarm = armada38x_rtc_read_alarm,
.set_alarm = armada38x_rtc_set_alarm,
.alarm_irq_enable = armada38x_rtc_alarm_irq_enable,
+ .read_offset = armada38x_rtc_read_offset,
+ .set_offset = armada38x_rtc_set_offset,
};
static const struct rtc_class_ops armada38x_rtc_ops_noirq = {
.read_time = armada38x_rtc_read_time,
.set_time = armada38x_rtc_set_time,
.read_alarm = armada38x_rtc_read_alarm,
+ .read_offset = armada38x_rtc_read_offset,
+ .set_offset = armada38x_rtc_set_offset,
};
static const struct armada38x_rtc_data armada38x_data = {
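The clamp window -484270 .. 488558 used in armada38x_rtc_set_offset() above follows from the derivation comment: with val limited to -128..127 and the coarse 3815 ppb resolution, ppb_cor spans -488320..484505, and pushing those extremes through f(x) = 1e18 / (x + 1e9) - 1e9 lands exactly on the clamp bounds. A standalone copy of the conversion (same rounding as armada38x_ppb_convert(), not part of the patch) reproduces the numbers.

#include <stdio.h>

static long long ppb_convert(long long ppb)
{
        long long div = ppb + 1000000000LL;

        /* rounded division, as in the driver's div_s64() expression */
        return (1000000000000000000LL + div / 2) / div - 1000000000LL;
}

int main(void)
{
        printf("offset at val = -128, coarse mode: %lld\n",
               ppb_convert(-128LL * 3815));     /* 488558  */
        printf("offset at val =  127, coarse mode: %lld\n",
               ppb_convert(127LL * 3815));      /* -484270 */
        return 0;
}

The (off & 0x3fff) ^ 0x2000 write then yields the documented pattern in bits 14..8: sign extension of the -128..127 value fills bits 13..8 with the sign bit S, the mask keeps bit 14 clear, and the XOR flips bit 13 to its complement, giving { 0, ~S, S, S, S, S, S }.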
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index e221b78b6f10..de81ecedd571 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -42,8 +42,6 @@
#define at91_rtc_write(field, val) \
writel_relaxed((val), at91_rtc_regs + field)
-#define AT91_RTC_EPOCH 1900UL /* just like arch/arm/common/rtctime.c */
-
struct at91_rtc_config {
bool use_shadow_imr;
};
@@ -51,7 +49,6 @@ struct at91_rtc_config {
static const struct at91_rtc_config *at91_rtc_config;
static DECLARE_COMPLETION(at91_rtc_updated);
static DECLARE_COMPLETION(at91_rtc_upd_rdy);
-static unsigned int at91_alarm_year = AT91_RTC_EPOCH;
static void __iomem *at91_rtc_regs;
static int irq;
static DEFINE_SPINLOCK(at91_rtc_lock);
@@ -131,8 +128,7 @@ static void at91_rtc_decodetime(unsigned int timereg, unsigned int calreg,
/*
* The Calendar Alarm register does not have a field for
- * the year - so these will return an invalid value. When an
- * alarm is set, at91_alarm_year will store the current year.
+ * the year - so these will return an invalid value.
*/
tm->tm_year = bcd2bin(date & AT91_RTC_CENT) * 100; /* century */
tm->tm_year += bcd2bin((date & AT91_RTC_YEAR) >> 8); /* year */
@@ -208,15 +204,14 @@ static int at91_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
struct rtc_time *tm = &alrm->time;
at91_rtc_decodetime(AT91_RTC_TIMALR, AT91_RTC_CALALR, tm);
- tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year);
- tm->tm_year = at91_alarm_year - 1900;
+ tm->tm_year = -1;
alrm->enabled = (at91_rtc_read_imr() & AT91_RTC_ALARM)
? 1 : 0;
- dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
- 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
- tm->tm_hour, tm->tm_min, tm->tm_sec);
+ dev_dbg(dev, "%s(): %02d-%02d %02d:%02d:%02d %sabled\n", __func__,
+ tm->tm_mon, tm->tm_mday, tm->tm_hour, tm->tm_min, tm->tm_sec,
+ alrm->enabled ? "en" : "dis");
return 0;
}
@@ -230,8 +225,6 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
at91_rtc_decodetime(AT91_RTC_TIMR, AT91_RTC_CALR, &tm);
- at91_alarm_year = tm.tm_year;
-
tm.tm_mon = alrm->time.tm_mon;
tm.tm_mday = alrm->time.tm_mday;
tm.tm_hour = alrm->time.tm_hour;
@@ -255,7 +248,7 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
}
dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
- at91_alarm_year, tm.tm_mon, tm.tm_mday, tm.tm_hour,
+ tm.tm_year, tm.tm_mon, tm.tm_mday, tm.tm_hour,
tm.tm_min, tm.tm_sec);
return 0;
diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c
index 72b22935eb62..d8df2e9e14ad 100644
--- a/drivers/rtc/rtc-ds1305.c
+++ b/drivers/rtc/rtc-ds1305.c
@@ -514,56 +514,43 @@ static void msg_init(struct spi_message *m, struct spi_transfer *x,
spi_message_add_tail(x, m);
}
-static ssize_t
-ds1305_nvram_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
+static int ds1305_nvram_read(void *priv, unsigned int off, void *buf,
+ size_t count)
{
- struct spi_device *spi;
+ struct ds1305 *ds1305 = priv;
+ struct spi_device *spi = ds1305->spi;
u8 addr;
struct spi_message m;
struct spi_transfer x[2];
- int status;
-
- spi = to_spi_device(kobj_to_dev(kobj));
addr = DS1305_NVRAM + off;
msg_init(&m, x, &addr, count, NULL, buf);
- status = spi_sync(spi, &m);
- if (status < 0)
- dev_err(&spi->dev, "nvram %s error %d\n", "read", status);
- return (status < 0) ? status : count;
+ return spi_sync(spi, &m);
}
-static ssize_t
-ds1305_nvram_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
+static int ds1305_nvram_write(void *priv, unsigned int off, void *buf,
+ size_t count)
{
- struct spi_device *spi;
+ struct ds1305 *ds1305 = priv;
+ struct spi_device *spi = ds1305->spi;
u8 addr;
struct spi_message m;
struct spi_transfer x[2];
- int status;
-
- spi = to_spi_device(kobj_to_dev(kobj));
addr = (DS1305_WRITE | DS1305_NVRAM) + off;
msg_init(&m, x, &addr, count, buf, NULL);
- status = spi_sync(spi, &m);
- if (status < 0)
- dev_err(&spi->dev, "nvram %s error %d\n", "write", status);
- return (status < 0) ? status : count;
+ return spi_sync(spi, &m);
}
-static struct bin_attribute nvram = {
- .attr.name = "nvram",
- .attr.mode = S_IRUGO | S_IWUSR,
- .read = ds1305_nvram_read,
- .write = ds1305_nvram_write,
- .size = DS1305_NVRAM_LEN,
+static struct nvmem_config ds1305_nvmem_cfg = {
+ .name = "ds1305_nvram",
+ .word_size = 1,
+ .stride = 1,
+ .size = DS1305_NVRAM_LEN,
+ .reg_read = ds1305_nvram_read,
+ .reg_write = ds1305_nvram_write,
};
/*----------------------------------------------------------------------*/
@@ -708,10 +695,19 @@ static int ds1305_probe(struct spi_device *spi)
dev_dbg(&spi->dev, "AM/PM\n");
/* register RTC ... from here on, ds1305->ctrl needs locking */
- ds1305->rtc = devm_rtc_device_register(&spi->dev, "ds1305",
- &ds1305_ops, THIS_MODULE);
+ ds1305->rtc = devm_rtc_allocate_device(&spi->dev);
if (IS_ERR(ds1305->rtc)) {
- status = PTR_ERR(ds1305->rtc);
+ return PTR_ERR(ds1305->rtc);
+ }
+
+ ds1305->rtc->ops = &ds1305_ops;
+
+ ds1305_nvmem_cfg.priv = ds1305;
+ ds1305->rtc->nvmem_config = &ds1305_nvmem_cfg;
+ ds1305->rtc->nvram_old_abi = true;
+
+ status = rtc_register_device(ds1305->rtc);
+ if (status) {
dev_dbg(&spi->dev, "register rtc --> %d\n", status);
return status;
}
@@ -734,12 +730,6 @@ static int ds1305_probe(struct spi_device *spi)
}
}
- /* export NVRAM */
- status = sysfs_create_bin_file(&spi->dev.kobj, &nvram);
- if (status < 0) {
- dev_err(&spi->dev, "register nvram --> %d\n", status);
- }
-
return 0;
}
@@ -747,8 +737,6 @@ static int ds1305_remove(struct spi_device *spi)
{
struct ds1305 *ds1305 = spi_get_drvdata(spi);
- sysfs_remove_bin_file(&spi->dev.kobj, &nvram);
-
/* carefully shut down irq and workqueue, if present */
if (spi->irq) {
set_bit(FLAG_EXITING, &ds1305->flags);
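
For reference, the ds1305 conversion above (and the ds1511 and m48t86 conversions further down) all follow the same shape: drop the hand-rolled sysfs bin_attribute and let the RTC core expose the NVRAM through an nvmem provider. Below is a minimal sketch of that pattern, not part of the patch, assuming a hypothetical foo_rtc platform driver; the names, the 64-byte size and the stubbed read/write bodies are placeholders.

#include <linux/err.h>
#include <linux/nvmem-provider.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>

#define FOO_NVRAM_LEN	64	/* placeholder size, not from this patch */

/* reg_read/reg_write get back the 'priv' pointer set in the config below. */
static int foo_nvram_read(void *priv, unsigned int off, void *buf, size_t count)
{
	/* copy 'count' bytes of device NVRAM starting at 'off' into 'buf' */
	return 0;
}

static int foo_nvram_write(void *priv, unsigned int off, void *buf, size_t count)
{
	/* program 'count' bytes from 'buf' into device NVRAM at 'off' */
	return 0;
}

static struct nvmem_config foo_nvmem_cfg = {
	.name		= "foo_nvram",
	.word_size	= 1,
	.stride		= 1,
	.size		= FOO_NVRAM_LEN,
	.reg_read	= foo_nvram_read,
	.reg_write	= foo_nvram_write,
};

static int foo_rtc_probe(struct platform_device *pdev)
{
	struct rtc_device *rtc;

	rtc = devm_rtc_allocate_device(&pdev->dev);
	if (IS_ERR(rtc))
		return PTR_ERR(rtc);

	/* rtc->ops = &foo_rtc_ops;  (the usual rtc_class_ops, omitted here) */

	foo_nvmem_cfg.priv = pdev;	/* handed back as 'priv' above */
	rtc->nvmem_config = &foo_nvmem_cfg;
	rtc->nvram_old_abi = true;	/* keep the legacy sysfs "nvram" file */

	return rtc_register_device(rtc);
}
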
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index e7d9215c9201..923dde912f60 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -325,6 +325,10 @@ static const struct of_device_id ds1307_of_match[] = {
.compatible = "isil,isl12057",
.data = (void *)ds_1337
},
+ {
+ .compatible = "epson,rx8130",
+ .data = (void *)rx_8130
+ },
{ }
};
MODULE_DEVICE_TABLE(of, ds1307_of_match);
@@ -348,6 +352,7 @@ static const struct acpi_device_id ds1307_acpi_ids[] = {
{ .id = "PT7C4338", .driver_data = ds_1307 },
{ .id = "RX8025", .driver_data = rx_8025 },
{ .id = "ISL12057", .driver_data = ds_1337 },
+ { .id = "RX8130", .driver_data = rx_8130 },
{ }
};
MODULE_DEVICE_TABLE(acpi, ds1307_acpi_ids);
@@ -787,8 +792,6 @@ static int rx8130_alarm_irq_enable(struct device *dev, unsigned int enabled)
* Alarm support for mcp794xx devices.
*/
-#define MCP794XX_REG_WEEKDAY 0x3
-#define MCP794XX_REG_WEEKDAY_WDAY_MASK 0x7
#define MCP794XX_REG_CONTROL 0x07
# define MCP794XX_BIT_ALM0_EN 0x10
# define MCP794XX_BIT_ALM1_EN 0x20
@@ -877,15 +880,38 @@ static int mcp794xx_read_alarm(struct device *dev, struct rtc_wkalrm *t)
return 0;
}
+/*
+ * The RTC may hold an arbitrary weekday, so compute the alarm weekday from
+ * the current weekday read back from the RTC timekeeping registers.
+ */
+static int mcp794xx_alm_weekday(struct device *dev, struct rtc_time *tm_alarm)
+{
+ struct rtc_time tm_now;
+ int days_now, days_alarm, ret;
+
+ ret = ds1307_get_time(dev, &tm_now);
+ if (ret)
+ return ret;
+
+ days_now = div_s64(rtc_tm_to_time64(&tm_now), 24 * 60 * 60);
+ days_alarm = div_s64(rtc_tm_to_time64(tm_alarm), 24 * 60 * 60);
+
+ return (tm_now.tm_wday + days_alarm - days_now) % 7 + 1;
+}
+
static int mcp794xx_set_alarm(struct device *dev, struct rtc_wkalrm *t)
{
struct ds1307 *ds1307 = dev_get_drvdata(dev);
unsigned char regs[10];
- int ret;
+ int wday, ret;
if (!test_bit(HAS_ALARM, &ds1307->flags))
return -EINVAL;
+ wday = mcp794xx_alm_weekday(dev, &t->time);
+ if (wday < 0)
+ return wday;
+
dev_dbg(dev, "%s, sec=%d min=%d hour=%d wday=%d mday=%d mon=%d "
"enabled=%d pending=%d\n", __func__,
t->time.tm_sec, t->time.tm_min, t->time.tm_hour,
@@ -902,7 +928,7 @@ static int mcp794xx_set_alarm(struct device *dev, struct rtc_wkalrm *t)
regs[3] = bin2bcd(t->time.tm_sec);
regs[4] = bin2bcd(t->time.tm_min);
regs[5] = bin2bcd(t->time.tm_hour);
- regs[6] = bin2bcd(t->time.tm_wday + 1);
+ regs[6] = wday;
regs[7] = bin2bcd(t->time.tm_mday);
regs[8] = bin2bcd(t->time.tm_mon + 1);
@@ -1354,14 +1380,12 @@ static int ds1307_probe(struct i2c_client *client,
{
struct ds1307 *ds1307;
int err = -ENODEV;
- int tmp, wday;
+ int tmp;
const struct chip_desc *chip;
bool want_irq;
bool ds1307_can_wakeup_device = false;
unsigned char regs[8];
struct ds1307_platform_data *pdata = dev_get_platdata(&client->dev);
- struct rtc_time tm;
- unsigned long timestamp;
u8 trickle_charger_setup = 0;
ds1307 = devm_kzalloc(&client->dev, sizeof(struct ds1307), GFP_KERNEL);
@@ -1641,25 +1665,6 @@ read_rtc:
bin2bcd(tmp));
}
- /*
- * Some IPs have weekday reset value = 0x1 which might not correct
- * hence compute the wday using the current date/month/year values
- */
- ds1307_get_time(ds1307->dev, &tm);
- wday = tm.tm_wday;
- timestamp = rtc_tm_to_time64(&tm);
- rtc_time64_to_tm(timestamp, &tm);
-
- /*
- * Check if reset wday is different from the computed wday
- * If different then set the wday which we computed using
- * timestamp
- */
- if (wday != tm.tm_wday)
- regmap_update_bits(ds1307->regmap, MCP794XX_REG_WEEKDAY,
- MCP794XX_REG_WEEKDAY_WDAY_MASK,
- tm.tm_wday + 1);
-
if (want_irq || ds1307_can_wakeup_device) {
device_set_wakeup_capable(ds1307->dev, true);
set_bit(HAS_ALARM, &ds1307->flags);
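
The mcp794xx_alm_weekday() helper added above replaces the probe-time weekday fixup that is removed here: instead of correcting the stored weekday, it derives the alarm weekday from whatever weekday the RTC currently reports, so only the day difference matters. A small plain-C illustration of the arithmetic follows; it is ordinary userspace code, not kernel code, and the day counts are placeholders since only their difference is used.

#include <stdio.h>

/* Mirror of the (wday_now + days_alarm - days_now) % 7 + 1 computation used
 * by mcp794xx_alm_weekday(); the hardware weekday field is 1..7. */
static int alarm_weekday(int wday_now, long days_now, long days_alarm)
{
	return (wday_now + days_alarm - days_now) % 7 + 1;
}

int main(void)
{
	/*
	 * Suppose the RTC currently reports Wednesday (tm_wday == 3) and the
	 * alarm lies five days in the future: the alarm falls on a Monday,
	 * which maps to hardware weekday 2 when 1 means Sunday.
	 */
	printf("alarm weekday = %d\n", alarm_weekday(3, 0, 5));
	return 0;
}
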
diff --git a/drivers/rtc/rtc-ds1390.c b/drivers/rtc/rtc-ds1390.c
index aa0d2c6f1edc..4d5b007d7fc6 100644
--- a/drivers/rtc/rtc-ds1390.c
+++ b/drivers/rtc/rtc-ds1390.c
@@ -216,9 +216,16 @@ static int ds1390_probe(struct spi_device *spi)
return res;
}
+static const struct of_device_id ds1390_of_match[] = {
+ { .compatible = "dallas,ds1390" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ds1390_of_match);
+
static struct spi_driver ds1390_driver = {
.driver = {
.name = "rtc-ds1390",
+ .of_match_table = of_match_ptr(ds1390_of_match),
},
.probe = ds1390_probe,
};
diff --git a/drivers/rtc/rtc-ds1511.c b/drivers/rtc/rtc-ds1511.c
index 1b2dcb58c0ab..1e95312a6f2e 100644
--- a/drivers/rtc/rtc-ds1511.c
+++ b/drivers/rtc/rtc-ds1511.c
@@ -398,42 +398,37 @@ static const struct rtc_class_ops ds1511_rtc_ops = {
.alarm_irq_enable = ds1511_rtc_alarm_irq_enable,
};
-static ssize_t
-ds1511_nvram_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *ba,
- char *buf, loff_t pos, size_t size)
+static int ds1511_nvram_read(void *priv, unsigned int pos, void *buf,
+ size_t size)
{
- ssize_t count;
+ int i;
rtc_write(pos, DS1511_RAMADDR_LSB);
- for (count = 0; count < size; count++)
- *buf++ = rtc_read(DS1511_RAMDATA);
+ for (i = 0; i < size; i++)
+ *(char *)buf++ = rtc_read(DS1511_RAMDATA);
- return count;
+ return 0;
}
-static ssize_t
-ds1511_nvram_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t pos, size_t size)
+static int ds1511_nvram_write(void *priv, unsigned int pos, void *buf,
+ size_t size)
{
- ssize_t count;
+ int i;
rtc_write(pos, DS1511_RAMADDR_LSB);
- for (count = 0; count < size; count++)
- rtc_write(*buf++, DS1511_RAMDATA);
+ for (i = 0; i < size; i++)
+ rtc_write(*(char *)buf++, DS1511_RAMDATA);
- return count;
+ return 0;
}
-static struct bin_attribute ds1511_nvram_attr = {
- .attr = {
- .name = "nvram",
- .mode = S_IRUGO | S_IWUSR,
- },
+static struct nvmem_config ds1511_nvmem_cfg = {
+ .name = "ds1511_nvram",
+ .word_size = 1,
+ .stride = 1,
.size = DS1511_RAM_MAX,
- .read = ds1511_nvram_read,
- .write = ds1511_nvram_write,
+ .reg_read = ds1511_nvram_read,
+ .reg_write = ds1511_nvram_write,
};
static int ds1511_rtc_probe(struct platform_device *pdev)
@@ -477,11 +472,20 @@ static int ds1511_rtc_probe(struct platform_device *pdev)
spin_lock_init(&pdata->lock);
platform_set_drvdata(pdev, pdata);
- pdata->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
- &ds1511_rtc_ops, THIS_MODULE);
+ pdata->rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(pdata->rtc))
return PTR_ERR(pdata->rtc);
+ pdata->rtc->ops = &ds1511_rtc_ops;
+
+ ds1511_nvmem_cfg.priv = &pdev->dev;
+ pdata->rtc->nvmem_config = &ds1511_nvmem_cfg;
+ pdata->rtc->nvram_old_abi = true;
+
+ ret = rtc_register_device(pdata->rtc);
+ if (ret)
+ return ret;
+
/*
* if the platform has an interrupt in mind for this device,
* then by all means, set it
@@ -496,26 +500,6 @@ static int ds1511_rtc_probe(struct platform_device *pdev)
}
}
- ret = sysfs_create_bin_file(&pdev->dev.kobj, &ds1511_nvram_attr);
- if (ret)
- dev_err(&pdev->dev, "Unable to create sysfs entry: %s\n",
- ds1511_nvram_attr.attr.name);
-
- return 0;
-}
-
-static int ds1511_rtc_remove(struct platform_device *pdev)
-{
- struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
-
- sysfs_remove_bin_file(&pdev->dev.kobj, &ds1511_nvram_attr);
- if (pdata->irq > 0) {
- /*
- * disable the alarm interrupt
- */
- rtc_write(rtc_read(RTC_CMD) & ~RTC_TIE, RTC_CMD);
- rtc_read(RTC_CMD1);
- }
return 0;
}
@@ -524,7 +508,6 @@ MODULE_ALIAS("platform:ds1511");
static struct platform_driver ds1511_rtc_driver = {
.probe = ds1511_rtc_probe,
- .remove = ds1511_rtc_remove,
.driver = {
.name = "ds1511",
},
diff --git a/drivers/rtc/rtc-jz4740.c b/drivers/rtc/rtc-jz4740.c
index 64989afffa3d..ff65a7d2b9c9 100644
--- a/drivers/rtc/rtc-jz4740.c
+++ b/drivers/rtc/rtc-jz4740.c
@@ -82,7 +82,7 @@ static inline uint32_t jz4740_rtc_reg_read(struct jz4740_rtc *rtc, size_t reg)
static int jz4740_rtc_wait_write_ready(struct jz4740_rtc *rtc)
{
uint32_t ctrl;
- int timeout = 1000;
+ int timeout = 10000;
do {
ctrl = jz4740_rtc_reg_read(rtc, JZ_REG_RTC_CTRL);
@@ -94,7 +94,7 @@ static int jz4740_rtc_wait_write_ready(struct jz4740_rtc *rtc)
static inline int jz4780_rtc_enable_write(struct jz4740_rtc *rtc)
{
uint32_t ctrl;
- int ret, timeout = 1000;
+ int ret, timeout = 10000;
ret = jz4740_rtc_wait_write_ready(rtc);
if (ret != 0)
@@ -368,7 +368,7 @@ static int jz4740_rtc_probe(struct platform_device *pdev)
ret = jz4740_rtc_reg_write(rtc, JZ_REG_RTC_SCRATCHPAD, 0x12345678);
ret = jz4740_rtc_reg_write(rtc, JZ_REG_RTC_SEC, 0);
if (ret) {
- dev_err(&pdev->dev, "Could not write write to RTC registers\n");
+ dev_err(&pdev->dev, "Could not write to RTC registers\n");
return ret;
}
}
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index f4c070ea8384..c90fba3ed861 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -154,6 +154,8 @@ struct m41t80_data {
struct rtc_device *rtc;
#ifdef CONFIG_COMMON_CLK
struct clk_hw sqw;
+ unsigned long freq;
+ unsigned int sqwe;
#endif
};
@@ -443,43 +445,40 @@ static SIMPLE_DEV_PM_OPS(m41t80_pm, m41t80_suspend, m41t80_resume);
#ifdef CONFIG_COMMON_CLK
#define sqw_to_m41t80_data(_hw) container_of(_hw, struct m41t80_data, sqw)
-static unsigned long m41t80_sqw_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
+static unsigned long m41t80_decode_freq(int setting)
+{
+ return (setting == 0) ? 0 : (setting == 1) ? M41T80_SQW_MAX_FREQ :
+ M41T80_SQW_MAX_FREQ >> setting;
+}
+
+static unsigned long m41t80_get_freq(struct m41t80_data *m41t80)
{
- struct m41t80_data *m41t80 = sqw_to_m41t80_data(hw);
struct i2c_client *client = m41t80->client;
int reg_sqw = (m41t80->features & M41T80_FEATURE_SQ_ALT) ?
M41T80_REG_WDAY : M41T80_REG_SQW;
int ret = i2c_smbus_read_byte_data(client, reg_sqw);
- unsigned long val = M41T80_SQW_MAX_FREQ;
if (ret < 0)
return 0;
+ return m41t80_decode_freq(ret >> 4);
+}
- ret >>= 4;
- if (ret == 0)
- val = 0;
- else if (ret > 1)
- val = val / (1 << ret);
-
- return val;
+static unsigned long m41t80_sqw_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return sqw_to_m41t80_data(hw)->freq;
}
static long m41t80_sqw_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
- int i, freq = M41T80_SQW_MAX_FREQ;
-
- if (freq <= rate)
- return freq;
-
- for (i = 2; i <= ilog2(M41T80_SQW_MAX_FREQ); i++) {
- freq /= 1 << i;
- if (freq <= rate)
- return freq;
- }
-
- return 0;
+ if (rate >= M41T80_SQW_MAX_FREQ)
+ return M41T80_SQW_MAX_FREQ;
+ if (rate >= M41T80_SQW_MAX_FREQ / 4)
+ return M41T80_SQW_MAX_FREQ / 4;
+ if (!rate)
+ return 0;
+ return 1 << ilog2(rate);
}
static int m41t80_sqw_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -491,17 +490,12 @@ static int m41t80_sqw_set_rate(struct clk_hw *hw, unsigned long rate,
M41T80_REG_WDAY : M41T80_REG_SQW;
int reg, ret, val = 0;
- if (rate) {
- if (!is_power_of_2(rate))
- return -EINVAL;
- val = ilog2(rate);
- if (val == ilog2(M41T80_SQW_MAX_FREQ))
- val = 1;
- else if (val < (ilog2(M41T80_SQW_MAX_FREQ) - 1))
- val = ilog2(M41T80_SQW_MAX_FREQ) - val;
- else
- return -EINVAL;
- }
+ if (rate >= M41T80_SQW_MAX_FREQ)
+ val = 1;
+ else if (rate >= M41T80_SQW_MAX_FREQ / 4)
+ val = 2;
+ else if (rate)
+ val = 15 - ilog2(rate);
reg = i2c_smbus_read_byte_data(client, reg_sqw);
if (reg < 0)
@@ -510,10 +504,9 @@ static int m41t80_sqw_set_rate(struct clk_hw *hw, unsigned long rate,
reg = (reg & 0x0f) | (val << 4);
ret = i2c_smbus_write_byte_data(client, reg_sqw, reg);
- if (ret < 0)
- return ret;
-
- return -EINVAL;
+ if (!ret)
+ m41t80->freq = m41t80_decode_freq(val);
+ return ret;
}
static int m41t80_sqw_control(struct clk_hw *hw, bool enable)
@@ -530,7 +523,10 @@ static int m41t80_sqw_control(struct clk_hw *hw, bool enable)
else
ret &= ~M41T80_ALMON_SQWE;
- return i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, ret);
+ ret = i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, ret);
+ if (!ret)
+ m41t80->sqwe = enable;
+ return ret;
}
static int m41t80_sqw_prepare(struct clk_hw *hw)
@@ -545,14 +541,7 @@ static void m41t80_sqw_unprepare(struct clk_hw *hw)
static int m41t80_sqw_is_prepared(struct clk_hw *hw)
{
- struct m41t80_data *m41t80 = sqw_to_m41t80_data(hw);
- struct i2c_client *client = m41t80->client;
- int ret = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON);
-
- if (ret < 0)
- return ret;
-
- return !!(ret & M41T80_ALMON_SQWE);
+ return sqw_to_m41t80_data(hw)->sqwe;
}
static const struct clk_ops m41t80_sqw_ops = {
@@ -587,6 +576,7 @@ static struct clk *m41t80_sqw_register_clk(struct m41t80_data *m41t80)
init.parent_names = NULL;
init.num_parents = 0;
m41t80->sqw.init = &init;
+ m41t80->freq = m41t80_get_freq(m41t80);
/* optional override of the clockname */
of_property_read_string(node, "clock-output-names", &init.name);
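
The reworked m41t80 code above caches the square-wave frequency and funnels all conversions through one 4-bit field: 0 means off, 1 means the 32.768 kHz maximum, and larger values divide the maximum by 2^n. A standalone round-trip check of that encoding in plain C is sketched below; the 32768 constant is assumed to match the driver's M41T80_SQW_MAX_FREQ.

#include <stdio.h>

#define SQW_MAX_FREQ	32768	/* assumed value of M41T80_SQW_MAX_FREQ */

static unsigned long decode_freq(int setting)
{
	return (setting == 0) ? 0 : (setting == 1) ? SQW_MAX_FREQ :
						     SQW_MAX_FREQ >> setting;
}

static int encode_rate(unsigned long rate)
{
	int v = 0;

	if (rate >= SQW_MAX_FREQ)
		return 1;
	if (rate >= SQW_MAX_FREQ / 4)
		return 2;
	if (!rate)
		return 0;
	while (rate >>= 1)	/* ilog2() for a power-of-two rate */
		v++;
	return 15 - v;
}

int main(void)
{
	unsigned long rates[] = { 32768, 8192, 4096, 64, 1, 0 };

	for (int i = 0; i < 6; i++) {
		int val = encode_rate(rates[i]);

		printf("rate %5lu Hz -> SQW field %2d -> decodes to %lu Hz\n",
		       rates[i], val, decode_freq(val));
	}
	return 0;
}

Caching the decoded frequency (and the SQWE state) in m41t80_data also lets recalc_rate() and is_prepared() answer without touching the I2C bus.
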
diff --git a/drivers/rtc/rtc-m48t86.c b/drivers/rtc/rtc-m48t86.c
index 02af045305dd..d9aea9b6d9cd 100644
--- a/drivers/rtc/rtc-m48t86.c
+++ b/drivers/rtc/rtc-m48t86.c
@@ -163,35 +163,30 @@ static const struct rtc_class_ops m48t86_rtc_ops = {
.proc = m48t86_rtc_proc,
};
-static ssize_t m48t86_nvram_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
+static int m48t86_nvram_read(void *priv, unsigned int off, void *buf,
+ size_t count)
{
- struct device *dev = kobj_to_dev(kobj);
+ struct device *dev = priv;
unsigned int i;
for (i = 0; i < count; i++)
- buf[i] = m48t86_readb(dev, M48T86_NVRAM(off + i));
+ ((u8 *)buf)[i] = m48t86_readb(dev, M48T86_NVRAM(off + i));
- return count;
+ return 0;
}
-static ssize_t m48t86_nvram_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
+static int m48t86_nvram_write(void *priv, unsigned int off, void *buf,
+ size_t count)
{
- struct device *dev = kobj_to_dev(kobj);
+ struct device *dev = priv;
unsigned int i;
for (i = 0; i < count; i++)
- m48t86_writeb(dev, buf[i], M48T86_NVRAM(off + i));
+ m48t86_writeb(dev, ((u8 *)buf)[i], M48T86_NVRAM(off + i));
- return count;
+ return 0;
}
-static BIN_ATTR(nvram, 0644, m48t86_nvram_read, m48t86_nvram_write,
- M48T86_NVRAM_LEN);
-
/*
* The RTC is an optional feature at purchase time on some Technologic Systems
* boards. Verify that it actually exists by checking if the last two bytes
@@ -223,11 +218,21 @@ static bool m48t86_verify_chip(struct platform_device *pdev)
return false;
}
+static struct nvmem_config m48t86_nvmem_cfg = {
+ .name = "m48t86_nvram",
+ .word_size = 1,
+ .stride = 1,
+ .size = M48T86_NVRAM_LEN,
+ .reg_read = m48t86_nvram_read,
+ .reg_write = m48t86_nvram_write,
+};
+
static int m48t86_rtc_probe(struct platform_device *pdev)
{
struct m48t86_rtc_info *info;
struct resource *res;
unsigned char reg;
+ int err;
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info)
@@ -254,25 +259,25 @@ static int m48t86_rtc_probe(struct platform_device *pdev)
return -ENODEV;
}
- info->rtc = devm_rtc_device_register(&pdev->dev, "m48t86",
- &m48t86_rtc_ops, THIS_MODULE);
+ info->rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(info->rtc))
return PTR_ERR(info->rtc);
+ info->rtc->ops = &m48t86_rtc_ops;
+
+ m48t86_nvmem_cfg.priv = &pdev->dev;
+ info->rtc->nvmem_config = &m48t86_nvmem_cfg;
+ info->rtc->nvram_old_abi = true;
+
+ err = rtc_register_device(info->rtc);
+ if (err)
+ return err;
+
/* read battery status */
reg = m48t86_readb(&pdev->dev, M48T86_D);
dev_info(&pdev->dev, "battery %s\n",
(reg & M48T86_D_VRT) ? "ok" : "exhausted");
- if (device_create_bin_file(&pdev->dev, &bin_attr_nvram))
- dev_err(&pdev->dev, "failed to create nvram sysfs entry\n");
-
- return 0;
-}
-
-static int m48t86_rtc_remove(struct platform_device *pdev)
-{
- device_remove_bin_file(&pdev->dev, &bin_attr_nvram);
return 0;
}
@@ -281,7 +286,6 @@ static struct platform_driver m48t86_rtc_platform_driver = {
.name = "rtc-m48t86",
},
.probe = m48t86_rtc_probe,
- .remove = m48t86_rtc_remove,
};
module_platform_driver(m48t86_rtc_platform_driver);
diff --git a/drivers/rtc/rtc-mt7622.c b/drivers/rtc/rtc-mt7622.c
new file mode 100644
index 000000000000..d79b9ae4d237
--- /dev/null
+++ b/drivers/rtc/rtc-mt7622.c
@@ -0,0 +1,422 @@
+/*
+ * Driver for MediaTek SoC based RTC
+ *
+ * Copyright (C) 2017 Sean Wang <sean.wang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+
+#define MTK_RTC_DEV KBUILD_MODNAME
+
+#define MTK_RTC_PWRCHK1 0x4
+#define RTC_PWRCHK1_MAGIC 0xc6
+
+#define MTK_RTC_PWRCHK2 0x8
+#define RTC_PWRCHK2_MAGIC 0x9a
+
+#define MTK_RTC_KEY 0xc
+#define RTC_KEY_MAGIC 0x59
+
+#define MTK_RTC_PROT1 0x10
+#define RTC_PROT1_MAGIC 0xa3
+
+#define MTK_RTC_PROT2 0x14
+#define RTC_PROT2_MAGIC 0x57
+
+#define MTK_RTC_PROT3 0x18
+#define RTC_PROT3_MAGIC 0x67
+
+#define MTK_RTC_PROT4 0x1c
+#define RTC_PROT4_MAGIC 0xd2
+
+#define MTK_RTC_CTL 0x20
+#define RTC_RC_STOP BIT(0)
+
+#define MTK_RTC_DEBNCE 0x2c
+#define RTC_DEBNCE_MASK GENMASK(2, 0)
+
+#define MTK_RTC_INT 0x30
+#define RTC_INT_AL_STA BIT(4)
+
+/*
+ * Registers 0x40 through 0x78 hold the RTC time setup for year, month,
+ * day of month, day of week, hour, minute and second.

+ */
+#define MTK_RTC_TREG(_t, _f) (0x40 + (0x4 * (_f)) + ((_t) * 0x20))
+
+#define MTK_RTC_AL_CTL 0x7c
+#define RTC_AL_EN BIT(0)
+#define RTC_AL_ALL GENMASK(7, 0)
+
+/*
+ * The offset used to translate the year between struct rtc_time and the
+ * hardware register MTK_RTC_TREG(x, MTK_YEA).
+ */
+#define MTK_RTC_TM_YR_OFFSET 100
+
+/*
+ * The lowest valid tm_year. The RTC hardware would incorrectly treat
+ * tm_year 100 as a non-leap year, so it must also be excluded from the
+ * valid range.
+ */
+#define MTK_RTC_TM_YR_L (MTK_RTC_TM_YR_OFFSET + 1)
+
+/*
+ * The highest year the MT7622 RTC can hold is 99; past 99 the year
+ * register wraps around to 0.
+ */
+#define MTK_RTC_HW_YR_LIMIT 99
+
+/* The highest value for the valid tm_year */
+#define MTK_RTC_TM_YR_H (MTK_RTC_TM_YR_OFFSET + MTK_RTC_HW_YR_LIMIT)
+
+/* Check whether the hardware supports the given tm_year */
+#define MTK_RTC_TM_YR_VALID(_y) ((_y) >= MTK_RTC_TM_YR_L && \
+ (_y) <= MTK_RTC_TM_YR_H)
+
+/* The two functions the RTC provides: time counter and alarm. */
+enum {
+ MTK_TC,
+ MTK_AL,
+};
+
+/* Indexes of the individual time fields within an MTK_RTC_TREG group */
+enum {
+ MTK_YEA,
+ MTK_MON,
+ MTK_DOM,
+ MTK_DOW,
+ MTK_HOU,
+ MTK_MIN,
+ MTK_SEC
+};
+
+struct mtk_rtc {
+ struct rtc_device *rtc;
+ void __iomem *base;
+ int irq;
+ struct clk *clk;
+};
+
+static void mtk_w32(struct mtk_rtc *rtc, u32 reg, u32 val)
+{
+ writel_relaxed(val, rtc->base + reg);
+}
+
+static u32 mtk_r32(struct mtk_rtc *rtc, u32 reg)
+{
+ return readl_relaxed(rtc->base + reg);
+}
+
+static void mtk_rmw(struct mtk_rtc *rtc, u32 reg, u32 mask, u32 set)
+{
+ u32 val;
+
+ val = mtk_r32(rtc, reg);
+ val &= ~mask;
+ val |= set;
+ mtk_w32(rtc, reg, val);
+}
+
+static void mtk_set(struct mtk_rtc *rtc, u32 reg, u32 val)
+{
+ mtk_rmw(rtc, reg, 0, val);
+}
+
+static void mtk_clr(struct mtk_rtc *rtc, u32 reg, u32 val)
+{
+ mtk_rmw(rtc, reg, val, 0);
+}
+
+static void mtk_rtc_hw_init(struct mtk_rtc *hw)
+{
+	/* Run the init sequence required to bring the RTC into a working state */
+ mtk_w32(hw, MTK_RTC_PWRCHK1, RTC_PWRCHK1_MAGIC);
+ mtk_w32(hw, MTK_RTC_PWRCHK2, RTC_PWRCHK2_MAGIC);
+ mtk_w32(hw, MTK_RTC_KEY, RTC_KEY_MAGIC);
+ mtk_w32(hw, MTK_RTC_PROT1, RTC_PROT1_MAGIC);
+ mtk_w32(hw, MTK_RTC_PROT2, RTC_PROT2_MAGIC);
+ mtk_w32(hw, MTK_RTC_PROT3, RTC_PROT3_MAGIC);
+ mtk_w32(hw, MTK_RTC_PROT4, RTC_PROT4_MAGIC);
+ mtk_rmw(hw, MTK_RTC_DEBNCE, RTC_DEBNCE_MASK, 0);
+ mtk_clr(hw, MTK_RTC_CTL, RTC_RC_STOP);
+}
+
+static void mtk_rtc_get_alarm_or_time(struct mtk_rtc *hw, struct rtc_time *tm,
+ int time_alarm)
+{
+ u32 year, mon, mday, wday, hour, min, sec;
+
+ /*
+	 * Read repeatedly until the seconds field is unchanged, which ensures
+	 * all fields are sampled in a consistent state. MTK_SEC must be read
+	 * first: if two consecutive reads of MTK_SEC return the same value,
+	 * the other fields are guaranteed not to have changed in between.
+ */
+ do {
+ sec = mtk_r32(hw, MTK_RTC_TREG(time_alarm, MTK_SEC));
+ min = mtk_r32(hw, MTK_RTC_TREG(time_alarm, MTK_MIN));
+ hour = mtk_r32(hw, MTK_RTC_TREG(time_alarm, MTK_HOU));
+ wday = mtk_r32(hw, MTK_RTC_TREG(time_alarm, MTK_DOW));
+ mday = mtk_r32(hw, MTK_RTC_TREG(time_alarm, MTK_DOM));
+ mon = mtk_r32(hw, MTK_RTC_TREG(time_alarm, MTK_MON));
+ year = mtk_r32(hw, MTK_RTC_TREG(time_alarm, MTK_YEA));
+ } while (sec != mtk_r32(hw, MTK_RTC_TREG(time_alarm, MTK_SEC)));
+
+ tm->tm_sec = sec;
+ tm->tm_min = min;
+ tm->tm_hour = hour;
+ tm->tm_wday = wday;
+ tm->tm_mday = mday;
+ tm->tm_mon = mon - 1;
+
+ /* Rebase to the absolute year which userspace queries */
+ tm->tm_year = year + MTK_RTC_TM_YR_OFFSET;
+}
+
+static void mtk_rtc_set_alarm_or_time(struct mtk_rtc *hw, struct rtc_time *tm,
+ int time_alarm)
+{
+ u32 year;
+
+ /* Rebase to the relative year which RTC hardware requires */
+ year = tm->tm_year - MTK_RTC_TM_YR_OFFSET;
+
+ mtk_w32(hw, MTK_RTC_TREG(time_alarm, MTK_YEA), year);
+ mtk_w32(hw, MTK_RTC_TREG(time_alarm, MTK_MON), tm->tm_mon + 1);
+ mtk_w32(hw, MTK_RTC_TREG(time_alarm, MTK_DOW), tm->tm_wday);
+ mtk_w32(hw, MTK_RTC_TREG(time_alarm, MTK_DOM), tm->tm_mday);
+ mtk_w32(hw, MTK_RTC_TREG(time_alarm, MTK_HOU), tm->tm_hour);
+ mtk_w32(hw, MTK_RTC_TREG(time_alarm, MTK_MIN), tm->tm_min);
+ mtk_w32(hw, MTK_RTC_TREG(time_alarm, MTK_SEC), tm->tm_sec);
+}
+
+static irqreturn_t mtk_rtc_alarmirq(int irq, void *id)
+{
+ struct mtk_rtc *hw = (struct mtk_rtc *)id;
+ u32 irq_sta;
+
+ irq_sta = mtk_r32(hw, MTK_RTC_INT);
+ if (irq_sta & RTC_INT_AL_STA) {
+		/* Stopping the alarm also implicitly disables the alarm interrupt */
+ mtk_w32(hw, MTK_RTC_AL_CTL, 0);
+ rtc_update_irq(hw->rtc, 1, RTC_IRQF | RTC_AF);
+
+ /* Ack alarm interrupt status */
+ mtk_w32(hw, MTK_RTC_INT, RTC_INT_AL_STA);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int mtk_rtc_gettime(struct device *dev, struct rtc_time *tm)
+{
+ struct mtk_rtc *hw = dev_get_drvdata(dev);
+
+ mtk_rtc_get_alarm_or_time(hw, tm, MTK_TC);
+
+ return rtc_valid_tm(tm);
+}
+
+static int mtk_rtc_settime(struct device *dev, struct rtc_time *tm)
+{
+ struct mtk_rtc *hw = dev_get_drvdata(dev);
+
+ if (!MTK_RTC_TM_YR_VALID(tm->tm_year))
+ return -EINVAL;
+
+	/* Stop the time counter before setting a new one */
+ mtk_set(hw, MTK_RTC_CTL, RTC_RC_STOP);
+
+ mtk_rtc_set_alarm_or_time(hw, tm, MTK_TC);
+
+ /* Restart the time counter */
+ mtk_clr(hw, MTK_RTC_CTL, RTC_RC_STOP);
+
+ return 0;
+}
+
+static int mtk_rtc_getalarm(struct device *dev, struct rtc_wkalrm *wkalrm)
+{
+ struct mtk_rtc *hw = dev_get_drvdata(dev);
+ struct rtc_time *alrm_tm = &wkalrm->time;
+
+ mtk_rtc_get_alarm_or_time(hw, alrm_tm, MTK_AL);
+
+ wkalrm->enabled = !!(mtk_r32(hw, MTK_RTC_AL_CTL) & RTC_AL_EN);
+ wkalrm->pending = !!(mtk_r32(hw, MTK_RTC_INT) & RTC_INT_AL_STA);
+
+ return 0;
+}
+
+static int mtk_rtc_setalarm(struct device *dev, struct rtc_wkalrm *wkalrm)
+{
+ struct mtk_rtc *hw = dev_get_drvdata(dev);
+ struct rtc_time *alrm_tm = &wkalrm->time;
+
+ if (!MTK_RTC_TM_YR_VALID(alrm_tm->tm_year))
+ return -EINVAL;
+
+ /*
+	 * Stop the alarm, which also implicitly disables its interrupt,
+	 * before setting a new one.
+ */
+ mtk_clr(hw, MTK_RTC_AL_CTL, RTC_AL_EN);
+
+ /*
+	 * Avoid contention between mtk_rtc_setalarm and the IRQ handler:
+	 * with the interrupt disabled, wait for any pending IRQ handler to
+	 * complete.
+ */
+ synchronize_irq(hw->irq);
+
+ mtk_rtc_set_alarm_or_time(hw, alrm_tm, MTK_AL);
+
+ /* Restart the alarm with the new setup */
+ mtk_w32(hw, MTK_RTC_AL_CTL, RTC_AL_ALL);
+
+ return 0;
+}
+
+static const struct rtc_class_ops mtk_rtc_ops = {
+ .read_time = mtk_rtc_gettime,
+ .set_time = mtk_rtc_settime,
+ .read_alarm = mtk_rtc_getalarm,
+ .set_alarm = mtk_rtc_setalarm,
+};
+
+static const struct of_device_id mtk_rtc_match[] = {
+ { .compatible = "mediatek,mt7622-rtc" },
+ { .compatible = "mediatek,soc-rtc" },
+ {},
+};
+
+static int mtk_rtc_probe(struct platform_device *pdev)
+{
+ struct mtk_rtc *hw;
+ struct resource *res;
+ int ret;
+
+ hw = devm_kzalloc(&pdev->dev, sizeof(*hw), GFP_KERNEL);
+ if (!hw)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, hw);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hw->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hw->base))
+ return PTR_ERR(hw->base);
+
+ hw->clk = devm_clk_get(&pdev->dev, "rtc");
+ if (IS_ERR(hw->clk)) {
+ dev_err(&pdev->dev, "No clock\n");
+ return PTR_ERR(hw->clk);
+ }
+
+ ret = clk_prepare_enable(hw->clk);
+ if (ret)
+ return ret;
+
+ hw->irq = platform_get_irq(pdev, 0);
+ if (hw->irq < 0) {
+ dev_err(&pdev->dev, "No IRQ resource\n");
+ ret = hw->irq;
+ goto err;
+ }
+
+ ret = devm_request_irq(&pdev->dev, hw->irq, mtk_rtc_alarmirq,
+ 0, dev_name(&pdev->dev), hw);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't request IRQ\n");
+ goto err;
+ }
+
+ mtk_rtc_hw_init(hw);
+
+ device_init_wakeup(&pdev->dev, true);
+
+ hw->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
+ &mtk_rtc_ops, THIS_MODULE);
+ if (IS_ERR(hw->rtc)) {
+ ret = PTR_ERR(hw->rtc);
+ dev_err(&pdev->dev, "Unable to register device\n");
+ goto err;
+ }
+
+ return 0;
+err:
+ clk_disable_unprepare(hw->clk);
+
+ return ret;
+}
+
+static int mtk_rtc_remove(struct platform_device *pdev)
+{
+ struct mtk_rtc *hw = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(hw->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mtk_rtc_suspend(struct device *dev)
+{
+ struct mtk_rtc *hw = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(hw->irq);
+
+ return 0;
+}
+
+static int mtk_rtc_resume(struct device *dev)
+{
+ struct mtk_rtc *hw = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(hw->irq);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(mtk_rtc_pm_ops, mtk_rtc_suspend, mtk_rtc_resume);
+
+#define MTK_RTC_PM_OPS (&mtk_rtc_pm_ops)
+#else /* CONFIG_PM_SLEEP */
+#define MTK_RTC_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP */
+
+static struct platform_driver mtk_rtc_driver = {
+ .probe = mtk_rtc_probe,
+ .remove = mtk_rtc_remove,
+ .driver = {
+ .name = MTK_RTC_DEV,
+ .of_match_table = mtk_rtc_match,
+ .pm = MTK_RTC_PM_OPS,
+ },
+};
+
+module_platform_driver(mtk_rtc_driver);
+
+MODULE_DESCRIPTION("MediaTek SoC based RTC Driver");
+MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
+MODULE_LICENSE("GPL");
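
The new rtc-mt7622.c driver reads the time registers in a retry loop keyed on the seconds field, so a rollover during the multi-register read cannot yield a mixed snapshot. A toy userspace sketch of that pattern follows; the fixed regs[] array and its values stand in for the MMIO reads done by mtk_r32() and are purely illustrative.

#include <stdio.h>

/* Stand-in for the hardware registers; index 0 is the seconds field. */
static unsigned int regs[3] = { 59, 59, 23 };	/* sec, min, hour */

static unsigned int read_reg(int idx)
{
	return regs[idx];
}

int main(void)
{
	unsigned int sec, min, hour;

	/*
	 * Re-read until two consecutive reads of the seconds register agree;
	 * seconds change first, so a stable seconds value means the other
	 * fields did not roll over while they were being read.
	 */
	do {
		sec  = read_reg(0);
		min  = read_reg(1);
		hour = read_reg(2);
	} while (sec != read_reg(0));

	printf("%02u:%02u:%02u\n", hour, min, sec);
	return 0;
}
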
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index 13f7cd11c07e..1d666ac9ef70 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -70,6 +70,10 @@
#define OMAP_RTC_COMP_MSB_REG 0x50
#define OMAP_RTC_OSC_REG 0x54
+#define OMAP_RTC_SCRATCH0_REG 0x60
+#define OMAP_RTC_SCRATCH1_REG 0x64
+#define OMAP_RTC_SCRATCH2_REG 0x68
+
#define OMAP_RTC_KICK0_REG 0x6c
#define OMAP_RTC_KICK1_REG 0x70
@@ -667,6 +671,45 @@ static struct pinctrl_desc rtc_pinctrl_desc = {
.owner = THIS_MODULE,
};
+static int omap_rtc_scratch_read(void *priv, unsigned int offset, void *_val,
+ size_t bytes)
+{
+ struct omap_rtc *rtc = priv;
+ u32 *val = _val;
+ int i;
+
+ for (i = 0; i < bytes / 4; i++)
+ val[i] = rtc_readl(rtc,
+ OMAP_RTC_SCRATCH0_REG + offset + (i * 4));
+
+ return 0;
+}
+
+static int omap_rtc_scratch_write(void *priv, unsigned int offset, void *_val,
+ size_t bytes)
+{
+ struct omap_rtc *rtc = priv;
+ u32 *val = _val;
+ int i;
+
+ rtc->type->unlock(rtc);
+ for (i = 0; i < bytes / 4; i++)
+ rtc_writel(rtc,
+ OMAP_RTC_SCRATCH0_REG + offset + (i * 4), val[i]);
+ rtc->type->lock(rtc);
+
+ return 0;
+}
+
+static struct nvmem_config omap_rtc_nvmem_config = {
+ .name = "omap_rtc_scratch",
+ .word_size = 4,
+ .stride = 4,
+ .size = OMAP_RTC_KICK0_REG - OMAP_RTC_SCRATCH0_REG,
+ .reg_read = omap_rtc_scratch_read,
+ .reg_write = omap_rtc_scratch_write,
+};
+
static int omap_rtc_probe(struct platform_device *pdev)
{
struct omap_rtc *rtc;
@@ -797,13 +840,16 @@ static int omap_rtc_probe(struct platform_device *pdev)
device_init_wakeup(&pdev->dev, true);
- rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
- &omap_rtc_ops, THIS_MODULE);
+ rtc->rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(rtc->rtc)) {
ret = PTR_ERR(rtc->rtc);
goto err;
}
+ rtc->rtc->ops = &omap_rtc_ops;
+ omap_rtc_nvmem_config.priv = rtc;
+ rtc->rtc->nvmem_config = &omap_rtc_nvmem_config;
+
/* handle periodic and alarm irqs */
ret = devm_request_irq(&pdev->dev, rtc->irq_timer, rtc_irq, 0,
dev_name(&rtc->rtc->dev), rtc);
@@ -830,9 +876,14 @@ static int omap_rtc_probe(struct platform_device *pdev)
rtc->pctldev = pinctrl_register(&rtc_pinctrl_desc, &pdev->dev, rtc);
if (IS_ERR(rtc->pctldev)) {
dev_err(&pdev->dev, "Couldn't register pinctrl driver\n");
- return PTR_ERR(rtc->pctldev);
+ ret = PTR_ERR(rtc->pctldev);
+ goto err;
}
+ ret = rtc_register_device(rtc->rtc);
+ if (ret)
+ goto err;
+
return 0;
err:
diff --git a/drivers/rtc/rtc-pcf8523.c b/drivers/rtc/rtc-pcf8523.c
index 28c48b3c1946..c312af0db729 100644
--- a/drivers/rtc/rtc-pcf8523.c
+++ b/drivers/rtc/rtc-pcf8523.c
@@ -35,6 +35,9 @@
#define REG_MONTHS 0x08
#define REG_YEARS 0x09
+#define REG_OFFSET 0x0e
+#define REG_OFFSET_MODE BIT(7)
+
struct pcf8523 {
struct rtc_device *rtc;
};
@@ -272,10 +275,47 @@ static int pcf8523_rtc_ioctl(struct device *dev, unsigned int cmd,
#define pcf8523_rtc_ioctl NULL
#endif
+static int pcf8523_rtc_read_offset(struct device *dev, long *offset)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int err;
+ u8 value;
+ s8 val;
+
+ err = pcf8523_read(client, REG_OFFSET, &value);
+ if (err < 0)
+ return err;
+
+ /* sign extend the 7-bit offset value */
+ val = value << 1;
+ *offset = (value & REG_OFFSET_MODE ? 4069 : 4340) * (val >> 1);
+
+ return 0;
+}
+
+static int pcf8523_rtc_set_offset(struct device *dev, long offset)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ long reg_m0, reg_m1;
+ u8 value;
+
+ reg_m0 = clamp(DIV_ROUND_CLOSEST(offset, 4340), -64L, 63L);
+ reg_m1 = clamp(DIV_ROUND_CLOSEST(offset, 4069), -64L, 63L);
+
+ if (abs(reg_m0 * 4340 - offset) < abs(reg_m1 * 4069 - offset))
+ value = reg_m0 & 0x7f;
+ else
+ value = (reg_m1 & 0x7f) | REG_OFFSET_MODE;
+
+ return pcf8523_write(client, REG_OFFSET, value);
+}
+
static const struct rtc_class_ops pcf8523_rtc_ops = {
.read_time = pcf8523_rtc_read_time,
.set_time = pcf8523_rtc_set_time,
.ioctl = pcf8523_rtc_ioctl,
+ .read_offset = pcf8523_rtc_read_offset,
+ .set_offset = pcf8523_rtc_set_offset,
};
static int pcf8523_probe(struct i2c_client *client,
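
The pcf8523 offset callbacks above choose between two step sizes, 4340 ppb in mode 0 and 4069 ppb in mode 1, and store a signed 7-bit multiplier plus the mode bit in REG_OFFSET. A userspace round-trip of that encoding is sketched below; div_closest() and clamp_l() stand in for the kernel's DIV_ROUND_CLOSEST() and clamp(), and the sample offsets are arbitrary.

#include <stdio.h>
#include <stdlib.h>

#define REG_OFFSET_MODE	0x80	/* bit 7, as in the patch */

/* Round-to-nearest division for the positive divisors used here. */
static long div_closest(long a, long b)
{
	return (a + (a < 0 ? -b / 2 : b / 2)) / b;
}

static long clamp_l(long v, long lo, long hi)
{
	return v < lo ? lo : v > hi ? hi : v;
}

/* Encode an offset (ppb) the way pcf8523_rtc_set_offset() does: pick the
 * step size whose best multiple lands closer to the request. */
static unsigned char encode_offset(long offset)
{
	long m0 = clamp_l(div_closest(offset, 4340), -64, 63);
	long m1 = clamp_l(div_closest(offset, 4069), -64, 63);

	if (labs(m0 * 4340 - offset) < labs(m1 * 4069 - offset))
		return m0 & 0x7f;
	return (m1 & 0x7f) | REG_OFFSET_MODE;
}

/* Decode it back, mirroring pcf8523_rtc_read_offset(): shift left, then
 * arithmetic-shift right to sign-extend the 7-bit field. */
static long decode_offset(unsigned char value)
{
	signed char val = value << 1;

	return (value & REG_OFFSET_MODE ? 4069 : 4340) * (long)(val >> 1);
}

int main(void)
{
	long requests[] = { 0, 10000, -30000, 200000 };

	for (int i = 0; i < 4; i++) {
		unsigned char reg = encode_offset(requests[i]);

		printf("request %7ld ppb -> reg 0x%02x -> %7ld ppb\n",
		       requests[i], reg, decode_offset(reg));
	}
	return 0;
}

For a 10000 ppb request this picks mode 0 with multiplier 2 (8680 ppb), since that lands closer than mode 1's best of 8138 ppb.
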
diff --git a/drivers/rtc/rtc-pcf85363.c b/drivers/rtc/rtc-pcf85363.c
new file mode 100644
index 000000000000..ea04e9f0930b
--- /dev/null
+++ b/drivers/rtc/rtc-pcf85363.c
@@ -0,0 +1,220 @@
+/*
+ * drivers/rtc/rtc-pcf85363.c
+ *
+ * Driver for NXP PCF85363 real-time clock.
+ *
+ * Copyright (C) 2017 Eric Nelson
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Based loosely on rtc-8583 by Russell King, Wolfram Sang and Juergen Beisert
+ */
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/rtc.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/bcd.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+/*
+ * Date/Time registers
+ */
+#define DT_100THS 0x00
+#define DT_SECS 0x01
+#define DT_MINUTES 0x02
+#define DT_HOURS 0x03
+#define DT_DAYS 0x04
+#define DT_WEEKDAYS 0x05
+#define DT_MONTHS 0x06
+#define DT_YEARS 0x07
+
+/*
+ * Alarm registers
+ */
+#define DT_SECOND_ALM1 0x08
+#define DT_MINUTE_ALM1 0x09
+#define DT_HOUR_ALM1 0x0a
+#define DT_DAY_ALM1 0x0b
+#define DT_MONTH_ALM1 0x0c
+#define DT_MINUTE_ALM2 0x0d
+#define DT_HOUR_ALM2 0x0e
+#define DT_WEEKDAY_ALM2 0x0f
+#define DT_ALARM_EN 0x10
+
+/*
+ * Time stamp registers
+ */
+#define DT_TIMESTAMP1 0x11
+#define DT_TIMESTAMP2 0x17
+#define DT_TIMESTAMP3 0x1d
+#define DT_TS_MODE 0x23
+
+/*
+ * control registers
+ */
+#define CTRL_OFFSET 0x24
+#define CTRL_OSCILLATOR 0x25
+#define CTRL_BATTERY 0x26
+#define CTRL_PIN_IO 0x27
+#define CTRL_FUNCTION 0x28
+#define CTRL_INTA_EN 0x29
+#define CTRL_INTB_EN 0x2a
+#define CTRL_FLAGS 0x2b
+#define CTRL_RAMBYTE 0x2c
+#define CTRL_WDOG 0x2d
+#define CTRL_STOP_EN 0x2e
+#define CTRL_RESETS 0x2f
+#define CTRL_RAM 0x40
+
+#define NVRAM_SIZE 0x40
+
+static struct i2c_driver pcf85363_driver;
+
+struct pcf85363 {
+ struct device *dev;
+ struct rtc_device *rtc;
+ struct nvmem_config nvmem_cfg;
+ struct regmap *regmap;
+};
+
+static int pcf85363_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct pcf85363 *pcf85363 = dev_get_drvdata(dev);
+ unsigned char buf[DT_YEARS + 1];
+ int ret, len = sizeof(buf);
+
+ /* read the RTC date and time registers all at once */
+ ret = regmap_bulk_read(pcf85363->regmap, DT_100THS, buf, len);
+ if (ret) {
+ dev_err(dev, "%s: error %d\n", __func__, ret);
+ return ret;
+ }
+
+ tm->tm_year = bcd2bin(buf[DT_YEARS]);
+ /* adjust for 1900 base of rtc_time */
+ tm->tm_year += 100;
+
+ tm->tm_wday = buf[DT_WEEKDAYS] & 7;
+ buf[DT_SECS] &= 0x7F;
+ tm->tm_sec = bcd2bin(buf[DT_SECS]);
+ buf[DT_MINUTES] &= 0x7F;
+ tm->tm_min = bcd2bin(buf[DT_MINUTES]);
+ tm->tm_hour = bcd2bin(buf[DT_HOURS]);
+ tm->tm_mday = bcd2bin(buf[DT_DAYS]);
+ tm->tm_mon = bcd2bin(buf[DT_MONTHS]) - 1;
+
+ return 0;
+}
+
+static int pcf85363_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct pcf85363 *pcf85363 = dev_get_drvdata(dev);
+ unsigned char buf[DT_YEARS + 1];
+ int len = sizeof(buf);
+
+ buf[DT_100THS] = 0;
+ buf[DT_SECS] = bin2bcd(tm->tm_sec);
+ buf[DT_MINUTES] = bin2bcd(tm->tm_min);
+ buf[DT_HOURS] = bin2bcd(tm->tm_hour);
+ buf[DT_DAYS] = bin2bcd(tm->tm_mday);
+ buf[DT_WEEKDAYS] = tm->tm_wday;
+ buf[DT_MONTHS] = bin2bcd(tm->tm_mon + 1);
+ buf[DT_YEARS] = bin2bcd(tm->tm_year % 100);
+
+ return regmap_bulk_write(pcf85363->regmap, DT_100THS,
+ buf, len);
+}
+
+static const struct rtc_class_ops rtc_ops = {
+ .read_time = pcf85363_rtc_read_time,
+ .set_time = pcf85363_rtc_set_time,
+};
+
+static int pcf85363_nvram_read(void *priv, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct pcf85363 *pcf85363 = priv;
+
+ return regmap_bulk_read(pcf85363->regmap, CTRL_RAM + offset,
+ val, bytes);
+}
+
+static int pcf85363_nvram_write(void *priv, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct pcf85363 *pcf85363 = priv;
+
+ return regmap_bulk_write(pcf85363->regmap, CTRL_RAM + offset,
+ val, bytes);
+}
+
+static const struct regmap_config regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int pcf85363_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct pcf85363 *pcf85363;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ return -ENODEV;
+
+ pcf85363 = devm_kzalloc(&client->dev, sizeof(struct pcf85363),
+ GFP_KERNEL);
+ if (!pcf85363)
+ return -ENOMEM;
+
+ pcf85363->regmap = devm_regmap_init_i2c(client, &regmap_config);
+ if (IS_ERR(pcf85363->regmap)) {
+ dev_err(&client->dev, "regmap allocation failed\n");
+ return PTR_ERR(pcf85363->regmap);
+ }
+
+ pcf85363->dev = &client->dev;
+ i2c_set_clientdata(client, pcf85363);
+
+ pcf85363->rtc = devm_rtc_allocate_device(pcf85363->dev);
+ if (IS_ERR(pcf85363->rtc))
+ return PTR_ERR(pcf85363->rtc);
+
+ pcf85363->nvmem_cfg.name = "pcf85363-";
+ pcf85363->nvmem_cfg.word_size = 1;
+ pcf85363->nvmem_cfg.stride = 1;
+ pcf85363->nvmem_cfg.size = NVRAM_SIZE;
+ pcf85363->nvmem_cfg.reg_read = pcf85363_nvram_read;
+ pcf85363->nvmem_cfg.reg_write = pcf85363_nvram_write;
+ pcf85363->nvmem_cfg.priv = pcf85363;
+ pcf85363->rtc->nvmem_config = &pcf85363->nvmem_cfg;
+ pcf85363->rtc->ops = &rtc_ops;
+
+ return rtc_register_device(pcf85363->rtc);
+}
+
+static const struct of_device_id dev_ids[] = {
+ { .compatible = "nxp,pcf85363" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dev_ids);
+
+static struct i2c_driver pcf85363_driver = {
+ .driver = {
+ .name = "pcf85363",
+ .of_match_table = of_match_ptr(dev_ids),
+ },
+ .probe = pcf85363_probe,
+};
+
+module_i2c_driver(pcf85363_driver);
+
+MODULE_AUTHOR("Eric Nelson");
+MODULE_DESCRIPTION("pcf85363 I2C RTC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index cea6ea4df970..3efc86c25d27 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -387,7 +387,7 @@ static int pcf8563_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *tm)
if (err)
return err;
- return pcf8563_set_alarm_mode(client, 1);
+ return pcf8563_set_alarm_mode(client, !!tm->enabled);
}
static int pcf8563_irq_enable(struct device *dev, unsigned int enabled)
@@ -422,7 +422,7 @@ static unsigned long pcf8563_clkout_recalc_rate(struct clk_hw *hw,
return 0;
buf &= PCF8563_REG_CLKO_F_MASK;
- return clkout_rates[ret];
+ return clkout_rates[buf];
}
static long pcf8563_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index e1687e19c59f..82eb7da2c478 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -308,10 +308,9 @@ static int pl031_remove(struct amba_device *adev)
dev_pm_clear_wake_irq(&adev->dev);
device_init_wakeup(&adev->dev, false);
- free_irq(adev->irq[0], ldata);
+ if (adev->irq[0])
+ free_irq(adev->irq[0], ldata);
rtc_device_unregister(ldata->rtc);
- iounmap(ldata->base);
- kfree(ldata);
amba_release_regions(adev);
return 0;
@@ -322,25 +321,28 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
int ret;
struct pl031_local *ldata;
struct pl031_vendor_data *vendor = id->data;
- struct rtc_class_ops *ops = &vendor->ops;
+ struct rtc_class_ops *ops;
unsigned long time, data;
ret = amba_request_regions(adev, NULL);
if (ret)
goto err_req;
- ldata = kzalloc(sizeof(struct pl031_local), GFP_KERNEL);
- if (!ldata) {
+ ldata = devm_kzalloc(&adev->dev, sizeof(struct pl031_local),
+ GFP_KERNEL);
+ ops = devm_kmemdup(&adev->dev, &vendor->ops, sizeof(vendor->ops),
+ GFP_KERNEL);
+ if (!ldata || !ops) {
ret = -ENOMEM;
goto out;
}
- ldata->vendor = vendor;
-
- ldata->base = ioremap(adev->res.start, resource_size(&adev->res));
+ ldata->vendor = vendor;
+ ldata->base = devm_ioremap(&adev->dev, adev->res.start,
+ resource_size(&adev->res));
if (!ldata->base) {
ret = -ENOMEM;
- goto out_no_remap;
+ goto out;
}
amba_set_drvdata(adev, ldata);
@@ -373,28 +375,32 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
}
}
+ if (!adev->irq[0]) {
+ /* When there's no interrupt, no point in exposing the alarm */
+ ops->read_alarm = NULL;
+ ops->set_alarm = NULL;
+ ops->alarm_irq_enable = NULL;
+ }
+
device_init_wakeup(&adev->dev, true);
ldata->rtc = rtc_device_register("pl031", &adev->dev, ops,
THIS_MODULE);
if (IS_ERR(ldata->rtc)) {
ret = PTR_ERR(ldata->rtc);
- goto out_no_rtc;
+ goto out;
}
- if (request_irq(adev->irq[0], pl031_interrupt,
- vendor->irqflags, "rtc-pl031", ldata)) {
- ret = -EIO;
- goto out_no_irq;
+ if (adev->irq[0]) {
+ ret = request_irq(adev->irq[0], pl031_interrupt,
+ vendor->irqflags, "rtc-pl031", ldata);
+ if (ret)
+ goto out_no_irq;
+ dev_pm_set_wake_irq(&adev->dev, adev->irq[0]);
}
- dev_pm_set_wake_irq(&adev->dev, adev->irq[0]);
return 0;
out_no_irq:
rtc_device_unregister(ldata->rtc);
-out_no_rtc:
- iounmap(ldata->base);
-out_no_remap:
- kfree(ldata);
out:
amba_release_regions(adev);
err_req:
@@ -446,7 +452,7 @@ static struct pl031_vendor_data stv2_pl031 = {
.irqflags = IRQF_SHARED | IRQF_COND_SUSPEND,
};
-static struct amba_id pl031_ids[] = {
+static const struct amba_id pl031_ids[] = {
{
.id = 0x00041031,
.mask = 0x000fffff,
diff --git a/drivers/rtc/rtc-rv3029c2.c b/drivers/rtc/rtc-rv3029c2.c
index aa09771de04f..3d6174eb32f6 100644
--- a/drivers/rtc/rtc-rv3029c2.c
+++ b/drivers/rtc/rtc-rv3029c2.c
@@ -282,13 +282,13 @@ static int rv3029_eeprom_read(struct device *dev, u8 reg,
static int rv3029_eeprom_write(struct device *dev, u8 reg,
u8 const buf[], size_t len)
{
- int ret, err;
+ int ret;
size_t i;
u8 tmp;
- err = rv3029_eeprom_enter(dev);
- if (err < 0)
- return err;
+ ret = rv3029_eeprom_enter(dev);
+ if (ret < 0)
+ return ret;
for (i = 0; i < len; i++, reg++) {
ret = rv3029_read_regs(dev, reg, &tmp, 1);
@@ -304,11 +304,11 @@ static int rv3029_eeprom_write(struct device *dev, u8 reg,
break;
}
- err = rv3029_eeprom_exit(dev);
- if (err < 0)
- return err;
+ ret = rv3029_eeprom_exit(dev);
+ if (ret < 0)
+ return ret;
- return ret;
+ return 0;
}
static int rv3029_eeprom_update_bits(struct device *dev,
@@ -876,6 +876,8 @@ static const struct i2c_device_id rv3029_id[] = {
MODULE_DEVICE_TABLE(i2c, rv3029_id);
static const struct of_device_id rv3029_of_match[] = {
+ { .compatible = "microcrystal,rv3029" },
+ /* Backward compatibility only, do not use compatibles below: */
{ .compatible = "rv3029" },
{ .compatible = "rv3029c2" },
{ .compatible = "mc,rv3029c2" },
diff --git a/drivers/rtc/rtc-rx8010.c b/drivers/rtc/rtc-rx8010.c
index 1ed3403ff8ac..5c5938ab3d86 100644
--- a/drivers/rtc/rtc-rx8010.c
+++ b/drivers/rtc/rtc-rx8010.c
@@ -24,7 +24,6 @@
#define RX8010_MDAY 0x14
#define RX8010_MONTH 0x15
#define RX8010_YEAR 0x16
-#define RX8010_YEAR 0x16
#define RX8010_RESV17 0x17
#define RX8010_ALMIN 0x18
#define RX8010_ALHOUR 0x19
@@ -36,7 +35,7 @@
#define RX8010_CTRL 0x1F
/* 0x20 to 0x2F are user registers */
#define RX8010_RESV30 0x30
-#define RX8010_RESV31 0x32
+#define RX8010_RESV31 0x31
#define RX8010_IRQ 0x32
#define RX8010_EXT_WADA BIT(3)
@@ -248,7 +247,7 @@ static int rx8010_init_client(struct i2c_client *client)
rx8010->ctrlreg = (ctrl[1] & ~RX8010_CTRL_TEST);
- return err;
+ return 0;
}
static int rx8010_read_alarm(struct device *dev, struct rtc_wkalrm *t)
@@ -277,7 +276,7 @@ static int rx8010_read_alarm(struct device *dev, struct rtc_wkalrm *t)
t->enabled = !!(rx8010->ctrlreg & RX8010_CTRL_AIE);
t->pending = (flagreg & RX8010_FLAG_AF) && t->enabled;
- return err;
+ return 0;
}
static int rx8010_set_alarm(struct device *dev, struct rtc_wkalrm *t)
diff --git a/drivers/rtc/rtc-sc27xx.c b/drivers/rtc/rtc-sc27xx.c
new file mode 100644
index 000000000000..d544d5268757
--- /dev/null
+++ b/drivers/rtc/rtc-sc27xx.c
@@ -0,0 +1,662 @@
+/*
+ * Copyright (C) 2017 Spreadtrum Communications Inc.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/rtc.h>
+
+#define SPRD_RTC_SEC_CNT_VALUE 0x0
+#define SPRD_RTC_MIN_CNT_VALUE 0x4
+#define SPRD_RTC_HOUR_CNT_VALUE 0x8
+#define SPRD_RTC_DAY_CNT_VALUE 0xc
+#define SPRD_RTC_SEC_CNT_UPD 0x10
+#define SPRD_RTC_MIN_CNT_UPD 0x14
+#define SPRD_RTC_HOUR_CNT_UPD 0x18
+#define SPRD_RTC_DAY_CNT_UPD 0x1c
+#define SPRD_RTC_SEC_ALM_UPD 0x20
+#define SPRD_RTC_MIN_ALM_UPD 0x24
+#define SPRD_RTC_HOUR_ALM_UPD 0x28
+#define SPRD_RTC_DAY_ALM_UPD 0x2c
+#define SPRD_RTC_INT_EN 0x30
+#define SPRD_RTC_INT_RAW_STS 0x34
+#define SPRD_RTC_INT_CLR 0x38
+#define SPRD_RTC_INT_MASK_STS 0x3C
+#define SPRD_RTC_SEC_ALM_VALUE 0x40
+#define SPRD_RTC_MIN_ALM_VALUE 0x44
+#define SPRD_RTC_HOUR_ALM_VALUE 0x48
+#define SPRD_RTC_DAY_ALM_VALUE 0x4c
+#define SPRD_RTC_SPG_VALUE 0x50
+#define SPRD_RTC_SPG_UPD 0x54
+#define SPRD_RTC_SEC_AUXALM_UPD 0x60
+#define SPRD_RTC_MIN_AUXALM_UPD 0x64
+#define SPRD_RTC_HOUR_AUXALM_UPD 0x68
+#define SPRD_RTC_DAY_AUXALM_UPD 0x6c
+
+/* BIT & MASK definition for SPRD_RTC_INT_* registers */
+#define SPRD_RTC_SEC_EN BIT(0)
+#define SPRD_RTC_MIN_EN BIT(1)
+#define SPRD_RTC_HOUR_EN BIT(2)
+#define SPRD_RTC_DAY_EN BIT(3)
+#define SPRD_RTC_ALARM_EN BIT(4)
+#define SPRD_RTC_HRS_FORMAT_EN BIT(5)
+#define SPRD_RTC_AUXALM_EN BIT(6)
+#define SPRD_RTC_SPG_UPD_EN BIT(7)
+#define SPRD_RTC_SEC_UPD_EN BIT(8)
+#define SPRD_RTC_MIN_UPD_EN BIT(9)
+#define SPRD_RTC_HOUR_UPD_EN BIT(10)
+#define SPRD_RTC_DAY_UPD_EN BIT(11)
+#define SPRD_RTC_ALMSEC_UPD_EN BIT(12)
+#define SPRD_RTC_ALMMIN_UPD_EN BIT(13)
+#define SPRD_RTC_ALMHOUR_UPD_EN BIT(14)
+#define SPRD_RTC_ALMDAY_UPD_EN BIT(15)
+#define SPRD_RTC_INT_MASK GENMASK(15, 0)
+
+#define SPRD_RTC_TIME_INT_MASK \
+ (SPRD_RTC_SEC_UPD_EN | SPRD_RTC_MIN_UPD_EN | \
+ SPRD_RTC_HOUR_UPD_EN | SPRD_RTC_DAY_UPD_EN)
+
+#define SPRD_RTC_ALMTIME_INT_MASK \
+ (SPRD_RTC_ALMSEC_UPD_EN | SPRD_RTC_ALMMIN_UPD_EN | \
+ SPRD_RTC_ALMHOUR_UPD_EN | SPRD_RTC_ALMDAY_UPD_EN)
+
+#define SPRD_RTC_ALM_INT_MASK \
+ (SPRD_RTC_SEC_EN | SPRD_RTC_MIN_EN | \
+ SPRD_RTC_HOUR_EN | SPRD_RTC_DAY_EN | \
+ SPRD_RTC_ALARM_EN | SPRD_RTC_AUXALM_EN)
+
+/* second/minute/hour/day values mask definition */
+#define SPRD_RTC_SEC_MASK GENMASK(5, 0)
+#define SPRD_RTC_MIN_MASK GENMASK(5, 0)
+#define SPRD_RTC_HOUR_MASK GENMASK(4, 0)
+#define SPRD_RTC_DAY_MASK GENMASK(15, 0)
+
+/* alarm lock definition for SPRD_RTC_SPG_UPD register */
+#define SPRD_RTC_ALMLOCK_MASK GENMASK(7, 0)
+#define SPRD_RTC_ALM_UNLOCK 0xa5
+#define SPRD_RTC_ALM_LOCK (~SPRD_RTC_ALM_UNLOCK & \
+ SPRD_RTC_ALMLOCK_MASK)
+
+/* SPG values definition for SPRD_RTC_SPG_UPD register */
+#define SPRD_RTC_POWEROFF_ALM_FLAG BIT(8)
+#define SPRD_RTC_POWER_RESET_FLAG BIT(9)
+
+/* timeout of synchronizing time and alarm registers (us) */
+#define SPRD_RTC_POLL_TIMEOUT 200000
+#define SPRD_RTC_POLL_DELAY_US 20000
+
+struct sprd_rtc {
+ struct rtc_device *rtc;
+ struct regmap *regmap;
+ struct device *dev;
+ u32 base;
+ int irq;
+ bool valid;
+};
+
+/*
+ * The Spreadtrum RTC controller has 3 groups of registers: time, normal
+ * alarm and auxiliary alarm. The time registers set the RTC time, the
+ * normal alarm registers set the normal alarm, and the auxiliary alarm
+ * registers set the auxiliary alarm. Both alarm events can wake the system
+ * from deep sleep, but only the normal alarm event can power the system up
+ * from the powered-down state.
+ */
+enum sprd_rtc_reg_types {
+ SPRD_RTC_TIME,
+ SPRD_RTC_ALARM,
+ SPRD_RTC_AUX_ALARM,
+};
+
+static int sprd_rtc_clear_alarm_ints(struct sprd_rtc *rtc)
+{
+ return regmap_write(rtc->regmap, rtc->base + SPRD_RTC_INT_CLR,
+ SPRD_RTC_ALM_INT_MASK);
+}
+
+static int sprd_rtc_disable_ints(struct sprd_rtc *rtc)
+{
+ int ret;
+
+ ret = regmap_update_bits(rtc->regmap, rtc->base + SPRD_RTC_INT_EN,
+ SPRD_RTC_INT_MASK, 0);
+ if (ret)
+ return ret;
+
+ return regmap_write(rtc->regmap, rtc->base + SPRD_RTC_INT_CLR,
+ SPRD_RTC_INT_MASK);
+}
+
+static int sprd_rtc_lock_alarm(struct sprd_rtc *rtc, bool lock)
+{
+ int ret;
+ u32 val;
+
+ ret = regmap_read(rtc->regmap, rtc->base + SPRD_RTC_SPG_VALUE, &val);
+ if (ret)
+ return ret;
+
+ val &= ~(SPRD_RTC_ALMLOCK_MASK | SPRD_RTC_POWEROFF_ALM_FLAG);
+ if (lock)
+ val |= SPRD_RTC_ALM_LOCK;
+ else
+ val |= SPRD_RTC_ALM_UNLOCK | SPRD_RTC_POWEROFF_ALM_FLAG;
+
+ ret = regmap_write(rtc->regmap, rtc->base + SPRD_RTC_SPG_UPD, val);
+ if (ret)
+ return ret;
+
+ /* wait until the SPG value is updated successfully */
+ ret = regmap_read_poll_timeout(rtc->regmap,
+ rtc->base + SPRD_RTC_INT_RAW_STS, val,
+ (val & SPRD_RTC_SPG_UPD_EN),
+ SPRD_RTC_POLL_DELAY_US,
+ SPRD_RTC_POLL_TIMEOUT);
+ if (ret) {
+ dev_err(rtc->dev, "failed to update SPG value:%d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int sprd_rtc_get_secs(struct sprd_rtc *rtc, enum sprd_rtc_reg_types type,
+ time64_t *secs)
+{
+ u32 sec_reg, min_reg, hour_reg, day_reg;
+ u32 val, sec, min, hour, day;
+ int ret;
+
+ switch (type) {
+ case SPRD_RTC_TIME:
+ sec_reg = SPRD_RTC_SEC_CNT_VALUE;
+ min_reg = SPRD_RTC_MIN_CNT_VALUE;
+ hour_reg = SPRD_RTC_HOUR_CNT_VALUE;
+ day_reg = SPRD_RTC_DAY_CNT_VALUE;
+ break;
+ case SPRD_RTC_ALARM:
+ sec_reg = SPRD_RTC_SEC_ALM_VALUE;
+ min_reg = SPRD_RTC_MIN_ALM_VALUE;
+ hour_reg = SPRD_RTC_HOUR_ALM_VALUE;
+ day_reg = SPRD_RTC_DAY_ALM_VALUE;
+ break;
+ case SPRD_RTC_AUX_ALARM:
+ sec_reg = SPRD_RTC_SEC_AUXALM_UPD;
+ min_reg = SPRD_RTC_MIN_AUXALM_UPD;
+ hour_reg = SPRD_RTC_HOUR_AUXALM_UPD;
+ day_reg = SPRD_RTC_DAY_AUXALM_UPD;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = regmap_read(rtc->regmap, rtc->base + sec_reg, &val);
+ if (ret)
+ return ret;
+
+ sec = val & SPRD_RTC_SEC_MASK;
+
+ ret = regmap_read(rtc->regmap, rtc->base + min_reg, &val);
+ if (ret)
+ return ret;
+
+ min = val & SPRD_RTC_MIN_MASK;
+
+ ret = regmap_read(rtc->regmap, rtc->base + hour_reg, &val);
+ if (ret)
+ return ret;
+
+ hour = val & SPRD_RTC_HOUR_MASK;
+
+ ret = regmap_read(rtc->regmap, rtc->base + day_reg, &val);
+ if (ret)
+ return ret;
+
+ day = val & SPRD_RTC_DAY_MASK;
+ *secs = (((time64_t)(day * 24) + hour) * 60 + min) * 60 + sec;
+ return 0;
+}
+
+static int sprd_rtc_set_secs(struct sprd_rtc *rtc, enum sprd_rtc_reg_types type,
+ time64_t secs)
+{
+ u32 sec_reg, min_reg, hour_reg, day_reg, sts_mask;
+ u32 sec, min, hour, day, val;
+ int ret, rem;
+
+ /* convert seconds to RTC time format */
+ day = div_s64_rem(secs, 86400, &rem);
+ hour = rem / 3600;
+ rem -= hour * 3600;
+ min = rem / 60;
+ sec = rem - min * 60;
+
+ switch (type) {
+ case SPRD_RTC_TIME:
+ sec_reg = SPRD_RTC_SEC_CNT_UPD;
+ min_reg = SPRD_RTC_MIN_CNT_UPD;
+ hour_reg = SPRD_RTC_HOUR_CNT_UPD;
+ day_reg = SPRD_RTC_DAY_CNT_UPD;
+ sts_mask = SPRD_RTC_TIME_INT_MASK;
+ break;
+ case SPRD_RTC_ALARM:
+ sec_reg = SPRD_RTC_SEC_ALM_UPD;
+ min_reg = SPRD_RTC_MIN_ALM_UPD;
+ hour_reg = SPRD_RTC_HOUR_ALM_UPD;
+ day_reg = SPRD_RTC_DAY_ALM_UPD;
+ sts_mask = SPRD_RTC_ALMTIME_INT_MASK;
+ break;
+ case SPRD_RTC_AUX_ALARM:
+ sec_reg = SPRD_RTC_SEC_AUXALM_UPD;
+ min_reg = SPRD_RTC_MIN_AUXALM_UPD;
+ hour_reg = SPRD_RTC_HOUR_AUXALM_UPD;
+ day_reg = SPRD_RTC_DAY_AUXALM_UPD;
+ sts_mask = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = regmap_write(rtc->regmap, rtc->base + sec_reg, sec);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(rtc->regmap, rtc->base + min_reg, min);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(rtc->regmap, rtc->base + hour_reg, hour);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(rtc->regmap, rtc->base + day_reg, day);
+ if (ret)
+ return ret;
+
+ if (type == SPRD_RTC_AUX_ALARM)
+ return 0;
+
+ /*
+	 * The time and normal alarm registers sit in the always-powered-on
+	 * region supplied by VDDRTC, so updating them takes a long time,
+	 * about 125 ms. Wait here until all values have been updated
+	 * successfully.
+ */
+ ret = regmap_read_poll_timeout(rtc->regmap,
+ rtc->base + SPRD_RTC_INT_RAW_STS, val,
+ ((val & sts_mask) == sts_mask),
+ SPRD_RTC_POLL_DELAY_US,
+ SPRD_RTC_POLL_TIMEOUT);
+ if (ret < 0) {
+ dev_err(rtc->dev, "set time/alarm values timeout\n");
+ return ret;
+ }
+
+ return regmap_write(rtc->regmap, rtc->base + SPRD_RTC_INT_CLR,
+ sts_mask);
+}
+
+static int sprd_rtc_read_aux_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct sprd_rtc *rtc = dev_get_drvdata(dev);
+ time64_t secs;
+ u32 val;
+ int ret;
+
+ ret = sprd_rtc_get_secs(rtc, SPRD_RTC_AUX_ALARM, &secs);
+ if (ret)
+ return ret;
+
+ rtc_time64_to_tm(secs, &alrm->time);
+
+ ret = regmap_read(rtc->regmap, rtc->base + SPRD_RTC_INT_EN, &val);
+ if (ret)
+ return ret;
+
+ alrm->enabled = !!(val & SPRD_RTC_AUXALM_EN);
+
+ ret = regmap_read(rtc->regmap, rtc->base + SPRD_RTC_INT_RAW_STS, &val);
+ if (ret)
+ return ret;
+
+ alrm->pending = !!(val & SPRD_RTC_AUXALM_EN);
+ return 0;
+}
+
+static int sprd_rtc_set_aux_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct sprd_rtc *rtc = dev_get_drvdata(dev);
+ time64_t secs = rtc_tm_to_time64(&alrm->time);
+ int ret;
+
+ /* clear the auxiliary alarm interrupt status */
+ ret = regmap_write(rtc->regmap, rtc->base + SPRD_RTC_INT_CLR,
+ SPRD_RTC_AUXALM_EN);
+ if (ret)
+ return ret;
+
+ ret = sprd_rtc_set_secs(rtc, SPRD_RTC_AUX_ALARM, secs);
+ if (ret)
+ return ret;
+
+ if (alrm->enabled) {
+ ret = regmap_update_bits(rtc->regmap,
+ rtc->base + SPRD_RTC_INT_EN,
+ SPRD_RTC_AUXALM_EN,
+ SPRD_RTC_AUXALM_EN);
+ } else {
+ ret = regmap_update_bits(rtc->regmap,
+ rtc->base + SPRD_RTC_INT_EN,
+ SPRD_RTC_AUXALM_EN, 0);
+ }
+
+ return ret;
+}
+
+static int sprd_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct sprd_rtc *rtc = dev_get_drvdata(dev);
+ time64_t secs;
+ int ret;
+
+ if (!rtc->valid) {
+ dev_warn(dev, "RTC values are invalid\n");
+ return -EINVAL;
+ }
+
+ ret = sprd_rtc_get_secs(rtc, SPRD_RTC_TIME, &secs);
+ if (ret)
+ return ret;
+
+ rtc_time64_to_tm(secs, tm);
+ return rtc_valid_tm(tm);
+}
+
+static int sprd_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct sprd_rtc *rtc = dev_get_drvdata(dev);
+ time64_t secs = rtc_tm_to_time64(tm);
+ u32 val;
+ int ret;
+
+ ret = sprd_rtc_set_secs(rtc, SPRD_RTC_TIME, secs);
+ if (ret)
+ return ret;
+
+ if (!rtc->valid) {
+ /*
+		 * Set SPRD_RTC_POWER_RESET_FLAG to indicate that the RTC now
+		 * has valid time values.
+ */
+ ret = regmap_update_bits(rtc->regmap,
+ rtc->base + SPRD_RTC_SPG_UPD,
+ SPRD_RTC_POWER_RESET_FLAG,
+ SPRD_RTC_POWER_RESET_FLAG);
+ if (ret)
+ return ret;
+
+ ret = regmap_read_poll_timeout(rtc->regmap,
+ rtc->base + SPRD_RTC_INT_RAW_STS,
+ val, (val & SPRD_RTC_SPG_UPD_EN),
+ SPRD_RTC_POLL_DELAY_US,
+ SPRD_RTC_POLL_TIMEOUT);
+ if (ret) {
+ dev_err(rtc->dev, "failed to update SPG value:%d\n",
+ ret);
+ return ret;
+ }
+
+ rtc->valid = true;
+ }
+
+ return 0;
+}
+
+static int sprd_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct sprd_rtc *rtc = dev_get_drvdata(dev);
+ time64_t secs;
+ int ret;
+ u32 val;
+
+ /*
+ * If aie_timer is enabled, we should get the normal alarm time.
+	 * Otherwise we should get the auxiliary alarm time.
+ */
+ if (rtc->rtc && rtc->rtc->aie_timer.enabled == 0)
+ return sprd_rtc_read_aux_alarm(dev, alrm);
+
+ ret = sprd_rtc_get_secs(rtc, SPRD_RTC_ALARM, &secs);
+ if (ret)
+ return ret;
+
+ rtc_time64_to_tm(secs, &alrm->time);
+
+ ret = regmap_read(rtc->regmap, rtc->base + SPRD_RTC_INT_EN, &val);
+ if (ret)
+ return ret;
+
+ alrm->enabled = !!(val & SPRD_RTC_ALARM_EN);
+
+ ret = regmap_read(rtc->regmap, rtc->base + SPRD_RTC_INT_RAW_STS, &val);
+ if (ret)
+ return ret;
+
+ alrm->pending = !!(val & SPRD_RTC_ALARM_EN);
+ return 0;
+}
+
+static int sprd_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct sprd_rtc *rtc = dev_get_drvdata(dev);
+ time64_t secs = rtc_tm_to_time64(&alrm->time);
+ struct rtc_time aie_time =
+ rtc_ktime_to_tm(rtc->rtc->aie_timer.node.expires);
+ int ret;
+
+ /*
+	 * There are 2 groups of alarms: the normal alarm and the auxiliary
+	 * alarm. Both alarm events can wake the system from deep sleep, but
+	 * only the normal alarm event can power the system up from the
+	 * powered-down state. Moreover, updating the auxiliary alarm
+	 * registers does not require the ~125 ms polling. Thus the auxiliary
+	 * alarm is usually used to wake the system from deep sleep, while
+	 * the normal alarm (with status polling) is used for the other
+	 * scenarios.
+	 *
+	 * So check whether the alarm time was set by aie_timer: if yes, set
+	 * the normal alarm; if not, set the auxiliary alarm, which means it
+	 * is just a wake event.
+ */
+ if (!rtc->rtc->aie_timer.enabled || rtc_tm_sub(&aie_time, &alrm->time))
+ return sprd_rtc_set_aux_alarm(dev, alrm);
+
+	/* clear the alarm interrupt status first */
+ ret = regmap_write(rtc->regmap, rtc->base + SPRD_RTC_INT_CLR,
+ SPRD_RTC_ALARM_EN);
+ if (ret)
+ return ret;
+
+ ret = sprd_rtc_set_secs(rtc, SPRD_RTC_ALARM, secs);
+ if (ret)
+ return ret;
+
+ if (alrm->enabled) {
+ ret = regmap_update_bits(rtc->regmap,
+ rtc->base + SPRD_RTC_INT_EN,
+ SPRD_RTC_ALARM_EN,
+ SPRD_RTC_ALARM_EN);
+ if (ret)
+ return ret;
+
+ /* unlock the alarm to enable the alarm function. */
+ ret = sprd_rtc_lock_alarm(rtc, false);
+ } else {
+ regmap_update_bits(rtc->regmap,
+ rtc->base + SPRD_RTC_INT_EN,
+ SPRD_RTC_ALARM_EN, 0);
+
+ /*
+		 * Lock the alarm function so that a spurious alarm event
+		 * cannot power up the system.
+ */
+ ret = sprd_rtc_lock_alarm(rtc, true);
+ }
+
+ return ret;
+}
+
+static int sprd_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct sprd_rtc *rtc = dev_get_drvdata(dev);
+ int ret;
+
+ if (enabled) {
+ ret = regmap_update_bits(rtc->regmap,
+ rtc->base + SPRD_RTC_INT_EN,
+ SPRD_RTC_ALARM_EN | SPRD_RTC_AUXALM_EN,
+ SPRD_RTC_ALARM_EN | SPRD_RTC_AUXALM_EN);
+ if (ret)
+ return ret;
+
+ ret = sprd_rtc_lock_alarm(rtc, false);
+ } else {
+ regmap_update_bits(rtc->regmap, rtc->base + SPRD_RTC_INT_EN,
+ SPRD_RTC_ALARM_EN | SPRD_RTC_AUXALM_EN, 0);
+
+ ret = sprd_rtc_lock_alarm(rtc, true);
+ }
+
+ return ret;
+}
+
+static const struct rtc_class_ops sprd_rtc_ops = {
+ .read_time = sprd_rtc_read_time,
+ .set_time = sprd_rtc_set_time,
+ .read_alarm = sprd_rtc_read_alarm,
+ .set_alarm = sprd_rtc_set_alarm,
+ .alarm_irq_enable = sprd_rtc_alarm_irq_enable,
+};
+
+static irqreturn_t sprd_rtc_handler(int irq, void *dev_id)
+{
+ struct sprd_rtc *rtc = dev_id;
+ int ret;
+
+ ret = sprd_rtc_clear_alarm_ints(rtc);
+ if (ret)
+ return IRQ_RETVAL(ret);
+
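+ /* Notify the RTC core of the alarm event. */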
+ rtc_update_irq(rtc->rtc, 1, RTC_AF | RTC_IRQF);
+ return IRQ_HANDLED;
+}
+
+static int sprd_rtc_check_power_down(struct sprd_rtc *rtc)
+{
+ u32 val;
+ int ret;
+
+ ret = regmap_read(rtc->regmap, rtc->base + SPRD_RTC_SPG_VALUE, &val);
+ if (ret)
+ return ret;
+
+ /*
+ * If SPRD_RTC_POWER_RESET_FLAG is not set, the RTC has been powered
+ * down, so the RTC time values are invalid.
+ */
+ rtc->valid = !!(val & SPRD_RTC_POWER_RESET_FLAG);
+ return 0;
+}
+
+static int sprd_rtc_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct sprd_rtc *rtc;
+ int ret;
+
+ rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
+ if (!rtc)
+ return -ENOMEM;
+
+ rtc->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!rtc->regmap)
+ return -ENODEV;
+
+ ret = of_property_read_u32(node, "reg", &rtc->base);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get RTC base address\n");
+ return ret;
+ }
+
+ rtc->irq = platform_get_irq(pdev, 0);
+ if (rtc->irq < 0) {
+ dev_err(&pdev->dev, "failed to get RTC irq number\n");
+ return rtc->irq;
+ }
+
+ rtc->dev = &pdev->dev;
+ platform_set_drvdata(pdev, rtc);
+
+ /* clear and disable all RTC interrupts */
+ ret = sprd_rtc_disable_ints(rtc);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to disable RTC interrupts\n");
+ return ret;
+ }
+
+ /* check if RTC time values are valid */
+ ret = sprd_rtc_check_power_down(rtc);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to check RTC time values\n");
+ return ret;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, rtc->irq, NULL,
+ sprd_rtc_handler,
+ IRQF_ONESHOT | IRQF_EARLY_RESUME,
+ pdev->name, rtc);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to request RTC irq\n");
+ return ret;
+ }
+
+ rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
+ &sprd_rtc_ops, THIS_MODULE);
+ if (IS_ERR(rtc->rtc))
+ return PTR_ERR(rtc->rtc);
+
+ device_init_wakeup(&pdev->dev, 1);
+ return 0;
+}
+
+static int sprd_rtc_remove(struct platform_device *pdev)
+{
+ device_init_wakeup(&pdev->dev, 0);
+ return 0;
+}
+
+static const struct of_device_id sprd_rtc_of_match[] = {
+ { .compatible = "sprd,sc2731-rtc", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sprd_rtc_of_match);
+
+static struct platform_driver sprd_rtc_driver = {
+ .driver = {
+ .name = "sprd-rtc",
+ .of_match_table = sprd_rtc_of_match,
+ },
+ .probe = sprd_rtc_probe,
+ .remove = sprd_rtc_remove,
+};
+module_platform_driver(sprd_rtc_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Spreadtrum RTC Device Driver");
+MODULE_AUTHOR("Baolin Wang <baolin.wang@spreadtrum.com>");
diff --git a/drivers/rtc/rtc-sysfs.c b/drivers/rtc/rtc-sysfs.c
index e364550eb9a7..92ff2edb86a6 100644
--- a/drivers/rtc/rtc-sysfs.c
+++ b/drivers/rtc/rtc-sysfs.c
@@ -72,9 +72,10 @@ since_epoch_show(struct device *dev, struct device_attribute *attr, char *buf)
retval = rtc_read_time(to_rtc_device(dev), &tm);
if (retval == 0) {
- unsigned long time;
- rtc_tm_to_time(&tm, &time);
- retval = sprintf(buf, "%lu\n", time);
+ time64_t time;
+
+ time = rtc_tm_to_time64(&tm);
+ retval = sprintf(buf, "%lld\n", time);
}
return retval;
@@ -132,7 +133,7 @@ static ssize_t
wakealarm_show(struct device *dev, struct device_attribute *attr, char *buf)
{
ssize_t retval;
- unsigned long alarm;
+ time64_t alarm;
struct rtc_wkalrm alm;
/* Don't show disabled alarms. For uniformity, RTC alarms are
@@ -145,8 +146,8 @@ wakealarm_show(struct device *dev, struct device_attribute *attr, char *buf)
*/
retval = rtc_read_alarm(to_rtc_device(dev), &alm);
if (retval == 0 && alm.enabled) {
- rtc_tm_to_time(&alm.time, &alarm);
- retval = sprintf(buf, "%lu\n", alarm);
+ alarm = rtc_tm_to_time64(&alm.time);
+ retval = sprintf(buf, "%lld\n", alarm);
}
return retval;
@@ -157,8 +158,8 @@ wakealarm_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t n)
{
ssize_t retval;
- unsigned long now, alarm;
- unsigned long push = 0;
+ time64_t now, alarm;
+ time64_t push = 0;
struct rtc_wkalrm alm;
struct rtc_device *rtc = to_rtc_device(dev);
const char *buf_ptr;
@@ -170,7 +171,7 @@ wakealarm_store(struct device *dev, struct device_attribute *attr,
retval = rtc_read_time(rtc, &alm.time);
if (retval < 0)
return retval;
- rtc_tm_to_time(&alm.time, &now);
+ now = rtc_tm_to_time64(&alm.time);
buf_ptr = buf;
if (*buf_ptr == '+') {
@@ -181,7 +182,7 @@ wakealarm_store(struct device *dev, struct device_attribute *attr,
} else
adjust = 1;
}
- retval = kstrtoul(buf_ptr, 0, &alarm);
+ retval = kstrtos64(buf_ptr, 0, &alarm);
if (retval)
return retval;
if (adjust) {
@@ -197,7 +198,7 @@ wakealarm_store(struct device *dev, struct device_attribute *attr,
return retval;
if (alm.enabled) {
if (push) {
- rtc_tm_to_time(&alm.time, &push);
+ push = rtc_tm_to_time64(&alm.time);
alarm += push;
} else
return -EBUSY;
@@ -212,7 +213,7 @@ wakealarm_store(struct device *dev, struct device_attribute *attr,
*/
alarm = now + 300;
}
- rtc_time_to_tm(alarm, &alm.time);
+ rtc_time64_to_tm(alarm, &alm.time);
retval = rtc_set_alarm(rtc, &alm);
return (retval < 0) ? retval : n;
diff --git a/drivers/rtc/rtc-xgene.c b/drivers/rtc/rtc-xgene.c
index 65b432a096fe..0c34d3b81279 100644
--- a/drivers/rtc/rtc-xgene.c
+++ b/drivers/rtc/rtc-xgene.c
@@ -52,6 +52,7 @@ struct xgene_rtc_dev {
void __iomem *csr_base;
struct clk *clk;
unsigned int irq_wake;
+ unsigned int irq_enabled;
};
static int xgene_rtc_read_time(struct device *dev, struct rtc_time *tm)
@@ -104,15 +105,19 @@ static int xgene_rtc_alarm_irq_enable(struct device *dev, u32 enabled)
return 0;
}
+static int xgene_rtc_alarm_irq_enabled(struct device *dev)
+{
+ struct xgene_rtc_dev *pdata = dev_get_drvdata(dev);
+
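+ /* The alarm interrupt enable state is reflected by RTC_CCR_IE. */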
+ return readl(pdata->csr_base + RTC_CCR) & RTC_CCR_IE ? 1 : 0;
+}
+
static int xgene_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct xgene_rtc_dev *pdata = dev_get_drvdata(dev);
- unsigned long rtc_time;
unsigned long alarm_time;
- rtc_time = readl(pdata->csr_base + RTC_CCVR);
rtc_tm_to_time(&alrm->time, &alarm_time);
-
pdata->alarm_time = alarm_time;
writel((u32) pdata->alarm_time, pdata->csr_base + RTC_CMR);
@@ -180,12 +185,18 @@ static int xgene_rtc_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Couldn't get the clock for RTC\n");
return -ENODEV;
}
- clk_prepare_enable(pdata->clk);
+ ret = clk_prepare_enable(pdata->clk);
+ if (ret)
+ return ret;
/* Turn on the clock and the crystal */
writel(RTC_CCR_EN, pdata->csr_base + RTC_CCR);
- device_init_wakeup(&pdev->dev, 1);
+ ret = device_init_wakeup(&pdev->dev, 1);
+ if (ret) {
+ clk_disable_unprepare(pdata->clk);
+ return ret;
+ }
pdata->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
&xgene_rtc_ops, THIS_MODULE);
@@ -210,45 +221,55 @@ static int xgene_rtc_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int xgene_rtc_suspend(struct device *dev)
+static int __maybe_unused xgene_rtc_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct xgene_rtc_dev *pdata = platform_get_drvdata(pdev);
int irq;
irq = platform_get_irq(pdev, 0);
+
+ /*
+ * If this RTC alarm will be used to wake the system up, don't
+ * disable it. Otherwise simply disable the alarm and let the
+ * system suspend.
+ */
if (device_may_wakeup(&pdev->dev)) {
if (!enable_irq_wake(irq))
pdata->irq_wake = 1;
} else {
+ pdata->irq_enabled = xgene_rtc_alarm_irq_enabled(dev);
xgene_rtc_alarm_irq_enable(dev, 0);
- clk_disable(pdata->clk);
+ clk_disable_unprepare(pdata->clk);
}
-
return 0;
}
-static int xgene_rtc_resume(struct device *dev)
+static int __maybe_unused xgene_rtc_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct xgene_rtc_dev *pdata = platform_get_drvdata(pdev);
int irq;
+ int rc;
irq = platform_get_irq(pdev, 0);
+
if (device_may_wakeup(&pdev->dev)) {
if (pdata->irq_wake) {
disable_irq_wake(irq);
pdata->irq_wake = 0;
}
} else {
- clk_enable(pdata->clk);
- xgene_rtc_alarm_irq_enable(dev, 1);
+ rc = clk_prepare_enable(pdata->clk);
+ if (rc) {
+ dev_err(dev, "Unable to enable clock, error %d\n", rc);
+ return rc;
+ }
+ xgene_rtc_alarm_irq_enable(dev, pdata->irq_enabled);
}
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(xgene_rtc_pm_ops, xgene_rtc_suspend, xgene_rtc_resume);
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 5b6153f23f01..8e2f767147cb 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1084,24 +1084,35 @@ static int bnx2fc_abts_cleanup(struct bnx2fc_cmd *io_req)
{
struct bnx2fc_rport *tgt = io_req->tgt;
int rc = SUCCESS;
+ unsigned int time_left;
io_req->wait_for_comp = 1;
bnx2fc_initiate_cleanup(io_req);
spin_unlock_bh(&tgt->tgt_lock);
- wait_for_completion(&io_req->tm_done);
-
+ /*
+ * Don't wait forever for the cleanup response, or the SCSI error
+ * handler could end up blocked indefinitely.
+ */
+ time_left = wait_for_completion_timeout(&io_req->tm_done,
+ BNX2FC_FW_TIMEOUT);
io_req->wait_for_comp = 0;
+ if (!time_left)
+ BNX2FC_IO_DBG(io_req, "%s(): Wait for cleanup timed out.\n",
+ __func__);
+
/*
- * release the reference taken in eh_abort to allow the
- * target to re-login after flushing IOs
+ * Release the reference held by the SCSI command; the cleanup
+ * completion hits the BNX2FC_CLEANUP case in
+ * bnx2fc_process_cq_compl() and thus the SCSI command is not
+ * returned by bnx2fc_scsi_done().
*/
kref_put(&io_req->refcount, bnx2fc_cmd_release);
spin_lock_bh(&tgt->tgt_lock);
return rc;
}
+
/**
* bnx2fc_eh_abort - eh_abort_handler api to abort an outstanding
* SCSI command
@@ -1118,6 +1129,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
struct fc_lport *lport;
struct bnx2fc_rport *tgt;
int rc;
+ unsigned int time_left;
rc = fc_block_scsi_eh(sc_cmd);
if (rc)
@@ -1194,6 +1206,11 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
if (cancel_delayed_work(&io_req->timeout_work))
kref_put(&io_req->refcount,
bnx2fc_cmd_release); /* drop timer hold */
+ /*
+ * We don't want to hold off the upper-layer timer, so simply
+ * clean up the command and report that the I/O was successfully
+ * aborted.
+ */
rc = bnx2fc_abts_cleanup(io_req);
 /* This only occurs when a task abort was requested while ABTS
is in progress. Setting the IO_CLEANUP flag will skip the
@@ -1201,7 +1218,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
was a result from the ABTS request rather than the CLEANUP
request */
set_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags);
- goto out;
+ goto done;
}
/* Cancel the current timer running on this io_req */
@@ -1221,7 +1238,11 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
}
spin_unlock_bh(&tgt->tgt_lock);
- wait_for_completion(&io_req->tm_done);
+ /* Wait 2 * RA_TOV + 1 to be sure the timeout function hasn't fired */
+ time_left = wait_for_completion_timeout(&io_req->tm_done,
+ (2 * rp->r_a_tov + 1) * HZ);
+ if (!time_left)
+ BNX2FC_IO_DBG(io_req, "Timed out in eh_abort waiting for tm_done\n");
spin_lock_bh(&tgt->tgt_lock);
io_req->wait_for_comp = 0;
@@ -1233,8 +1254,12 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
/* Let the scsi-ml try to recover this command */
printk(KERN_ERR PFX "abort failed, xid = 0x%x\n",
io_req->xid);
+ /*
+ * Cleanup firmware residuals before returning control back
+ * to SCSI ML.
+ */
rc = bnx2fc_abts_cleanup(io_req);
- goto out;
+ goto done;
} else {
/*
* We come here even when there was a race condition
@@ -1249,7 +1274,6 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
done:
/* release the reference taken in eh_abort */
kref_put(&io_req->refcount, bnx2fc_cmd_release);
-out:
spin_unlock_bh(&tgt->tgt_lock);
return rc;
}
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index fe5a9ea27b5e..78d4aa8df675 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -22,7 +22,7 @@ struct scsi_dev_info_list {
struct list_head dev_info_list;
char vendor[8];
char model[16];
- unsigned flags;
+ blist_flags_t flags;
unsigned compatible; /* for use with scsi_static_device_list entries */
};
@@ -35,7 +35,7 @@ struct scsi_dev_info_list_table {
static const char spaces[] = " "; /* 16 of them */
-static unsigned scsi_default_dev_flags;
+static blist_flags_t scsi_default_dev_flags;
static LIST_HEAD(scsi_dev_info_list);
static char scsi_dev_flags[256];
@@ -52,7 +52,7 @@ static struct {
char *vendor;
char *model;
char *revision; /* revision known to be bad, unused */
- unsigned flags;
+ blist_flags_t flags;
} scsi_static_device_list[] __initdata = {
/*
* The following devices are known not to tolerate a lun != 0 scan
@@ -335,7 +335,7 @@ static void scsi_strcpy_devinfo(char *name, char *to, size_t to_length,
* Returns: 0 OK, -error on failure.
**/
static int scsi_dev_info_list_add(int compatible, char *vendor, char *model,
- char *strflags, int flags)
+ char *strflags, blist_flags_t flags)
{
return scsi_dev_info_list_add_keyed(compatible, vendor, model,
strflags, flags,
@@ -361,7 +361,7 @@ static int scsi_dev_info_list_add(int compatible, char *vendor, char *model,
* Returns: 0 OK, -error on failure.
**/
int scsi_dev_info_list_add_keyed(int compatible, char *vendor, char *model,
- char *strflags, int flags, int key)
+ char *strflags, blist_flags_t flags, int key)
{
struct scsi_dev_info_list *devinfo;
struct scsi_dev_info_list_table *devinfo_table =
@@ -571,9 +571,9 @@ static int scsi_dev_info_list_add_str(char *dev_list)
* matching flags value, else return the host or global default
* settings. Called during scan time.
**/
-int scsi_get_device_flags(struct scsi_device *sdev,
- const unsigned char *vendor,
- const unsigned char *model)
+blist_flags_t scsi_get_device_flags(struct scsi_device *sdev,
+ const unsigned char *vendor,
+ const unsigned char *model)
{
return scsi_get_device_flags_keyed(sdev, vendor, model,
SCSI_DEVINFO_GLOBAL);
@@ -593,7 +593,7 @@ int scsi_get_device_flags(struct scsi_device *sdev,
* flags value, else return the host or global default settings.
* Called during scan time.
**/
-int scsi_get_device_flags_keyed(struct scsi_device *sdev,
+blist_flags_t scsi_get_device_flags_keyed(struct scsi_device *sdev,
const unsigned char *vendor,
const unsigned char *model,
int key)
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index df1368aea9a3..a5946cd64caa 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -50,15 +50,16 @@ enum {
SCSI_DEVINFO_SPI,
};
-extern int scsi_get_device_flags(struct scsi_device *sdev,
- const unsigned char *vendor,
- const unsigned char *model);
-extern int scsi_get_device_flags_keyed(struct scsi_device *sdev,
- const unsigned char *vendor,
- const unsigned char *model, int key);
+extern blist_flags_t scsi_get_device_flags(struct scsi_device *sdev,
+ const unsigned char *vendor,
+ const unsigned char *model);
+extern blist_flags_t scsi_get_device_flags_keyed(struct scsi_device *sdev,
+ const unsigned char *vendor,
+ const unsigned char *model,
+ int key);
extern int scsi_dev_info_list_add_keyed(int compatible, char *vendor,
char *model, char *strflags,
- int flags, int key);
+ blist_flags_t flags, int key);
extern int scsi_dev_info_list_del_keyed(char *vendor, char *model, int key);
extern int scsi_dev_info_add_list(int key, const char *name);
extern int scsi_dev_info_remove_list(int key);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index a0f2a20ea9e9..be5e919db0e8 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -566,7 +566,7 @@ EXPORT_SYMBOL(scsi_sanitize_inquiry_string);
* are copied to the scsi_device any flags value is stored in *@bflags.
**/
static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
- int result_len, int *bflags)
+ int result_len, blist_flags_t *bflags)
{
unsigned char scsi_cmd[MAX_COMMAND_SIZE];
int first_inquiry_len, try_inquiry_len, next_inquiry_len;
diff --git a/drivers/target/iscsi/cxgbit/cxgbit.h b/drivers/target/iscsi/cxgbit/cxgbit.h
index 90388698c222..417b9e66b0cd 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit.h
+++ b/drivers/target/iscsi/cxgbit/cxgbit.h
@@ -165,6 +165,7 @@ enum cxgbit_csk_flags {
CSK_LOGIN_PDU_DONE,
CSK_LOGIN_DONE,
CSK_DDP_ENABLE,
+ CSK_ABORT_RPL_WAIT,
};
struct cxgbit_sock_common {
@@ -321,6 +322,7 @@ int cxgbit_setup_np(struct iscsi_np *, struct sockaddr_storage *);
int cxgbit_setup_conn_digest(struct cxgbit_sock *);
int cxgbit_accept_np(struct iscsi_np *, struct iscsi_conn *);
void cxgbit_free_np(struct iscsi_np *);
+void cxgbit_abort_conn(struct cxgbit_sock *csk);
void cxgbit_free_conn(struct iscsi_conn *);
extern cxgbit_cplhandler_func cxgbit_cplhandlers[NUM_CPL_CMDS];
int cxgbit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
index d4fa41be80f9..92eb57e2adaf 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
@@ -665,6 +665,46 @@ static int cxgbit_send_abort_req(struct cxgbit_sock *csk)
return cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
}
+static void
+__cxgbit_abort_conn(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+ __kfree_skb(skb);
+
+ if (csk->com.state != CSK_STATE_ESTABLISHED)
+ goto no_abort;
+
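+ /*
+ * Let the ABORT_RPL handler know there is a waiter that must be
+ * woken up once the abort completes.
+ */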
+ set_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags);
+ csk->com.state = CSK_STATE_ABORTING;
+
+ cxgbit_send_abort_req(csk);
+
+ return;
+
+no_abort:
+ cxgbit_wake_up(&csk->com.wr_wait, __func__, CPL_ERR_NONE);
+ cxgbit_put_csk(csk);
+}
+
+void cxgbit_abort_conn(struct cxgbit_sock *csk)
+{
+ struct sk_buff *skb = alloc_skb(0, GFP_KERNEL | __GFP_NOFAIL);
+
+ cxgbit_get_csk(csk);
+ cxgbit_init_wr_wait(&csk->com.wr_wait);
+
+ spin_lock_bh(&csk->lock);
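+ /*
+ * If another context currently owns the socket lock, defer the
+ * abort via the backlog queue so the lock owner processes it.
+ */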
+ if (csk->lock_owner) {
+ cxgbit_skcb_rx_backlog_fn(skb) = __cxgbit_abort_conn;
+ __skb_queue_tail(&csk->backlogq, skb);
+ } else {
+ __cxgbit_abort_conn(csk, skb);
+ }
+ spin_unlock_bh(&csk->lock);
+
+ cxgbit_wait_for_reply(csk->com.cdev, &csk->com.wr_wait,
+ csk->tid, 600, __func__);
+}
+
void cxgbit_free_conn(struct iscsi_conn *conn)
{
struct cxgbit_sock *csk = conn->context;
@@ -1709,12 +1749,17 @@ rel_skb:
static void cxgbit_abort_rpl_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
{
+ struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
+
pr_debug("%s: csk %p; tid %u; state %d\n",
__func__, csk, csk->tid, csk->com.state);
switch (csk->com.state) {
case CSK_STATE_ABORTING:
csk->com.state = CSK_STATE_DEAD;
+ if (test_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags))
+ cxgbit_wake_up(&csk->com.wr_wait, __func__,
+ rpl->status);
cxgbit_put_csk(csk);
break;
default:
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
index 5fdb57cac968..768cce0ccb80 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
@@ -275,6 +275,14 @@ void cxgbit_release_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
struct cxgbit_device *cdev = csk->com.cdev;
struct cxgbi_ppm *ppm = cdev2ppm(cdev);
+ /* Abort the TCP connection if DDP is not complete, to avoid
+ * any possibility of DDP after the cmd has been freed.
+ */
+ if (unlikely(cmd->write_data_done !=
+ cmd->se_cmd.data_length))
+ cxgbit_abort_conn(csk);
+
cxgbi_ppm_ppod_release(ppm, ttinfo->idx);
dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl,
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_main.c b/drivers/target/iscsi/cxgbit/cxgbit_main.c
index 4fd775ace541..f3f8856bfb68 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_main.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_main.c
@@ -446,6 +446,7 @@ cxgbit_uld_lro_rx_handler(void *hndl, const __be64 *rsp,
case CPL_RX_ISCSI_DDP:
case CPL_FW4_ACK:
lro_flush = false;
+ /* fall through */
case CPL_ABORT_RPL_RSS:
case CPL_PASS_ESTABLISH:
case CPL_PEER_CLOSE:
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 9e67c7678c86..9eb10d34682c 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -502,7 +502,7 @@ void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
EXPORT_SYMBOL(iscsit_aborted_task);
static void iscsit_do_crypto_hash_buf(struct ahash_request *, const void *,
- u32, u32, u8 *, u8 *);
+ u32, u32, const void *, void *);
static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *);
static int
@@ -523,7 +523,7 @@ iscsit_xmit_nondatain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
ISCSI_HDR_LEN, 0, NULL,
- (u8 *)header_digest);
+ header_digest);
iov[0].iov_len += ISCSI_CRC_LEN;
tx_size += ISCSI_CRC_LEN;
@@ -550,9 +550,8 @@ iscsit_xmit_nondatain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
if (conn->conn_ops->DataDigest) {
iscsit_do_crypto_hash_buf(conn->conn_tx_hash,
data_buf, data_buf_len,
- padding,
- (u8 *)&cmd->pad_bytes,
- (u8 *)&cmd->data_crc);
+ padding, &cmd->pad_bytes,
+ &cmd->data_crc);
iov[niov].iov_base = &cmd->data_crc;
iov[niov++].iov_len = ISCSI_CRC_LEN;
@@ -597,7 +596,7 @@ iscsit_xmit_datain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->pdu,
ISCSI_HDR_LEN, 0, NULL,
- (u8 *)header_digest);
+ header_digest);
iov[0].iov_len += ISCSI_CRC_LEN;
tx_size += ISCSI_CRC_LEN;
@@ -836,6 +835,7 @@ static int iscsit_add_reject_from_cmd(
unsigned char *buf)
{
struct iscsi_conn *conn;
+ const bool do_put = cmd->se_cmd.se_tfo != NULL;
if (!cmd->conn) {
pr_err("cmd->conn is NULL for ITT: 0x%08x\n",
@@ -866,7 +866,7 @@ static int iscsit_add_reject_from_cmd(
 * Perform the kref_put now if se_cmd has already been set up by
 * iscsit_setup_scsi_cmd()
*/
- if (cmd->se_cmd.se_tfo != NULL) {
+ if (do_put) {
pr_debug("iscsi reject: calling target_put_sess_cmd >>>>>>\n");
target_put_sess_cmd(&cmd->se_cmd);
}
@@ -1410,13 +1410,9 @@ static u32 iscsit_do_crypto_hash_sg(
return data_crc;
}
-static void iscsit_do_crypto_hash_buf(
- struct ahash_request *hash,
- const void *buf,
- u32 payload_length,
- u32 padding,
- u8 *pad_bytes,
- u8 *data_crc)
+static void iscsit_do_crypto_hash_buf(struct ahash_request *hash,
+ const void *buf, u32 payload_length, u32 padding,
+ const void *pad_bytes, void *data_crc)
{
struct scatterlist sg[2];
@@ -1462,9 +1458,9 @@ __iscsit_check_dataout_hdr(struct iscsi_conn *conn, void *buf,
iscsit_mod_dataout_timer(cmd);
if ((be32_to_cpu(hdr->offset) + payload_length) > cmd->se_cmd.data_length) {
- pr_err("DataOut Offset: %u, Length %u greater than"
- " iSCSI Command EDTL %u, protocol error.\n",
- hdr->offset, payload_length, cmd->se_cmd.data_length);
+ pr_err("DataOut Offset: %u, Length %u greater than iSCSI Command EDTL %u, protocol error.\n",
+ be32_to_cpu(hdr->offset), payload_length,
+ cmd->se_cmd.data_length);
return iscsit_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_INVALID, buf);
}
@@ -1878,10 +1874,9 @@ static int iscsit_handle_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
}
if (conn->conn_ops->DataDigest) {
- iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
- ping_data, payload_length,
- padding, cmd->pad_bytes,
- (u8 *)&data_crc);
+ iscsit_do_crypto_hash_buf(conn->conn_rx_hash, ping_data,
+ payload_length, padding,
+ cmd->pad_bytes, &data_crc);
if (checksum != data_crc) {
pr_err("Ping data CRC32C DataDigest"
@@ -1962,7 +1957,6 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
struct iscsi_tmr_req *tmr_req;
struct iscsi_tm *hdr;
int out_of_order_cmdsn = 0, ret;
- bool sess_ref = false;
u8 function, tcm_function = TMR_UNKNOWN;
hdr = (struct iscsi_tm *) buf;
@@ -1995,22 +1989,23 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
cmd->data_direction = DMA_NONE;
cmd->tmr_req = kzalloc(sizeof(*cmd->tmr_req), GFP_KERNEL);
- if (!cmd->tmr_req)
+ if (!cmd->tmr_req) {
return iscsit_add_reject_cmd(cmd,
ISCSI_REASON_BOOKMARK_NO_RESOURCES,
buf);
+ }
+
+ transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops,
+ conn->sess->se_sess, 0, DMA_NONE,
+ TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
+
+ target_get_sess_cmd(&cmd->se_cmd, true);
/*
* TASK_REASSIGN for ERL=2 / connection stays inside of
* LIO-Target $FABRIC_MOD
*/
if (function != ISCSI_TM_FUNC_TASK_REASSIGN) {
- transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops,
- conn->sess->se_sess, 0, DMA_NONE,
- TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
-
- target_get_sess_cmd(&cmd->se_cmd, true);
- sess_ref = true;
tcm_function = iscsit_convert_tmf(function);
if (tcm_function == TMR_UNKNOWN) {
pr_err("Unknown iSCSI TMR Function:"
@@ -2101,12 +2096,14 @@ attach:
if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
- if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP)
+ if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP) {
out_of_order_cmdsn = 1;
- else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
+ } else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
+ target_put_sess_cmd(&cmd->se_cmd);
return 0;
- else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
+ } else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) {
return -1;
+ }
}
iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
@@ -2126,12 +2123,8 @@ attach:
* For connection recovery, this is also the default action for
* TMR TASK_REASSIGN.
*/
- if (sess_ref) {
- pr_debug("Handle TMR, using sess_ref=true check\n");
- target_put_sess_cmd(&cmd->se_cmd);
- }
-
iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+ target_put_sess_cmd(&cmd->se_cmd);
return 0;
}
EXPORT_SYMBOL(iscsit_handle_task_mgt_cmd);
@@ -2287,10 +2280,9 @@ iscsit_handle_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
goto reject;
if (conn->conn_ops->DataDigest) {
- iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
- text_in, payload_length,
- padding, (u8 *)&pad_bytes,
- (u8 *)&data_crc);
+ iscsit_do_crypto_hash_buf(conn->conn_rx_hash, text_in,
+ payload_length, padding,
+ &pad_bytes, &data_crc);
if (checksum != data_crc) {
pr_err("Text data CRC32C DataDigest"
@@ -3978,9 +3970,9 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
return;
}
- iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
- buffer, ISCSI_HDR_LEN,
- 0, NULL, (u8 *)&checksum);
+ iscsit_do_crypto_hash_buf(conn->conn_rx_hash, buffer,
+ ISCSI_HDR_LEN, 0, NULL,
+ &checksum);
if (digest != checksum) {
pr_err("HeaderDigest CRC32C failed,"
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 0dd4c45f7575..0ebc4818e132 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -1123,7 +1123,7 @@ static struct se_portal_group *lio_target_tiqn_addtpg(
ret = core_tpg_register(wwn, &tpg->tpg_se_tpg, SCSI_PROTOCOL_ISCSI);
if (ret < 0)
- return NULL;
+ goto free_out;
ret = iscsit_tpg_add_portal_group(tiqn, tpg);
if (ret != 0)
@@ -1135,6 +1135,7 @@ static struct se_portal_group *lio_target_tiqn_addtpg(
return &tpg->tpg_se_tpg;
out:
core_tpg_deregister(&tpg->tpg_se_tpg);
+free_out:
kfree(tpg);
return NULL;
}
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index 76184094a0cf..5efa42b939a1 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -34,7 +34,7 @@
#include "iscsi_target_erl2.h"
#include "iscsi_target.h"
-#define OFFLOAD_BUF_SIZE 32768
+#define OFFLOAD_BUF_SIZE 32768U
/*
* Used to dump excess datain payload for certain error recovery
@@ -56,7 +56,7 @@ int iscsit_dump_data_payload(
if (conn->sess->sess_ops->RDMAExtensions)
return 0;
- length = (buf_len > OFFLOAD_BUF_SIZE) ? OFFLOAD_BUF_SIZE : buf_len;
+ length = min(buf_len, OFFLOAD_BUF_SIZE);
buf = kzalloc(length, GFP_ATOMIC);
if (!buf) {
@@ -67,8 +67,7 @@ int iscsit_dump_data_payload(
memset(&iov, 0, sizeof(struct kvec));
while (offset < buf_len) {
- size = ((offset + length) > buf_len) ?
- (buf_len - offset) : length;
+ size = min(buf_len - offset, length);
iov.iov_len = size;
iov.iov_base = buf;
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index caab1045742d..29a37b242d30 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -1380,10 +1380,8 @@ int iscsi_decode_text_input(
char *key, *value;
struct iscsi_param *param;
- if (iscsi_extract_key_value(start, &key, &value) < 0) {
- kfree(tmpbuf);
- return -1;
- }
+ if (iscsi_extract_key_value(start, &key, &value) < 0)
+ goto free_buffer;
pr_debug("Got key: %s=%s\n", key, value);
@@ -1396,38 +1394,37 @@ int iscsi_decode_text_input(
param = iscsi_check_key(key, phase, sender, param_list);
if (!param) {
- if (iscsi_add_notunderstood_response(key,
- value, param_list) < 0) {
- kfree(tmpbuf);
- return -1;
- }
+ if (iscsi_add_notunderstood_response(key, value,
+ param_list) < 0)
+ goto free_buffer;
+
start += strlen(key) + strlen(value) + 2;
continue;
}
- if (iscsi_check_value(param, value) < 0) {
- kfree(tmpbuf);
- return -1;
- }
+ if (iscsi_check_value(param, value) < 0)
+ goto free_buffer;
start += strlen(key) + strlen(value) + 2;
if (IS_PSTATE_PROPOSER(param)) {
- if (iscsi_check_proposer_state(param, value) < 0) {
- kfree(tmpbuf);
- return -1;
- }
+ if (iscsi_check_proposer_state(param, value) < 0)
+ goto free_buffer;
+
SET_PSTATE_RESPONSE_GOT(param);
} else {
- if (iscsi_check_acceptor_state(param, value, conn) < 0) {
- kfree(tmpbuf);
- return -1;
- }
+ if (iscsi_check_acceptor_state(param, value, conn) < 0)
+ goto free_buffer;
+
SET_PSTATE_ACCEPTOR(param);
}
}
kfree(tmpbuf);
return 0;
+
+free_buffer:
+ kfree(tmpbuf);
+ return -1;
}
int iscsi_encode_text_output(
diff --git a/drivers/target/iscsi/iscsi_target_seq_pdu_list.c b/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
index e446a09c886b..f65e5e584212 100644
--- a/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
+++ b/drivers/target/iscsi/iscsi_target_seq_pdu_list.c
@@ -25,8 +25,6 @@
#include "iscsi_target_tpg.h"
#include "iscsi_target_seq_pdu_list.h"
-#define OFFLOAD_BUF_SIZE 32768
-
#ifdef DEBUG
static void iscsit_dump_seq_list(struct iscsi_cmd *cmd)
{
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 594d07a1e995..4b34f71547c6 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -90,10 +90,10 @@ int iscsit_load_discovery_tpg(void)
*/
param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list);
if (!param)
- goto out;
+ goto free_pl_out;
if (iscsi_update_param_value(param, "CHAP,None") < 0)
- goto out;
+ goto free_pl_out;
tpg->tpg_attrib.authentication = 0;
@@ -105,6 +105,8 @@ int iscsit_load_discovery_tpg(void)
pr_debug("CORE[0] - Allocated Discovery TPG\n");
return 0;
+free_pl_out:
+ iscsi_release_param_list(tpg->param_list);
out:
if (tpg->sid == 1)
core_tpg_deregister(&tpg->tpg_se_tpg);
@@ -119,6 +121,7 @@ void iscsit_release_discovery_tpg(void)
if (!tpg)
return;
+ iscsi_release_param_list(tpg->param_list);
core_tpg_deregister(&tpg->tpg_se_tpg);
kfree(tpg);
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 54f20f184dd6..4435bf374d2d 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -695,6 +695,8 @@ void iscsit_release_cmd(struct iscsi_cmd *cmd)
struct iscsi_session *sess;
struct se_cmd *se_cmd = &cmd->se_cmd;
+ WARN_ON(!list_empty(&cmd->i_conn_node));
+
if (cmd->conn)
sess = cmd->conn->sess;
else
@@ -717,6 +719,8 @@ void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool check_queues)
{
struct iscsi_conn *conn = cmd->conn;
+ WARN_ON(!list_empty(&cmd->i_conn_node));
+
if (cmd->data_direction == DMA_TO_DEVICE) {
iscsit_stop_dataout_timer(cmd);
iscsit_free_r2ts_from_list(cmd);
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 928127642574..e46ca968009c 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -918,7 +918,7 @@ static int core_alua_update_tpg_primary_metadata(
{
unsigned char *md_buf;
struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn;
- char path[ALUA_METADATA_PATH_LEN];
+ char *path;
int len, rc;
md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
@@ -927,8 +927,6 @@ static int core_alua_update_tpg_primary_metadata(
return -ENOMEM;
}
- memset(path, 0, ALUA_METADATA_PATH_LEN);
-
len = snprintf(md_buf, ALUA_MD_BUF_LEN,
"tg_pt_gp_id=%hu\n"
"alua_access_state=0x%02x\n"
@@ -937,11 +935,14 @@ static int core_alua_update_tpg_primary_metadata(
tg_pt_gp->tg_pt_gp_alua_access_state,
tg_pt_gp->tg_pt_gp_alua_access_status);
- snprintf(path, ALUA_METADATA_PATH_LEN,
- "%s/alua/tpgs_%s/%s", db_root, &wwn->unit_serial[0],
- config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
-
- rc = core_alua_write_tpg_metadata(path, md_buf, len);
+ rc = -ENOMEM;
+ path = kasprintf(GFP_KERNEL, "%s/alua/tpgs_%s/%s", db_root,
+ &wwn->unit_serial[0],
+ config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
+ if (path) {
+ rc = core_alua_write_tpg_metadata(path, md_buf, len);
+ kfree(path);
+ }
kfree(md_buf);
return rc;
}
@@ -1209,7 +1210,7 @@ static int core_alua_update_tpg_secondary_metadata(struct se_lun *lun)
{
struct se_portal_group *se_tpg = lun->lun_tpg;
unsigned char *md_buf;
- char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
+ char *path;
int len, rc;
mutex_lock(&lun->lun_tg_pt_md_mutex);
@@ -1221,28 +1222,32 @@ static int core_alua_update_tpg_secondary_metadata(struct se_lun *lun)
goto out_unlock;
}
- memset(path, 0, ALUA_METADATA_PATH_LEN);
- memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
-
- len = snprintf(wwn, ALUA_SECONDARY_METADATA_WWN_LEN, "%s",
- se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg));
-
- if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL)
- snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu",
- se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
-
len = snprintf(md_buf, ALUA_MD_BUF_LEN, "alua_tg_pt_offline=%d\n"
"alua_tg_pt_status=0x%02x\n",
atomic_read(&lun->lun_tg_pt_secondary_offline),
lun->lun_tg_pt_secondary_stat);
- snprintf(path, ALUA_METADATA_PATH_LEN, "%s/alua/%s/%s/lun_%llu",
- db_root, se_tpg->se_tpg_tfo->get_fabric_name(), wwn,
- lun->unpacked_lun);
+ if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL) {
+ path = kasprintf(GFP_KERNEL, "%s/alua/%s/%s+%hu/lun_%llu",
+ db_root, se_tpg->se_tpg_tfo->get_fabric_name(),
+ se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
+ se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg),
+ lun->unpacked_lun);
+ } else {
+ path = kasprintf(GFP_KERNEL, "%s/alua/%s/%s/lun_%llu",
+ db_root, se_tpg->se_tpg_tfo->get_fabric_name(),
+ se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
+ lun->unpacked_lun);
+ }
+ if (!path) {
+ rc = -ENOMEM;
+ goto out_free;
+ }
rc = core_alua_write_tpg_metadata(path, md_buf, len);
+ kfree(path);
+out_free:
kfree(md_buf);
-
out_unlock:
mutex_unlock(&lun->lun_tg_pt_md_mutex);
return rc;
diff --git a/drivers/target/target_core_alua.h b/drivers/target/target_core_alua.h
index 1902cb5c3b52..fc9637cce825 100644
--- a/drivers/target/target_core_alua.h
+++ b/drivers/target/target_core_alua.h
@@ -72,15 +72,6 @@
*/
#define ALUA_DEFAULT_IMPLICIT_TRANS_SECS 0
#define ALUA_MAX_IMPLICIT_TRANS_SECS 255
-/*
- * Used by core_alua_update_tpg_primary_metadata() and
- * core_alua_update_tpg_secondary_metadata()
- */
-#define ALUA_METADATA_PATH_LEN 512
-/*
- * Used by core_alua_update_tpg_secondary_metadata()
- */
-#define ALUA_SECONDARY_METADATA_WWN_LEN 256
/* Used by core_alua_update_tpg_(primary,secondary)_metadata */
#define ALUA_MD_BUF_LEN 1024
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index bd87cc26c6e5..72b1cd1bf9d9 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -1611,12 +1611,12 @@ static match_table_t tokens = {
{Opt_res_type, "res_type=%d"},
{Opt_res_scope, "res_scope=%d"},
{Opt_res_all_tg_pt, "res_all_tg_pt=%d"},
- {Opt_mapped_lun, "mapped_lun=%lld"},
+ {Opt_mapped_lun, "mapped_lun=%u"},
{Opt_target_fabric, "target_fabric=%s"},
{Opt_target_node, "target_node=%s"},
{Opt_tpgt, "tpgt=%d"},
{Opt_port_rtpi, "port_rtpi=%d"},
- {Opt_target_lun, "target_lun=%lld"},
+ {Opt_target_lun, "target_lun=%u"},
{Opt_err, NULL}
};
@@ -1693,7 +1693,7 @@ static ssize_t target_pr_res_aptpl_metadata_store(struct config_item *item,
}
break;
case Opt_sa_res_key:
- ret = kstrtoull(args->from, 0, &tmp_ll);
+ ret = match_u64(args, &tmp_ll);
if (ret < 0) {
pr_err("kstrtoull() failed for sa_res_key=\n");
goto out;
@@ -1727,10 +1727,10 @@ static ssize_t target_pr_res_aptpl_metadata_store(struct config_item *item,
all_tg_pt = (int)arg;
break;
case Opt_mapped_lun:
- ret = match_int(args, &arg);
+ ret = match_u64(args, &tmp_ll);
if (ret)
goto out;
- mapped_lun = (u64)arg;
+ mapped_lun = (u64)tmp_ll;
break;
/*
* PR APTPL Metadata for Target Port
@@ -1768,10 +1768,10 @@ static ssize_t target_pr_res_aptpl_metadata_store(struct config_item *item,
goto out;
break;
case Opt_target_lun:
- ret = match_int(args, &arg);
+ ret = match_u64(args, &tmp_ll);
if (ret)
goto out;
- target_lun = (u64)arg;
+ target_lun = (u64)tmp_ll;
break;
default:
break;
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index e9e917cc6441..e1416b007aa4 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -623,8 +623,6 @@ static struct configfs_attribute *target_fabric_port_attrs[] = {
NULL,
};
-extern struct configfs_item_operations target_core_dev_item_ops;
-
static int target_fabric_port_link(
struct config_item *lun_ci,
struct config_item *se_dev_ci)
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index c629817a8854..9b2c0c773022 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -482,6 +482,10 @@ fd_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
struct inode *inode = file->f_mapping->host;
int ret;
+ if (!nolb)
+ return 0;
+
if (cmd->se_dev->dev_attrib.pi_prot_type) {
ret = fd_do_prot_unmap(cmd, lba, nolb);
if (ret)
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 18e3eb16e756..9384d19a7326 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -89,6 +89,7 @@ int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
void *data);
/* target_core_configfs.c */
+extern struct configfs_item_operations target_core_dev_item_ops;
void target_setup_backend_cits(struct target_backend *);
/* target_core_fabric_configfs.c */
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index dd2cd8048582..b024613f9217 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -58,8 +58,10 @@ void core_pr_dump_initiator_port(
char *buf,
u32 size)
{
- if (!pr_reg->isid_present_at_reg)
+ if (!pr_reg->isid_present_at_reg) {
buf[0] = '\0';
+ return;
+ }
snprintf(buf, size, ",i,0x%s", pr_reg->pr_reg_isid);
}
@@ -351,6 +353,7 @@ static int core_scsi3_pr_seq_non_holder(struct se_cmd *cmd, u32 pr_reg_type,
break;
case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
we = 1;
+ /* fall through */
case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
/*
* Some commands are only allowed for registered I_T Nexuses.
@@ -359,6 +362,7 @@ static int core_scsi3_pr_seq_non_holder(struct se_cmd *cmd, u32 pr_reg_type,
break;
case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
we = 1;
+ /* fall through */
case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
/*
* Each registered I_T Nexus is a reservation holder.
@@ -1521,7 +1525,7 @@ core_scsi3_decode_spec_i_port(
tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL);
if (!tidh_new) {
pr_err("Unable to allocate tidh_new\n");
- return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
}
INIT_LIST_HEAD(&tidh_new->dest_list);
tidh_new->dest_tpg = tpg;
@@ -1533,7 +1537,7 @@ core_scsi3_decode_spec_i_port(
sa_res_key, all_tg_pt, aptpl);
if (!local_pr_reg) {
kfree(tidh_new);
- return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
}
tidh_new->dest_pr_reg = local_pr_reg;
/*
@@ -1553,7 +1557,7 @@ core_scsi3_decode_spec_i_port(
buf = transport_kmap_data_sg(cmd);
if (!buf) {
- ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
goto out;
}
@@ -1767,7 +1771,7 @@ core_scsi3_decode_spec_i_port(
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
kfree(tidh_new);
- ret = TCM_INVALID_PARAMETER_LIST;
+ ret = TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
goto out_unmap;
}
tidh_new->dest_pr_reg = dest_pr_reg;
@@ -1971,24 +1975,21 @@ static int __core_scsi3_write_aptpl_to_file(
struct t10_wwn *wwn = &dev->t10_wwn;
struct file *file;
int flags = O_RDWR | O_CREAT | O_TRUNC;
- char path[512];
+ char *path;
u32 pr_aptpl_buf_len;
int ret;
loff_t pos = 0;
- memset(path, 0, 512);
-
- if (strlen(&wwn->unit_serial[0]) >= 512) {
- pr_err("WWN value for struct se_device does not fit"
- " into path buffer\n");
- return -EMSGSIZE;
- }
+ path = kasprintf(GFP_KERNEL, "%s/pr/aptpl_%s", db_root,
+ &wwn->unit_serial[0]);
+ if (!path)
+ return -ENOMEM;
- snprintf(path, 512, "%s/pr/aptpl_%s", db_root, &wwn->unit_serial[0]);
file = filp_open(path, flags, 0600);
if (IS_ERR(file)) {
pr_err("filp_open(%s) for APTPL metadata"
" failed\n", path);
+ kfree(path);
return PTR_ERR(file);
}
@@ -1999,6 +2000,7 @@ static int __core_scsi3_write_aptpl_to_file(
if (ret < 0)
pr_debug("Error writing APTPL metadata file: %s\n", path);
fput(file);
+ kfree(path);
return (ret < 0) ? -EIO : 0;
}
@@ -2103,7 +2105,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
register_type, 0)) {
pr_err("Unable to allocate"
" struct t10_pr_registration\n");
- return TCM_INVALID_PARAMETER_LIST;
+ return TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
}
} else {
/*
@@ -3215,7 +3217,7 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
*/
buf = transport_kmap_data_sg(cmd);
if (!buf) {
- ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
goto out_put_pr_reg;
}
@@ -3267,7 +3269,7 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
buf = transport_kmap_data_sg(cmd);
if (!buf) {
- ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
goto out_put_pr_reg;
}
proto_ident = (buf[24] & 0x0f);
@@ -3466,7 +3468,7 @@ after_iport_check:
if (core_scsi3_alloc_registration(cmd->se_dev, dest_node_acl,
dest_lun, dest_se_deve, dest_se_deve->mapped_lun,
iport_ptr, sa_res_key, 0, aptpl, 2, 1)) {
- ret = TCM_INVALID_PARAMETER_LIST;
+ ret = TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
goto out;
}
spin_lock(&dev->dev_reservation_lock);
@@ -3528,8 +3530,6 @@ after_iport_check:
core_scsi3_update_and_write_aptpl(cmd->se_dev, aptpl);
- transport_kunmap_data_sg(cmd);
-
core_scsi3_put_pr_reg(dest_pr_reg);
return 0;
out:
@@ -4011,6 +4011,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
* Set the ADDITIONAL DESCRIPTOR LENGTH
*/
put_unaligned_be32(desc_len, &buf[off]);
+ off += 4;
/*
 * Size of full descriptor header minus TransportID
* containing $FABRIC_MOD specific) initiator device/port
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index e22847bd79b9..9c7bc1ca341a 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -133,6 +133,15 @@ static bool __target_check_io_state(struct se_cmd *se_cmd,
spin_unlock(&se_cmd->t_state_lock);
return false;
}
+ if (se_cmd->transport_state & CMD_T_PRE_EXECUTE) {
+ if (se_cmd->scsi_status) {
+ pr_debug("Attempted to abort io tag: %llu early failure"
+ " status: 0x%02x\n", se_cmd->tag,
+ se_cmd->scsi_status);
+ spin_unlock(&se_cmd->t_state_lock);
+ return false;
+ }
+ }
if (sess->sess_tearing_down || se_cmd->cmd_wait_set) {
pr_debug("Attempted to abort io tag: %llu already shutdown,"
" skipping\n", se_cmd->tag);
@@ -217,7 +226,8 @@ static void core_tmr_drain_tmr_list(
* LUN_RESET tmr..
*/
spin_lock_irqsave(&dev->se_tmr_lock, flags);
- list_del_init(&tmr->tmr_list);
+ if (tmr)
+ list_del_init(&tmr->tmr_list);
list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {
cmd = tmr_p->task_cmd;
if (!cmd) {
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 836d552b0385..58caacd54a3b 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -67,7 +67,6 @@ static void transport_complete_task_attr(struct se_cmd *cmd);
static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason);
static void transport_handle_queue_full(struct se_cmd *cmd,
struct se_device *dev, int err, bool write_pending);
-static int transport_put_cmd(struct se_cmd *cmd);
static void target_complete_ok_work(struct work_struct *work);
int init_se_kmem_caches(void)
@@ -668,7 +667,7 @@ int transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
if (transport_cmd_check_stop_to_fabric(cmd))
return 1;
if (remove && ack_kref)
- ret = transport_put_cmd(cmd);
+ ret = target_put_sess_cmd(cmd);
return ret;
}
@@ -1730,9 +1729,6 @@ void transport_generic_request_failure(struct se_cmd *cmd,
{
int ret = 0, post_ret = 0;
- if (transport_check_aborted_status(cmd, 1))
- return;
-
pr_debug("-----[ Storage Engine Exception; sense_reason %d\n",
sense_reason);
target_show_cmd("-----[ ", cmd);
@@ -1741,6 +1737,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
* For SAM Task Attribute emulation for failed struct se_cmd
*/
transport_complete_task_attr(cmd);
+
/*
* Handle special case for COMPARE_AND_WRITE failure, where the
* callback is expected to drop the per device ->caw_sem.
@@ -1749,6 +1746,9 @@ void transport_generic_request_failure(struct se_cmd *cmd,
cmd->transport_complete_callback)
cmd->transport_complete_callback(cmd, false, &post_ret);
+ if (transport_check_aborted_status(cmd, 1))
+ return;
+
switch (sense_reason) {
case TCM_NON_EXISTENT_LUN:
case TCM_UNSUPPORTED_SCSI_OPCODE:
@@ -1772,8 +1772,8 @@ void transport_generic_request_failure(struct se_cmd *cmd,
case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE:
break;
case TCM_OUT_OF_RESOURCES:
- sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- break;
+ cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
+ goto queue_status;
case TCM_RESERVATION_CONFLICT:
/*
* No SENSE Data payload for this case, set SCSI Status
@@ -1795,11 +1795,8 @@ void transport_generic_request_failure(struct se_cmd *cmd,
cmd->orig_fe_lun, 0x2C,
ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
}
- trace_target_cmd_complete(cmd);
- ret = cmd->se_tfo->queue_status(cmd);
- if (ret)
- goto queue_full;
- goto check_stop;
+
+ goto queue_status;
default:
pr_err("Unknown transport error for CDB 0x%02x: %d\n",
cmd->t_task_cdb[0], sense_reason);
@@ -1816,6 +1813,11 @@ check_stop:
transport_cmd_check_stop_to_fabric(cmd);
return;
+queue_status:
+ trace_target_cmd_complete(cmd);
+ ret = cmd->se_tfo->queue_status(cmd);
+ if (!ret)
+ goto check_stop;
queue_full:
transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
}
@@ -1973,6 +1975,7 @@ void target_execute_cmd(struct se_cmd *cmd)
}
cmd->t_state = TRANSPORT_PROCESSING;
+ cmd->transport_state &= ~CMD_T_PRE_EXECUTE;
cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT;
spin_unlock_irq(&cmd->t_state_lock);
@@ -2010,6 +2013,8 @@ static void target_restart_delayed_cmds(struct se_device *dev)
list_del(&cmd->se_delayed_node);
spin_unlock(&dev->delayed_cmd_lock);
+ cmd->transport_state |= CMD_T_SENT;
+
__target_execute_cmd(cmd, true);
if (cmd->sam_task_attr == TCM_ORDERED_TAG)
@@ -2045,6 +2050,8 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
dev->dev_cur_ordered_id);
}
+ cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET;
+
restart:
target_restart_delayed_cmds(dev);
}
@@ -2090,7 +2097,7 @@ static void transport_complete_qf(struct se_cmd *cmd)
ret = cmd->se_tfo->queue_data_in(cmd);
break;
}
- /* Fall through for DMA_TO_DEVICE */
+ /* fall through */
case DMA_NONE:
queue_status:
trace_target_cmd_complete(cmd);
@@ -2268,7 +2275,7 @@ queue_rsp:
goto queue_full;
break;
}
- /* Fall through for DMA_TO_DEVICE */
+ /* fall through */
case DMA_NONE:
queue_status:
trace_target_cmd_complete(cmd);
@@ -2352,22 +2359,6 @@ static inline void transport_free_pages(struct se_cmd *cmd)
cmd->t_bidi_data_nents = 0;
}
-/**
- * transport_put_cmd - release a reference to a command
- * @cmd: command to release
- *
- * This routine releases our reference to the command and frees it if possible.
- */
-static int transport_put_cmd(struct se_cmd *cmd)
-{
- BUG_ON(!cmd->se_tfo);
- /*
- * If this cmd has been setup with target_get_sess_cmd(), drop
- * the kref and call ->release_cmd() in kref callback.
- */
- return target_put_sess_cmd(cmd);
-}
-
void *transport_kmap_data_sg(struct se_cmd *cmd)
{
struct scatterlist *sg = cmd->t_data_sg;
@@ -2570,7 +2561,20 @@ EXPORT_SYMBOL(transport_generic_new_cmd);
static void transport_write_pending_qf(struct se_cmd *cmd)
{
+ unsigned long flags;
int ret;
+ bool stop;
+
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ stop = (cmd->transport_state & (CMD_T_STOP | CMD_T_ABORTED));
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+ if (stop) {
+ pr_debug("%s:%d CMD_T_STOP|CMD_T_ABORTED for ITT: 0x%08llx\n",
+ __func__, __LINE__, cmd->tag);
+ complete_all(&cmd->t_transport_stop_comp);
+ return;
+ }
ret = cmd->se_tfo->write_pending(cmd);
if (ret) {
@@ -2603,7 +2607,7 @@ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
target_wait_free_cmd(cmd, &aborted, &tas);
if (!aborted || tas)
- ret = transport_put_cmd(cmd);
+ ret = target_put_sess_cmd(cmd);
} else {
if (wait_for_tasks)
target_wait_free_cmd(cmd, &aborted, &tas);
@@ -2619,7 +2623,7 @@ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
transport_lun_remove_cmd(cmd);
if (!aborted || tas)
- ret = transport_put_cmd(cmd);
+ ret = target_put_sess_cmd(cmd);
}
/*
* If the task has been internally aborted due to TMR ABORT_TASK
@@ -2664,6 +2668,7 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
ret = -ESHUTDOWN;
goto out;
}
+ se_cmd->transport_state |= CMD_T_PRE_EXECUTE;
list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
out:
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
@@ -3145,6 +3150,21 @@ static const struct sense_info sense_info_table[] = {
.key = NOT_READY,
.asc = 0x08, /* LOGICAL UNIT COMMUNICATION FAILURE */
},
+ [TCM_INSUFFICIENT_REGISTRATION_RESOURCES] = {
+ /*
+ * From spc4r22 sections 5.7.7 and 5.7.8:
+ * If a PERSISTENT RESERVE OUT command with a REGISTER service
+ * action, a REGISTER AND IGNORE EXISTING KEY service action, or a
+ * REGISTER AND MOVE service action is attempted, but there are
+ * insufficient device server resources to complete the operation,
+ * then the command shall be terminated with CHECK CONDITION status,
+ * with the sense key set to ILLEGAL REQUEST, and the additional
+ * sense code set to INSUFFICIENT REGISTRATION RESOURCES.
+ */
+ .key = ILLEGAL_REQUEST,
+ .asc = 0x55,
+ .ascq = 0x04, /* INSUFFICIENT REGISTRATION RESOURCES */
+ },
};
static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index a8eaed2c211a..a415d87f22d2 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -150,6 +150,8 @@ struct tcmu_dev {
wait_queue_head_t nl_cmd_wq;
char dev_config[TCMU_CONFIG_LEN];
+
+ int nl_reply_supported;
};
#define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev)
@@ -430,7 +432,6 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
struct se_device *se_dev = se_cmd->se_dev;
struct tcmu_dev *udev = TCMU_DEV(se_dev);
struct tcmu_cmd *tcmu_cmd;
- int cmd_id;
tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_KERNEL);
if (!tcmu_cmd)
@@ -438,9 +439,6 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
tcmu_cmd->se_cmd = se_cmd;
tcmu_cmd->tcmu_dev = udev;
- if (udev->cmd_time_out)
- tcmu_cmd->deadline = jiffies +
- msecs_to_jiffies(udev->cmd_time_out);
tcmu_cmd_reset_dbi_cur(tcmu_cmd);
tcmu_cmd->dbi_cnt = tcmu_cmd_get_block_cnt(tcmu_cmd);
@@ -451,19 +449,6 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
return NULL;
}
- idr_preload(GFP_KERNEL);
- spin_lock_irq(&udev->commands_lock);
- cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 0,
- USHRT_MAX, GFP_NOWAIT);
- spin_unlock_irq(&udev->commands_lock);
- idr_preload_end();
-
- if (cmd_id < 0) {
- tcmu_free_cmd(tcmu_cmd);
- return NULL;
- }
- tcmu_cmd->cmd_id = cmd_id;
-
return tcmu_cmd;
}
@@ -746,6 +731,30 @@ static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd *tcmu_cmd,
return command_size;
}
+static int tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd)
+{
+ struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
+ unsigned long tmo = udev->cmd_time_out;
+ int cmd_id;
+
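+ /* A cmd_id of zero means no id has been allocated yet (ids start at 1). */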
+ if (tcmu_cmd->cmd_id)
+ return 0;
+
+ cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT);
+ if (cmd_id < 0) {
+ pr_err("tcmu: Could not allocate cmd id.\n");
+ return cmd_id;
+ }
+ tcmu_cmd->cmd_id = cmd_id;
+
+ if (!tmo)
+ return 0;
+
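+ /* Arm the per-device timeout to fire at this command's deadline. */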
+ tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
+ mod_timer(&udev->timeout, tcmu_cmd->deadline);
+ return 0;
+}
+
static sense_reason_t
tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
{
@@ -839,7 +848,6 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
entry = (void *) mb + CMDR_OFF + cmd_head;
memset(entry, 0, command_size);
tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);
- entry->hdr.cmd_id = tcmu_cmd->cmd_id;
/* Handle allocating space from the data area */
tcmu_cmd_reset_dbi_cur(tcmu_cmd);
@@ -877,6 +885,14 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
}
entry->req.iov_bidi_cnt = iov_cnt;
+ ret = tcmu_setup_cmd_timer(tcmu_cmd);
+ if (ret) {
+ tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
+ mutex_unlock(&udev->cmdr_lock);
+ return TCM_OUT_OF_RESOURCES;
+ }
+ entry->hdr.cmd_id = tcmu_cmd->cmd_id;
+
/*
 * Recalculate the command's base size and size according
* to the actual needs
@@ -910,8 +926,6 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
static sense_reason_t
tcmu_queue_cmd(struct se_cmd *se_cmd)
{
- struct se_device *se_dev = se_cmd->se_dev;
- struct tcmu_dev *udev = TCMU_DEV(se_dev);
struct tcmu_cmd *tcmu_cmd;
sense_reason_t ret;
@@ -922,9 +936,6 @@ tcmu_queue_cmd(struct se_cmd *se_cmd)
ret = tcmu_queue_cmd_ring(tcmu_cmd);
if (ret != TCM_NO_SENSE) {
pr_err("TCMU: Could not queue command\n");
- spin_lock_irq(&udev->commands_lock);
- idr_remove(&udev->commands, tcmu_cmd->cmd_id);
- spin_unlock_irq(&udev->commands_lock);
tcmu_free_cmd(tcmu_cmd);
}
@@ -1111,6 +1122,8 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
init_waitqueue_head(&udev->nl_cmd_wq);
spin_lock_init(&udev->nl_cmd_lock);
+ INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL);
+
return &udev->se_dev;
}
@@ -1279,10 +1292,54 @@ static void tcmu_dev_call_rcu(struct rcu_head *p)
kfree(udev);
}
+static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
+{
+ if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
+ kmem_cache_free(tcmu_cmd_cache, cmd);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static void tcmu_blocks_release(struct tcmu_dev *udev)
+{
+ int i;
+ struct page *page;
+
+ /* Try to release all block pages */
+ mutex_lock(&udev->cmdr_lock);
+ for (i = 0; i <= udev->dbi_max; i++) {
+ page = radix_tree_delete(&udev->data_blocks, i);
+ if (page) {
+ __free_page(page);
+ atomic_dec(&global_db_count);
+ }
+ }
+ mutex_unlock(&udev->cmdr_lock);
+}
+
static void tcmu_dev_kref_release(struct kref *kref)
{
struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref);
struct se_device *dev = &udev->se_dev;
+ struct tcmu_cmd *cmd;
+ bool all_expired = true;
+ int i;
+
+ vfree(udev->mb_addr);
+ udev->mb_addr = NULL;
+
+ /* Upper layer should drain all requests before calling this */
+ spin_lock_irq(&udev->commands_lock);
+ idr_for_each_entry(&udev->commands, cmd, i) {
+ if (tcmu_check_and_free_pending_cmd(cmd) != 0)
+ all_expired = false;
+ }
+ idr_destroy(&udev->commands);
+ spin_unlock_irq(&udev->commands_lock);
+ WARN_ON(!all_expired);
+
+ tcmu_blocks_release(udev);
call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
}
@@ -1305,6 +1362,10 @@ static void tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd)
if (!tcmu_kern_cmd_reply_supported)
return;
+
+ if (udev->nl_reply_supported <= 0)
+ return;
+
relock:
spin_lock(&udev->nl_cmd_lock);
@@ -1331,6 +1392,9 @@ static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev)
if (!tcmu_kern_cmd_reply_supported)
return 0;
+ if (udev->nl_reply_supported <= 0)
+ return 0;
+
pr_debug("sleeping for nl reply\n");
wait_for_completion(&nl_cmd->complete);
@@ -1475,8 +1539,6 @@ static int tcmu_configure_device(struct se_device *dev)
WARN_ON(udev->data_size % PAGE_SIZE);
WARN_ON(udev->data_size % DATA_BLOCK_SIZE);
- INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL);
-
info->version = __stringify(TCMU_MAILBOX_VERSION);
info->mem[0].name = "tcm-user command & data buffer";
@@ -1505,6 +1567,12 @@ static int tcmu_configure_device(struct se_device *dev)
dev->dev_attrib.emulate_write_cache = 0;
dev->dev_attrib.hw_queue_depth = 128;
+ /* If the user didn't explicitly disable netlink reply support, use
+ * the module-scope setting.
+ */
+ if (udev->nl_reply_supported >= 0)
+ udev->nl_reply_supported = tcmu_kern_cmd_reply_supported;
+
/*
* Get a ref in case userspace does a close on the uio device before
* LIO has initiated tcmu_free_device.
@@ -1526,6 +1594,7 @@ err_netlink:
uio_unregister_device(&udev->uio_info);
err_register:
vfree(udev->mb_addr);
+ udev->mb_addr = NULL;
err_vzalloc:
kfree(info->name);
info->name = NULL;
@@ -1533,37 +1602,11 @@ err_vzalloc:
return ret;
}
-static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
-{
- if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
- kmem_cache_free(tcmu_cmd_cache, cmd);
- return 0;
- }
- return -EINVAL;
-}
-
static bool tcmu_dev_configured(struct tcmu_dev *udev)
{
return udev->uio_info.uio_dev ? true : false;
}
-static void tcmu_blocks_release(struct tcmu_dev *udev)
-{
- int i;
- struct page *page;
-
- /* Try to release all block pages */
- mutex_lock(&udev->cmdr_lock);
- for (i = 0; i <= udev->dbi_max; i++) {
- page = radix_tree_delete(&udev->data_blocks, i);
- if (page) {
- __free_page(page);
- atomic_dec(&global_db_count);
- }
- }
- mutex_unlock(&udev->cmdr_lock);
-}
-
static void tcmu_free_device(struct se_device *dev)
{
struct tcmu_dev *udev = TCMU_DEV(dev);
@@ -1575,9 +1618,6 @@ static void tcmu_free_device(struct se_device *dev)
static void tcmu_destroy_device(struct se_device *dev)
{
struct tcmu_dev *udev = TCMU_DEV(dev);
- struct tcmu_cmd *cmd;
- bool all_expired = true;
- int i;
del_timer_sync(&udev->timeout);
@@ -1585,20 +1625,6 @@ static void tcmu_destroy_device(struct se_device *dev)
list_del(&udev->node);
mutex_unlock(&root_udev_mutex);
- vfree(udev->mb_addr);
-
- /* Upper layer should drain all requests before calling this */
- spin_lock_irq(&udev->commands_lock);
- idr_for_each_entry(&udev->commands, cmd, i) {
- if (tcmu_check_and_free_pending_cmd(cmd) != 0)
- all_expired = false;
- }
- idr_destroy(&udev->commands);
- spin_unlock_irq(&udev->commands_lock);
- WARN_ON(!all_expired);
-
- tcmu_blocks_release(udev);
-
tcmu_netlink_event(udev, TCMU_CMD_REMOVED_DEVICE, 0, NULL);
uio_unregister_device(&udev->uio_info);
@@ -1609,7 +1635,7 @@ static void tcmu_destroy_device(struct se_device *dev)
enum {
Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors,
- Opt_err,
+ Opt_nl_reply_supported, Opt_err,
};
static match_table_t tokens = {
@@ -1617,6 +1643,7 @@ static match_table_t tokens = {
{Opt_dev_size, "dev_size=%u"},
{Opt_hw_block_size, "hw_block_size=%u"},
{Opt_hw_max_sectors, "hw_max_sectors=%u"},
+ {Opt_nl_reply_supported, "nl_reply_supported=%d"},
{Opt_err, NULL}
};
@@ -1691,6 +1718,17 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
ret = tcmu_set_dev_attrib(&args[0],
&(dev->dev_attrib.hw_max_sectors));
break;
+ case Opt_nl_reply_supported:
+ arg_p = match_strdup(&args[0]);
+ if (!arg_p) {
+ ret = -ENOMEM;
+ break;
+ }
+ ret = kstrtoint(arg_p, 0, &udev->nl_reply_supported);
+ kfree(arg_p);
+ if (ret < 0)
+ pr_err("kstrtoint() failed for nl_reply_supported=\n");
+ break;
default:
break;
}
@@ -1733,8 +1771,7 @@ static ssize_t tcmu_cmd_time_out_show(struct config_item *item, char *page)
{
struct se_dev_attrib *da = container_of(to_config_group(item),
struct se_dev_attrib, da_group);
- struct tcmu_dev *udev = container_of(da->da_dev,
- struct tcmu_dev, se_dev);
+ struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC);
}
@@ -1841,6 +1878,34 @@ static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page,
}
CONFIGFS_ATTR(tcmu_, dev_size);
+static ssize_t tcmu_nl_reply_supported_show(struct config_item *item,
+ char *page)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+
+ return snprintf(page, PAGE_SIZE, "%d\n", udev->nl_reply_supported);
+}
+
+static ssize_t tcmu_nl_reply_supported_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+ s8 val;
+ int ret;
+
+ ret = kstrtos8(page, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ udev->nl_reply_supported = val;
+ return count;
+}
+CONFIGFS_ATTR(tcmu_, nl_reply_supported);
+
static ssize_t tcmu_emulate_write_cache_show(struct config_item *item,
char *page)
{
@@ -1883,6 +1948,7 @@ static struct configfs_attribute *tcmu_attrib_attrs[] = {
&tcmu_attr_dev_config,
&tcmu_attr_dev_size,
&tcmu_attr_emulate_write_cache,
+ &tcmu_attr_nl_reply_supported,
NULL,
};
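(Rough userspace illustration of the new attribute; the configfs path depends on how the backstore was named, so treat it as an assumption. Writing a negative value opts a single device out of waiting for netlink replies, while values >= 0 defer to the module-scope setting when the device is configured.)

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical backstore path -- adjust "user_0/my_dev" to your setup. */
	const char *attr =
		"/sys/kernel/config/target/core/user_0/my_dev/attrib/nl_reply_supported";
	int fd = open(attr, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* "-1": never wait for a userspace netlink reply on this device. */
	if (write(fd, "-1", strlen("-1")) < 0)
		perror("write");
	close(fd);

	return 0;
}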