Diffstat (limited to 'drivers')
127 files changed, 1513 insertions, 541 deletions
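Note: the cpufreq_governor_attr_set.c hunk below swaps the order of kobject_put() and mutex_destroy(). The issue is object lifetime rather than locking: once kobject_put() drops the last reference, the memory containing attr_set (including its mutex) can be freed, so the mutex must be destroyed while the object is still guaranteed to be alive. A minimal userspace sketch of the same ordering rule follows; the type and function names here are hypothetical stand-ins, not the kernel API.

#include <pthread.h>
#include <stdlib.h>

struct attr_set {
	pthread_mutex_t update_lock;
	int usage_count;	/* stand-in for the kobject refcount */
};

static void attr_set_put(struct attr_set *set)
{
	if (--set->usage_count)
		return;

	/* Tear down members while the object is certainly alive... */
	pthread_mutex_destroy(&set->update_lock);
	/* ...then drop the final reference, which frees the memory.
	 * In the reversed order, the destroy would touch freed memory.
	 */
	free(set);
}

int main(void)
{
	struct attr_set *set = calloc(1, sizeof(*set));

	if (!set)
		return 1;
	pthread_mutex_init(&set->update_lock, NULL);
	set->usage_count = 1;
	attr_set_put(set);
	return 0;
}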
diff --git a/drivers/base/arch_numa.c b/drivers/base/arch_numa.c index 46c503486e96..00fb4120a5b3 100644 --- a/drivers/base/arch_numa.c +++ b/drivers/base/arch_numa.c @@ -264,7 +264,7 @@ void __init numa_free_distance(void) size = numa_distance_cnt * numa_distance_cnt * sizeof(numa_distance[0]); - memblock_free(__pa(numa_distance), size); + memblock_free_ptr(numa_distance, size); numa_distance_cnt = 0; numa_distance = NULL; } diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c index a97f33d0c59f..94665037f4a3 100644 --- a/drivers/base/power/trace.c +++ b/drivers/base/power/trace.c @@ -13,6 +13,7 @@ #include <linux/export.h> #include <linux/rtc.h> #include <linux/suspend.h> +#include <linux/init.h> #include <linux/mc146818rtc.h> @@ -165,6 +166,9 @@ void generate_pm_trace(const void *tracedata, unsigned int user) const char *file = *(const char **)(tracedata + 2); unsigned int user_hash_value, file_hash_value; + if (!x86_platform.legacy.rtc) + return; + user_hash_value = user % USERHASH; file_hash_value = hash_string(lineno, file, FILEHASH); set_magic_time(user_hash_value, file_hash_value, dev_hash_value); @@ -267,6 +271,9 @@ static struct notifier_block pm_trace_nb = { static int __init early_resume_init(void) { + if (!x86_platform.legacy.rtc) + return 0; + hash_value_early_read = read_magic_time(); register_pm_notifier(&pm_trace_nb); return 0; @@ -277,6 +284,9 @@ static int __init late_resume_init(void) unsigned int val = hash_value_early_read; unsigned int user, file, dev; + if (!x86_platform.legacy.rtc) + return 0; + user = val % USERHASH; val = val / USERHASH; file = val % FILEHASH; diff --git a/drivers/cpufreq/cpufreq_governor_attr_set.c b/drivers/cpufreq/cpufreq_governor_attr_set.c index 66b05a326910..a6f365b9cc1a 100644 --- a/drivers/cpufreq/cpufreq_governor_attr_set.c +++ b/drivers/cpufreq/cpufreq_governor_attr_set.c @@ -74,8 +74,8 @@ unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *l if (count) return count; - kobject_put(&attr_set->kobj); mutex_destroy(&attr_set->update_lock); + kobject_put(&attr_set->kobj); return 0; } EXPORT_SYMBOL_GPL(gov_attr_set_put); diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 1097f826ad70..8c176b7dae41 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -3205,11 +3205,15 @@ static int __init intel_pstate_init(void) if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) return -ENODEV; - if (no_load) - return -ENODEV; - id = x86_match_cpu(hwp_support_ids); if (id) { + bool hwp_forced = intel_pstate_hwp_is_enabled(); + + if (hwp_forced) + pr_info("HWP enabled by BIOS\n"); + else if (no_load) + return -ENODEV; + copy_cpu_funcs(&core_funcs); /* * Avoid enabling HWP for processors without EPP support, @@ -3219,8 +3223,7 @@ static int __init intel_pstate_init(void) * If HWP is enabled already, though, there is no choice but to * deal with it. 
*/ - if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || - intel_pstate_hwp_is_enabled()) { + if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || hwp_forced) { hwp_active++; hwp_mode_bdw = id->driver_data; intel_pstate.attr = hwp_cpufreq_attrs; @@ -3235,7 +3238,11 @@ static int __init intel_pstate_init(void) goto hwp_cpu_matched; } + pr_info("HWP not enabled\n"); } else { + if (no_load) + return -ENODEV; + id = x86_match_cpu(intel_pstate_cpu_ids); if (!id) { pr_info("CPU model not supported\n"); @@ -3314,10 +3321,9 @@ static int __init intel_pstate_setup(char *str) else if (!strcmp(str, "passive")) default_driver = &intel_cpufreq; - if (!strcmp(str, "no_hwp")) { - pr_info("HWP disabled\n"); + if (!strcmp(str, "no_hwp")) no_hwp = 1; - } + if (!strcmp(str, "force")) force_load = 1; if (!strcmp(str, "hwp_only")) diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c index 284b6bd040b1..d295f405c4bb 100644 --- a/drivers/cpufreq/vexpress-spc-cpufreq.c +++ b/drivers/cpufreq/vexpress-spc-cpufreq.c @@ -451,7 +451,6 @@ static int ve_spc_cpufreq_init(struct cpufreq_policy *policy) static int ve_spc_cpufreq_exit(struct cpufreq_policy *policy) { struct device *cpu_dev; - int cur_cluster = cpu_to_cluster(policy->cpu); cpu_dev = get_cpu_device(policy->cpu); if (!cpu_dev) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index dc3c6b3a00e5..d356e329e6f8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -758,7 +758,7 @@ enum amd_hw_ip_block_type { MAX_HWIP }; -#define HWIP_MAX_INSTANCE 8 +#define HWIP_MAX_INSTANCE 10 struct amd_powerplay { void *pp_handle; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 3003ee1c9487..1d41c2c00623 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -192,6 +192,16 @@ void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm) kgd2kfd_suspend(adev->kfd.dev, run_pm); } +int amdgpu_amdkfd_resume_iommu(struct amdgpu_device *adev) +{ + int r = 0; + + if (adev->kfd.dev) + r = kgd2kfd_resume_iommu(adev->kfd.dev); + + return r; +} + int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm) { int r = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index ec028cf963f5..3bc52b2c604f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -137,6 +137,7 @@ int amdgpu_amdkfd_init(void); void amdgpu_amdkfd_fini(void); void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm); +int amdgpu_amdkfd_resume_iommu(struct amdgpu_device *adev); int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm); void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev, const void *ih_ring_entry); @@ -327,6 +328,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, const struct kgd2kfd_shared_resources *gpu_resources); void kgd2kfd_device_exit(struct kfd_dev *kfd); void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm); +int kgd2kfd_resume_iommu(struct kfd_dev *kfd); int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm); int kgd2kfd_pre_reset(struct kfd_dev *kfd); int kgd2kfd_post_reset(struct kfd_dev *kfd); @@ -365,6 +367,11 @@ static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm) { } +static int __maybe_unused kgd2kfd_resume_iommu(struct kfd_dev *kfd) +{ + return 0; +} + static inline int kgd2kfd_resume(struct kfd_dev 
*kfd, bool run_pm) { return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index 277128846dd1..463b9c0283f7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -1544,20 +1544,18 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev) struct dentry *ent; int r, i; - - ent = debugfs_create_file("amdgpu_preempt_ib", 0600, root, adev, &fops_ib_preempt); - if (!ent) { + if (IS_ERR(ent)) { DRM_ERROR("unable to create amdgpu_preempt_ib debugsfs file\n"); - return -EIO; + return PTR_ERR(ent); } ent = debugfs_create_file("amdgpu_force_sclk", 0200, root, adev, &fops_sclk_set); - if (!ent) { + if (IS_ERR(ent)) { DRM_ERROR("unable to create amdgpu_set_sclk debugsfs file\n"); - return -EIO; + return PTR_ERR(ent); } /* Register debugfs entries for amdgpu_ttm */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 41c6b3aacd37..ab3794c42d36 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2394,6 +2394,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) if (r) goto init_failed; + r = amdgpu_amdkfd_resume_iommu(adev); + if (r) + goto init_failed; + r = amdgpu_device_ip_hw_init_phase1(adev); if (r) goto init_failed; @@ -3148,6 +3152,10 @@ static int amdgpu_device_ip_resume(struct amdgpu_device *adev) { int r; + r = amdgpu_amdkfd_resume_iommu(adev); + if (r) + return r; + r = amdgpu_device_ip_resume_phase1(adev); if (r) return r; @@ -4601,6 +4609,10 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle, dev_warn(tmp_adev->dev, "asic atom init failed!"); } else { dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n"); + r = amdgpu_amdkfd_resume_iommu(tmp_adev); + if (r) + goto out; + r = amdgpu_device_ip_resume_phase1(tmp_adev); if (r) goto out;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index c7797eac83c3..9ff600a38559 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -598,7 +598,7 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev) break; default: adev->gmc.tmz_enabled = false; - dev_warn(adev->dev, + dev_info(adev->dev, "Trusted Memory Zone (TMZ) feature not supported\n"); break; }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c index dc44c946a244..98732518543e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c @@ -757,7 +757,7 @@ Out: return res; } -inline uint32_t amdgpu_ras_eeprom_max_record_count(void) +uint32_t amdgpu_ras_eeprom_max_record_count(void) { return RAS_MAX_RECORD_COUNT; }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h index f95fc61b3021..6bb00578bfbb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h @@ -120,7 +120,7 @@ int amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control, int amdgpu_ras_eeprom_append(struct amdgpu_ras_eeprom_control *control, struct eeprom_table_record *records, const u32 num); -inline uint32_t amdgpu_ras_eeprom_max_record_count(void); +uint32_t amdgpu_ras_eeprom_max_record_count(void); void amdgpu_ras_debugfs_set_ret_size(struct amdgpu_ras_eeprom_control *control);
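Note: the amdgpu_debugfs.c hunk above and the amdgpu_ring.c hunk below fix the same mistake: debugfs_create_file() reports failure through an ERR_PTR()-encoded pointer, never NULL, so an "if (!ent)" check cannot fire, and the old paths also returned an invented errno (-EIO, -ENOMEM) instead of the real one. A sketch of the corrected pattern, assuming a kernel build context; the file name and fops are placeholders.

#include <linux/debugfs.h>
#include <linux/err.h>

static int example_debugfs_init(struct dentry *root,
				const struct file_operations *fops)
{
	struct dentry *ent;

	ent = debugfs_create_file("example", 0600, root, NULL, fops);
	if (IS_ERR(ent))		/* failure is ERR_PTR(), not NULL */
		return PTR_ERR(ent);	/* propagate the real errno */

	return 0;
}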
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 7b634a1517f9..0554576d3695 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -428,8 +428,8 @@ int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, ent = debugfs_create_file(name, S_IFREG | S_IRUGO, root, ring, &amdgpu_debugfs_ring_fops); - if (!ent) - return -ENOMEM; + if (IS_ERR(ent)) + return PTR_ERR(ent); i_size_write(ent->d_inode, ring->ring_size + 12); ring->ent = ent;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 38dade421d46..94126dc39688 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -515,6 +515,15 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, goto out; } + if (bo->type == ttm_bo_type_device && + new_mem->mem_type == TTM_PL_VRAM && + old_mem->mem_type != TTM_PL_VRAM) { + /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU + * accesses the BO after it's moved. + */ + abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; + } + if (adev->mman.buffer_funcs_enabled) { if (((old_mem->mem_type == TTM_PL_SYSTEM && new_mem->mem_type == TTM_PL_VRAM) || @@ -545,15 +554,6 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, return r; } - if (bo->type == ttm_bo_type_device && - new_mem->mem_type == TTM_PL_VRAM && - old_mem->mem_type != TTM_PL_VRAM) { - /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU - * accesses the BO after it's moved. - */ - abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; - } - out: /* update statistics */ atomic64_add(bo->base.size, &adev->num_bytes_moved);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 16a57b70cc1a..98d1b3ab3a46 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -468,6 +468,7 @@ static const struct kfd_device_info navi10_device_info = { .needs_iommu_device = false, .supports_cwsr = true, .needs_pci_atomics = true, + .no_atomic_fw_version = 145, .num_sdma_engines = 2, .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 8, @@ -487,6 +488,7 @@ static const struct kfd_device_info navi12_device_info = { .needs_iommu_device = false, .supports_cwsr = true, .needs_pci_atomics = true, + .no_atomic_fw_version = 145, .num_sdma_engines = 2, .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 8, @@ -506,6 +508,7 @@ static const struct kfd_device_info navi14_device_info = { .needs_iommu_device = false, .supports_cwsr = true, .needs_pci_atomics = true, + .no_atomic_fw_version = 145, .num_sdma_engines = 2, .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 8, @@ -525,6 +528,7 @@ static const struct kfd_device_info sienna_cichlid_device_info = { .needs_iommu_device = false, .supports_cwsr = true, .needs_pci_atomics = true, + .no_atomic_fw_version = 92, .num_sdma_engines = 4, .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 8, @@ -544,6 +548,7 @@ static const struct kfd_device_info navy_flounder_device_info = { .needs_iommu_device = false, .supports_cwsr = true, .needs_pci_atomics = true, + .no_atomic_fw_version = 92, .num_sdma_engines = 2, .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 8, @@ -562,7 +567,8 @@ static const struct kfd_device_info vangogh_device_info = { .mqd_size_aligned = MQD_SIZE_ALIGNED, .needs_iommu_device = false, .supports_cwsr = true, - .needs_pci_atomics = false, + .needs_pci_atomics = true, + .no_atomic_fw_version = 92,
.num_sdma_engines = 1, .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 2, @@ -582,6 +588,7 @@ static const struct kfd_device_info dimgrey_cavefish_device_info = { .needs_iommu_device = false, .supports_cwsr = true, .needs_pci_atomics = true, + .no_atomic_fw_version = 92, .num_sdma_engines = 2, .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 8, @@ -601,6 +608,7 @@ static const struct kfd_device_info beige_goby_device_info = { .needs_iommu_device = false, .supports_cwsr = true, .needs_pci_atomics = true, + .no_atomic_fw_version = 92, .num_sdma_engines = 1, .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 8, @@ -619,7 +627,8 @@ static const struct kfd_device_info yellow_carp_device_info = { .mqd_size_aligned = MQD_SIZE_ALIGNED, .needs_iommu_device = false, .supports_cwsr = true, - .needs_pci_atomics = false, + .needs_pci_atomics = true, + .no_atomic_fw_version = 92, .num_sdma_engines = 1, .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 2, @@ -708,20 +717,6 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, if (!kfd) return NULL; - /* Allow BIF to recode atomics to PCIe 3.0 AtomicOps. - * 32 and 64-bit requests are possible and must be - * supported. - */ - kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kgd); - if (device_info->needs_pci_atomics && - !kfd->pci_atomic_requested) { - dev_info(kfd_device, - "skipped device %x:%x, PCI rejects atomics\n", - pdev->vendor, pdev->device); - kfree(kfd); - return NULL; - } - kfd->kgd = kgd; kfd->device_info = device_info; kfd->pdev = pdev; @@ -821,6 +816,23 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, kfd->vm_info.vmid_num_kfd = kfd->vm_info.last_vmid_kfd - kfd->vm_info.first_vmid_kfd + 1; + /* Allow BIF to recode atomics to PCIe 3.0 AtomicOps. + * 32 and 64-bit requests are possible and must be + * supported. 
+ */ + kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kfd->kgd); + if (!kfd->pci_atomic_requested && + kfd->device_info->needs_pci_atomics && + (!kfd->device_info->no_atomic_fw_version || + kfd->mec_fw_version < kfd->device_info->no_atomic_fw_version)) { + dev_info(kfd_device, + "skipped device %x:%x, PCI rejects atomics %d<%d\n", + kfd->pdev->vendor, kfd->pdev->device, + kfd->mec_fw_version, + kfd->device_info->no_atomic_fw_version); + return false; + } + /* Verify module parameters regarding mapped process number*/ if ((hws_max_conc_proc < 0) || (hws_max_conc_proc > kfd->vm_info.vmid_num_kfd)) { @@ -1057,17 +1069,21 @@ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm) return ret; } -static int kfd_resume(struct kfd_dev *kfd) +int kgd2kfd_resume_iommu(struct kfd_dev *kfd) { int err = 0; err = kfd_iommu_resume(kfd); - if (err) { + if (err) dev_err(kfd_device, "Failed to resume IOMMU for device %x:%x\n", kfd->pdev->vendor, kfd->pdev->device); - return err; - } + return err; +} + +static int kfd_resume(struct kfd_dev *kfd) +{ + int err = 0; err = kfd->dqm->ops.start(kfd->dqm); if (err) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index ab83b0de6b22..6d8f9bb2d905 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -207,6 +207,7 @@ struct kfd_device_info { bool supports_cwsr; bool needs_iommu_device; bool needs_pci_atomics; + uint32_t no_atomic_fw_version; unsigned int num_sdma_engines; unsigned int num_xgmi_sdma_engines; unsigned int num_sdma_queues_per_engine; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 9b1fc54555ee..66c799f5c7cf 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -998,6 +998,8 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_ uint32_t agp_base, agp_bot, agp_top; PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base; + memset(pa_config, 0, sizeof(*pa_config)); + logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18; pt_base = amdgpu_gmc_pd_addr(adev->gart.bo); @@ -6024,21 +6026,23 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable) return 0; #if defined(CONFIG_DRM_AMD_DC_DCN) - work = kzalloc(sizeof(*work), GFP_ATOMIC); - if (!work) - return -ENOMEM; + if (dm->vblank_control_workqueue) { + work = kzalloc(sizeof(*work), GFP_ATOMIC); + if (!work) + return -ENOMEM; - INIT_WORK(&work->work, vblank_control_worker); - work->dm = dm; - work->acrtc = acrtc; - work->enable = enable; + INIT_WORK(&work->work, vblank_control_worker); + work->dm = dm; + work->acrtc = acrtc; + work->enable = enable; - if (acrtc_state->stream) { - dc_stream_retain(acrtc_state->stream); - work->stream = acrtc_state->stream; - } + if (acrtc_state->stream) { + dc_stream_retain(acrtc_state->stream); + work->stream = acrtc_state->stream; + } - queue_work(dm->vblank_control_workqueue, &work->work); + queue_work(dm->vblank_control_workqueue, &work->work); + } #endif return 0; @@ -6792,14 +6796,15 @@ const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = { #if defined(CONFIG_DRM_AMD_DC_DCN) static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, - struct dc_state *dc_state) + struct dc_state *dc_state, + struct dsc_mst_fairness_vars *vars) { struct dc_stream_state *stream = NULL; struct drm_connector *connector; struct drm_connector_state 
*new_con_state; struct amdgpu_dm_connector *aconnector; struct dm_connector_state *dm_conn_state; - int i, j, clock, bpp; + int i, j, clock; int vcpi, pbn_div, pbn = 0; for_each_new_connector_in_state(state, connector, new_con_state, i) { @@ -6838,9 +6843,15 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, } pbn_div = dm_mst_get_pbn_divider(stream->link); - bpp = stream->timing.dsc_cfg.bits_per_pixel; clock = stream->timing.pix_clk_100hz / 10; - pbn = drm_dp_calc_pbn_mode(clock, bpp, true); + /* pbn is calculated by compute_mst_dsc_configs_for_state*/ + for (j = 0; j < dc_state->stream_count; j++) { + if (vars[j].aconnector == aconnector) { + pbn = vars[j].pbn; + break; + } + } + vcpi = drm_dp_mst_atomic_enable_dsc(state, aconnector->port, pbn, pbn_div, @@ -7519,6 +7530,32 @@ static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder, } } +static void amdgpu_set_panel_orientation(struct drm_connector *connector) +{ + struct drm_encoder *encoder; + struct amdgpu_encoder *amdgpu_encoder; + const struct drm_display_mode *native_mode; + + if (connector->connector_type != DRM_MODE_CONNECTOR_eDP && + connector->connector_type != DRM_MODE_CONNECTOR_LVDS) + return; + + encoder = amdgpu_dm_connector_to_encoder(connector); + if (!encoder) + return; + + amdgpu_encoder = to_amdgpu_encoder(encoder); + + native_mode = &amdgpu_encoder->native_mode; + if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0) + return; + + drm_connector_set_panel_orientation_with_quirk(connector, + DRM_MODE_PANEL_ORIENTATION_UNKNOWN, + native_mode->hdisplay, + native_mode->vdisplay); +} + static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector, struct edid *edid) { @@ -7547,6 +7584,8 @@ static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector, * restored here. */ amdgpu_dm_update_freesync_caps(connector, edid); + + amdgpu_set_panel_orientation(connector); } else { amdgpu_dm_connector->num_modes = 0; } @@ -8058,8 +8097,26 @@ static bool is_content_protection_different(struct drm_connector_state *state, state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; - /* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled - * hot-plug, headless s3, dpms + /* Stream removed and re-enabled + * + * Can sometimes overlap with the HPD case, + * thus set update_hdcp to false to avoid + * setting HDCP multiple times. + * + * Handles: DESIRED -> DESIRED (Special case) + */ + if (!(old_state->crtc && old_state->crtc->enabled) && + state->crtc && state->crtc->enabled && + connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { + dm_con_state->update_hdcp = false; + return true; + } + + /* Hot-plug, headless s3, dpms + * + * Only start HDCP if the display is connected/enabled. + * update_hdcp flag will be set to false until the next + * HPD comes in. * * Handles: DESIRED -> DESIRED (Special case) */ @@ -8648,7 +8705,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, * If PSR or idle optimizations are enabled then flush out * any pending work before hardware programming. 
*/ - flush_workqueue(dm->vblank_control_workqueue); + if (dm->vblank_control_workqueue) + flush_workqueue(dm->vblank_control_workqueue); #endif bundle->stream_update.stream = acrtc_state->stream; @@ -8983,7 +9041,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) /* if there mode set or reset, disable eDP PSR */ if (mode_set_reset_required) { #if defined(CONFIG_DRM_AMD_DC_DCN) - flush_workqueue(dm->vblank_control_workqueue); + if (dm->vblank_control_workqueue) + flush_workqueue(dm->vblank_control_workqueue); #endif amdgpu_dm_psr_disable_all(dm); } @@ -10243,6 +10302,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, int ret, i; bool lock_and_validation_needed = false; struct dm_crtc_state *dm_old_crtc_state; +#if defined(CONFIG_DRM_AMD_DC_DCN) + struct dsc_mst_fairness_vars vars[MAX_PIPES]; +#endif trace_amdgpu_dm_atomic_check_begin(state); @@ -10473,10 +10535,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, goto fail; #if defined(CONFIG_DRM_AMD_DC_DCN) - if (!compute_mst_dsc_configs_for_state(state, dm_state->context)) + if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) goto fail; - ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context); + ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars); if (ret) goto fail; #endif @@ -10492,7 +10554,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, goto fail; status = dc_validate_global_state(dc, dm_state->context, false); if (status != DC_OK) { - DC_LOG_WARNING("DC global validation failure: %s (%d)", + drm_dbg_atomic(dev, + "DC global validation failure: %s (%d)", dc_status_to_str(status), status); ret = -EINVAL; goto fail; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 1bcba6943fd7..7af0d58c231b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -518,12 +518,7 @@ struct dsc_mst_fairness_params { uint32_t num_slices_h; uint32_t num_slices_v; uint32_t bpp_overwrite; -}; - -struct dsc_mst_fairness_vars { - int pbn; - bool dsc_enabled; - int bpp_x16; + struct amdgpu_dm_connector *aconnector; }; static int kbps_to_peak_pbn(int kbps) @@ -750,12 +745,12 @@ static void try_disable_dsc(struct drm_atomic_state *state, static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, struct dc_state *dc_state, - struct dc_link *dc_link) + struct dc_link *dc_link, + struct dsc_mst_fairness_vars *vars) { int i; struct dc_stream_state *stream; struct dsc_mst_fairness_params params[MAX_PIPES]; - struct dsc_mst_fairness_vars vars[MAX_PIPES]; struct amdgpu_dm_connector *aconnector; int count = 0; bool debugfs_overwrite = false; @@ -776,6 +771,7 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, params[count].timing = &stream->timing; params[count].sink = stream->sink; aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; + params[count].aconnector = aconnector; params[count].port = aconnector->port; params[count].clock_force_enable = aconnector->dsc_settings.dsc_force_enable; if (params[count].clock_force_enable == DSC_CLK_FORCE_ENABLE) @@ -798,6 +794,7 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, } /* Try no compression */ for (i = 0; i < count; i++) { + vars[i].aconnector = params[i].aconnector; vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps); 
vars[i].dsc_enabled = false; vars[i].bpp_x16 = 0; @@ -851,7 +848,8 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, } bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, - struct dc_state *dc_state) + struct dc_state *dc_state, + struct dsc_mst_fairness_vars *vars) { int i, j; struct dc_stream_state *stream; @@ -882,7 +880,7 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, return false; mutex_lock(&aconnector->mst_mgr.lock); - if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link)) { + if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars)) { mutex_unlock(&aconnector->mst_mgr.lock); return false; }
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h index b38bd68121ce..900d3f7a8498 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h @@ -39,8 +39,17 @@ void dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev); #if defined(CONFIG_DRM_AMD_DC_DCN) + +struct dsc_mst_fairness_vars { + int pbn; + bool dsc_enabled; + int bpp_x16; + struct amdgpu_dm_connector *aconnector; +}; + bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, - struct dc_state *dc_state); + struct dc_state *dc_state, + struct dsc_mst_fairness_vars *vars); #endif #endif
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c index c9f47d167472..b1bf80da3a55 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c @@ -62,7 +62,7 @@ inline void dc_assert_fp_enabled(void) depth = *pcpu; put_cpu_ptr(&fpu_recursion_depth); - ASSERT(depth > 1); + ASSERT(depth >= 1); } /**
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 8bd7f42a8053..1e44b13c1c7d 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -2586,13 +2586,21 @@ static struct abm *get_abm_from_stream_res(const struct dc_link *link) int dc_link_get_backlight_level(const struct dc_link *link) { struct abm *abm = get_abm_from_stream_res(link); + struct panel_cntl *panel_cntl = link->panel_cntl; + struct dc *dc = link->ctx->dc; + struct dmcu *dmcu = dc->res_pool->dmcu; + bool fw_set_brightness = true; - if (abm == NULL || abm->funcs->get_current_backlight == NULL) - return DC_ERROR_UNEXPECTED; + if (dmcu) + fw_set_brightness = dmcu->funcs->is_dmcu_initialized(dmcu); - return (int) abm->funcs->get_current_backlight(abm); + if (!fw_set_brightness && panel_cntl->funcs->get_current_backlight) + return panel_cntl->funcs->get_current_backlight(panel_cntl); + else if (abm != NULL && abm->funcs->get_current_backlight != NULL) + return (int) abm->funcs->get_current_backlight(abm); + else + return DC_ERROR_UNEXPECTED; } int dc_link_get_target_backlight_pwm(const struct dc_link *link)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 330edd666b7d..f6dbc5a74757 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -1,4 +1,26 @@ -/* Copyright 2015 Advanced Micro Devices, Inc. */ +/* + * Copyright 2015 Advanced Micro Devices, Inc.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + */ #include "dm_services.h" #include "dc.h" #include "dc_link_dp.h" @@ -1840,9 +1862,13 @@ bool perform_link_training_with_retries( dp_disable_link_phy(link, signal); /* Abort link training if failure due to sink being unplugged. */ - if (status == LINK_TRAINING_ABORT) - break; - else if (do_fallback) { + if (status == LINK_TRAINING_ABORT) { + enum dc_connection_type type = dc_connection_none; + + dc_link_detect_sink(link, &type); + if (type == dc_connection_none) + break; + } else if (do_fallback) { decide_fallback_link_setting(*link_setting, ¤t_setting, status); /* Fail link training if reduced link bandwidth no longer meets * stream requirements. diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c index e92339235863..e8570060d007 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c @@ -49,7 +49,6 @@ static unsigned int dce_get_16_bit_backlight_from_pwm(struct panel_cntl *panel_cntl) { uint64_t current_backlight; - uint32_t round_result; uint32_t bl_period, bl_int_count; uint32_t bl_pwm, fractional_duty_cycle_en; uint32_t bl_period_mask, bl_pwm_mask; @@ -84,15 +83,6 @@ static unsigned int dce_get_16_bit_backlight_from_pwm(struct panel_cntl *panel_c current_backlight = div_u64(current_backlight, bl_period); current_backlight = (current_backlight + 1) >> 1; - current_backlight = (uint64_t)(current_backlight) * bl_period; - - round_result = (uint32_t)(current_backlight & 0xFFFFFFFF); - - round_result = (round_result >> (bl_int_count-1)) & 1; - - current_backlight >>= bl_int_count; - current_backlight += round_result; - return (uint32_t)(current_backlight); } diff --git a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_cyan_skillfish.h b/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_cyan_skillfish.h index 8a08ecc34c69..4884a4e1f261 100644 --- a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_cyan_skillfish.h +++ b/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_cyan_skillfish.h @@ -33,63 +33,47 @@ #define TABLE_PMSTATUSLOG 3 // Called by Tools for Agm logging #define TABLE_DPMCLOCKS 4 // Called by Driver; defined here, but not used, for backward compatible #define TABLE_MOMENTARY_PM 5 // Called by Tools; defined here, but not used, for backward compatible -#define TABLE_COUNT 6 +#define TABLE_SMU_METRICS 6 // Called by Driver +#define TABLE_COUNT 7 -#define NUM_DSPCLK_LEVELS 8 
-#define NUM_SOCCLK_DPM_LEVELS 8 -#define NUM_DCEFCLK_DPM_LEVELS 4 -#define NUM_FCLK_DPM_LEVELS 4 -#define NUM_MEMCLK_DPM_LEVELS 4 +typedef struct SmuMetricsTable_t { + //CPU status + uint16_t CoreFrequency[6]; //[MHz] + uint32_t CorePower[6]; //[mW] + uint16_t CoreTemperature[6]; //[centi-Celsius] + uint16_t L3Frequency[2]; //[MHz] + uint16_t L3Temperature[2]; //[centi-Celsius] + uint16_t C0Residency[6]; //Percentage -#define NUMBER_OF_PSTATES 8 -#define NUMBER_OF_CORES 8 + // GFX status + uint16_t GfxclkFrequency; //[MHz] + uint16_t GfxTemperature; //[centi-Celsius] -typedef enum { - S3_TYPE_ENTRY, - S5_TYPE_ENTRY, -} Sleep_Type_e; + // SOC IP info + uint16_t SocclkFrequency; //[MHz] + uint16_t VclkFrequency; //[MHz] + uint16_t DclkFrequency; //[MHz] + uint16_t MemclkFrequency; //[MHz] -typedef enum { - GFX_OFF = 0, - GFX_ON = 1, -} GFX_Mode_e; + // power, VF info for CPU/GFX telemetry rails, and then socket power total + uint32_t Voltage[2]; //[mV] indices: VDDCR_VDD, VDDCR_GFX + uint32_t Current[2]; //[mA] indices: VDDCR_VDD, VDDCR_GFX + uint32_t Power[2]; //[mW] indices: VDDCR_VDD, VDDCR_GFX + uint32_t CurrentSocketPower; //[mW] -typedef enum { - CPU_P0 = 0, - CPU_P1, - CPU_P2, - CPU_P3, - CPU_P4, - CPU_P5, - CPU_P6, - CPU_P7 -} CPU_PState_e; + uint16_t SocTemperature; //[centi-Celsius] + uint16_t EdgeTemperature; + uint16_t ThrottlerStatus; + uint16_t Spare; -typedef enum { - CPU_CORE0 = 0, - CPU_CORE1, - CPU_CORE2, - CPU_CORE3, - CPU_CORE4, - CPU_CORE5, - CPU_CORE6, - CPU_CORE7 -} CORE_ID_e; +} SmuMetricsTable_t; -typedef enum { - DF_DPM0 = 0, - DF_DPM1, - DF_DPM2, - DF_DPM3, - DF_PState_Count -} DF_PState_e; - -typedef enum { - GFX_DPM0 = 0, - GFX_DPM1, - GFX_DPM2, - GFX_DPM3, - GFX_PState_Count -} GFX_PState_e; +typedef struct SmuMetrics_t { + SmuMetricsTable_t Current; + SmuMetricsTable_t Average; + uint32_t SampleStartTime; + uint32_t SampleStopTime; + uint32_t Accnt; +} SmuMetrics_t; #endif diff --git a/drivers/gpu/drm/amd/pm/inc/smu_types.h b/drivers/gpu/drm/amd/pm/inc/smu_types.h index 6f1b1b50d527..18b862a90fbe 100644 --- a/drivers/gpu/drm/amd/pm/inc/smu_types.h +++ b/drivers/gpu/drm/amd/pm/inc/smu_types.h @@ -226,7 +226,10 @@ __SMU_DUMMY_MAP(SetUclkDpmMode), \ __SMU_DUMMY_MAP(LightSBR), \ __SMU_DUMMY_MAP(GfxDriverResetRecovery), \ - __SMU_DUMMY_MAP(BoardPowerCalibration), + __SMU_DUMMY_MAP(BoardPowerCalibration), \ + __SMU_DUMMY_MAP(RequestGfxclk), \ + __SMU_DUMMY_MAP(ForceGfxVid), \ + __SMU_DUMMY_MAP(UnforceGfxVid), #undef __SMU_DUMMY_MAP #define __SMU_DUMMY_MAP(type) SMU_MSG_##type diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_8_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/smu_v11_8_ppsmc.h index 6e6088760b18..909a86aa60f3 100644 --- a/drivers/gpu/drm/amd/pm/inc/smu_v11_8_ppsmc.h +++ b/drivers/gpu/drm/amd/pm/inc/smu_v11_8_ppsmc.h @@ -65,6 +65,13 @@ #define PPSMC_MSG_SetDriverTableVMID 0x34 #define PPSMC_MSG_SetSoftMinCclk 0x35 #define PPSMC_MSG_SetSoftMaxCclk 0x36 -#define PPSMC_Message_Count 0x37 +#define PPSMC_MSG_GetGfxFrequency 0x37 +#define PPSMC_MSG_GetGfxVid 0x38 +#define PPSMC_MSG_ForceGfxFreq 0x39 +#define PPSMC_MSG_UnForceGfxFreq 0x3A +#define PPSMC_MSG_ForceGfxVid 0x3B +#define PPSMC_MSG_UnforceGfxVid 0x3C +#define PPSMC_MSG_GetEnabledSmuFeatures 0x3D +#define PPSMC_Message_Count 0x3E #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 3ab1ce4d3419..04863a797115 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -1404,7 +1404,7 @@ static int 
smu_disable_dpms(struct smu_context *smu) */ if (smu->uploading_custom_pp_table && (adev->asic_type >= CHIP_NAVI10) && - (adev->asic_type <= CHIP_DIMGREY_CAVEFISH)) + (adev->asic_type <= CHIP_BEIGE_GOBY)) return smu_disable_all_features_with_exception(smu, true, SMU_FEATURE_COUNT); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c index e343cc218990..082f01893f3d 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c @@ -771,8 +771,12 @@ static int arcturus_print_clk_levels(struct smu_context *smu, struct smu_11_0_dpm_context *dpm_context = NULL; uint32_t gen_speed, lane_width; - if (amdgpu_ras_intr_triggered()) - return sysfs_emit(buf, "unavailable\n"); + smu_cmn_get_sysfs_buf(&buf, &size); + + if (amdgpu_ras_intr_triggered()) { + size += sysfs_emit_at(buf, size, "unavailable\n"); + return size; + } dpm_context = smu_dpm->dpm_context; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c index b05f9541accc..3d4c65bc29dc 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c @@ -44,6 +44,27 @@ #undef pr_info #undef pr_debug +/* unit: MHz */ +#define CYAN_SKILLFISH_SCLK_MIN 1000 +#define CYAN_SKILLFISH_SCLK_MAX 2000 +#define CYAN_SKILLFISH_SCLK_DEFAULT 1800 + +/* unit: mV */ +#define CYAN_SKILLFISH_VDDC_MIN 700 +#define CYAN_SKILLFISH_VDDC_MAX 1129 +#define CYAN_SKILLFISH_VDDC_MAGIC 5118 // 0x13fe + +static struct gfx_user_settings { + uint32_t sclk; + uint32_t vddc; +} cyan_skillfish_user_settings; + +#define FEATURE_MASK(feature) (1ULL << feature) +#define SMC_DPM_FEATURE ( \ + FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \ + FEATURE_MASK(FEATURE_SOC_DPM_BIT) | \ + FEATURE_MASK(FEATURE_GFX_DPM_BIT)) + static struct cmn2asic_msg_mapping cyan_skillfish_message_map[SMU_MSG_MAX_COUNT] = { MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0), MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 0), @@ -52,14 +73,473 @@ static struct cmn2asic_msg_mapping cyan_skillfish_message_map[SMU_MSG_MAX_COUNT] MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverTableDramAddrLow, 0), MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 0), MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0), + MSG_MAP(GetEnabledSmuFeatures, PPSMC_MSG_GetEnabledSmuFeatures, 0), + MSG_MAP(RequestGfxclk, PPSMC_MSG_RequestGfxclk, 0), + MSG_MAP(ForceGfxVid, PPSMC_MSG_ForceGfxVid, 0), + MSG_MAP(UnforceGfxVid, PPSMC_MSG_UnforceGfxVid, 0), +}; + +static struct cmn2asic_mapping cyan_skillfish_table_map[SMU_TABLE_COUNT] = { + TAB_MAP_VALID(SMU_METRICS), }; +static int cyan_skillfish_tables_init(struct smu_context *smu) +{ + struct smu_table_context *smu_table = &smu->smu_table; + struct smu_table *tables = smu_table->tables; + + SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, + sizeof(SmuMetrics_t), + PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM); + + smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL); + if (!smu_table->metrics_table) + goto err0_out; + + smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2); + smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); + if (!smu_table->gpu_metrics_table) + goto err1_out; + + smu_table->metrics_time = 0; + + return 0; + +err1_out: + smu_table->gpu_metrics_table_size = 0; + kfree(smu_table->metrics_table); +err0_out: + return -ENOMEM; +} + +static int 
cyan_skillfish_init_smc_tables(struct smu_context *smu) +{ + int ret = 0; + + ret = cyan_skillfish_tables_init(smu); + if (ret) + return ret; + + return smu_v11_0_init_smc_tables(smu); +} + +static int cyan_skillfish_finit_smc_tables(struct smu_context *smu) +{ + struct smu_table_context *smu_table = &smu->smu_table; + + kfree(smu_table->metrics_table); + smu_table->metrics_table = NULL; + + kfree(smu_table->gpu_metrics_table); + smu_table->gpu_metrics_table = NULL; + smu_table->gpu_metrics_table_size = 0; + + smu_table->metrics_time = 0; + + return 0; +} + +static int +cyan_skillfish_get_smu_metrics_data(struct smu_context *smu, + MetricsMember_t member, + uint32_t *value) +{ + struct smu_table_context *smu_table = &smu->smu_table; + SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table; + int ret = 0; + + mutex_lock(&smu->metrics_lock); + + ret = smu_cmn_get_metrics_table_locked(smu, NULL, false); + if (ret) { + mutex_unlock(&smu->metrics_lock); + return ret; + } + + switch (member) { + case METRICS_CURR_GFXCLK: + *value = metrics->Current.GfxclkFrequency; + break; + case METRICS_CURR_SOCCLK: + *value = metrics->Current.SocclkFrequency; + break; + case METRICS_CURR_VCLK: + *value = metrics->Current.VclkFrequency; + break; + case METRICS_CURR_DCLK: + *value = metrics->Current.DclkFrequency; + break; + case METRICS_CURR_UCLK: + *value = metrics->Current.MemclkFrequency; + break; + case METRICS_AVERAGE_SOCKETPOWER: + *value = (metrics->Current.CurrentSocketPower << 8) / + 1000; + break; + case METRICS_TEMPERATURE_EDGE: + *value = metrics->Current.GfxTemperature / 100 * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case METRICS_TEMPERATURE_HOTSPOT: + *value = metrics->Current.SocTemperature / 100 * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case METRICS_VOLTAGE_VDDSOC: + *value = metrics->Current.Voltage[0]; + break; + case METRICS_VOLTAGE_VDDGFX: + *value = metrics->Current.Voltage[1]; + break; + case METRICS_THROTTLER_STATUS: + *value = metrics->Current.ThrottlerStatus; + break; + default: + *value = UINT_MAX; + break; + } + + mutex_unlock(&smu->metrics_lock); + + return ret; +} + +static int cyan_skillfish_read_sensor(struct smu_context *smu, + enum amd_pp_sensors sensor, + void *data, + uint32_t *size) +{ + int ret = 0; + + if (!data || !size) + return -EINVAL; + + mutex_lock(&smu->sensor_lock); + + switch (sensor) { + case AMDGPU_PP_SENSOR_GFX_SCLK: + ret = cyan_skillfish_get_smu_metrics_data(smu, + METRICS_CURR_GFXCLK, + (uint32_t *)data); + *(uint32_t *)data *= 100; + *size = 4; + break; + case AMDGPU_PP_SENSOR_GFX_MCLK: + ret = cyan_skillfish_get_smu_metrics_data(smu, + METRICS_CURR_UCLK, + (uint32_t *)data); + *(uint32_t *)data *= 100; + *size = 4; + break; + case AMDGPU_PP_SENSOR_GPU_POWER: + ret = cyan_skillfish_get_smu_metrics_data(smu, + METRICS_AVERAGE_SOCKETPOWER, + (uint32_t *)data); + *size = 4; + break; + case AMDGPU_PP_SENSOR_HOTSPOT_TEMP: + ret = cyan_skillfish_get_smu_metrics_data(smu, + METRICS_TEMPERATURE_HOTSPOT, + (uint32_t *)data); + *size = 4; + break; + case AMDGPU_PP_SENSOR_EDGE_TEMP: + ret = cyan_skillfish_get_smu_metrics_data(smu, + METRICS_TEMPERATURE_EDGE, + (uint32_t *)data); + *size = 4; + break; + case AMDGPU_PP_SENSOR_VDDNB: + ret = cyan_skillfish_get_smu_metrics_data(smu, + METRICS_VOLTAGE_VDDSOC, + (uint32_t *)data); + *size = 4; + break; + case AMDGPU_PP_SENSOR_VDDGFX: + ret = cyan_skillfish_get_smu_metrics_data(smu, + METRICS_VOLTAGE_VDDGFX, + (uint32_t *)data); + *size = 4; + break; + default: + ret = -EOPNOTSUPP; + break; + 
} + + mutex_unlock(&smu->sensor_lock); + + return ret; +} + +static int cyan_skillfish_get_current_clk_freq(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t *value) +{ + MetricsMember_t member_type; + + switch (clk_type) { + case SMU_GFXCLK: + case SMU_SCLK: + member_type = METRICS_CURR_GFXCLK; + break; + case SMU_FCLK: + case SMU_MCLK: + member_type = METRICS_CURR_UCLK; + break; + case SMU_SOCCLK: + member_type = METRICS_CURR_SOCCLK; + break; + case SMU_VCLK: + member_type = METRICS_CURR_VCLK; + break; + case SMU_DCLK: + member_type = METRICS_CURR_DCLK; + break; + default: + return -EINVAL; + } + + return cyan_skillfish_get_smu_metrics_data(smu, member_type, value); +} + +static int cyan_skillfish_print_clk_levels(struct smu_context *smu, + enum smu_clk_type clk_type, + char *buf) +{ + int ret = 0, size = 0; + uint32_t cur_value = 0; + + smu_cmn_get_sysfs_buf(&buf, &size); + + switch (clk_type) { + case SMU_OD_SCLK: + ret = cyan_skillfish_get_smu_metrics_data(smu, METRICS_CURR_GFXCLK, &cur_value); + if (ret) + return ret; + size += sysfs_emit_at(buf, size,"%s:\n", "OD_SCLK"); + size += sysfs_emit_at(buf, size, "0: %uMhz *\n", cur_value); + break; + case SMU_OD_VDDC_CURVE: + ret = cyan_skillfish_get_smu_metrics_data(smu, METRICS_VOLTAGE_VDDGFX, &cur_value); + if (ret) + return ret; + size += sysfs_emit_at(buf, size,"%s:\n", "OD_VDDC"); + size += sysfs_emit_at(buf, size, "0: %umV *\n", cur_value); + break; + case SMU_OD_RANGE: + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); + size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", + CYAN_SKILLFISH_SCLK_MIN, CYAN_SKILLFISH_SCLK_MAX); + size += sysfs_emit_at(buf, size, "VDDC: %7umV %10umV\n", + CYAN_SKILLFISH_VDDC_MIN, CYAN_SKILLFISH_VDDC_MAX); + break; + case SMU_GFXCLK: + case SMU_SCLK: + case SMU_FCLK: + case SMU_MCLK: + case SMU_SOCCLK: + case SMU_VCLK: + case SMU_DCLK: + ret = cyan_skillfish_get_current_clk_freq(smu, clk_type, &cur_value); + if (ret) + return ret; + size += sysfs_emit_at(buf, size, "0: %uMhz *\n", cur_value); + break; + default: + dev_warn(smu->adev->dev, "Unsupported clock type\n"); + return ret; + } + + return size; +} + +static bool cyan_skillfish_is_dpm_running(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + int ret = 0; + uint32_t feature_mask[2]; + uint64_t feature_enabled; + + /* we need to re-init after suspend so return false */ + if (adev->in_suspend) + return false; + + ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2); + + if (ret) + return false; + + feature_enabled = (uint64_t)feature_mask[0] | + ((uint64_t)feature_mask[1] << 32); + + return !!(feature_enabled & SMC_DPM_FEATURE); +} + +static ssize_t cyan_skillfish_get_gpu_metrics(struct smu_context *smu, + void **table) +{ + struct smu_table_context *smu_table = &smu->smu_table; + struct gpu_metrics_v2_2 *gpu_metrics = + (struct gpu_metrics_v2_2 *)smu_table->gpu_metrics_table; + SmuMetrics_t metrics; + int i, ret = 0; + + ret = smu_cmn_get_metrics_table(smu, &metrics, true); + if (ret) + return ret; + + smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 2); + + gpu_metrics->temperature_gfx = metrics.Current.GfxTemperature; + gpu_metrics->temperature_soc = metrics.Current.SocTemperature; + + gpu_metrics->average_socket_power = metrics.Current.CurrentSocketPower; + gpu_metrics->average_soc_power = metrics.Current.Power[0]; + gpu_metrics->average_gfx_power = metrics.Current.Power[1]; + + gpu_metrics->average_gfxclk_frequency = metrics.Average.GfxclkFrequency; + gpu_metrics->average_socclk_frequency = 
metrics.Average.SocclkFrequency; + gpu_metrics->average_uclk_frequency = metrics.Average.MemclkFrequency; + gpu_metrics->average_fclk_frequency = metrics.Average.MemclkFrequency; + gpu_metrics->average_vclk_frequency = metrics.Average.VclkFrequency; + gpu_metrics->average_dclk_frequency = metrics.Average.DclkFrequency; + + gpu_metrics->current_gfxclk = metrics.Current.GfxclkFrequency; + gpu_metrics->current_socclk = metrics.Current.SocclkFrequency; + gpu_metrics->current_uclk = metrics.Current.MemclkFrequency; + gpu_metrics->current_fclk = metrics.Current.MemclkFrequency; + gpu_metrics->current_vclk = metrics.Current.VclkFrequency; + gpu_metrics->current_dclk = metrics.Current.DclkFrequency; + + for (i = 0; i < 6; i++) { + gpu_metrics->temperature_core[i] = metrics.Current.CoreTemperature[i]; + gpu_metrics->average_core_power[i] = metrics.Average.CorePower[i]; + gpu_metrics->current_coreclk[i] = metrics.Current.CoreFrequency[i]; + } + + for (i = 0; i < 2; i++) { + gpu_metrics->temperature_l3[i] = metrics.Current.L3Temperature[i]; + gpu_metrics->current_l3clk[i] = metrics.Current.L3Frequency[i]; + } + + gpu_metrics->throttle_status = metrics.Current.ThrottlerStatus; + gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); + + *table = (void *)gpu_metrics; + + return sizeof(struct gpu_metrics_v2_2); +} + +static int cyan_skillfish_od_edit_dpm_table(struct smu_context *smu, + enum PP_OD_DPM_TABLE_COMMAND type, + long input[], uint32_t size) +{ + int ret = 0; + uint32_t vid; + + switch (type) { + case PP_OD_EDIT_VDDC_CURVE: + if (size != 3 || input[0] != 0) { + dev_err(smu->adev->dev, "Invalid parameter!\n"); + return -EINVAL; + } + + if (input[1] <= CYAN_SKILLFISH_SCLK_MIN || + input[1] > CYAN_SKILLFISH_SCLK_MAX) { + dev_err(smu->adev->dev, "Invalid sclk! Valid sclk range: %uMHz - %uMhz\n", + CYAN_SKILLFISH_SCLK_MIN, CYAN_SKILLFISH_SCLK_MAX); + return -EINVAL; + } + + if (input[2] <= CYAN_SKILLFISH_VDDC_MIN || + input[2] > CYAN_SKILLFISH_VDDC_MAX) { + dev_err(smu->adev->dev, "Invalid vddc! Valid vddc range: %umV - %umV\n", + CYAN_SKILLFISH_VDDC_MIN, CYAN_SKILLFISH_VDDC_MAX); + return -EINVAL; + } + + cyan_skillfish_user_settings.sclk = input[1]; + cyan_skillfish_user_settings.vddc = input[2]; + + break; + case PP_OD_RESTORE_DEFAULT_TABLE: + if (size != 0) { + dev_err(smu->adev->dev, "Invalid parameter!\n"); + return -EINVAL; + } + + cyan_skillfish_user_settings.sclk = CYAN_SKILLFISH_SCLK_DEFAULT; + cyan_skillfish_user_settings.vddc = CYAN_SKILLFISH_VDDC_MAGIC; + + break; + case PP_OD_COMMIT_DPM_TABLE: + if (size != 0) { + dev_err(smu->adev->dev, "Invalid parameter!\n"); + return -EINVAL; + } + + if (cyan_skillfish_user_settings.sclk < CYAN_SKILLFISH_SCLK_MIN || + cyan_skillfish_user_settings.sclk > CYAN_SKILLFISH_SCLK_MAX) { + dev_err(smu->adev->dev, "Invalid sclk! Valid sclk range: %uMHz - %uMhz\n", + CYAN_SKILLFISH_SCLK_MIN, CYAN_SKILLFISH_SCLK_MAX); + return -EINVAL; + } + + if ((cyan_skillfish_user_settings.vddc != CYAN_SKILLFISH_VDDC_MAGIC) && + (cyan_skillfish_user_settings.vddc < CYAN_SKILLFISH_VDDC_MIN || + cyan_skillfish_user_settings.vddc > CYAN_SKILLFISH_VDDC_MAX)) { + dev_err(smu->adev->dev, "Invalid vddc! 
Valid vddc range: %umV - %umV\n", + CYAN_SKILLFISH_VDDC_MIN, CYAN_SKILLFISH_VDDC_MAX); + return -EINVAL; + } + + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RequestGfxclk, + cyan_skillfish_user_settings.sclk, NULL); + if (ret) { + dev_err(smu->adev->dev, "Set sclk failed!\n"); + return ret; + } + + if (cyan_skillfish_user_settings.vddc == CYAN_SKILLFISH_VDDC_MAGIC) { + ret = smu_cmn_send_smc_msg(smu, SMU_MSG_UnforceGfxVid, NULL); + if (ret) { + dev_err(smu->adev->dev, "Unforce vddc failed!\n"); + return ret; + } + } else { + /* + * PMFW accepts SVI2 VID code, convert voltage to VID: + * vid = (uint32_t)((1.55 - voltage) * 160.0 + 0.00001) + */ + vid = (1550 - cyan_skillfish_user_settings.vddc) * 160 / 1000; + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ForceGfxVid, vid, NULL); + if (ret) { + dev_err(smu->adev->dev, "Force vddc failed!\n"); + return ret; + } + } + + break; + default: + return -EOPNOTSUPP; + } + + return ret; +} + static const struct pptable_funcs cyan_skillfish_ppt_funcs = { .check_fw_status = smu_v11_0_check_fw_status, .check_fw_version = smu_v11_0_check_fw_version, .init_power = smu_v11_0_init_power, .fini_power = smu_v11_0_fini_power, + .init_smc_tables = cyan_skillfish_init_smc_tables, + .fini_smc_tables = cyan_skillfish_finit_smc_tables, + .read_sensor = cyan_skillfish_read_sensor, + .print_clk_levels = cyan_skillfish_print_clk_levels, + .is_dpm_running = cyan_skillfish_is_dpm_running, + .get_gpu_metrics = cyan_skillfish_get_gpu_metrics, + .od_edit_dpm_table = cyan_skillfish_od_edit_dpm_table, .register_irq_handler = smu_v11_0_register_irq_handler, .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location, .send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param, @@ -72,5 +552,6 @@ void cyan_skillfish_set_ppt_funcs(struct smu_context *smu) { smu->ppt_funcs = &cyan_skillfish_ppt_funcs; smu->message_map = cyan_skillfish_message_map; + smu->table_map = cyan_skillfish_table_map; smu->is_apu = true; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index a5fc5d7cb6c7..b1ad451af06b 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -1279,6 +1279,8 @@ static int navi10_print_clk_levels(struct smu_context *smu, struct smu_11_0_overdrive_table *od_settings = smu->od_settings; uint32_t min_value, max_value; + smu_cmn_get_sysfs_buf(&buf, &size); + switch (clk_type) { case SMU_GFXCLK: case SMU_SCLK: @@ -1392,7 +1394,7 @@ static int navi10_print_clk_levels(struct smu_context *smu, case SMU_OD_RANGE: if (!smu->od_enabled || !od_table || !od_settings) break; - size = sysfs_emit(buf, "%s:\n", "OD_RANGE"); + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS)) { navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMIN, @@ -2272,7 +2274,27 @@ static int navi10_baco_enter(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - if (adev->in_runpm) + /* + * This aims the case below: + * amdgpu driver loaded -> runpm suspend kicked -> sound driver loaded + * + * For NAVI10 and later ASICs, we rely on PMFW to handle the runpm. To + * make that possible, PMFW needs to acknowledge the dstate transition + * process for both gfx(function 0) and audio(function 1) function of + * the ASIC. + * + * The PCI device's initial runpm status is RUNPM_SUSPENDED. So as the + * device representing the audio function of the ASIC. 
And that means + * even if the sound driver(snd_hda_intel) was not loaded yet, it's still + * possible runpm suspend kicked on the ASIC. However without the dstate + * transition notification from audio function, pmfw cannot handle the + * BACO in/exit correctly. And that will cause driver hang on runpm + * resuming. + * + * To address this, we revert to legacy message way(driver masters the + * timing for BACO in/exit) on sound driver missing. + */ + if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO); else return smu_v11_0_baco_enter(smu); @@ -2282,7 +2304,7 @@ static int navi10_baco_exit(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - if (adev->in_runpm) { + if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) { /* Wait for PMFW handling for the Dstate change */ msleep(10); return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 5e292c3f5050..ca57221e3962 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -1058,6 +1058,8 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu, uint32_t min_value, max_value; uint32_t smu_version; + smu_cmn_get_sysfs_buf(&buf, &size); + switch (clk_type) { case SMU_GFXCLK: case SMU_SCLK: @@ -1180,7 +1182,7 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu, if (!smu->od_enabled || !od_table || !od_settings) break; - size = sysfs_emit(buf, "%s:\n", "OD_RANGE"); + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); if (sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_GFXCLK_LIMITS)) { sienna_cichlid_get_od_setting_range(od_settings, SMU_11_0_7_ODSETTING_GFXCLKFMIN, @@ -2187,7 +2189,7 @@ static int sienna_cichlid_baco_enter(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - if (adev->in_runpm) + if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO); else return smu_v11_0_baco_enter(smu); @@ -2197,7 +2199,7 @@ static int sienna_cichlid_baco_exit(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - if (adev->in_runpm) { + if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) { /* Wait for PMFW handling for the Dstate change */ msleep(10); return smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c index 3a3421452e57..f6ef0ce6e9e2 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c @@ -589,10 +589,12 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu, if (ret) return ret; + smu_cmn_get_sysfs_buf(&buf, &size); + switch (clk_type) { case SMU_OD_SCLK: if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { - size = sysfs_emit(buf, "%s:\n", "OD_SCLK"); + size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); size += sysfs_emit_at(buf, size, "0: %10uMhz\n", (smu->gfx_actual_hard_min_freq > 0) ? 
smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq); size += sysfs_emit_at(buf, size, "1: %10uMhz\n", @@ -601,7 +603,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu, break; case SMU_OD_CCLK: if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { - size = sysfs_emit(buf, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select); + size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select); size += sysfs_emit_at(buf, size, "0: %10uMhz\n", (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq); size += sysfs_emit_at(buf, size, "1: %10uMhz\n", @@ -610,7 +612,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu, break; case SMU_OD_RANGE: if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { - size = sysfs_emit(buf, "%s:\n", "OD_RANGE"); + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq); size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n", @@ -688,10 +690,12 @@ static int vangogh_print_clk_levels(struct smu_context *smu, if (ret) return ret; + smu_cmn_get_sysfs_buf(&buf, &size); + switch (clk_type) { case SMU_OD_SCLK: if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { - size = sysfs_emit(buf, "%s:\n", "OD_SCLK"); + size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); size += sysfs_emit_at(buf, size, "0: %10uMhz\n", (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq); size += sysfs_emit_at(buf, size, "1: %10uMhz\n", @@ -700,7 +704,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu, break; case SMU_OD_CCLK: if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { - size = sysfs_emit(buf, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select); + size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select); size += sysfs_emit_at(buf, size, "0: %10uMhz\n", (smu->cpu_actual_soft_min_freq > 0) ? 
smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq); size += sysfs_emit_at(buf, size, "1: %10uMhz\n", @@ -709,7 +713,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu, break; case SMU_OD_RANGE: if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { - size = sysfs_emit(buf, "%s:\n", "OD_RANGE"); + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq); size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n", diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c index 5aa175e12a78..145f13b8c977 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c @@ -497,6 +497,8 @@ static int renoir_print_clk_levels(struct smu_context *smu, if (ret) return ret; + smu_cmn_get_sysfs_buf(&buf, &size); + switch (clk_type) { case SMU_OD_RANGE: if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c index ab652028e003..5019903db492 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c @@ -733,15 +733,19 @@ static int aldebaran_print_clk_levels(struct smu_context *smu, uint32_t freq_values[3] = {0}; uint32_t min_clk, max_clk; - if (amdgpu_ras_intr_triggered()) - return sysfs_emit(buf, "unavailable\n"); + smu_cmn_get_sysfs_buf(&buf, &size); + + if (amdgpu_ras_intr_triggered()) { + size += sysfs_emit_at(buf, size, "unavailable\n"); + return size; + } dpm_context = smu_dpm->dpm_context; switch (type) { case SMU_OD_SCLK: - size = sysfs_emit(buf, "%s:\n", "GFXCLK"); + size += sysfs_emit_at(buf, size, "%s:\n", "GFXCLK"); fallthrough; case SMU_SCLK: ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_GFXCLK, &now); @@ -795,7 +799,7 @@ static int aldebaran_print_clk_levels(struct smu_context *smu, break; case SMU_OD_MCLK: - size = sysfs_emit(buf, "%s:\n", "MCLK"); + size += sysfs_emit_at(buf, size, "%s:\n", "MCLK"); fallthrough; case SMU_MCLK: ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_UCLK, &now); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c index 627ba2eec7fd..a403657151ba 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c @@ -1052,16 +1052,18 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu, int i, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0; + smu_cmn_get_sysfs_buf(&buf, &size); + switch (clk_type) { case SMU_OD_SCLK: - size = sysfs_emit(buf, "%s:\n", "OD_SCLK"); + size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); size += sysfs_emit_at(buf, size, "0: %10uMhz\n", (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq); size += sysfs_emit_at(buf, size, "1: %10uMhz\n", (smu->gfx_actual_soft_max_freq > 0) ? 
smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq); break; case SMU_OD_RANGE: - size = sysfs_emit(buf, "%s:\n", "OD_RANGE"); + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq); break; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c index 66711ab24c15..843d2cbfc71d 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c @@ -1053,3 +1053,24 @@ int smu_cmn_set_mp1_state(struct smu_context *smu, return ret; } + +bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev) +{ + struct pci_dev *p = NULL; + bool snd_driver_loaded; + + /* + * If the ASIC comes with no audio function, we always assume + * it is "enabled". + */ + p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus), + adev->pdev->bus->number, 1); + if (!p) + return true; + + snd_driver_loaded = pci_is_enabled(p) ? true : false; + + pci_dev_put(p); + + return snd_driver_loaded; +} diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h index 16993daa2ae0..beea03810bca 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h @@ -110,5 +110,20 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev); int smu_cmn_set_mp1_state(struct smu_context *smu, enum pp_mp1_state mp1_state); +/* + * Helper function to make sysfs_emit_at() happy. Align buf to + * the current page boundary and record the offset. + */ +static inline void smu_cmn_get_sysfs_buf(char **buf, int *offset) +{ + if (!*buf || !offset) + return; + + *offset = offset_in_page(*buf); + *buf -= *offset; +} + +bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev); + #endif #endif diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c index 76d38561c910..cf741c5c82d2 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c @@ -397,8 +397,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state, if (switch_mmu_context) { struct etnaviv_iommu_context *old_context = gpu->mmu_context; - etnaviv_iommu_context_get(mmu_context); - gpu->mmu_context = mmu_context; + gpu->mmu_context = etnaviv_iommu_context_get(mmu_context); etnaviv_iommu_context_put(old_context); } diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index 8f1b5af47dd6..f0b2540e60e4 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -294,8 +294,7 @@ struct etnaviv_vram_mapping *etnaviv_gem_mapping_get( list_del(&mapping->obj_node); } - etnaviv_iommu_context_get(mmu_context); - mapping->context = mmu_context; + mapping->context = etnaviv_iommu_context_get(mmu_context); mapping->use = 1; ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj, diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c index 4dd7d9d541c0..486259e154af 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c @@ -532,8 +532,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data, goto err_submit_objects; submit->ctx = file->driver_priv; - etnaviv_iommu_context_get(submit->ctx->mmu); - submit->mmu_context = submit->ctx->mmu; + submit->mmu_context = etnaviv_iommu_context_get(submit->ctx->mmu); submit->exec_state = 
args->exec_state; submit->flags = args->flags; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c index c297fffe06eb..cc5b07f86346 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c @@ -569,6 +569,12 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu) /* We rely on the GPU running, so program the clock */ etnaviv_gpu_update_clock(gpu); + gpu->fe_running = false; + gpu->exec_state = -1; + if (gpu->mmu_context) + etnaviv_iommu_context_put(gpu->mmu_context); + gpu->mmu_context = NULL; + return 0; } @@ -637,19 +643,23 @@ void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch) VIVS_MMUv2_SEC_COMMAND_CONTROL_ENABLE | VIVS_MMUv2_SEC_COMMAND_CONTROL_PREFETCH(prefetch)); } + + gpu->fe_running = true; } -static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu) +static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu, + struct etnaviv_iommu_context *context) { - u32 address = etnaviv_cmdbuf_get_va(&gpu->buffer, - &gpu->mmu_context->cmdbuf_mapping); u16 prefetch; + u32 address; /* setup the MMU */ - etnaviv_iommu_restore(gpu, gpu->mmu_context); + etnaviv_iommu_restore(gpu, context); /* Start command processor */ prefetch = etnaviv_buffer_init(gpu); + address = etnaviv_cmdbuf_get_va(&gpu->buffer, + &gpu->mmu_context->cmdbuf_mapping); etnaviv_gpu_start_fe(gpu, address, prefetch); } @@ -832,7 +842,6 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu) /* Now program the hardware */ mutex_lock(&gpu->lock); etnaviv_gpu_hw_init(gpu); - gpu->exec_state = -1; mutex_unlock(&gpu->lock); pm_runtime_mark_last_busy(gpu->dev); @@ -1057,8 +1066,6 @@ void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu) spin_unlock(&gpu->event_spinlock); etnaviv_gpu_hw_init(gpu); - gpu->exec_state = -1; - gpu->mmu_context = NULL; mutex_unlock(&gpu->lock); pm_runtime_mark_last_busy(gpu->dev); @@ -1370,14 +1377,12 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit) goto out_unlock; } - if (!gpu->mmu_context) { - etnaviv_iommu_context_get(submit->mmu_context); - gpu->mmu_context = submit->mmu_context; - etnaviv_gpu_start_fe_idleloop(gpu); - } else { - etnaviv_iommu_context_get(gpu->mmu_context); - submit->prev_mmu_context = gpu->mmu_context; - } + if (!gpu->fe_running) + etnaviv_gpu_start_fe_idleloop(gpu, submit->mmu_context); + + if (submit->prev_mmu_context) + etnaviv_iommu_context_put(submit->prev_mmu_context); + submit->prev_mmu_context = etnaviv_iommu_context_get(gpu->mmu_context); if (submit->nr_pmrs) { gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre; @@ -1579,7 +1584,7 @@ int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms) static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu) { - if (gpu->initialized && gpu->mmu_context) { + if (gpu->initialized && gpu->fe_running) { /* Replace the last WAIT with END */ mutex_lock(&gpu->lock); etnaviv_buffer_end(gpu); @@ -1592,8 +1597,7 @@ static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu) */ etnaviv_gpu_wait_idle(gpu, 100); - etnaviv_iommu_context_put(gpu->mmu_context); - gpu->mmu_context = NULL; + gpu->fe_running = false; } gpu->exec_state = -1; @@ -1741,6 +1745,9 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master, etnaviv_gpu_hw_suspend(gpu); #endif + if (gpu->mmu_context) + etnaviv_iommu_context_put(gpu->mmu_context); + if (gpu->initialized) { etnaviv_cmdbuf_free(&gpu->buffer); etnaviv_iommu_global_fini(gpu); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h 
b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h index 8ea48697d132..1c75c8ed5bce 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h @@ -101,6 +101,7 @@ struct etnaviv_gpu { struct workqueue_struct *wq; struct drm_gpu_scheduler sched; bool initialized; + bool fe_running; /* 'ring'-buffer: */ struct etnaviv_cmdbuf buffer; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c index 1a7c89a67bea..afe5dd6a9925 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c @@ -92,6 +92,10 @@ static void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu, struct etnaviv_iommuv1_context *v1_context = to_v1_context(context); u32 pgtable; + if (gpu->mmu_context) + etnaviv_iommu_context_put(gpu->mmu_context); + gpu->mmu_context = etnaviv_iommu_context_get(context); + /* set base addresses */ gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, context->global->memory_base); gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, context->global->memory_base); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c index f8bf488e9d71..d664ae29ae20 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c @@ -172,6 +172,10 @@ static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu, if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE) return; + if (gpu->mmu_context) + etnaviv_iommu_context_put(gpu->mmu_context); + gpu->mmu_context = etnaviv_iommu_context_get(context); + prefetch = etnaviv_buffer_config_mmuv2(gpu, (u32)v2_context->mtlb_dma, (u32)context->global->bad_page_dma); @@ -192,6 +196,10 @@ static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu, if (gpu_read(gpu, VIVS_MMUv2_SEC_CONTROL) & VIVS_MMUv2_SEC_CONTROL_ENABLE) return; + if (gpu->mmu_context) + etnaviv_iommu_context_put(gpu->mmu_context); + gpu->mmu_context = etnaviv_iommu_context_get(context); + gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_LOW, lower_32_bits(context->global->v2.pta_dma)); gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_HIGH, diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c index dab1b58006d8..9fb1a2aadbcb 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c @@ -199,6 +199,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context, */ list_for_each_entry_safe(m, n, &list, scan_node) { etnaviv_iommu_remove_mapping(context, m); + etnaviv_iommu_context_put(m->context); m->context = NULL; list_del_init(&m->mmu_node); list_del_init(&m->scan_node); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h index d1d6902fd13b..e4a0b7d09c2e 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h @@ -105,9 +105,11 @@ void etnaviv_iommu_dump(struct etnaviv_iommu_context *ctx, void *buf); struct etnaviv_iommu_context * etnaviv_iommu_context_init(struct etnaviv_iommu_global *global, struct etnaviv_cmdbuf_suballoc *suballoc); -static inline void etnaviv_iommu_context_get(struct etnaviv_iommu_context *ctx) +static inline struct etnaviv_iommu_context * +etnaviv_iommu_context_get(struct etnaviv_iommu_context *ctx) { kref_get(&ctx->refcount); + return ctx; } void etnaviv_iommu_context_put(struct etnaviv_iommu_context *ctx); void etnaviv_iommu_restore(struct etnaviv_gpu *gpu, diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 
642a5b5a1b81..335ba9f43d8f 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -19,7 +19,6 @@ subdir-ccflags-y += $(call cc-disable-warning, missing-field-initializers) subdir-ccflags-y += $(call cc-disable-warning, unused-but-set-variable) # clang warnings subdir-ccflags-y += $(call cc-disable-warning, sign-compare) -subdir-ccflags-y += $(call cc-disable-warning, sometimes-uninitialized) subdir-ccflags-y += $(call cc-disable-warning, initializer-overrides) subdir-ccflags-y += $(call cc-disable-warning, frame-address) subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 04175f359fd6..abe3d61b6243 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -2445,11 +2445,14 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp) */ if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV, intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) == - sizeof(intel_dp->edp_dpcd)) + sizeof(intel_dp->edp_dpcd)) { drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n", (int)sizeof(intel_dp->edp_dpcd), intel_dp->edp_dpcd); + intel_dp->use_max_params = intel_dp->edp_dpcd[0] < DP_EDP_14; + } + /* * This has to be called after intel_dp->edp_dpcd is filled, PSR checks * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1] diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c index 053a3c2f7267..508a514c5e37 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c @@ -848,7 +848,7 @@ intel_dp_link_train_all_phys(struct intel_dp *intel_dp, } if (ret) - intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX); + ret = intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX); if (intel_dp->set_idle_link_train) intel_dp->set_idle_link_train(intel_dp, crtc_state); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index cff72679ad7c..9ccf4b29b82e 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -986,6 +986,9 @@ void i915_gem_context_release(struct kref *ref) trace_i915_context_free(ctx); GEM_BUG_ON(!i915_gem_context_is_closed(ctx)); + if (ctx->syncobj) + drm_syncobj_put(ctx->syncobj); + mutex_destroy(&ctx->engines_mutex); mutex_destroy(&ctx->lut_mutex); @@ -1205,9 +1208,6 @@ static void context_close(struct i915_gem_context *ctx) if (vm) i915_vm_close(vm); - if (ctx->syncobj) - drm_syncobj_put(ctx->syncobj); - ctx->file_priv = ERR_PTR(-EBADF); /* diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c index ffae7df5e4d7..4a6bb64c3a35 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c @@ -59,13 +59,13 @@ static int igt_dmabuf_import_self(void *arg) err = PTR_ERR(import); goto out_dmabuf; } + import_obj = to_intel_bo(import); if (import != &obj->base) { pr_err("i915_gem_prime_import created a new object!\n"); err = -EINVAL; goto out_import; } - import_obj = to_intel_bo(import); i915_gem_object_lock(import_obj, NULL); err = __i915_gem_object_get_pages(import_obj); @@ -128,6 +128,8 @@ static int igt_dmabuf_import_same_driver_lmem(void *arg) pr_err("i915_gem_prime_import failed with the wrong err=%ld\n", PTR_ERR(import)); err = PTR_ERR(import); + } else { + err = 0; } 
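The igt_dmabuf_import_same_driver_lmem hunk just above adds an explicit "else { err = 0; }" because err still holds a value assigned on an earlier path once the import fails in the expected way; without the reset, the selftest could return a stale error for a passing case. A minimal userspace sketch of that stale-error pitfall (the helper name and the expected errno here are hypothetical, not the kernel selftest itself):

    #include <errno.h>
    #include <stdio.h>

    /* Expect the import to fail with -EOPNOTSUPP; anything else is a bug. */
    static int check_expected_failure(long import_err)
    {
            int err = -EINVAL;      /* leftover from an earlier setup step */

            if (import_err != -EOPNOTSUPP) {
                    fprintf(stderr, "wrong err=%ld\n", import_err);
                    err = (int)import_err;
            } else {
                    err = 0;        /* without this reset, the stale -EINVAL leaks out */
            }

            return err;
    }

    int main(void)
    {
            printf("%d\n", check_expected_failure(-EOPNOTSUPP)); /* prints 0 */
            printf("%d\n", check_expected_failure(-ENOMEM));     /* prints a negative errno */
            return 0;
    }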
dma_buf_put(dmabuf); @@ -176,6 +178,7 @@ static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915, err = PTR_ERR(import); goto out_dmabuf; } + import_obj = to_intel_bo(import); if (import == &obj->base) { pr_err("i915_gem_prime_import reused gem object!\n"); @@ -183,8 +186,6 @@ static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915, goto out_import; } - import_obj = to_intel_bo(import); - i915_gem_object_lock(import_obj, NULL); err = __i915_gem_object_get_pages(import_obj); if (err) { diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c index b20f5621f62b..a2c34e5a1c54 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c @@ -581,6 +581,20 @@ static enum i915_mmap_type default_mapping(struct drm_i915_private *i915) return I915_MMAP_TYPE_GTT; } +static struct drm_i915_gem_object * +create_sys_or_internal(struct drm_i915_private *i915, + unsigned long size) +{ + if (HAS_LMEM(i915)) { + struct intel_memory_region *sys_region = + i915->mm.regions[INTEL_REGION_SMEM]; + + return __i915_gem_object_create_user(i915, size, &sys_region, 1); + } + + return i915_gem_object_create_internal(i915, size); +} + static bool assert_mmap_offset(struct drm_i915_private *i915, unsigned long size, int expected) @@ -589,7 +603,7 @@ static bool assert_mmap_offset(struct drm_i915_private *i915, u64 offset; int ret; - obj = i915_gem_object_create_internal(i915, size); + obj = create_sys_or_internal(i915, size); if (IS_ERR(obj)) return expected && expected == PTR_ERR(obj); @@ -633,6 +647,7 @@ static int igt_mmap_offset_exhaustion(void *arg) struct drm_mm_node *hole, *next; int loop, err = 0; u64 offset; + int enospc = HAS_LMEM(i915) ? 
-ENXIO : -ENOSPC; /* Disable background reaper */ disable_retire_worker(i915); @@ -683,14 +698,14 @@ static int igt_mmap_offset_exhaustion(void *arg) } /* Too large */ - if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, -ENOSPC)) { + if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, enospc)) { pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n"); err = -EINVAL; goto out; } /* Fill the hole, further allocation attempts should then fail */ - obj = i915_gem_object_create_internal(i915, PAGE_SIZE); + obj = create_sys_or_internal(i915, PAGE_SIZE); if (IS_ERR(obj)) { err = PTR_ERR(obj); pr_err("Unable to create object for reclaimed hole\n"); @@ -703,7 +718,7 @@ static int igt_mmap_offset_exhaustion(void *arg) goto err_obj; } - if (!assert_mmap_offset(i915, PAGE_SIZE, -ENOSPC)) { + if (!assert_mmap_offset(i915, PAGE_SIZE, enospc)) { pr_err("Unexpectedly succeeded in inserting object into no holes!\n"); err = -EINVAL; goto err_obj; @@ -839,10 +854,9 @@ static int wc_check(struct drm_i915_gem_object *obj) static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type) { - struct drm_i915_private *i915 = to_i915(obj->base.dev); bool no_map; - if (HAS_LMEM(i915)) + if (obj->ops->mmap_offset) return type == I915_MMAP_TYPE_FIXED; else if (type == I915_MMAP_TYPE_FIXED) return false; diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c index d812b27835f8..591a5224287e 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.c +++ b/drivers/gpu/drm/i915/gt/intel_rps.c @@ -1973,8 +1973,14 @@ u32 intel_rps_read_actual_frequency(struct intel_rps *rps) u32 intel_rps_read_punit_req(struct intel_rps *rps) { struct intel_uncore *uncore = rps_to_uncore(rps); + struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm; + intel_wakeref_t wakeref; + u32 freq = 0; - return intel_uncore_read(uncore, GEN6_RPNSWREQ); + with_intel_runtime_pm_if_in_use(rpm, wakeref) + freq = intel_uncore_read(uncore, GEN6_RPNSWREQ); + + return freq; } static u32 intel_rps_get_req(u32 pureq) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index b104fb7607eb..86c318516e14 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -172,11 +172,6 @@ void intel_uc_driver_remove(struct intel_uc *uc) __uc_free_load_err_log(uc); } -static inline bool guc_communication_enabled(struct intel_guc *guc) -{ - return intel_guc_ct_enabled(&guc->ct); -} - /* * Events triggered while CT buffers are disabled are logged in the SCRATCH_15 * register using the same bits used in the CT message payload. 
Since our @@ -210,7 +205,7 @@ static void guc_get_mmio_msg(struct intel_guc *guc) static void guc_handle_mmio_msg(struct intel_guc *guc) { /* we need communication to be enabled to reply to GuC */ - GEM_BUG_ON(!guc_communication_enabled(guc)); + GEM_BUG_ON(!intel_guc_ct_enabled(&guc->ct)); spin_lock_irq(&guc->irq_lock); if (guc->mmio_msg) { @@ -226,7 +221,7 @@ static int guc_enable_communication(struct intel_guc *guc) struct drm_i915_private *i915 = gt->i915; int ret; - GEM_BUG_ON(guc_communication_enabled(guc)); + GEM_BUG_ON(intel_guc_ct_enabled(&guc->ct)); ret = i915_inject_probe_error(i915, -ENXIO); if (ret) @@ -662,7 +657,7 @@ static int __uc_resume(struct intel_uc *uc, bool enable_communication) return 0; /* Make sure we enable communication if and only if it's disabled */ - GEM_BUG_ON(enable_communication == guc_communication_enabled(guc)); + GEM_BUG_ON(enable_communication == intel_guc_ct_enabled(&guc->ct)); if (enable_communication) guc_enable_communication(guc); diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 0473583dcdac..482fb0ae6cb5 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -119,7 +119,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) #endif if (pci_find_capability(pdev, PCI_CAP_ID_AGP)) - rdev->agp = radeon_agp_head_init(rdev->ddev); + rdev->agp = radeon_agp_head_init(dev); if (rdev->agp) { rdev->agp->agp_mtrr = arch_phys_wc_add( rdev->agp->agp_info.aper_base, diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c index 8ab3247dbc4a..13c6b857158f 100644 --- a/drivers/gpu/drm/rockchip/cdn-dp-core.c +++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c @@ -1123,7 +1123,7 @@ static int cdn_dp_suspend(struct device *dev) return ret; } -static int cdn_dp_resume(struct device *dev) +static __maybe_unused int cdn_dp_resume(struct device *dev) { struct cdn_dp_device *dp = dev_get_drvdata(dev); diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c index cb38b1a17b09..82cbb29a05aa 100644 --- a/drivers/gpu/drm/ttm/ttm_pool.c +++ b/drivers/gpu/drm/ttm/ttm_pool.c @@ -383,7 +383,8 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt, else gfp_flags |= GFP_HIGHUSER; - for (order = min(MAX_ORDER - 1UL, __fls(num_pages)); num_pages; + for (order = min_t(unsigned int, MAX_ORDER - 1, __fls(num_pages)); + num_pages; order = min_t(unsigned int, order, __fls(num_pages))) { bool apply_caching = false; struct ttm_pool_type *pt; diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index 4a1115043114..b4b4653fe301 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -167,8 +167,6 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force) struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector); bool connected = false; - WARN_ON(pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev)); - if (vc4_hdmi->hpd_gpio && gpiod_get_value_cansleep(vc4_hdmi->hpd_gpio)) { connected = true; @@ -189,12 +187,10 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force) } } - pm_runtime_put(&vc4_hdmi->pdev->dev); return connector_status_connected; } cec_phys_addr_invalidate(vc4_hdmi->cec_adap); - pm_runtime_put(&vc4_hdmi->pdev->dev); return connector_status_disconnected; } @@ -436,7 +432,7 @@ static void vc4_hdmi_set_avi_infoframe(struct drm_encoder *encoder) struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder); struct drm_connector 
*connector = &vc4_hdmi->connector; struct drm_connector_state *cstate = connector->state; - struct drm_crtc *crtc = cstate->crtc; + struct drm_crtc *crtc = encoder->crtc; const struct drm_display_mode *mode = &crtc->state->adjusted_mode; union hdmi_infoframe frame; int ret; @@ -541,11 +537,8 @@ static bool vc4_hdmi_supports_scrambling(struct drm_encoder *encoder, static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder) { + struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode; struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); - struct drm_connector *connector = &vc4_hdmi->connector; - struct drm_connector_state *cstate = connector->state; - struct drm_crtc *crtc = cstate->crtc; - struct drm_display_mode *mode = &crtc->state->adjusted_mode; if (!vc4_hdmi_supports_scrambling(encoder, mode)) return; @@ -566,18 +559,17 @@ static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder) static void vc4_hdmi_disable_scrambling(struct drm_encoder *encoder) { struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); - struct drm_connector *connector = &vc4_hdmi->connector; - struct drm_connector_state *cstate = connector->state; + struct drm_crtc *crtc = encoder->crtc; /* - * At boot, connector->state will be NULL. Since we don't know the + * At boot, encoder->crtc will be NULL. Since we don't know the * state of the scrambler and in order to avoid any * inconsistency, let's disable it all the time. */ - if (cstate && !vc4_hdmi_supports_scrambling(encoder, &cstate->crtc->mode)) + if (crtc && !vc4_hdmi_supports_scrambling(encoder, &crtc->mode)) return; - if (cstate && !vc4_hdmi_mode_needs_scrambling(&cstate->crtc->mode)) + if (crtc && !vc4_hdmi_mode_needs_scrambling(&crtc->mode)) return; if (delayed_work_pending(&vc4_hdmi->scrambling_work)) @@ -635,6 +627,7 @@ static void vc4_hdmi_encoder_post_crtc_powerdown(struct drm_encoder *encoder, vc4_hdmi->variant->phy_disable(vc4_hdmi); clk_disable_unprepare(vc4_hdmi->pixel_bvb_clock); + clk_disable_unprepare(vc4_hdmi->hsm_clock); clk_disable_unprepare(vc4_hdmi->pixel_clock); ret = pm_runtime_put(&vc4_hdmi->pdev->dev); @@ -898,9 +891,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder, vc4_hdmi_encoder_get_connector_state(encoder, state); struct vc4_hdmi_connector_state *vc4_conn_state = conn_state_to_vc4_hdmi_conn_state(conn_state); - struct drm_crtc_state *crtc_state = - drm_atomic_get_new_crtc_state(state, conn_state->crtc); - struct drm_display_mode *mode = &crtc_state->adjusted_mode; + struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode; struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); unsigned long bvb_rate, pixel_rate, hsm_rate; int ret; @@ -947,6 +938,13 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder, return; } + ret = clk_prepare_enable(vc4_hdmi->hsm_clock); + if (ret) { + DRM_ERROR("Failed to turn on HSM clock: %d\n", ret); + clk_disable_unprepare(vc4_hdmi->pixel_clock); + return; + } + vc4_hdmi_cec_update_clk_div(vc4_hdmi); if (pixel_rate > 297000000) @@ -959,6 +957,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder, ret = clk_set_min_rate(vc4_hdmi->pixel_bvb_clock, bvb_rate); if (ret) { DRM_ERROR("Failed to set pixel bvb clock rate: %d\n", ret); + clk_disable_unprepare(vc4_hdmi->hsm_clock); clk_disable_unprepare(vc4_hdmi->pixel_clock); return; } @@ -966,6 +965,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder, ret = clk_prepare_enable(vc4_hdmi->pixel_bvb_clock); 
if (ret) { DRM_ERROR("Failed to turn on pixel bvb clock: %d\n", ret); + clk_disable_unprepare(vc4_hdmi->hsm_clock); clk_disable_unprepare(vc4_hdmi->pixel_clock); return; } @@ -985,11 +985,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder, static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder, struct drm_atomic_state *state) { - struct drm_connector_state *conn_state = - vc4_hdmi_encoder_get_connector_state(encoder, state); - struct drm_crtc_state *crtc_state = - drm_atomic_get_new_crtc_state(state, conn_state->crtc); - struct drm_display_mode *mode = &crtc_state->adjusted_mode; + struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode; struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder); struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); @@ -1012,11 +1008,7 @@ static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder, static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder, struct drm_atomic_state *state) { - struct drm_connector_state *conn_state = - vc4_hdmi_encoder_get_connector_state(encoder, state); - struct drm_crtc_state *crtc_state = - drm_atomic_get_new_crtc_state(state, conn_state->crtc); - struct drm_display_mode *mode = &crtc_state->adjusted_mode; + struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode; struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder); bool hsync_pos = mode->flags & DRM_MODE_FLAG_PHSYNC; @@ -1204,8 +1196,8 @@ static void vc4_hdmi_audio_set_mai_clock(struct vc4_hdmi *vc4_hdmi, static void vc4_hdmi_set_n_cts(struct vc4_hdmi *vc4_hdmi, unsigned int samplerate) { - struct drm_connector *connector = &vc4_hdmi->connector; - struct drm_crtc *crtc = connector->state->crtc; + struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base; + struct drm_crtc *crtc = encoder->crtc; const struct drm_display_mode *mode = &crtc->state->adjusted_mode; u32 n, cts; u64 tmp; @@ -1238,13 +1230,13 @@ static inline struct vc4_hdmi *dai_to_hdmi(struct snd_soc_dai *dai) static int vc4_hdmi_audio_startup(struct device *dev, void *data) { struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev); - struct drm_connector *connector = &vc4_hdmi->connector; + struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base; /* * If the HDMI encoder hasn't probed, or the encoder is * currently in DVI mode, treat the codec dai as missing. 
*/ - if (!connector->state || !(HDMI_READ(HDMI_RAM_PACKET_CONFIG) & + if (!encoder->crtc || !(HDMI_READ(HDMI_RAM_PACKET_CONFIG) & VC4_HDMI_RAM_PACKET_ENABLE)) return -ENODEV; @@ -2114,29 +2106,6 @@ static int vc5_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi) return 0; } -#ifdef CONFIG_PM -static int vc4_hdmi_runtime_suspend(struct device *dev) -{ - struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev); - - clk_disable_unprepare(vc4_hdmi->hsm_clock); - - return 0; -} - -static int vc4_hdmi_runtime_resume(struct device *dev) -{ - struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev); - int ret; - - ret = clk_prepare_enable(vc4_hdmi->hsm_clock); - if (ret) - return ret; - - return 0; -} -#endif - static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data) { const struct vc4_hdmi_variant *variant = of_device_get_match_data(dev); @@ -2391,18 +2360,11 @@ static const struct of_device_id vc4_hdmi_dt_match[] = { {} }; -static const struct dev_pm_ops vc4_hdmi_pm_ops = { - SET_RUNTIME_PM_OPS(vc4_hdmi_runtime_suspend, - vc4_hdmi_runtime_resume, - NULL) -}; - struct platform_driver vc4_hdmi_driver = { .probe = vc4_hdmi_dev_probe, .remove = vc4_hdmi_dev_remove, .driver = { .name = "vc4_hdmi", .of_match_table = vc4_hdmi_dt_match, - .pm = &vc4_hdmi_pm_ops, }, }; diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c index 2aee356840a2..314015d9e912 100644 --- a/drivers/hv/ring_buffer.c +++ b/drivers/hv/ring_buffer.c @@ -245,6 +245,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info) mutex_unlock(&ring_info->ring_buffer_mutex); kfree(ring_info->pkt_buffer); + ring_info->pkt_buffer = NULL; ring_info->pkt_buffer_size = 0; } diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c index 94fb63a7b357..fe63d5ee201b 100644 --- a/drivers/macintosh/smu.c +++ b/drivers/macintosh/smu.c @@ -570,7 +570,7 @@ fail_msg_node: fail_db_node: of_node_put(smu->db_node); fail_bootmem: - memblock_free(__pa(smu), sizeof(struct smu_device)); + memblock_free_ptr(smu, sizeof(struct smu_device)); smu = NULL; fail_np: of_node_put(np); diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c index 64d6dfa83122..267324889dd6 100644 --- a/drivers/net/dsa/lantiq_gswip.c +++ b/drivers/net/dsa/lantiq_gswip.c @@ -1885,6 +1885,12 @@ static int gswip_gphy_fw_load(struct gswip_priv *priv, struct gswip_gphy_fw *gph reset_control_assert(gphy_fw->reset); + /* The vendor BSP uses a 200ms delay after asserting the reset line. + * Without this some users are observing that the PHY is not coming up + * on the MDIO bus. 
+ */ + msleep(200); + ret = request_firmware(&fw, gphy_fw->fw_name, dev); if (ret) { dev_err(dev, "failed to load firmware: %s, error: %i\n", diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index 1f63f50f73f1..bda5a9bf4f52 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -643,10 +643,8 @@ qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask) } static int -qca8k_mdio_write(struct mii_bus *salve_bus, int phy, int regnum, u16 data) +qca8k_mdio_write(struct mii_bus *bus, int phy, int regnum, u16 data) { - struct qca8k_priv *priv = salve_bus->priv; - struct mii_bus *bus = priv->bus; u16 r1, r2, page; u32 val; int ret; @@ -682,10 +680,8 @@ exit: } static int -qca8k_mdio_read(struct mii_bus *salve_bus, int phy, int regnum) +qca8k_mdio_read(struct mii_bus *bus, int phy, int regnum) { - struct qca8k_priv *priv = salve_bus->priv; - struct mii_bus *bus = priv->bus; u16 r1, r2, page; u32 val; int ret; @@ -727,6 +723,24 @@ exit: } static int +qca8k_internal_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 data) +{ + struct qca8k_priv *priv = slave_bus->priv; + struct mii_bus *bus = priv->bus; + + return qca8k_mdio_write(bus, phy, regnum, data); +} + +static int +qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum) +{ + struct qca8k_priv *priv = slave_bus->priv; + struct mii_bus *bus = priv->bus; + + return qca8k_mdio_read(bus, phy, regnum); +} + +static int qca8k_phy_write(struct dsa_switch *ds, int port, int regnum, u16 data) { struct qca8k_priv *priv = ds->priv; @@ -775,8 +789,8 @@ qca8k_mdio_register(struct qca8k_priv *priv, struct device_node *mdio) bus->priv = (void *)priv; bus->name = "qca8k slave mii"; - bus->read = qca8k_mdio_read; - bus->write = qca8k_mdio_write; + bus->read = qca8k_internal_mdio_read; + bus->write = qca8k_internal_mdio_write; snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d", ds->index); diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c index 8d90fed5d33e..6f0ea2facea9 100644 --- a/drivers/net/ethernet/3com/3c515.c +++ b/drivers/net/ethernet/3com/3c515.c @@ -1050,7 +1050,7 @@ static netdev_tx_t corkscrew_start_xmit(struct sk_buff *skb, #ifdef VORTEX_BUS_MASTER if (vp->bus_master) { /* Set the bus-master controller to transfer the packet. */ - outl((int) (skb->data), ioaddr + Wn7_MasterAddr); + outl(isa_virt_to_bus(skb->data), ioaddr + Wn7_MasterAddr); outw((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen); vp->tx_skb = skb; outw(StartDMADown, ioaddr + EL3_CMD); diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c index 53660bc8d6ff..9afc712f5948 100644 --- a/drivers/net/ethernet/8390/ne.c +++ b/drivers/net/ethernet/8390/ne.c @@ -922,13 +922,16 @@ static void __init ne_add_devices(void) } } -#ifdef MODULE static int __init ne_init(void) { int retval; - ne_add_devices(); + + if (IS_MODULE(CONFIG_NE2000)) + ne_add_devices(); + retval = platform_driver_probe(&ne_driver, ne_drv_probe); - if (retval) { + + if (IS_MODULE(CONFIG_NE2000) && retval) { if (io[0] == 0) pr_notice("ne.c: You must supply \"io=0xNNN\"" " value(s) for ISA cards.\n"); @@ -941,18 +944,8 @@ static int __init ne_init(void) return retval; } module_init(ne_init); -#else /* MODULE */ -static int __init ne_init(void) -{ - int retval = platform_driver_probe(&ne_driver, ne_drv_probe); - - /* Unregister unused platform_devices. 
*/ - ne_loop_rm_unreg(0); - return retval; -} -module_init(ne_init); -#ifdef CONFIG_NETDEV_LEGACY_INIT +#if !defined(MODULE) && defined(CONFIG_NETDEV_LEGACY_INIT) struct net_device * __init ne_probe(int unit) { int this_dev; @@ -994,7 +987,6 @@ struct net_device * __init ne_probe(int unit) return ERR_PTR(-ENODEV); } #endif -#endif /* MODULE */ static void __exit ne_exit(void) { diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c index b5df7ad5a83f..032e8922b482 100644 --- a/drivers/net/ethernet/amd/ni65.c +++ b/drivers/net/ethernet/amd/ni65.c @@ -748,7 +748,7 @@ static void ni65_stop_start(struct net_device *dev,struct priv *p) #ifdef XMT_VIA_SKB skb_save[i] = p->tmd_skb[i]; #endif - buffer[i] = (u32) isa_bus_to_virt(tmdp->u.buffer); + buffer[i] = (unsigned long)isa_bus_to_virt(tmdp->u.buffer); blen[i] = tmdp->blen; tmdp->u.s.status = 0x0; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index f255fd0b16db..6fbf735fca31 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -1224,7 +1224,7 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, /* SR-IOV capability was enabled but there are no VFs*/ if (iov->total == 0) { - err = -EINVAL; + err = 0; goto failed; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index ea0c45d33814..037767b370d5 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -2213,12 +2213,11 @@ static int bnxt_async_event_process(struct bnxt *bp, DIV_ROUND_UP(fw_health->polling_dsecs * HZ, bp->current_interval * 10); fw_health->tmr_counter = fw_health->tmr_multiplier; - if (!fw_health->enabled) { + if (!fw_health->enabled) fw_health->last_fw_heartbeat = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); - fw_health->last_fw_reset_cnt = - bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); - } + fw_health->last_fw_reset_cnt = + bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); netif_info(bp, drv, bp->dev, "Error recovery info: error recovery[1], master[%d], reset count[%u], health status: 0x%x\n", fw_health->master, fw_health->last_fw_reset_cnt, @@ -2730,6 +2729,9 @@ static void bnxt_free_tx_skbs(struct bnxt *bp) struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; int j; + if (!txr->tx_buf_ring) + continue; + for (j = 0; j < max_idx;) { struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j]; struct sk_buff *skb; @@ -2814,6 +2816,9 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr) } skip_rx_tpa_free: + if (!rxr->rx_buf_ring) + goto skip_rx_buf_free; + for (i = 0; i < max_idx; i++) { struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i]; dma_addr_t mapping = rx_buf->mapping; @@ -2836,6 +2841,11 @@ skip_rx_tpa_free: kfree(data); } } + +skip_rx_buf_free: + if (!rxr->rx_agg_ring) + goto skip_rx_agg_free; + for (i = 0; i < max_agg_idx; i++) { struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i]; struct page *page = rx_agg_buf->page; @@ -2852,6 +2862,8 @@ skip_rx_tpa_free: __free_page(page); } + +skip_rx_agg_free: if (rxr->rx_page) { __free_page(rxr->rx_page); rxr->rx_page = NULL; @@ -2900,6 +2912,9 @@ static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) struct pci_dev *pdev = bp->pdev; int i; + if (!rmem->pg_arr) + goto skip_pages; + for (i = 0; i < rmem->nr_pages; i++) { if (!rmem->pg_arr[i]) continue; @@ -2909,6 +2924,7 @@ static void bnxt_free_ring(struct bnxt *bp, 
struct bnxt_ring_mem_info *rmem) rmem->pg_arr[i] = NULL; } +skip_pages: if (rmem->pg_tbl) { size_t pg_tbl_size = rmem->nr_pages * 8; @@ -3228,10 +3244,14 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp) static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr) { + struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; + kfree(cpr->cp_desc_ring); cpr->cp_desc_ring = NULL; + ring->ring_mem.pg_arr = NULL; kfree(cpr->cp_desc_mapping); cpr->cp_desc_mapping = NULL; + ring->ring_mem.dma_arr = NULL; } static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n) @@ -12207,6 +12227,11 @@ static void bnxt_fw_reset_task(struct work_struct *work) return; } + if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) && + bp->fw_health->enabled) { + bp->fw_health->last_fw_reset_cnt = + bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); + } bp->fw_reset_state = 0; /* Make sure fw_reset_state is 0 before clearing the flag */ smp_mb__before_atomic(); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index 46fae1acbeed..e6a4a768b10b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -1884,9 +1884,6 @@ bnxt_tc_indr_block_cb_lookup(struct bnxt *bp, struct net_device *netdev) { struct bnxt_flower_indr_block_cb_priv *cb_priv; - /* All callback list access should be protected by RTNL. */ - ASSERT_RTNL(); - list_for_each_entry(cb_priv, &bp->tc_indr_block_list, list) if (cb_priv->tunnel_netdev == netdev) return cb_priv; diff --git a/drivers/net/ethernet/cadence/macb_pci.c b/drivers/net/ethernet/cadence/macb_pci.c index 8b7b59908a1a..f66d22de5168 100644 --- a/drivers/net/ethernet/cadence/macb_pci.c +++ b/drivers/net/ethernet/cadence/macb_pci.c @@ -111,9 +111,9 @@ static void macb_remove(struct pci_dev *pdev) struct platform_device *plat_dev = pci_get_drvdata(pdev); struct macb_platform_data *plat_data = dev_get_platdata(&plat_dev->dev); - platform_device_unregister(plat_dev); clk_unregister(plat_data->pclk); clk_unregister(plat_data->hclk); + platform_device_unregister(plat_dev); } static const struct pci_device_id dev_id_table[] = { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 22af3d6ce178..adc54a726661 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -61,6 +61,9 @@ static unsigned int tx_sgl = 1; module_param(tx_sgl, uint, 0600); MODULE_PARM_DESC(tx_sgl, "Minimum number of frags when using dma_map_sg() to optimize the IOMMU mapping"); +static bool page_pool_enabled = true; +module_param(page_pool_enabled, bool, 0400); + #define HNS3_SGL_SIZE(nfrag) (sizeof(struct scatterlist) * (nfrag) + \ sizeof(struct sg_table)) #define HNS3_MAX_SGL_SIZE ALIGN(HNS3_SGL_SIZE(HNS3_MAX_TSO_BD_NUM), \ @@ -73,6 +76,7 @@ MODULE_PARM_DESC(tx_sgl, "Minimum number of frags when using dma_map_sg() to opt #define HNS3_OUTER_VLAN_TAG 2 #define HNS3_MIN_TX_LEN 33U +#define HNS3_MIN_TUN_PKT_LEN 65U /* hns3_pci_tbl - PCI Device ID Table * @@ -1424,8 +1428,11 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto, l4.tcp->doff); break; case IPPROTO_UDP: - if (hns3_tunnel_csum_bug(skb)) - return skb_checksum_help(skb); + if (hns3_tunnel_csum_bug(skb)) { + int ret = skb_put_padto(skb, HNS3_MIN_TUN_PKT_LEN); + + return ret ? 
ret : skb_checksum_help(skb); + } hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S, @@ -4753,7 +4760,8 @@ static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring) goto out_with_desc_cb; if (!HNAE3_IS_TX_RING(ring)) { - hns3_alloc_page_pool(ring); + if (page_pool_enabled) + hns3_alloc_page_pool(ring); ret = hns3_alloc_ring_buffers(ring); if (ret) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c index 68ed1715ac52..87d96f82c318 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c @@ -1724,6 +1724,10 @@ hclge_dbg_get_imp_stats_info(struct hclge_dev *hdev, char *buf, int len) } bd_num = le32_to_cpu(req->bd_num); + if (!bd_num) { + dev_err(&hdev->pdev->dev, "imp statistics bd number is 0!\n"); + return -EINVAL; + } desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL); if (!desc_src) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index e55ba2e511b1..f1e46ba799f9 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -1528,9 +1528,10 @@ static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev) static int hclge_configure(struct hclge_dev *hdev) { struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); + const struct cpumask *cpumask = cpu_online_mask; struct hclge_cfg cfg; unsigned int i; - int ret; + int node, ret; ret = hclge_get_cfg(hdev, &cfg); if (ret) @@ -1595,11 +1596,12 @@ static int hclge_configure(struct hclge_dev *hdev) hclge_init_kdump_kernel_config(hdev); - /* Set the init affinity based on pci func number */ - i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev))); - i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0; - cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)), - &hdev->affinity_mask); + /* Set the affinity based on numa node */ + node = dev_to_node(&hdev->pdev->dev); + if (node != NUMA_NO_NODE) + cpumask = cpumask_of_node(node); + + cpumask_copy(&hdev->affinity_mask, cpumask); return ret; } @@ -8125,11 +8127,12 @@ static void hclge_ae_stop(struct hnae3_handle *handle) hclge_clear_arfs_rules(hdev); spin_unlock_bh(&hdev->fd_rule_lock); - /* If it is not PF reset, the firmware will disable the MAC, + /* If it is not PF reset or FLR, the firmware will disable the MAC, * so it only need to stop phy here. 
*/ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && - hdev->reset_type != HNAE3_FUNC_RESET) { + hdev->reset_type != HNAE3_FUNC_RESET && + hdev->reset_type != HNAE3_FLR_RESET) { hclge_mac_stop_phy(hdev); hclge_update_link_status(hdev); return;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 82e727020120..a69e892277b3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -2465,6 +2465,8 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data) hclgevf_enable_vector(&hdev->misc_vector, false); event_cause = hclgevf_check_evt_cause(hdev, &clearval); + if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) + hclgevf_clear_event_cause(hdev, clearval); switch (event_cause) { case HCLGEVF_VECTOR0_EVENT_RST: @@ -2477,10 +2479,8 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data) break; } - if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) { - hclgevf_clear_event_cause(hdev, clearval); + if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) hclgevf_enable_vector(&hdev->misc_vector, true); - } return IRQ_HANDLED; }
diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c index b8a40146b895..b482f6f633bd 100644 --- a/drivers/net/ethernet/i825xx/82596.c +++ b/drivers/net/ethernet/i825xx/82596.c @@ -1144,7 +1144,7 @@ static struct net_device * __init i82596_probe(void) err = -ENODEV; goto out; } - memcpy(eth_addr, (void *) 0xfffc1f2c, ETH_ALEN); /* YUCK! Get addr from NOVRAM */ + memcpy(eth_addr, absolute_pointer(0xfffc1f2c), ETH_ALEN); /* YUCK! Get addr from NOVRAM */ dev->base_addr = MVME_I596_BASE; dev->irq = (unsigned) MVME16x_IRQ_I596; goto found;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index a775c69e4fd7..a4579b340120 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -4700,6 +4700,14 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq, return 0; } + if (adapter->failover_pending) { + adapter->init_done_rc = -EAGAIN; + netdev_dbg(netdev, "Failover pending, ignoring login response\n"); + complete(&adapter->init_done); + /* login response buffer will be released on reset */ + return 0; + } + netdev->mtu = adapter->req_mtu - ETH_HLEN; netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index eadcb9958346..3c4f08d20414 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -695,6 +695,7 @@ static inline void ice_set_rdma_cap(struct ice_pf *pf) { if (pf->hw.func_caps.common_cap.rdma && pf->num_rdma_msix) { set_bit(ICE_FLAG_RDMA_ENA, pf->flags); + set_bit(ICE_FLAG_AUX_ENA, pf->flags); ice_plug_aux_dev(pf); } } @@ -707,5 +708,6 @@ static inline void ice_clear_rdma_cap(struct ice_pf *pf) { ice_unplug_aux_dev(pf); clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); + clear_bit(ICE_FLAG_AUX_ENA, pf->flags); } #endif /* _ICE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c index 1f2afdf6cd48..adcc9a251595 100644 --- a/drivers/net/ethernet/intel/ice/ice_idc.c +++
b/drivers/net/ethernet/intel/ice/ice_idc.c @@ -271,6 +271,12 @@ int ice_plug_aux_dev(struct ice_pf *pf) struct auxiliary_device *adev; int ret; + /* if this PF doesn't support a technology that requires auxiliary + * devices, then gracefully exit + */ + if (!ice_is_aux_ena(pf)) + return 0; + iadev = kzalloc(sizeof(*iadev), GFP_KERNEL); if (!iadev) return -ENOMEM; diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index b877efae61df..0e19b4d02e62 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -6350,7 +6350,9 @@ static int igc_probe(struct pci_dev *pdev, if (pci_using_dac) netdev->features |= NETIF_F_HIGHDMA; - netdev->vlan_features |= netdev->features; + netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; + netdev->mpls_features |= NETIF_F_HW_CSUM; + netdev->hw_enc_features |= netdev->vlan_features; /* MTU range: 68 - 9216 */ netdev->min_mtu = ETH_MIN_MTU; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c index e84287ffc7ce..dcf9f27ba2ef 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c @@ -658,11 +658,10 @@ static const struct devlink_param enable_rdma_param = static int mlx5_devlink_rdma_param_register(struct devlink *devlink) { - struct mlx5_core_dev *dev = devlink_priv(devlink); union devlink_param_value value; int err; - if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND) || MLX5_ESWITCH_MANAGER(dev)) + if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND)) return 0; err = devlink_param_register(devlink, &enable_rdma_param); @@ -679,9 +678,7 @@ static int mlx5_devlink_rdma_param_register(struct devlink *devlink) static void mlx5_devlink_rdma_param_unregister(struct devlink *devlink) { - struct mlx5_core_dev *dev = devlink_priv(devlink); - - if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND) || MLX5_ESWITCH_MANAGER(dev)) + if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND)) return; devlink_param_unpublish(devlink, &enable_rdma_param); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c index 3f8a98093f8c..f9cf9fb31547 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c @@ -1007,7 +1007,7 @@ int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer) err = mlx5_core_alloc_pd(dev, &tracer->buff.pdn); if (err) { mlx5_core_warn(dev, "FWTracer: Failed to allocate PD %d\n", err); - return err; + goto err_cancel_work; } err = mlx5_fw_tracer_create_mkey(tracer); @@ -1031,6 +1031,7 @@ err_notifier_unregister: mlx5_core_destroy_mkey(dev, &tracer->buff.mkey); err_dealloc_pd: mlx5_core_dealloc_pd(dev, tracer->buff.pdn); +err_cancel_work: cancel_work_sync(&tracer->read_fw_strings_work); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 669a75f3537a..7b8c8187543a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -922,7 +922,7 @@ void mlx5e_set_rx_mode_work(struct work_struct *work); int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr); int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr); -int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val); +int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val, bool rx_filter); int 
mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, u16 vid); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c index 0c38c2e319be..b5ddaa82755f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c @@ -137,7 +137,7 @@ static int mlx5_esw_bridge_port_changeupper(struct notifier_block *nb, void *ptr u16 vport_num, esw_owner_vhca_id; struct netlink_ext_ack *extack; int ifindex = upper->ifindex; - int err; + int err = 0; if (!netif_is_bridge_master(upper)) return 0; @@ -244,7 +244,7 @@ mlx5_esw_bridge_port_obj_attr_set(struct net_device *dev, struct netlink_ext_ack *extack = switchdev_notifier_info_to_extack(&port_attr_info->info); const struct switchdev_attr *attr = port_attr_info->attr; u16 vport_num, esw_owner_vhca_id; - int err; + int err = 0; if (!mlx5_esw_bridge_lower_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num, &esw_owner_vhca_id)) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c index 51a4d80f7fa3..de03684528bb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c @@ -300,9 +300,6 @@ mlx5e_rep_indr_block_priv_lookup(struct mlx5e_rep_priv *rpriv, { struct mlx5e_rep_indr_block_priv *cb_priv; - /* All callback list access should be protected by RTNL. */ - ASSERT_RTNL(); - list_for_each_entry(cb_priv, &rpriv->uplink_priv.tc_indr_block_priv_list, list) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c index bf0313e2682b..13056cb9757d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c @@ -572,7 +572,7 @@ void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_chann if (res->features & MLX5E_RX_RES_FEATURE_PTP) { u32 rqn; - if (mlx5e_channels_get_ptp_rqn(chs, &rqn)) + if (!mlx5e_channels_get_ptp_rqn(chs, &rqn)) rqn = res->drop_rqn; err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, rqn); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 2cfd12953909..306fb5d6a36d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -1884,7 +1884,7 @@ static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable) return set_pflag_cqe_based_moder(netdev, enable, true); } -int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val) +int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val, bool rx_filter) { bool curr_val = MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS); struct mlx5e_params new_params; @@ -1896,8 +1896,7 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val if (curr_val == new_val) return 0; - if (new_val && !priv->profile->rx_ptp_support && - priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE) { + if (new_val && !priv->profile->rx_ptp_support && rx_filter) { netdev_err(priv->netdev, "Profile doesn't support enabling of CQE compression while hardware time-stamping is enabled.\n"); return -EINVAL; @@ -1905,7 +1904,7 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val new_params = priv->channels.params; 
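For context on the mlx5e_modify_rx_cqe_compression_locked() change being made here: rather than having the helper re-read priv->tstamp.rx_filter itself, the caller now snapshots that state once and threads it down as a plain flag. A minimal sketch of this parameter-threading pattern, with hypothetical names standing in for the mlx5 types:

    #include <stdbool.h>
    #include <stdio.h>

    struct params {
            bool cqe_compress;
            bool ptp_rx;
    };

    /*
     * The helper acts on a snapshot ('rx_filter') chosen by the caller
     * instead of re-reading device state behind the caller's back.
     */
    static int modify_cqe_compression_locked(struct params *p, bool new_val,
                                             bool rx_filter)
    {
            if (p->cqe_compress == new_val)
                    return 0;

            p->cqe_compress = new_val;
            if (rx_filter)
                    p->ptp_rx = new_val;

            return 0;
    }

    int main(void)
    {
            struct params p = { .cqe_compress = false, .ptp_rx = false };
            bool rx_filter = true;  /* caller snapshots "rx_filter != NONE" once */

            modify_cqe_compression_locked(&p, true, rx_filter);
            printf("compress=%d ptp_rx=%d\n", p.cqe_compress, p.ptp_rx);
            return 0;
    }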
MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val); - if (priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE) + if (rx_filter) new_params.ptp_rx = new_val; if (new_params.ptp_rx == priv->channels.params.ptp_rx) @@ -1928,12 +1927,14 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev, { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; + bool rx_filter; int err; if (!MLX5_CAP_GEN(mdev, cqe_compression)) return -EOPNOTSUPP; - err = mlx5e_modify_rx_cqe_compression_locked(priv, enable); + rx_filter = priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE; + err = mlx5e_modify_rx_cqe_compression_locked(priv, enable, rx_filter); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 47efd858964d..3fd515e7bf30 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -3554,14 +3554,14 @@ static int mlx5e_hwstamp_config_no_ptp_rx(struct mlx5e_priv *priv, bool rx_filte if (!rx_filter) /* Reset CQE compression to Admin default */ - return mlx5e_modify_rx_cqe_compression_locked(priv, rx_cqe_compress_def); + return mlx5e_modify_rx_cqe_compression_locked(priv, rx_cqe_compress_def, false); if (!MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS)) return 0; /* Disable CQE compression */ netdev_warn(priv->netdev, "Disabling RX cqe compression\n"); - err = mlx5e_modify_rx_cqe_compression_locked(priv, false); + err = mlx5e_modify_rx_cqe_compression_locked(priv, false, true); if (err) netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 9fe8e3c204d6..fe501ba88bea 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -1682,14 +1682,13 @@ static int build_match_list(struct match_list *match_head, curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC); if (!curr_match) { + rcu_read_unlock(); free_match_list(match_head, ft_locked); - err = -ENOMEM; - goto out; + return -ENOMEM; } curr_match->g = g; list_add_tail(&curr_match->list, &match_head->list); } -out: rcu_read_unlock(); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c index 49ca57c6d31d..ca5690b0a7ab 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c @@ -927,9 +927,12 @@ void mlx5_lag_disable_change(struct mlx5_core_dev *dev) struct mlx5_core_dev *dev1; struct mlx5_lag *ldev; + ldev = mlx5_lag_dev(dev); + if (!ldev) + return; + mlx5_dev_list_lock(); - ldev = mlx5_lag_dev(dev); dev0 = ldev->pf[MLX5_LAG_P1].dev; dev1 = ldev->pf[MLX5_LAG_P2].dev; @@ -946,8 +949,11 @@ void mlx5_lag_enable_change(struct mlx5_core_dev *dev) { struct mlx5_lag *ldev; - mlx5_dev_list_lock(); ldev = mlx5_lag_dev(dev); + if (!ldev) + return; + + mlx5_dev_list_lock(); ldev->mode_changes_in_progress--; mlx5_dev_list_unlock(); mlx5_queue_bond_work(ldev, 0); diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c index 3e85b17f5857..6704f5c1aa32 100644 --- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c +++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c @@ -142,6 +142,13 @@ static int mlxbf_gige_open(struct net_device *netdev) 
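[Editor's note] The fs_core change above unlocks the RCU read side directly on the allocation-failure path rather than funnelling through a shared label, and the lag.c hunks check the ldev pointer before taking the device-list lock. Both follow one rule: every early exit from a critical section must release exactly what it holds. A hedged pthread stand-in, not the kernel's RCU API:

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static int build_match_list(int n)
{
	pthread_mutex_lock(&list_lock);		/* rcu_read_lock() in the driver */
	for (int i = 0; i < n; i++) {
		void *match = malloc(32);
		if (!match) {
			/* release the lock on THIS path; no shared "out:"
			 * label that an early return might bypass */
			pthread_mutex_unlock(&list_lock);
			return -ENOMEM;
		}
		free(match);			/* placeholder for list_add_tail() */
	}
	pthread_mutex_unlock(&list_lock);
	return 0;
}

int main(void)
{
	return build_match_list(4) ? EXIT_FAILURE : EXIT_SUCCESS;
}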
err = mlxbf_gige_clean_port(priv); if (err) goto free_irqs; + + /* Clear driver's valid_polarity to match hardware, + * since the above call to clean_port() resets the + * receive polarity used by hardware. + */ + priv->valid_polarity = 0; + err = mlxbf_gige_rx_init(priv); if (err) goto free_irqs; diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c index c1310ea1c216..d5c485a6d284 100644 --- a/drivers/net/ethernet/microsoft/mana/hw_channel.c +++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c @@ -398,9 +398,7 @@ static int mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, u16 q_depth, int err; u16 i; - dma_buf = kzalloc(sizeof(*dma_buf) + - q_depth * sizeof(struct hwc_work_request), - GFP_KERNEL); + dma_buf = kzalloc(struct_size(dma_buf, reqs, q_depth), GFP_KERNEL); if (!dma_buf) return -ENOMEM; diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 556c3495211d..64c0ef57ad42 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -1767,9 +1767,6 @@ nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app, struct nfp_flower_indr_block_cb_priv *cb_priv; struct nfp_flower_priv *priv = app->priv; - /* All callback list access should be protected by RTNL. */ - ASSERT_RTNL(); - list_for_each_entry(cb_priv, &priv->indr_block_cb_priv, list) if (cb_priv->netdev == netdev) return cb_priv; diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 6e5a6cc97d0e..24cd41567775 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -3367,6 +3367,7 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, struct qed_nvm_image_att *p_image_att) { enum nvm_image_type type; + int rc; u32 i; /* Translate image_id into MFW definitions */ @@ -3395,7 +3396,10 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, return -EINVAL; } - qed_mcp_nvm_info_populate(p_hwfn); + rc = qed_mcp_nvm_info_populate(p_hwfn); + if (rc) + return rc; + for (i = 0; i < p_hwfn->nvm_info.num_images; i++) if (type == p_hwfn->nvm_info.image_att[i].image_type) break; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index 0a2f34fc8b24..27dffa299ca6 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -1354,10 +1354,10 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter) struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info; const struct firmware *fw = fw_info->fw; u32 dest, *p_cache, *temp; - int i, ret = -EIO; __le32 *temp_le; u8 data[16]; size_t size; + int i, ret; u64 addr; temp = vzalloc(fw->size); diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c index 4b2eca5e08e2..01ef5efd7bc2 100644 --- a/drivers/net/ethernet/rdc/r6040.c +++ b/drivers/net/ethernet/rdc/r6040.c @@ -119,6 +119,8 @@ #define PHY_ST 0x8A /* PHY status register */ #define MAC_SM 0xAC /* MAC status machine */ #define MAC_SM_RST 0x0002 /* MAC status machine reset */ +#define MD_CSC 0xb6 /* MDC speed control register */ +#define MD_CSC_DEFAULT 0x0030 #define MAC_ID 0xBE /* Identifier register */ #define TX_DCNT 0x80 /* TX descriptor count */ @@ -355,8 +357,9 @@ static void r6040_reset_mac(struct r6040_private *lp) { void __iomem *ioaddr = lp->base; int limit = MAC_DEF_TIMEOUT; - 
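[Editor's note] The mana hw_channel change above replaces an open-coded header-plus-array size with struct_size(), which additionally guards the multiplication against overflow. A runnable userspace equivalent of the flexible-array layout; plain sizeof arithmetic here, so q_depth is assumed already validated:

#include <stdio.h>
#include <stdlib.h>

struct hwc_work_request { int id; char payload[32]; };

/* Trailing flexible array member: one allocation covers header + entries. */
struct hwc_dma_buf {
	size_t count;
	struct hwc_work_request reqs[];
};

static struct hwc_dma_buf *dma_buf_alloc(size_t q_depth)
{
	struct hwc_dma_buf *buf =
		calloc(1, sizeof(*buf) + q_depth * sizeof(buf->reqs[0]));

	if (buf)
		buf->count = q_depth;
	return buf;
}

int main(void)
{
	struct hwc_dma_buf *buf = dma_buf_alloc(8);

	if (!buf)
		return 1;
	buf->reqs[7].id = 7;
	printf("allocated %zu requests\n", buf->count);
	free(buf);
	return 0;
}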
u16 cmd; + u16 cmd, md_csc; + md_csc = ioread16(ioaddr + MD_CSC); iowrite16(MAC_RST, ioaddr + MCR1); while (limit--) { cmd = ioread16(ioaddr + MCR1); @@ -368,6 +371,10 @@ static void r6040_reset_mac(struct r6040_private *lp) iowrite16(MAC_SM_RST, ioaddr + MAC_SM); iowrite16(0, ioaddr + MAC_SM); mdelay(5); + + /* Restore MDIO clock frequency */ + if (md_csc != MD_CSC_DEFAULT) + iowrite16(md_csc, ioaddr + MD_CSC); } static void r6040_init_mac_regs(struct net_device *dev) diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c index e5b0d795c301..3dbea028b325 100644 --- a/drivers/net/ethernet/sfc/efx_channels.c +++ b/drivers/net/ethernet/sfc/efx_channels.c @@ -166,32 +166,46 @@ static int efx_allocate_msix_channels(struct efx_nic *efx, * We need a channel per event queue, plus a VI per tx queue. * This may be more pessimistic than it needs to be. */ - if (n_channels + n_xdp_ev > max_channels) { - netif_err(efx, drv, efx->net_dev, - "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n", - n_xdp_ev, n_channels, max_channels); - netif_err(efx, drv, efx->net_dev, - "XDP_TX and XDP_REDIRECT will not work on this interface"); - efx->n_xdp_channels = 0; - efx->xdp_tx_per_channel = 0; - efx->xdp_tx_queue_count = 0; + if (n_channels >= max_channels) { + efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED; + netif_warn(efx, drv, efx->net_dev, + "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n", + n_xdp_ev, n_channels, max_channels); + netif_warn(efx, drv, efx->net_dev, + "XDP_TX and XDP_REDIRECT might decrease device's performance\n"); } else if (n_channels + n_xdp_tx > efx->max_vis) { - netif_err(efx, drv, efx->net_dev, - "Insufficient resources for %d XDP TX queues (%d other channels, max VIs %d)\n", - n_xdp_tx, n_channels, efx->max_vis); - netif_err(efx, drv, efx->net_dev, - "XDP_TX and XDP_REDIRECT will not work on this interface"); - efx->n_xdp_channels = 0; - efx->xdp_tx_per_channel = 0; - efx->xdp_tx_queue_count = 0; + efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED; + netif_warn(efx, drv, efx->net_dev, + "Insufficient resources for %d XDP TX queues (%d other channels, max VIs %d)\n", + n_xdp_tx, n_channels, efx->max_vis); + netif_warn(efx, drv, efx->net_dev, + "XDP_TX and XDP_REDIRECT might decrease device's performance\n"); + } else if (n_channels + n_xdp_ev > max_channels) { + efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_SHARED; + netif_warn(efx, drv, efx->net_dev, + "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n", + n_xdp_ev, n_channels, max_channels); + + n_xdp_ev = max_channels - n_channels; + netif_warn(efx, drv, efx->net_dev, + "XDP_TX and XDP_REDIRECT will work with reduced performance (%d cpus/tx_queue)\n", + DIV_ROUND_UP(n_xdp_tx, tx_per_ev * n_xdp_ev)); } else { + efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_DEDICATED; + } + + if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_BORROWED) { efx->n_xdp_channels = n_xdp_ev; efx->xdp_tx_per_channel = tx_per_ev; efx->xdp_tx_queue_count = n_xdp_tx; n_channels += n_xdp_ev; netif_dbg(efx, drv, efx->net_dev, "Allocating %d TX and %d event queues for XDP\n", - n_xdp_tx, n_xdp_ev); + n_xdp_ev * tx_per_ev, n_xdp_ev); + } else { + efx->n_xdp_channels = 0; + efx->xdp_tx_per_channel = 0; + efx->xdp_tx_queue_count = n_xdp_tx; } if (vec_count < n_channels) { @@ -858,6 +872,20 @@ rollback: goto out; } +static inline int +efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number, + struct efx_tx_queue *tx_queue) +{ + if 
(xdp_queue_number >= efx->xdp_tx_queue_count) + return -EINVAL; + + netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n", + tx_queue->channel->channel, tx_queue->label, + xdp_queue_number, tx_queue->queue); + efx->xdp_tx_queues[xdp_queue_number] = tx_queue; + return 0; +} + int efx_set_channels(struct efx_nic *efx) { struct efx_tx_queue *tx_queue; @@ -896,20 +924,9 @@ int efx_set_channels(struct efx_nic *efx) if (efx_channel_is_xdp_tx(channel)) { efx_for_each_channel_tx_queue(tx_queue, channel) { tx_queue->queue = next_queue++; - - /* We may have a few left-over XDP TX - * queues owing to xdp_tx_queue_count - * not dividing evenly by EFX_MAX_TXQ_PER_CHANNEL. - * We still allocate and probe those - * TXQs, but never use them. - */ - if (xdp_queue_number < efx->xdp_tx_queue_count) { - netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n", - channel->channel, tx_queue->label, - xdp_queue_number, tx_queue->queue); - efx->xdp_tx_queues[xdp_queue_number] = tx_queue; + rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue); + if (rc == 0) xdp_queue_number++; - } } } else { efx_for_each_channel_tx_queue(tx_queue, channel) { @@ -918,10 +935,35 @@ int efx_set_channels(struct efx_nic *efx) channel->channel, tx_queue->label, tx_queue->queue); } + + /* If XDP is borrowing queues from net stack, it must use the queue + * with no csum offload, which is the first one of the channel + * (note: channel->tx_queue_by_type is not initialized yet) + */ + if (efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_BORROWED) { + tx_queue = &channel->tx_queue[0]; + rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue); + if (rc == 0) + xdp_queue_number++; + } } } } - WARN_ON(xdp_queue_number != efx->xdp_tx_queue_count); + WARN_ON(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DEDICATED && + xdp_queue_number != efx->xdp_tx_queue_count); + WARN_ON(efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED && + xdp_queue_number > efx->xdp_tx_queue_count); + + /* If we have more CPUs than assigned XDP TX queues, assign the already + * existing queues to the exceeding CPUs + */ + next_queue = 0; + while (xdp_queue_number < efx->xdp_tx_queue_count) { + tx_queue = efx->xdp_tx_queues[next_queue++]; + rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue); + if (rc == 0) + xdp_queue_number++; + } rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels); if (rc) diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 9b4b25704271..f6981810039d 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -782,6 +782,12 @@ struct efx_async_filter_insertion { #define EFX_RPS_MAX_IN_FLIGHT 8 #endif /* CONFIG_RFS_ACCEL */ +enum efx_xdp_tx_queues_mode { + EFX_XDP_TX_QUEUES_DEDICATED, /* one queue per core, locking not needed */ + EFX_XDP_TX_QUEUES_SHARED, /* each queue used by more than 1 core */ + EFX_XDP_TX_QUEUES_BORROWED /* queues borrowed from net stack */ +}; + /** * struct efx_nic - an Efx NIC * @name: Device name (net device name or bus id before net device registered) @@ -820,6 +826,7 @@ struct efx_async_filter_insertion { * should be allocated for this NIC * @xdp_tx_queue_count: Number of entries in %xdp_tx_queues. * @xdp_tx_queues: Array of pointers to tx queues used for XDP transmit. + * @xdp_txq_queues_mode: XDP TX queues sharing strategy. * @rxq_entries: Size of receive queues requested by user. * @txq_entries: Size of transmit queues requested by user. 
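[Editor's note] The loop added at the end of efx_set_channels() above wraps the leftover per-CPU slots back onto the queues that were actually created, so every CPU indexes a valid entry even when channels are scarce. A toy version of that table fill; the counts are made up:

#include <stdio.h>

#define MAX_CPUS 16

int main(void)
{
	int xdp_tx_queue_count = 8;	/* one slot wanted per CPU */
	int created = 3;		/* queues the NIC really gave us */
	int xdp_tx_queues[MAX_CPUS];
	int assigned, next = 0;

	for (assigned = 0; assigned < created; assigned++)
		xdp_tx_queues[assigned] = assigned;

	/* CPUs beyond the created queues share the existing ones, round robin */
	while (assigned < xdp_tx_queue_count)
		xdp_tx_queues[assigned++] = xdp_tx_queues[next++];

	for (int cpu = 0; cpu < xdp_tx_queue_count; cpu++)
		printf("cpu %d -> txq %d\n", cpu, xdp_tx_queues[cpu]);
	return 0;
}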
* @txq_stop_thresh: TX queue fill level at or above which we stop it. @@ -979,6 +986,7 @@ struct efx_nic { unsigned int xdp_tx_queue_count; struct efx_tx_queue **xdp_tx_queues; + enum efx_xdp_tx_queues_mode xdp_txq_queues_mode; unsigned rxq_entries; unsigned txq_entries; diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 0c6650d2e239..d16e031e95f4 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -428,23 +428,32 @@ int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs, unsigned int len; int space; int cpu; - int i; + int i = 0; - cpu = raw_smp_processor_id(); + if (unlikely(n && !xdpfs)) + return -EINVAL; + if (unlikely(!n)) + return 0; - if (!efx->xdp_tx_queue_count || - unlikely(cpu >= efx->xdp_tx_queue_count)) + cpu = raw_smp_processor_id(); + if (unlikely(cpu >= efx->xdp_tx_queue_count)) return -EINVAL; tx_queue = efx->xdp_tx_queues[cpu]; if (unlikely(!tx_queue)) return -EINVAL; - if (unlikely(n && !xdpfs)) - return -EINVAL; + if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED) + HARD_TX_LOCK(efx->net_dev, tx_queue->core_txq, cpu); - if (!n) - return 0; + /* If we're borrowing net stack queues we have to handle stop-restart + * or we might block the queue and it will be considered as frozen + */ + if (efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_BORROWED) { + if (netif_tx_queue_stopped(tx_queue->core_txq)) + goto unlock; + efx_tx_maybe_stop_queue(tx_queue); + } /* Check for available space. We should never need multiple * descriptors per frame. @@ -484,6 +493,10 @@ int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs, if (flush && i > 0) efx_nic_push_buffers(tx_queue); +unlock: + if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED) + HARD_TX_UNLOCK(efx->net_dev, tx_queue->core_txq); + return i == 0 ? 
-EIO : i; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index ece02b35a6ce..553c4403258a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -309,7 +309,7 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv) priv->clk_csr = STMMAC_CSR_100_150M; else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M)) priv->clk_csr = STMMAC_CSR_150_250M; - else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M)) + else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M)) priv->clk_csr = STMMAC_CSR_250_300M; } @@ -7118,7 +7118,6 @@ int stmmac_suspend(struct device *dev) struct net_device *ndev = dev_get_drvdata(dev); struct stmmac_priv *priv = netdev_priv(ndev); u32 chan; - int ret; if (!ndev || !netif_running(ndev)) return 0; @@ -7150,13 +7149,6 @@ int stmmac_suspend(struct device *dev) } else { stmmac_mac_set(priv, priv->ioaddr, false); pinctrl_pm_select_sleep_state(priv->device); - /* Disable clock in case of PWM is off */ - clk_disable_unprepare(priv->plat->clk_ptp_ref); - ret = pm_runtime_force_suspend(dev); - if (ret) { - mutex_unlock(&priv->lock); - return ret; - } } mutex_unlock(&priv->lock); @@ -7242,12 +7234,6 @@ int stmmac_resume(struct device *dev) priv->irq_wake = 0; } else { pinctrl_pm_select_default_state(priv->device); - /* enable the clk previously disabled */ - ret = pm_runtime_force_resume(dev); - if (ret) - return ret; - if (priv->plat->clk_ptp_ref) - clk_prepare_enable(priv->plat->clk_ptp_ref); /* reset the phy so that it's ready */ if (priv->mii) stmmac_mdio_reset(priv->mii); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 5ca710844cc1..62cec9bfcd33 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -9,6 +9,7 @@ *******************************************************************************/ #include <linux/platform_device.h> +#include <linux/pm_runtime.h> #include <linux/module.h> #include <linux/io.h> #include <linux/of.h> @@ -771,9 +772,52 @@ static int __maybe_unused stmmac_runtime_resume(struct device *dev) return stmmac_bus_clks_config(priv, true); } +static int __maybe_unused stmmac_pltfr_noirq_suspend(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct stmmac_priv *priv = netdev_priv(ndev); + int ret; + + if (!netif_running(ndev)) + return 0; + + if (!device_may_wakeup(priv->device) || !priv->plat->pmt) { + /* Disable clock in case of PWM is off */ + clk_disable_unprepare(priv->plat->clk_ptp_ref); + + ret = pm_runtime_force_suspend(dev); + if (ret) + return ret; + } + + return 0; +} + +static int __maybe_unused stmmac_pltfr_noirq_resume(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct stmmac_priv *priv = netdev_priv(ndev); + int ret; + + if (!netif_running(ndev)) + return 0; + + if (!device_may_wakeup(priv->device) || !priv->plat->pmt) { + /* enable the clk previously disabled */ + ret = pm_runtime_force_resume(dev); + if (ret) + return ret; + + clk_prepare_enable(priv->plat->clk_ptp_ref); + } + + return 0; +} + const struct dev_pm_ops stmmac_pltfr_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(stmmac_pltfr_suspend, stmmac_pltfr_resume) SET_RUNTIME_PM_OPS(stmmac_runtime_suspend, stmmac_runtime_resume, NULL) + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(stmmac_pltfr_noirq_suspend, stmmac_pltfr_noirq_resume) }; 
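[Editor's note] In the sfc tx.c hunk above, XDP transmit now takes the per-queue TX lock unless the queue is dedicated to one CPU. A pthread sketch of that conditional serialization; only the mode names mirror the driver, everything else is invented:

#include <pthread.h>
#include <stdio.h>

enum txq_mode { TXQ_DEDICATED, TXQ_SHARED, TXQ_BORROWED };

struct txq {
	pthread_mutex_t lock;
	unsigned long sent;
};

/* In DEDICATED mode each CPU owns its queue, so locking is skipped; any
 * sharing mode must serialize, as efx_xdp_tx_buffers() now does with
 * HARD_TX_LOCK(). */
static void xdp_tx(struct txq *q, enum txq_mode mode)
{
	if (mode != TXQ_DEDICATED)
		pthread_mutex_lock(&q->lock);
	q->sent++;			/* the actual descriptor push */
	if (mode != TXQ_DEDICATED)
		pthread_mutex_unlock(&q->lock);
}

int main(void)
{
	struct txq q = { PTHREAD_MUTEX_INITIALIZER, 0 };

	xdp_tx(&q, TXQ_SHARED);
	printf("sent=%lu\n", q.sent);
	return 0;
}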
EXPORT_SYMBOL_GPL(stmmac_pltfr_pm_ops); diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c index 8fe8887d506a..6192244b304a 100644 --- a/drivers/net/hamradio/6pack.c +++ b/drivers/net/hamradio/6pack.c @@ -68,9 +68,9 @@ #define SIXP_DAMA_OFF 0 /* default level 2 parameters */ -#define SIXP_TXDELAY (HZ/4) /* in 1 s */ +#define SIXP_TXDELAY 25 /* 250 ms */ #define SIXP_PERSIST 50 /* in 256ths */ -#define SIXP_SLOTTIME (HZ/10) /* in 1 s */ +#define SIXP_SLOTTIME 10 /* 100 ms */ #define SIXP_INIT_RESYNC_TIMEOUT (3*HZ/2) /* in 1 s */ #define SIXP_RESYNC_TIMEOUT 5*HZ /* in 1 s */ diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c index b50b7fafd8d6..f4c3efc3e074 100644 --- a/drivers/net/hamradio/dmascc.c +++ b/drivers/net/hamradio/dmascc.c @@ -973,7 +973,7 @@ static inline void tx_on(struct scc_priv *priv) flags = claim_dma_lock(); set_dma_mode(priv->param.dma, DMA_MODE_WRITE); set_dma_addr(priv->param.dma, - (int) priv->tx_buf[priv->tx_tail] + n); + virt_to_bus(priv->tx_buf[priv->tx_tail]) + n); set_dma_count(priv->param.dma, priv->tx_len[priv->tx_tail] - n); release_dma_lock(flags); @@ -1020,7 +1020,7 @@ static inline void rx_on(struct scc_priv *priv) flags = claim_dma_lock(); set_dma_mode(priv->param.dma, DMA_MODE_READ); set_dma_addr(priv->param.dma, - (int) priv->rx_buf[priv->rx_head]); + virt_to_bus(priv->rx_buf[priv->rx_head])); set_dma_count(priv->param.dma, BUF_SIZE); release_dma_lock(flags); enable_dma(priv->param.dma); @@ -1233,7 +1233,7 @@ static void special_condition(struct scc_priv *priv, int rc) if (priv->param.dma >= 0) { flags = claim_dma_lock(); set_dma_addr(priv->param.dma, - (int) priv->rx_buf[priv->rx_head]); + virt_to_bus(priv->rx_buf[priv->rx_head])); set_dma_count(priv->param.dma, BUF_SIZE); release_dma_lock(flags); } else { diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c index 2324e1b93e37..1da334f54944 100644 --- a/drivers/net/ipa/ipa_table.c +++ b/drivers/net/ipa/ipa_table.c @@ -430,7 +430,8 @@ static void ipa_table_init_add(struct gsi_trans *trans, bool filter, * table region determines the number of entries it has. */ if (filter) { - count = hweight32(ipa->filter_map); + /* Include one extra "slot" to hold the filter map itself */ + count = 1 + hweight32(ipa->filter_map); hash_count = hash_mem->size ? count : 0; } else { count = mem->size / sizeof(__le64); diff --git a/drivers/net/phy/dp83640_reg.h b/drivers/net/phy/dp83640_reg.h index 21aa24c741b9..daae7fa58fb8 100644 --- a/drivers/net/phy/dp83640_reg.h +++ b/drivers/net/phy/dp83640_reg.h @@ -5,7 +5,7 @@ #ifndef HAVE_DP83640_REGISTERS #define HAVE_DP83640_REGISTERS -#define PAGE0 0x0000 +/* #define PAGE0 0x0000 */ #define PHYCR2 0x001c /* PHY Control Register 2 */ #define PAGE4 0x0004 diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 9e2891d8e8dd..ba5ad86ec826 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -233,9 +233,11 @@ static DEFINE_MUTEX(phy_fixup_lock); static bool mdio_bus_phy_may_suspend(struct phy_device *phydev) { + struct device_driver *drv = phydev->mdio.dev.driver; + struct phy_driver *phydrv = to_phy_driver(drv); struct net_device *netdev = phydev->attached_dev; - if (!phydev->drv->suspend) + if (!drv || !phydrv->suspend) return false; /* PHY not attached? 
May suspend if the PHY has not already been diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index a1464b764d4d..0a0abe8e4be0 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -1607,6 +1607,32 @@ int phylink_ethtool_ksettings_set(struct phylink *pl, if (config.an_enabled && phylink_is_empty_linkmode(config.advertising)) return -EINVAL; + /* If this link is with an SFP, ensure that changes to advertised modes + * also cause the associated interface to be selected such that the + * link can be configured correctly. + */ + if (pl->sfp_port && pl->sfp_bus) { + config.interface = sfp_select_interface(pl->sfp_bus, + config.advertising); + if (config.interface == PHY_INTERFACE_MODE_NA) { + phylink_err(pl, + "selection of interface failed, advertisement %*pb\n", + __ETHTOOL_LINK_MODE_MASK_NBITS, + config.advertising); + return -EINVAL; + } + + /* Revalidate with the selected interface */ + linkmode_copy(support, pl->supported); + if (phylink_validate(pl, support, &config)) { + phylink_err(pl, "validation of %s/%s with support %*pb failed\n", + phylink_an_mode_str(pl->cur_link_an_mode), + phy_modes(config.interface), + __ETHTOOL_LINK_MODE_MASK_NBITS, support); + return -EINVAL; + } + } + mutex_lock(&pl->state_mutex); pl->link_config.speed = config.speed; pl->link_config.duplex = config.duplex; @@ -2186,7 +2212,9 @@ static int phylink_sfp_config(struct phylink *pl, u8 mode, if (phy_interface_mode_is_8023z(iface) && pl->phydev) return -EINVAL; - changed = !linkmode_equal(pl->supported, support); + changed = !linkmode_equal(pl->supported, support) || + !linkmode_equal(pl->link_config.advertising, + config.advertising); if (changed) { linkmode_copy(pl->supported, support); linkmode_copy(pl->link_config.advertising, config.advertising); diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile index f6b92efffc94..480bcd1f6c1c 100644 --- a/drivers/net/wan/Makefile +++ b/drivers/net/wan/Makefile @@ -34,6 +34,8 @@ obj-$(CONFIG_SLIC_DS26522) += slic_ds26522.o clean-files := wanxlfw.inc $(obj)/wanxl.o: $(obj)/wanxlfw.inc +CROSS_COMPILE_M68K = m68k-linux-gnu- + ifeq ($(CONFIG_WANXL_BUILD_FIRMWARE),y) ifeq ($(ARCH),m68k) M68KCC = $(CC) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 7efb31b87f37..6600e138945e 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -3524,7 +3524,9 @@ static struct nvme_ns_head *nvme_find_ns_head(struct nvme_subsystem *subsys, lockdep_assert_held(&subsys->lock); list_for_each_entry(h, &subsys->nsheads, entry) { - if (h->ns_id == nsid && nvme_tryget_ns_head(h)) + if (h->ns_id != nsid) + continue; + if (!list_empty(&h->list) && nvme_tryget_ns_head(h)) return h; } @@ -3843,6 +3845,10 @@ static void nvme_ns_remove(struct nvme_ns *ns) mutex_lock(&ns->ctrl->subsys->lock); list_del_rcu(&ns->siblings); + if (list_empty(&ns->head->list)) { + list_del_init(&ns->head->entry); + last_path = true; + } mutex_unlock(&ns->ctrl->subsys->lock); /* guarantee not available in head->list */ @@ -3856,20 +3862,11 @@ static void nvme_ns_remove(struct nvme_ns *ns) nvme_cdev_del(&ns->cdev, &ns->cdev_device); del_gendisk(ns->disk); blk_cleanup_queue(ns->queue); - if (blk_get_integrity(ns->disk)) - blk_integrity_unregister(ns->disk); down_write(&ns->ctrl->namespaces_rwsem); list_del_init(&ns->list); up_write(&ns->ctrl->namespaces_rwsem); - /* Synchronize with nvme_init_ns_head() */ - mutex_lock(&ns->head->subsys->lock); - if (list_empty(&ns->head->list)) { - list_del_init(&ns->head->entry); - last_path = true; - } - 
mutex_unlock(&ns->head->subsys->lock); if (last_path) nvme_mpath_shutdown_disk(ns->head); nvme_put_ns(ns); diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 5d7bc58a27bd..e8ccdd398f78 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -600,14 +600,17 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl, down_read(&ctrl->namespaces_rwsem); list_for_each_entry(ns, &ctrl->namespaces, list) { - unsigned nsid = le32_to_cpu(desc->nsids[n]); - + unsigned nsid; +again: + nsid = le32_to_cpu(desc->nsids[n]); if (ns->head->ns_id < nsid) continue; if (ns->head->ns_id == nsid) nvme_update_ns_ana_state(desc, ns); if (++n == nr_nsids) break; + if (ns->head->ns_id > nsid) + goto again; } up_read(&ctrl->namespaces_rwsem); return 0; diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index a68704e39084..042c594bc57e 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -656,8 +656,8 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue) if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags)) return; - nvme_rdma_destroy_queue_ib(queue); rdma_destroy_id(queue->cm_id); + nvme_rdma_destroy_queue_ib(queue); mutex_destroy(&queue->queue_lock); } @@ -1815,14 +1815,10 @@ static int nvme_rdma_conn_established(struct nvme_rdma_queue *queue) for (i = 0; i < queue->queue_size; i++) { ret = nvme_rdma_post_recv(queue, &queue->rsp_ring[i]); if (ret) - goto out_destroy_queue_ib; + return ret; } return 0; - -out_destroy_queue_ib: - nvme_rdma_destroy_queue_ib(queue); - return ret; } static int nvme_rdma_conn_rejected(struct nvme_rdma_queue *queue, @@ -1916,14 +1912,10 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue) if (ret) { dev_err(ctrl->ctrl.device, "rdma_connect_locked failed (%d).\n", ret); - goto out_destroy_queue_ib; + return ret; } return 0; - -out_destroy_queue_ib: - nvme_rdma_destroy_queue_ib(queue); - return ret; } static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id, @@ -1954,8 +1946,6 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id, case RDMA_CM_EVENT_ROUTE_ERROR: case RDMA_CM_EVENT_CONNECT_ERROR: case RDMA_CM_EVENT_UNREACHABLE: - nvme_rdma_destroy_queue_ib(queue); - fallthrough; case RDMA_CM_EVENT_ADDR_ERROR: dev_dbg(queue->ctrl->ctrl.device, "CM error event %d\n", ev->event); diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index e2ab12f3f51c..e4249b7dc056 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -274,6 +274,12 @@ static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue) } while (ret > 0); } +static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue) +{ + return !list_empty(&queue->send_list) || + !llist_empty(&queue->req_list) || queue->more_requests; +} + static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req, bool sync, bool last) { @@ -294,9 +300,10 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req, nvme_tcp_send_all(queue); queue->more_requests = false; mutex_unlock(&queue->send_mutex); - } else if (last) { - queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); } + + if (last && nvme_tcp_queue_more(queue)) + queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); } static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue) @@ -906,12 +913,6 @@ done: read_unlock_bh(&sk->sk_callback_lock); } -static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue) -{ - return !list_empty(&queue->send_list) || - 
!llist_empty(&queue->req_list) || queue->more_requests; -} - static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue) { queue->request = NULL; @@ -1145,8 +1146,7 @@ static void nvme_tcp_io_work(struct work_struct *w) pending = true; else if (unlikely(result < 0)) break; - } else - pending = !llist_empty(&queue->req_list); + } result = nvme_tcp_try_recv(queue); if (result > 0) diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c index d784f3c200b4..be5d82421e3a 100644 --- a/drivers/nvme/target/configfs.c +++ b/drivers/nvme/target/configfs.c @@ -1067,7 +1067,7 @@ static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item, { struct nvmet_subsys *subsys = to_subsys(item); - return snprintf(page, PAGE_SIZE, "%*s\n", + return snprintf(page, PAGE_SIZE, "%.*s\n", NVMET_SN_MAX_SIZE, subsys->serial); } diff --git a/drivers/of/device.c b/drivers/of/device.c index 5b043ee30824..b0800c260f64 100644 --- a/drivers/of/device.c +++ b/drivers/of/device.c @@ -85,7 +85,11 @@ of_dma_set_restricted_buffer(struct device *dev, struct device_node *np) break; } - if (i != count && of_reserved_mem_device_init_by_idx(dev, of_node, i)) + /* + * Attempt to initialize a restricted-dma-pool region if one was found. + * Note that count can hold a negative error code. + */ + if (i < count && of_reserved_mem_device_init_by_idx(dev, of_node, i)) dev_warn(dev, "failed to initialise \"restricted-dma-pool\" memory node\n"); } diff --git a/drivers/of/property.c b/drivers/of/property.c index 3fd74bb34819..a3483484a5a2 100644 --- a/drivers/of/property.c +++ b/drivers/of/property.c @@ -1291,7 +1291,6 @@ DEFINE_SIMPLE_PROP(pwms, "pwms", "#pwm-cells") DEFINE_SIMPLE_PROP(resets, "resets", "#reset-cells") DEFINE_SIMPLE_PROP(leds, "leds", NULL) DEFINE_SIMPLE_PROP(backlight, "backlight", NULL) -DEFINE_SIMPLE_PROP(phy_handle, "phy-handle", NULL) DEFINE_SUFFIX_PROP(regulators, "-supply", NULL) DEFINE_SUFFIX_PROP(gpio, "-gpio", "#gpio-cells") @@ -1380,7 +1379,6 @@ static const struct supplier_bindings of_supplier_bindings[] = { { .parse_prop = parse_resets, }, { .parse_prop = parse_leds, }, { .parse_prop = parse_backlight, }, - { .parse_prop = parse_phy_handle, }, { .parse_prop = parse_gpio_compat, }, { .parse_prop = parse_interrupts, }, { .parse_prop = parse_regulators, }, diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index a1b1e2a01632..0f40943a9a18 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -937,7 +937,7 @@ static struct acpi_device *acpi_pci_find_companion(struct device *dev); void pci_set_acpi_fwnode(struct pci_dev *dev) { - if (!ACPI_COMPANION(&dev->dev) && !pci_dev_is_added(dev)) + if (!dev_fwnode(&dev->dev) && !pci_dev_is_added(dev)) ACPI_COMPANION_SET(&dev->dev, acpi_pci_find_companion(&dev->dev)); } diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index e5089af8ad90..4537d1ea14fd 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -5435,7 +5435,7 @@ DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda); /* - * Create device link for NVIDIA GPU with integrated USB xHCI Host + * Create device link for GPUs with integrated USB xHCI Host * controller to VGA. 
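[Editor's note] The nvmet configfs fix above swaps a field-width specifier for a precision: "%*s" only pads a NUL-terminated string out to N characters, while "%.*s" caps how many bytes printf reads, which is what a fixed-size, not-necessarily-terminated serial buffer needs. A runnable illustration:

#include <stdio.h>

#define SN_MAX 20

int main(void)
{
	/* deliberately not NUL-terminated, like the subsystem serial field */
	char serial[SN_MAX] = "0123456789abcdefghij";

	/* "%.*s" stops after SN_MAX bytes: safe on an unterminated buffer */
	printf("[%.*s]\n", SN_MAX, serial);

	/* "%*s" only sets a minimum width; printf would keep scanning past
	 * the array for a NUL byte (undefined behaviour), so it is shown
	 * here with a terminated string instead. */
	char terminated[] = "abc";
	printf("[%*s]\n", SN_MAX, terminated);	/* right-padded to 20 */
	return 0;
}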
*/ static void quirk_gpu_usb(struct pci_dev *usb) @@ -5444,9 +5444,11 @@ static void quirk_gpu_usb(struct pci_dev *usb) } DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, PCI_CLASS_SERIAL_USB, 8, quirk_gpu_usb); +DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID, + PCI_CLASS_SERIAL_USB, 8, quirk_gpu_usb); /* - * Create device link for NVIDIA GPU with integrated Type-C UCSI controller + * Create device link for GPUs with integrated Type-C UCSI controller * to VGA. Currently there is no class code defined for UCSI device over PCI * so using UNKNOWN class for now and it will be updated when UCSI * over PCI gets a class code. @@ -5459,6 +5461,9 @@ static void quirk_gpu_usb_typec_ucsi(struct pci_dev *ucsi) DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, PCI_CLASS_SERIAL_UNKNOWN, 8, quirk_gpu_usb_typec_ucsi); +DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID, + PCI_CLASS_SERIAL_UNKNOWN, 8, + quirk_gpu_usb_typec_ucsi); /* * Enable the NVIDIA GPU integrated HDA controller if the BIOS left it diff --git a/drivers/pci/vpd.c b/drivers/pci/vpd.c index 25557b272a4f..4be24890132e 100644 --- a/drivers/pci/vpd.c +++ b/drivers/pci/vpd.c @@ -99,6 +99,24 @@ error: return off ?: PCI_VPD_SZ_INVALID; } +static bool pci_vpd_available(struct pci_dev *dev) +{ + struct pci_vpd *vpd = &dev->vpd; + + if (!vpd->cap) + return false; + + if (vpd->len == 0) { + vpd->len = pci_vpd_size(dev); + if (vpd->len == PCI_VPD_SZ_INVALID) { + vpd->cap = 0; + return false; + } + } + + return true; +} + /* * Wait for last operation to complete. * This code has to spin since there is no other notification from the PCI @@ -145,7 +163,7 @@ static ssize_t pci_vpd_read(struct pci_dev *dev, loff_t pos, size_t count, loff_t end = pos + count; u8 *buf = arg; - if (!vpd->cap) + if (!pci_vpd_available(dev)) return -ENODEV; if (pos < 0) @@ -206,7 +224,7 @@ static ssize_t pci_vpd_write(struct pci_dev *dev, loff_t pos, size_t count, loff_t end = pos + count; int ret = 0; - if (!vpd->cap) + if (!pci_vpd_available(dev)) return -ENODEV; if (pos < 0 || (pos & 3) || (count & 3)) @@ -242,14 +260,11 @@ static ssize_t pci_vpd_write(struct pci_dev *dev, loff_t pos, size_t count, void pci_vpd_init(struct pci_dev *dev) { + if (dev->vpd.len == PCI_VPD_SZ_INVALID) + return; + dev->vpd.cap = pci_find_capability(dev, PCI_CAP_ID_VPD); mutex_init(&dev->vpd.lock); - - if (!dev->vpd.len) - dev->vpd.len = pci_vpd_size(dev); - - if (dev->vpd.len == PCI_VPD_SZ_INVALID) - dev->vpd.cap = 0; } static ssize_t vpd_read(struct file *filp, struct kobject *kobj, @@ -294,13 +309,14 @@ const struct attribute_group pci_dev_vpd_attr_group = { void *pci_vpd_alloc(struct pci_dev *dev, unsigned int *size) { - unsigned int len = dev->vpd.len; + unsigned int len; void *buf; int cnt; - if (!dev->vpd.cap) + if (!pci_vpd_available(dev)) return ERR_PTR(-ENODEV); + len = dev->vpd.len; buf = kmalloc(len, GFP_KERNEL); if (!buf) return ERR_PTR(-ENOMEM); diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index eb15067a605e..4eb53412b808 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c @@ -1047,7 +1047,9 @@ static void cmos_check_wkalrm(struct device *dev) * ACK the rtc irq here */ if (t_now >= cmos->alarm_expires && cmos_use_acpi_alarm()) { + local_irq_disable(); cmos_interrupt(0, (void *)cmos->rtc); + local_irq_enable(); return; } diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c index 2f3515fa242a..f3d5c7f4c13d 100644 --- a/drivers/s390/char/sclp_early.c +++ 
b/drivers/s390/char/sclp_early.c @@ -45,13 +45,14 @@ static void __init sclp_early_facilities_detect(void) sclp.has_gisaf = !!(sccb->fac118 & 0x08); sclp.has_hvs = !!(sccb->fac119 & 0x80); sclp.has_kss = !!(sccb->fac98 & 0x01); - sclp.has_sipl = !!(sccb->cbl & 0x4000); if (sccb->fac85 & 0x02) S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP; if (sccb->fac91 & 0x40) S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_GUEST; if (sccb->cpuoff > 134) sclp.has_diag318 = !!(sccb->byte_134 & 0x80); + if (sccb->cpuoff > 137) + sclp.has_sipl = !!(sccb->cbl & 0x4000); sclp.rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2; sclp.rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2; sclp.rzm <<= 20; diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index f433428057d9..d9b804943d19 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -213,7 +213,6 @@ static inline int ap_fetch_qci_info(struct ap_config_info *info) * ap_init_qci_info(): Allocate and query qci config info. * Does also update the static variables ap_max_domain_id * and ap_max_adapter_id if this info is available. - * */ static void __init ap_init_qci_info(void) { @@ -439,6 +438,7 @@ static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused) /** * ap_interrupt_handler() - Schedule ap_tasklet on interrupt * @airq: pointer to adapter interrupt descriptor + * @floating: ignored */ static void ap_interrupt_handler(struct airq_struct *airq, bool floating) { @@ -1786,6 +1786,7 @@ static inline void ap_scan_adapter(int ap) /** * ap_scan_bus(): Scan the AP bus for new devices * Runs periodically, workqueue timer (ap_config_time) + * @unused: Unused pointer. */ static void ap_scan_bus(struct work_struct *unused) { diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c index d70c4d3d0907..9ea48bf0ee40 100644 --- a/drivers/s390/crypto/ap_queue.c +++ b/drivers/s390/crypto/ap_queue.c @@ -20,7 +20,7 @@ static void __ap_flush_queue(struct ap_queue *aq); /** * ap_queue_enable_irq(): Enable interrupt support on this AP queue. - * @qid: The AP queue number + * @aq: The AP queue * @ind: the notification indicator byte * * Enables interruption on AP queue via ap_aqic(). Based on the return @@ -311,7 +311,7 @@ static enum ap_sm_wait ap_sm_read_write(struct ap_queue *aq) /** * ap_sm_reset(): Reset an AP queue. - * @qid: The AP queue number + * @aq: The AP queue * * Submit the Reset command to an AP queue. 
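[Editor's note] Returning to the PCI VPD change above: size probing moves from device init to first access, and the new helper caches the computed size, poisoning the capability when probing fails so later accesses fail fast. A userspace sketch of that lazy-init-with-invalid-marker pattern; probe_size() and the constants are stand-ins:

#include <stdio.h>

#define SZ_INVALID ((unsigned int)~0)	/* stand-in for PCI_VPD_SZ_INVALID */

struct vpd {
	int cap;		/* 0 means "no VPD capability" */
	unsigned int len;	/* 0 means "not probed yet" */
};

static unsigned int probe_size(void) { return 256; }	/* pci_vpd_size() stand-in */

/* First call computes and caches the size; an invalid result disables the
 * capability, mirroring pci_vpd_available(). */
static int vpd_available(struct vpd *vpd)
{
	if (!vpd->cap)
		return 0;
	if (vpd->len == 0) {
		vpd->len = probe_size();
		if (vpd->len == SZ_INVALID) {
			vpd->cap = 0;
			return 0;
		}
	}
	return 1;
}

int main(void)
{
	struct vpd v = { .cap = 1, .len = 0 };

	printf("available=%d len=%u\n", vpd_available(&v), v.len);
	return 0;
}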
*/ diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c index 8ce840c7ecc8..3226c4e1c7c0 100644 --- a/drivers/spi/spi-tegra20-slink.c +++ b/drivers/spi/spi-tegra20-slink.c @@ -1182,8 +1182,7 @@ static int tegra_slink_resume(struct device *dev) } #endif -#ifdef CONFIG_PM -static int tegra_slink_runtime_suspend(struct device *dev) +static int __maybe_unused tegra_slink_runtime_suspend(struct device *dev) { struct spi_master *master = dev_get_drvdata(dev); struct tegra_slink_data *tspi = spi_master_get_devdata(master); @@ -1195,7 +1194,7 @@ static int tegra_slink_runtime_suspend(struct device *dev) return 0; } -static int tegra_slink_runtime_resume(struct device *dev) +static int __maybe_unused tegra_slink_runtime_resume(struct device *dev) { struct spi_master *master = dev_get_drvdata(dev); struct tegra_slink_data *tspi = spi_master_get_devdata(master); @@ -1208,7 +1207,6 @@ static int tegra_slink_runtime_resume(struct device *dev) } return 0; } -#endif /* CONFIG_PM */ static const struct dev_pm_ops slink_pm_ops = { SET_RUNTIME_PM_OPS(tegra_slink_runtime_suspend, diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 3a249ee7e144..28ef323882fb 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -467,7 +467,7 @@ static void vhost_tx_batch(struct vhost_net *net, .num = nvq->batched_xdp, .ptr = nvq->xdp, }; - int err; + int i, err; if (nvq->batched_xdp == 0) goto signal_used; @@ -476,6 +476,15 @@ static void vhost_tx_batch(struct vhost_net *net, err = sock->ops->sendmsg(sock, msghdr, 0); if (unlikely(err < 0)) { vq_err(&nvq->vq, "Fail to batch sending packets\n"); + + /* free pages owned by XDP; since this is an unlikely error path, + * keep it simple and avoid more complex bulk update for the + * used pages + */ + for (i = 0; i < nvq->batched_xdp; ++i) + put_page(virt_to_head_page(nvq->xdp[i].data)); + nvq->batched_xdp = 0; + nvq->done_idx = 0; return; } diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig index d33c5cd684c0..b26b79dfcac9 100644 --- a/drivers/video/fbdev/Kconfig +++ b/drivers/video/fbdev/Kconfig @@ -582,7 +582,9 @@ config FB_HP300 config FB_TGA tristate "TGA/SFB+ framebuffer support" - depends on FB && (ALPHA || TC) + depends on FB + depends on PCI || TC + depends on ALPHA || TC select FB_CFB_FILLRECT select FB_CFB_COPYAREA select FB_CFB_IMAGEBLIT diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig index 5f1ce59b44b9..a37eb52fb401 100644 --- a/drivers/xen/Kconfig +++ b/drivers/xen/Kconfig @@ -214,7 +214,7 @@ config XEN_PVCALLS_FRONTEND implements them. config XEN_PVCALLS_BACKEND - bool "XEN PV Calls backend driver" + tristate "XEN PV Calls backend driver" depends on INET && XEN && XEN_BACKEND help Experimental backend for the Xen PV Calls protocol diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index 671c71245a7b..2d2803883306 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -43,6 +43,8 @@ #include <linux/sched.h> #include <linux/cred.h> #include <linux/errno.h> +#include <linux/freezer.h> +#include <linux/kthread.h> #include <linux/mm.h> #include <linux/memblock.h> #include <linux/pagemap.h> @@ -115,7 +117,7 @@ static struct ctl_table xen_root[] = { #define EXTENT_ORDER (fls(XEN_PFN_PER_PAGE) - 1) /* - * balloon_process() state: + * balloon_thread() state: * * BP_DONE: done or nothing to do, * BP_WAIT: wait to be rescheduled, @@ -130,6 +132,8 @@ enum bp_state { BP_ECANCELED }; +/* Main waiting point for xen-balloon thread. 
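[Editor's note] The vhost/net fix above releases the page references held by the batched XDP buffers when sendmsg() fails and resets the batch counters; previously those pages leaked. A userspace sketch of draining an owned batch on the failure path, with buffer types and names invented for illustration:

#include <stdio.h>
#include <stdlib.h>

#define BATCH 4

struct xdp_buff { void *data; };

static int send_batch(struct xdp_buff *xdp, int *batched, int fail)
{
	if (fail) {
		/* on error we still own every buffer in the batch: drop each
		 * reference (put_page() in the driver) and reset the count */
		for (int i = 0; i < *batched; i++)
			free(xdp[i].data);
		*batched = 0;
		return -1;
	}
	*batched = 0;	/* on success the socket consumed the buffers */
	return 0;
}

int main(void)
{
	struct xdp_buff xdp[BATCH];
	int batched = BATCH;

	for (int i = 0; i < BATCH; i++)
		xdp[i].data = malloc(128);
	if (send_batch(xdp, &batched, 1))
		fprintf(stderr, "send failed, batch of %d released\n", BATCH);
	return 0;
}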
*/ +static DECLARE_WAIT_QUEUE_HEAD(balloon_thread_wq); static DEFINE_MUTEX(balloon_mutex); @@ -144,10 +148,6 @@ static xen_pfn_t frame_list[PAGE_SIZE / sizeof(xen_pfn_t)]; static LIST_HEAD(ballooned_pages); static DECLARE_WAIT_QUEUE_HEAD(balloon_wq); -/* Main work function, always executed in process context. */ -static void balloon_process(struct work_struct *work); -static DECLARE_DELAYED_WORK(balloon_worker, balloon_process); - /* When ballooning out (allocating memory to return to Xen) we don't really want the kernel to try too hard since that can trigger the oom killer. */ #define GFP_BALLOON \ @@ -366,7 +366,7 @@ static void xen_online_page(struct page *page, unsigned int order) static int xen_memory_notifier(struct notifier_block *nb, unsigned long val, void *v) { if (val == MEM_ONLINE) - schedule_delayed_work(&balloon_worker, 0); + wake_up(&balloon_thread_wq); return NOTIFY_OK; } @@ -491,18 +491,43 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp) } /* - * As this is a work item it is guaranteed to run as a single instance only. + * Stop waiting if either state is not BP_EAGAIN and ballooning action is + * needed, or if the credit has changed while state is BP_EAGAIN. + */ +static bool balloon_thread_cond(enum bp_state state, long credit) +{ + if (state != BP_EAGAIN) + credit = 0; + + return current_credit() != credit || kthread_should_stop(); +} + +/* + * As this is a kthread it is guaranteed to run as a single instance only. * We may of course race updates of the target counts (which are protected * by the balloon lock), or with changes to the Xen hard limit, but we will * recover from these in time. */ -static void balloon_process(struct work_struct *work) +static int balloon_thread(void *unused) { enum bp_state state = BP_DONE; long credit; + unsigned long timeout; + + set_freezable(); + for (;;) { + if (state == BP_EAGAIN) + timeout = balloon_stats.schedule_delay * HZ; + else + timeout = 3600 * HZ; + credit = current_credit(); + wait_event_interruptible_timeout(balloon_thread_wq, + balloon_thread_cond(state, credit), timeout); + + if (kthread_should_stop()) + return 0; - do { mutex_lock(&balloon_mutex); credit = current_credit(); @@ -529,12 +554,7 @@ static void balloon_process(struct work_struct *work) mutex_unlock(&balloon_mutex); cond_resched(); - - } while (credit && state == BP_DONE); - - /* Schedule more work if there is some still to be done. */ - if (state == BP_EAGAIN) - schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ); + } } /* Resets the Xen limit, sets new target, and kicks off processing. */ @@ -542,7 +562,7 @@ void balloon_set_new_target(unsigned long target) { /* No need for lock. Not read-modify-write updates. */ balloon_stats.target_pages = target; - schedule_delayed_work(&balloon_worker, 0); + wake_up(&balloon_thread_wq); } EXPORT_SYMBOL_GPL(balloon_set_new_target); @@ -647,7 +667,7 @@ void free_xenballooned_pages(int nr_pages, struct page **pages) /* The balloon may be too large now. Shrink it if needed. 
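[Editor's note] The balloon conversion above replaces a self-rescheduling work item with a kthread that sleeps on a wait queue with a timeout and re-evaluates the credit whenever it wakes. A deterministic pthread sketch of that loop shape; the condvar plays the role of balloon_thread_wq and everything else is a stand-in:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wq = PTHREAD_COND_INITIALIZER;
static long target_pages, current_pages;
static bool stop;

/* Sleep until woken or a timeout fires, then re-check and do the work. */
static void *balloon_thread(void *unused)
{
	(void)unused;
	pthread_mutex_lock(&lock);
	for (;;) {
		while (current_pages == target_pages && !stop) {
			struct timespec ts;

			clock_gettime(CLOCK_REALTIME, &ts);
			ts.tv_sec += 1;	/* periodic re-check, like the HZ timeout */
			pthread_cond_timedwait(&wq, &lock, &ts);
		}
		if (current_pages != target_pages)
			current_pages = target_pages;	/* inflate/deflate stand-in */
		if (stop)
			break;
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, balloon_thread, NULL);
	pthread_mutex_lock(&lock);
	target_pages = 128;		/* balloon_set_new_target() */
	stop = true;			/* kthread_should_stop() stand-in */
	pthread_cond_signal(&wq);	/* wake_up(&balloon_thread_wq) */
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	printf("current_pages=%ld\n", current_pages);
	return 0;
}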
*/ if (current_credit()) - schedule_delayed_work(&balloon_worker, 0); + wake_up(&balloon_thread_wq); mutex_unlock(&balloon_mutex); } @@ -679,6 +699,8 @@ static void __init balloon_add_region(unsigned long start_pfn, static int __init balloon_init(void) { + struct task_struct *task; + if (!xen_domain()) return -ENODEV; @@ -722,6 +744,12 @@ static int __init balloon_init(void) } #endif + task = kthread_run(balloon_thread, NULL, "xen-balloon"); + if (IS_ERR(task)) { + pr_err("xen-balloon thread could not be started, ballooning will not work!\n"); + return PTR_ERR(task); + } + /* Init the xen-balloon driver. */ xen_balloon_init(); diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index 643fe440c46e..8c10edf9efe6 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -106,27 +106,26 @@ static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr) static int xen_swiotlb_fixup(void *buf, unsigned long nslabs) { - int i, rc; - int dma_bits; + int rc; + unsigned int order = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT); + unsigned int i, dma_bits = order + PAGE_SHIFT; dma_addr_t dma_handle; phys_addr_t p = virt_to_phys(buf); - dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT; + BUILD_BUG_ON(IO_TLB_SEGSIZE & (IO_TLB_SEGSIZE - 1)); + BUG_ON(nslabs % IO_TLB_SEGSIZE); i = 0; do { - int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE); - do { rc = xen_create_contiguous_region( - p + (i << IO_TLB_SHIFT), - get_order(slabs << IO_TLB_SHIFT), + p + (i << IO_TLB_SHIFT), order, dma_bits, &dma_handle); } while (rc && dma_bits++ < MAX_DMA_BITS); if (rc) return rc; - i += slabs; + i += IO_TLB_SEGSIZE; } while (i < nslabs); return 0; } @@ -153,9 +152,7 @@ static const char *xen_swiotlb_error(enum xen_swiotlb_err err) return ""; } -#define DEFAULT_NSLABS ALIGN(SZ_64M >> IO_TLB_SHIFT, IO_TLB_SEGSIZE) - -int __ref xen_swiotlb_init(void) +int xen_swiotlb_init(void) { enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN; unsigned long bytes = swiotlb_size_or_default(); @@ -185,7 +182,7 @@ retry: order--; } if (!start) - goto error; + goto exit; if (order != get_order(bytes)) { pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n", (PAGE_SIZE << order) >> 20); @@ -208,15 +205,15 @@ retry: swiotlb_set_max_segment(PAGE_SIZE); return 0; error: - if (repeat--) { + if (nslabs > 1024 && repeat--) { /* Min is 2MB */ - nslabs = max(1024UL, (nslabs >> 1)); - pr_info("Lowering to %luMB\n", - (nslabs << IO_TLB_SHIFT) >> 20); + nslabs = max(1024UL, ALIGN(nslabs >> 1, IO_TLB_SEGSIZE)); + bytes = nslabs << IO_TLB_SHIFT; + pr_info("Lowering to %luMB\n", bytes >> 20); goto retry; } +exit: pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc); - free_pages((unsigned long)start, order); return rc; } @@ -244,9 +241,9 @@ retry: rc = xen_swiotlb_fixup(start, nslabs); if (rc) { memblock_free(__pa(start), PAGE_ALIGN(bytes)); - if (repeat--) { + if (nslabs > 1024 && repeat--) { /* Min is 2MB */ - nslabs = max(1024UL, (nslabs >> 1)); + nslabs = max(1024UL, ALIGN(nslabs >> 1, IO_TLB_SEGSIZE)); bytes = nslabs << IO_TLB_SHIFT; pr_info("Lowering to %luMB\n", bytes >> 20); goto retry; @@ -254,7 +251,7 @@ retry: panic("%s (rc:%d)", xen_swiotlb_error(XEN_SWIOTLB_EFIXUP), rc); } - if (swiotlb_init_with_tbl(start, nslabs, false)) + if (swiotlb_init_with_tbl(start, nslabs, true)) panic("Cannot allocate SWIOTLB buffer"); swiotlb_set_max_segment(PAGE_SIZE); } |
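[Editor's note] The xen-swiotlb retry path above now halves the slab count on failure while keeping it IO_TLB_SEGSIZE-aligned and never dropping below the 2MB floor. A small userspace model of that fallback loop; malloc stands in for the contiguous-region allocation and the sizes are illustrative:

#include <stdio.h>
#include <stdlib.h>

#define IO_TLB_SHIFT	11		/* 2KB slabs, as in the kernel */
#define IO_TLB_SEGSIZE	128UL
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long nslabs = 32768;	/* 64MB to start */
	int repeat = 4;
	void *start;

	for (;;) {
		start = malloc(nslabs << IO_TLB_SHIFT);	/* region stand-in */
		if (start)
			break;
		if (nslabs <= 1024 || !repeat--)	/* min is 2MB */
			return 1;
		nslabs = ALIGN_UP(nslabs >> 1, IO_TLB_SEGSIZE);
		printf("Lowering to %luMB\n", (nslabs << IO_TLB_SHIFT) >> 20);
	}
	printf("using %luMB software IO TLB\n", (nslabs << IO_TLB_SHIFT) >> 20);
	free(start);
	return 0;
}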